/*
 *  kernel/sched.c
 *
 *  Kernel scheduler and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *
 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
 *		make semaphores SMP safe
 *  1998-11-19	Implemented schedule_timeout() and related stuff
 *		by Andrea Arcangeli
 *  2002-01-04	New ultra-scalable O(1) scheduler by Ingo Molnar:
 *		hybrid priority-list and round-robin design with
 *		an array-switch method of distributing timeslices
 *		and per-CPU runqueues.  Cleanups and useful suggestions
 *		by Davide Libenzi, preemptible kernel bits by Robert Love.
 *  2003-09-03	Interactivity tuning by Con Kolivas.
 *  2004-04-02	Scheduler domains code by Nick Piggin
 *  2007-04-15  Work begun on replacing all interactivity tuning with a
 *              fair scheduling design by Con Kolivas.
 *  2007-05-05  Load balancing (smp-nice) and other improvements
 *              by Peter Williams
 *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
 *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
 *  2007-11-29  RT balancing improvements by Steven Rostedt, Gregory Haskins,
 *              Thomas Gleixner, Mike Kravetz
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/smp_lock.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
#include <linux/perf_event.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/freezer.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/pid_namespace.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>
#include <linux/sysctl.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/debugfs.h>
#include <linux/ctype.h>
#include <linux/ftrace.h>
#include <linux/slab.h>

#include <asm/tlb.h>
#include <asm/irq_regs.h>

#include "sched_cpupri.h"
#include "workqueue_sched.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>

/*
 * Convert user-nice values [ -20 ... 0 ... 19 ]
 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
 * and back.
 */
#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio)	((prio) - MAX_RT_PRIO - 20)
#define TASK_NICE(p)		PRIO_TO_NICE((p)->static_prio)

/*
 * 'User priority' is the nice value converted to something we
 * can work with better when scaling various scheduler parameters;
 * it's a [ 0 ... 39 ] range.
 */
#define USER_PRIO(p)		((p)-MAX_RT_PRIO)
#define TASK_USER_PRIO(p)	USER_PRIO((p)->static_prio)
#define MAX_USER_PRIO		(USER_PRIO(MAX_PRIO))
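
/*
 * Worked example (assuming MAX_RT_PRIO == 100 and MAX_PRIO == 140, as
 * defined in include/linux/sched.h): nice -20 maps to static priority 100,
 * nice 0 to 120 and nice 19 to 139, so NICE_TO_PRIO()/PRIO_TO_NICE() are
 * exact inverses over that range, and USER_PRIO() shifts the result into
 * the [ 0 ... 39 ] range used below (MAX_USER_PRIO == 40).
 */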

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))

#define NICE_0_LOAD		SCHED_LOAD_SCALE
#define NICE_0_SHIFT		SCHED_LOAD_SHIFT

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * default timeslice is 100 msecs (used only for SCHED_RR tasks).
 * Timeslices get refilled after they expire.
 */
#define DEF_TIMESLICE		(100 * HZ / 1000)
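
/*
 * For example, with HZ == 1000 DEF_TIMESLICE evaluates to 100 jiffies and
 * with HZ == 250 to 25 jiffies; either way it corresponds to 100 ms of
 * wall-clock time per SCHED_RR timeslice.
 */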

/*
 * single value that denotes runtime == period, i.e. unlimited time.
 */
#define RUNTIME_INF	((u64)~0ULL)

static inline int rt_policy(int policy)
{
	if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
		return 1;
	return 0;
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};
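
/*
 * Sketch of how the RT class (see sched_rt.c) uses this structure: a task
 * with rt priority p is linked on queue[p] and bit p is set in the bitmap,
 * so picking the next task is a sched_find_first_bit() over the bitmap
 * followed by taking the first entry of that list. The extra delimiter
 * bit at MAX_RT_PRIO stays set so the bit search always terminates.
 */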

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
};

static struct rt_bandwidth def_rt_bandwidth;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
	struct rt_bandwidth *rt_b =
		container_of(timer, struct rt_bandwidth, rt_period_timer);
	ktime_t now;
	int overrun;
	int idle = 0;

	for (;;) {
		now = hrtimer_cb_get_time(timer);
		overrun = hrtimer_forward(timer, now, rt_b->rt_period);

		if (!overrun)
			break;

		idle = do_sched_rt_period_timer(rt_b, overrun);
	}

	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}
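
/*
 * Note: hrtimer_forward() pushes the timer's expiry forward in whole
 * rt_period steps and returns the number of periods that had already
 * elapsed (0 once the expiry lies in the future again), so the loop above
 * charges every missed period through do_sched_rt_period_timer() before
 * deciding whether to re-arm the timer.
 */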

static
void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
	rt_b->rt_period = ns_to_ktime(period);
	rt_b->rt_runtime = runtime;

	raw_spin_lock_init(&rt_b->rt_runtime_lock);

	hrtimer_init(&rt_b->rt_period_timer,
			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rt_b->rt_period_timer.function = sched_rt_period_timer;
}

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	ktime_t now;

	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return;

	if (hrtimer_active(&rt_b->rt_period_timer))
		return;

	raw_spin_lock(&rt_b->rt_runtime_lock);
	for (;;) {
		unsigned long delta;
		ktime_t soft, hard;

		if (hrtimer_active(&rt_b->rt_period_timer))
			break;

		now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
		hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);

		soft = hrtimer_get_softexpires(&rt_b->rt_period_timer);
		hard = hrtimer_get_expires(&rt_b->rt_period_timer);
		delta = ktime_to_ns(ktime_sub(hard, soft));
		__hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
				HRTIMER_MODE_ABS_PINNED, 0);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	hrtimer_cancel(&rt_b->rt_period_timer);
}
#endif

/*
 * sched_domains_mutex serializes calls to arch_init_sched_domains,
 * detach_destroy_domains and partition_sched_domains.
 */
static DEFINE_MUTEX(sched_domains_mutex);

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;

static LIST_HEAD(task_groups);

/* task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each cpu */
	struct sched_entity **se;
	/* runqueue "owned" by this group on each cpu */
	struct cfs_rq **cfs_rq;
	unsigned long shares;
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity **rt_se;
	struct rt_rq **rt_rq;

	struct rt_bandwidth rt_bandwidth;
#endif

	struct rcu_head rcu;
	struct list_head list;

	struct task_group *parent;
	struct list_head siblings;
	struct list_head children;
};

#define root_task_group init_task_group

/* task_group_lock serializes add/remove of task groups and also changes to
 * a task group's cpu shares.
 */
static DEFINE_SPINLOCK(task_group_lock);

#ifdef CONFIG_FAIR_GROUP_SCHED

#ifdef CONFIG_SMP
static int root_task_group_empty(void)
{
	return list_empty(&root_task_group.children);
}
#endif

# define INIT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so the weight of an entity should not be too large,
 * and neither should the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 *  limitation from this.)
 */
#define MIN_SHARES	2
#define MAX_SHARES	(1UL << 18)

static int init_task_group_load = INIT_TASK_GROUP_LOAD;
#endif

/* Default task group.
 *	Every task in the system belongs to this group at bootup.
 */
struct task_group init_task_group;

#endif	/* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight load;
	unsigned long nr_running;

	u64 exec_clock;
	u64 min_vruntime;

	struct rb_root tasks_timeline;
	struct rb_node *rb_leftmost;

	struct list_head tasks;
	struct list_head *balance_iterator;

	/*
	 * 'curr' points to the currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e. when none are currently running).
	 */
	struct sched_entity *curr, *next, *last;

	unsigned int nr_spread_over;

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
	 * list is used during load balance.
	 */
	struct list_head leaf_cfs_rq_list;
	struct task_group *tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_SMP
	/*
	 * the part of load.weight contributed by tasks
	 */
	unsigned long task_weight;

	/*
	 *   h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long h_load;

	/*
	 * this cpu's part of tg->shares
	 */
	unsigned long shares;

	/*
	 * load.weight at the time we set shares
	 */
	unsigned long rq_weight;
#endif
#endif
};

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array active;
	unsigned long rt_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long rt_nr_migratory;
	unsigned long rt_nr_total;
	int overloaded;
	struct plist_head pushable_tasks;
#endif
	int rt_throttled;
	u64 rt_time;
	u64 rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long rt_nr_boosted;

	struct rq *rq;
	struct list_head leaf_rt_rq_list;
	struct task_group *tg;
#endif
};

#ifdef CONFIG_SMP

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 *
 */
struct root_domain {
	atomic_t refcount;
	cpumask_var_t span;
	cpumask_var_t online;

	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t rto_mask;
	atomic_t rto_count;
	struct cpupri cpupri;
};

/*
 * By default the system creates a single root-domain with all cpus as
 * members (mimicking the global state we have today).
 */
static struct root_domain def_root_domain;

#endif /* CONFIG_SMP */

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: code that wants to lock multiple runqueues (such as
 * the load balancing or the thread migration code) must acquire the
 * locks in ascending &runqueue order.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned long nr_running;
#define CPU_LOAD_IDX_MAX 5
	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
	unsigned long last_load_update_tick;
#ifdef CONFIG_NO_HZ
	u64 nohz_stamp;
	unsigned char nohz_balance_kick;
#endif
	unsigned int skip_clock_update;

	/* capture load from *all* tasks on this cpu: */
	struct load_weight load;
	unsigned long nr_load_updates;
	u64 nr_switches;

	struct cfs_rq cfs;
	struct rt_rq rt;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this cpu: */
	struct list_head leaf_cfs_rq_list;
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	struct list_head leaf_rt_rq_list;
#endif

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long nr_uninterruptible;

	struct task_struct *curr, *idle, *stop;
	unsigned long next_balance;
	struct mm_struct *prev_mm;

	u64 clock;
	u64 clock_task;

	atomic_t nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain *rd;
	struct sched_domain *sd;

	unsigned long cpu_power;

	unsigned char idle_at_tick;
	/* For active balancing */
	int post_schedule;
	int active_balance;
	int push_cpu;
	struct cpu_stop_work active_balance_work;
	/* cpu of this runqueue: */
	int cpu;
	int online;

	unsigned long avg_load_per_task;

	u64 rt_avg;
	u64 age_stamp;
	u64 idle_stamp;
	u64 avg_idle;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64 prev_irq_time;
#endif

	/* calc_load related fields */
	unsigned long calc_load_update;
	long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int hrtick_csd_pending;
	struct call_single_data hrtick_csd;
#endif
	struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;
	unsigned long long rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_switch;
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;

	/* BKL stats */
	unsigned int bkl_count;
#endif
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);


static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      rcu_read_lock_sched_held() || \
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		(&__get_cpu_var(runqueues))
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		(&__raw_get_cpu_var(runqueues))
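
/*
 * Usage sketch (not taken verbatim from this file): walking the domain
 * hierarchy of a CPU requires preemption to be disabled, e.g.
 *
 *	preempt_disable();
 *	for_each_domain(cpu, sd) {
 *		if (sd->flags & SD_LOAD_BALANCE)
 *			...;
 *	}
 *	preempt_enable();
 *
 * cpu_rq(cpu) and this_rq() above resolve to the per-CPU 'runqueues'
 * instance of a given CPU and of the local CPU respectively.
 */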

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We use task_subsys_state_check() and extend the RCU verification
 * with lockdep_is_held(&task_rq(p)->lock) because cpu_cgroup_attach()
 * holds that lock for each task it moves into the cgroup. Therefore
 * by holding that lock, we pin the task to the current cgroup.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
	struct cgroup_subsys_state *css;

	css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
			lockdep_is_held(&task_rq(p)->lock));
	return container_of(css, struct task_group, css);
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
	p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
	p->se.parent = task_group(p)->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq  = task_group(p)->rt_rq[cpu];
	p->rt.parent = task_group(p)->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

static void update_rq_clock_task(struct rq *rq, s64 delta);

static void update_rq_clock(struct rq *rq)
{
	s64 delta;

	if (rq->skip_clock_update)
		return;

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	rq->clock += delta;
	update_rq_clock_task(rq, delta);
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# define const_debug __read_mostly
#else
# define const_debug static const
#endif

/**
 * runqueue_is_locked
 * @cpu: the processor in question.
 *
 * Returns true if the current cpu runqueue is locked.
 * This interface allows printk to be called with the runqueue lock
 * held and know whether or not it is OK to wake up the klogd.
 */
int runqueue_is_locked(int cpu)
{
	return raw_spin_is_locked(&cpu_rq(cpu)->lock);
}

/*
 * Debugging: various feature bits
 */

#define SCHED_FEAT(name, enabled)	\
	__SCHED_FEAT_##name ,

enum {
#include "sched_features.h"
};

#undef SCHED_FEAT

#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |

const_debug unsigned int sysctl_sched_features =
#include "sched_features.h"
	0;

#undef SCHED_FEAT

#ifdef CONFIG_SCHED_DEBUG
#define SCHED_FEAT(name, enabled)	\
	#name ,

static __read_mostly char *sched_feat_names[] = {
#include "sched_features.h"
	NULL
};

#undef SCHED_FEAT
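
/*
 * How the triple include above works, using a feature entry such as
 * SCHED_FEAT(START_DEBIT, 1) from sched_features.h as an example: the
 * first include generates the enum constant __SCHED_FEAT_START_DEBIT,
 * the second ORs (1UL << __SCHED_FEAT_START_DEBIT) * 1 into the default
 * sysctl_sched_features mask, and the third adds the string "START_DEBIT"
 * to sched_feat_names[] for the debugfs interface below.
 */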

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; sched_feat_names[i]; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int neg = 0;
	int i;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	for (i = 0; sched_feat_names[i]; i++) {
		if (strcmp(cmp, sched_feat_names[i]) == 0) {
			if (neg)
				sysctl_sched_features &= ~(1UL << i);
			else
				sysctl_sched_features |= (1UL << i);
			break;
		}
	}

	if (!sched_feat_names[i])
		return -EINVAL;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static __init int sched_init_debug(void)
{
	debugfs_create_file("sched_features", 0644, NULL, NULL,
			&sched_feat_fops);

	return 0;
}
late_initcall(sched_init_debug);

#endif

#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
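
/*
 * Example of the resulting debugfs interface (a sketch, assuming debugfs
 * is mounted at /sys/kernel/debug and GENTLE_FAIR_SLEEPERS is one of the
 * features listed in sched_features.h):
 *
 *	# cat /sys/kernel/debug/sched_features
 *	GENTLE_FAIR_SLEEPERS START_DEBIT ... NO_HRTICK ...
 *	# echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched_features
 *
 * The write disables that feature at run time; in code,
 * sched_feat(GENTLE_FAIR_SLEEPERS) then evaluates to 0.
 */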

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = 32;

/*
 * ratelimit for updating the group shares.
 * default: 0.25ms
 */
unsigned int sysctl_sched_shares_ratelimit = 250000;
unsigned int normalized_sysctl_sched_shares_ratelimit = 250000;

/*
 * Inject some fuzziness into changing the per-cpu group shares;
 * this avoids remote rq-locks at the expense of fairness.
 * default: 4
 */
unsigned int sysctl_sched_shares_thresh = 4;

/*
 * period over which we average the RT time consumption, measured
 * in ms.
 *
 * default: 1s
 */
const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;

/*
 * period over which we measure -rt task cpu usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

static __read_mostly int scheduler_running;

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;

static inline u64 global_rt_period(void)
{
	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
	if (sysctl_sched_rt_runtime < 0)
		return RUNTIME_INF;

	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}
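
/*
 * With the defaults above, global_rt_period() is 1,000,000 us * 1000 =
 * 1e9 ns (one second) and global_rt_runtime() is 950,000 us * 1000 =
 * 9.5e8 ns, i.e. realtime tasks may consume at most 95% of each one
 * second period. Writing -1 to the sched_rt_runtime_us sysctl makes
 * global_rt_runtime() return RUNTIME_INF and disables the throttling.
 */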

#ifndef prepare_arch_switch
# define prepare_arch_switch(next)	do { } while (0)
#endif
#ifndef finish_arch_switch
# define finish_arch_switch(prev)	do { } while (0)
#endif

static inline int task_current(struct rq *rq, struct task_struct *p)
{
	return rq->curr == p;
}

#ifndef __ARCH_WANT_UNLOCKED_CTXSW
static inline int task_running(struct rq *rq, struct task_struct *p)
{
	return task_current(rq, p);
}

static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	/* this is a valid case when another task releases the spinlock */
	rq->lock.owner = current;
#endif
	/*
	 * If we are tracking spinlock dependencies then we have to
	 * fix up the runqueue lock - which gets 'carried over' from
	 * prev into current:
	 */
	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

	raw_spin_unlock_irq(&rq->lock);
}

#else /* __ARCH_WANT_UNLOCKED_CTXSW */
static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->oncpu;
#else
	return task_current(rq, p);
#endif
}

static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->oncpu = 1;
#endif
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	raw_spin_unlock_irq(&rq->lock);
#else
	raw_spin_unlock(&rq->lock);
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->oncpu is cleared, the task can be moved to a different CPU.
	 * We must ensure this doesn't happen until the switch is completely
	 * finished.
	 */
	smp_wmb();
	prev->oncpu = 0;
#endif
#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	local_irq_enable();
#endif
}
#endif /* __ARCH_WANT_UNLOCKED_CTXSW */

/*
 * Check whether the task is waking; we use this to synchronize ->cpus_allowed
 * against ttwu().
 */
static inline int task_is_waking(struct task_struct *p)
{
	return unlikely(p->state == TASK_WAKING);
}

| 935 | /* |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 936 | * __task_rq_lock - lock the runqueue a given task resides on. |
| 937 | * Must be called with interrupts disabled.
| 938 | */ |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 939 | static inline struct rq *__task_rq_lock(struct task_struct *p) |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 940 | __acquires(rq->lock) |
| 941 | { |
Peter Zijlstra | 0970d29 | 2010-02-15 14:45:54 +0100 | [diff] [blame] | 942 | struct rq *rq; |
| 943 | |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 944 | for (;;) { |
Peter Zijlstra | 0970d29 | 2010-02-15 14:45:54 +0100 | [diff] [blame] | 945 | rq = task_rq(p); |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 946 | raw_spin_lock(&rq->lock); |
Peter Zijlstra | 65cc8e4 | 2010-03-25 21:05:16 +0100 | [diff] [blame] | 947 | if (likely(rq == task_rq(p))) |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 948 | return rq; |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 949 | raw_spin_unlock(&rq->lock); |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 950 | } |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 951 | } |
| 952 | |
| 953 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 954 | * task_rq_lock - lock the runqueue a given task resides on and disable |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 955 | * interrupts. Note the ordering: we can safely look up the task_rq without
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 956 | * explicitly disabling preemption. |
| 957 | */ |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 958 | static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 959 | __acquires(rq->lock) |
| 960 | { |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 961 | struct rq *rq; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 962 | |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 963 | for (;;) { |
| 964 | local_irq_save(*flags); |
| 965 | rq = task_rq(p); |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 966 | raw_spin_lock(&rq->lock); |
Peter Zijlstra | 65cc8e4 | 2010-03-25 21:05:16 +0100 | [diff] [blame] | 967 | if (likely(rq == task_rq(p))) |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 968 | return rq; |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 969 | raw_spin_unlock_irqrestore(&rq->lock, *flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 970 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 971 | } |
| 972 | |
Alexey Dobriyan | a995744 | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 973 | static void __task_rq_unlock(struct rq *rq) |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 974 | __releases(rq->lock) |
| 975 | { |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 976 | raw_spin_unlock(&rq->lock); |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 977 | } |
| 978 | |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 979 | static inline void task_rq_unlock(struct rq *rq, unsigned long *flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 980 | __releases(rq->lock) |
| 981 | { |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 982 | raw_spin_unlock_irqrestore(&rq->lock, *flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 983 | } |
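
/*
 * Purely illustrative usage sketch (not a definitive caller -- the local
 * names here are made up): the lock/unlock helpers above are meant to be
 * used as a pair, e.g.:
 *
 *	unsigned long flags;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &flags);
 *	... inspect or modify the task/runqueue under rq->lock ...
 *	task_rq_unlock(rq, &flags);
 */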
| 984 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 985 | /* |
Robert P. J. Day | cc2a73b | 2006-12-10 02:20:00 -0800 | [diff] [blame] | 986 | * this_rq_lock - lock this runqueue and disable interrupts. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 987 | */ |
Alexey Dobriyan | a995744 | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 988 | static struct rq *this_rq_lock(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 989 | __acquires(rq->lock) |
| 990 | { |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 991 | struct rq *rq; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 992 | |
| 993 | local_irq_disable(); |
| 994 | rq = this_rq(); |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 995 | raw_spin_lock(&rq->lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 996 | |
| 997 | return rq; |
| 998 | } |
| 999 | |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1000 | #ifdef CONFIG_SCHED_HRTICK |
| 1001 | /* |
| 1002 | * Use HR-timers to deliver accurate preemption points. |
| 1003 | * |
| 1004 | * It's all a bit involved since we cannot program an hrtimer while holding the
| 1005 | * rq->lock. So what we do is store a state in rq->hrtick_* and ask for a
| 1006 | * reschedule event. |
| 1007 | * |
| 1008 | * When we get rescheduled we reprogram the hrtick_timer outside of the |
| 1009 | * rq->lock. |
| 1010 | */ |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1011 | |
| 1012 | /* |
| 1013 | * Use hrtick when: |
| 1014 | * - enabled by features |
| 1015 | * - hrtimer is actually high res |
| 1016 | */ |
| 1017 | static inline int hrtick_enabled(struct rq *rq) |
| 1018 | { |
| 1019 | if (!sched_feat(HRTICK)) |
| 1020 | return 0; |
Ingo Molnar | ba42059 | 2008-07-20 11:02:06 +0200 | [diff] [blame] | 1021 | if (!cpu_active(cpu_of(rq))) |
Peter Zijlstra | b328ca1 | 2008-04-29 10:02:46 +0200 | [diff] [blame] | 1022 | return 0; |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1023 | return hrtimer_is_hres_active(&rq->hrtick_timer); |
| 1024 | } |
| 1025 | |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1026 | static void hrtick_clear(struct rq *rq) |
| 1027 | { |
| 1028 | if (hrtimer_active(&rq->hrtick_timer)) |
| 1029 | hrtimer_cancel(&rq->hrtick_timer); |
| 1030 | } |
| 1031 | |
| 1032 | /* |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1033 | * High-resolution timer tick. |
| 1034 | * Runs from hardirq context with interrupts disabled. |
| 1035 | */ |
| 1036 | static enum hrtimer_restart hrtick(struct hrtimer *timer) |
| 1037 | { |
| 1038 | struct rq *rq = container_of(timer, struct rq, hrtick_timer); |
| 1039 | |
| 1040 | WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); |
| 1041 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1042 | raw_spin_lock(&rq->lock); |
Peter Zijlstra | 3e51f33 | 2008-05-03 18:29:28 +0200 | [diff] [blame] | 1043 | update_rq_clock(rq); |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1044 | rq->curr->sched_class->task_tick(rq, rq->curr, 1); |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1045 | raw_spin_unlock(&rq->lock); |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1046 | |
| 1047 | return HRTIMER_NORESTART; |
| 1048 | } |
| 1049 | |
Rabin Vincent | 95e904c | 2008-05-11 05:55:33 +0530 | [diff] [blame] | 1050 | #ifdef CONFIG_SMP |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1051 | /* |
| 1052 | * called from hardirq (IPI) context |
| 1053 | */ |
| 1054 | static void __hrtick_start(void *arg) |
Peter Zijlstra | b328ca1 | 2008-04-29 10:02:46 +0200 | [diff] [blame] | 1055 | { |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1056 | struct rq *rq = arg; |
Peter Zijlstra | b328ca1 | 2008-04-29 10:02:46 +0200 | [diff] [blame] | 1057 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1058 | raw_spin_lock(&rq->lock); |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1059 | hrtimer_restart(&rq->hrtick_timer); |
| 1060 | rq->hrtick_csd_pending = 0; |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1061 | raw_spin_unlock(&rq->lock); |
Peter Zijlstra | b328ca1 | 2008-04-29 10:02:46 +0200 | [diff] [blame] | 1062 | } |
| 1063 | |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1064 | /* |
| 1065 | * Called to set the hrtick timer state. |
| 1066 | * |
| 1067 | * called with rq->lock held and irqs disabled |
| 1068 | */ |
| 1069 | static void hrtick_start(struct rq *rq, u64 delay) |
Peter Zijlstra | b328ca1 | 2008-04-29 10:02:46 +0200 | [diff] [blame] | 1070 | { |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1071 | struct hrtimer *timer = &rq->hrtick_timer; |
| 1072 | ktime_t time = ktime_add_ns(timer->base->get_time(), delay); |
Peter Zijlstra | b328ca1 | 2008-04-29 10:02:46 +0200 | [diff] [blame] | 1073 | |
Arjan van de Ven | cc584b2 | 2008-09-01 15:02:30 -0700 | [diff] [blame] | 1074 | hrtimer_set_expires(timer, time); |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1075 | |
| 1076 | if (rq == this_rq()) { |
| 1077 | hrtimer_restart(timer); |
| 1078 | } else if (!rq->hrtick_csd_pending) { |
Peter Zijlstra | 6e27563 | 2009-02-25 13:59:48 +0100 | [diff] [blame] | 1079 | __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0); |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1080 | rq->hrtick_csd_pending = 1; |
| 1081 | } |
Peter Zijlstra | b328ca1 | 2008-04-29 10:02:46 +0200 | [diff] [blame] | 1082 | } |
| 1083 | |
| 1084 | static int |
| 1085 | hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu) |
| 1086 | { |
| 1087 | int cpu = (int)(long)hcpu; |
| 1088 | |
| 1089 | switch (action) { |
| 1090 | case CPU_UP_CANCELED: |
| 1091 | case CPU_UP_CANCELED_FROZEN: |
| 1092 | case CPU_DOWN_PREPARE: |
| 1093 | case CPU_DOWN_PREPARE_FROZEN: |
| 1094 | case CPU_DEAD: |
| 1095 | case CPU_DEAD_FROZEN: |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1096 | hrtick_clear(cpu_rq(cpu)); |
Peter Zijlstra | b328ca1 | 2008-04-29 10:02:46 +0200 | [diff] [blame] | 1097 | return NOTIFY_OK; |
| 1098 | } |
| 1099 | |
| 1100 | return NOTIFY_DONE; |
| 1101 | } |
| 1102 | |
Rakib Mullick | fa74820 | 2008-09-22 14:55:45 -0700 | [diff] [blame] | 1103 | static __init void init_hrtick(void) |
Peter Zijlstra | b328ca1 | 2008-04-29 10:02:46 +0200 | [diff] [blame] | 1104 | { |
| 1105 | hotcpu_notifier(hotplug_hrtick, 0); |
| 1106 | } |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1107 | #else |
| 1108 | /* |
| 1109 | * Called to set the hrtick timer state. |
| 1110 | * |
| 1111 | * called with rq->lock held and irqs disabled |
| 1112 | */ |
| 1113 | static void hrtick_start(struct rq *rq, u64 delay) |
| 1114 | { |
Peter Zijlstra | 7f1e2ca | 2009-03-13 12:21:27 +0100 | [diff] [blame] | 1115 | __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0, |
Arun R Bharadwaj | 5c33386 | 2009-04-16 12:14:37 +0530 | [diff] [blame] | 1116 | HRTIMER_MODE_REL_PINNED, 0); |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1117 | } |
| 1118 | |
Andrew Morton | 006c75f | 2008-09-22 14:55:46 -0700 | [diff] [blame] | 1119 | static inline void init_hrtick(void) |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1120 | { |
| 1121 | } |
Rabin Vincent | 95e904c | 2008-05-11 05:55:33 +0530 | [diff] [blame] | 1122 | #endif /* CONFIG_SMP */ |
Peter Zijlstra | b328ca1 | 2008-04-29 10:02:46 +0200 | [diff] [blame] | 1123 | |
| 1124 | static void init_rq_hrtick(struct rq *rq) |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1125 | { |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1126 | #ifdef CONFIG_SMP |
| 1127 | rq->hrtick_csd_pending = 0; |
| 1128 | |
| 1129 | rq->hrtick_csd.flags = 0; |
| 1130 | rq->hrtick_csd.func = __hrtick_start; |
| 1131 | rq->hrtick_csd.info = rq; |
| 1132 | #endif |
| 1133 | |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1134 | hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
| 1135 | rq->hrtick_timer.function = hrtick; |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1136 | } |
Andrew Morton | 006c75f | 2008-09-22 14:55:46 -0700 | [diff] [blame] | 1137 | #else /* CONFIG_SCHED_HRTICK */ |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1138 | static inline void hrtick_clear(struct rq *rq) |
| 1139 | { |
| 1140 | } |
| 1141 | |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1142 | static inline void init_rq_hrtick(struct rq *rq) |
| 1143 | { |
| 1144 | } |
| 1145 | |
Peter Zijlstra | b328ca1 | 2008-04-29 10:02:46 +0200 | [diff] [blame] | 1146 | static inline void init_hrtick(void) |
| 1147 | { |
| 1148 | } |
Andrew Morton | 006c75f | 2008-09-22 14:55:46 -0700 | [diff] [blame] | 1149 | #endif /* CONFIG_SCHED_HRTICK */ |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1150 | |
Ingo Molnar | 1b9f19c | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1151 | /* |
Ingo Molnar | c24d20d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1152 | * resched_task - mark a task 'to be rescheduled now'. |
| 1153 | * |
| 1154 | * On UP this means the setting of the need_resched flag, on SMP it |
| 1155 | * might also involve a cross-CPU call to trigger the scheduler on |
| 1156 | * the target CPU. |
| 1157 | */ |
| 1158 | #ifdef CONFIG_SMP |
| 1159 | |
| 1160 | #ifndef tsk_is_polling |
| 1161 | #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) |
| 1162 | #endif |
| 1163 | |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1164 | static void resched_task(struct task_struct *p) |
Ingo Molnar | c24d20d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1165 | { |
| 1166 | int cpu; |
| 1167 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1168 | assert_raw_spin_locked(&task_rq(p)->lock); |
Ingo Molnar | c24d20d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1169 | |
Lai Jiangshan | 5ed0cec | 2009-03-06 19:40:20 +0800 | [diff] [blame] | 1170 | if (test_tsk_need_resched(p)) |
Ingo Molnar | c24d20d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1171 | return; |
| 1172 | |
Lai Jiangshan | 5ed0cec | 2009-03-06 19:40:20 +0800 | [diff] [blame] | 1173 | set_tsk_need_resched(p); |
Ingo Molnar | c24d20d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1174 | |
| 1175 | cpu = task_cpu(p); |
| 1176 | if (cpu == smp_processor_id()) |
| 1177 | return; |
| 1178 | |
| 1179 | /* NEED_RESCHED must be visible before we test polling */ |
| 1180 | smp_mb(); |
| 1181 | if (!tsk_is_polling(p)) |
| 1182 | smp_send_reschedule(cpu); |
| 1183 | } |
| 1184 | |
| 1185 | static void resched_cpu(int cpu) |
| 1186 | { |
| 1187 | struct rq *rq = cpu_rq(cpu); |
| 1188 | unsigned long flags; |
| 1189 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1190 | if (!raw_spin_trylock_irqsave(&rq->lock, flags)) |
Ingo Molnar | c24d20d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1191 | return; |
| 1192 | resched_task(cpu_curr(cpu)); |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1193 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
Ingo Molnar | c24d20d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1194 | } |
Thomas Gleixner | 06d8308 | 2008-03-22 09:20:24 +0100 | [diff] [blame] | 1195 | |
| 1196 | #ifdef CONFIG_NO_HZ |
| 1197 | /* |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 1198 | * In the semi idle case, use the nearest busy cpu for migrating timers |
| 1199 | * from an idle cpu. This is good for power-savings. |
| 1200 | * |
| 1201 | * We don't do a similar optimization for a completely idle system, as
| 1202 | * selecting an idle cpu will add more delay to the timers than intended
| 1203 | * (as that cpu's timer base may not be up to date w.r.t. jiffies etc.).
| 1204 | */ |
| 1205 | int get_nohz_timer_target(void) |
| 1206 | { |
| 1207 | int cpu = smp_processor_id(); |
| 1208 | int i; |
| 1209 | struct sched_domain *sd; |
| 1210 | |
| 1211 | for_each_domain(cpu, sd) { |
| 1212 | for_each_cpu(i, sched_domain_span(sd)) |
| 1213 | if (!idle_cpu(i)) |
| 1214 | return i; |
| 1215 | } |
| 1216 | return cpu; |
| 1217 | } |
| 1218 | /* |
Thomas Gleixner | 06d8308 | 2008-03-22 09:20:24 +0100 | [diff] [blame] | 1219 | * When add_timer_on() enqueues a timer into the timer wheel of an |
| 1220 | * idle CPU then this timer might expire before the next timer event |
| 1221 | * which is scheduled to wake up that CPU. In case of a completely |
| 1222 | * idle system, the next event might even be infinitely far into the
| 1223 | * future. wake_up_idle_cpu() ensures that the CPU is woken up and |
| 1224 | * leaves the inner idle loop so the newly added timer is taken into |
| 1225 | * account when the CPU goes back to idle and evaluates the timer |
| 1226 | * wheel for the next timer event. |
| 1227 | */ |
| 1228 | void wake_up_idle_cpu(int cpu) |
| 1229 | { |
| 1230 | struct rq *rq = cpu_rq(cpu); |
| 1231 | |
| 1232 | if (cpu == smp_processor_id()) |
| 1233 | return; |
| 1234 | |
| 1235 | /* |
| 1236 | * This is safe, as this function is called with the timer |
| 1237 | * wheel base lock of (cpu) held. When the CPU is on the way |
| 1238 | * to idle and has not yet set rq->curr to idle then it will |
| 1239 | * be serialized on the timer wheel base lock and take the new |
| 1240 | * timer into account automatically. |
| 1241 | */ |
| 1242 | if (rq->curr != rq->idle) |
| 1243 | return; |
| 1244 | |
| 1245 | /* |
| 1246 | * We can set TIF_RESCHED on the idle task of the other CPU |
| 1247 | * locklessly. The worst case is that the other CPU runs the
| 1248 | * idle task through an additional NOOP schedule().
| 1249 | */ |
Lai Jiangshan | 5ed0cec | 2009-03-06 19:40:20 +0800 | [diff] [blame] | 1250 | set_tsk_need_resched(rq->idle); |
Thomas Gleixner | 06d8308 | 2008-03-22 09:20:24 +0100 | [diff] [blame] | 1251 | |
| 1252 | /* NEED_RESCHED must be visible before we test polling */ |
| 1253 | smp_mb(); |
| 1254 | if (!tsk_is_polling(rq->idle)) |
| 1255 | smp_send_reschedule(cpu); |
| 1256 | } |
Mike Galbraith | 39c0cbe | 2010-03-11 17:17:13 +0100 | [diff] [blame] | 1257 | |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 1258 | #endif /* CONFIG_NO_HZ */ |
Thomas Gleixner | 06d8308 | 2008-03-22 09:20:24 +0100 | [diff] [blame] | 1259 | |
Peter Zijlstra | e9e9250 | 2009-09-01 10:34:37 +0200 | [diff] [blame] | 1260 | static u64 sched_avg_period(void) |
| 1261 | { |
| 1262 | return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2; |
| 1263 | } |
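
/*
 * As a worked example (assuming the default sysctl_sched_time_avg of
 * 1000 msec): sched_avg_period() above returns
 * 1000 * NSEC_PER_MSEC / 2 = 500,000,000 ns, i.e. half a second.
 */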
| 1264 | |
| 1265 | static void sched_avg_update(struct rq *rq) |
| 1266 | { |
| 1267 | s64 period = sched_avg_period(); |
| 1268 | |
| 1269 | while ((s64)(rq->clock - rq->age_stamp) > period) { |
Will Deacon | 0d98bb2 | 2010-05-24 12:11:43 -0700 | [diff] [blame] | 1270 | /* |
| 1271 | * Inline assembly required to prevent the compiler |
| 1272 | * optimising this loop into a divmod call. |
| 1273 | * See __iter_div_u64_rem() for another example of this. |
| 1274 | */ |
| 1275 | asm("" : "+rm" (rq->age_stamp)); |
Peter Zijlstra | e9e9250 | 2009-09-01 10:34:37 +0200 | [diff] [blame] | 1276 | rq->age_stamp += period; |
| 1277 | rq->rt_avg /= 2; |
| 1278 | } |
| 1279 | } |
| 1280 | |
| 1281 | static void sched_rt_avg_update(struct rq *rq, u64 rt_delta) |
| 1282 | { |
| 1283 | rq->rt_avg += rt_delta; |
| 1284 | sched_avg_update(rq); |
| 1285 | } |
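
/*
 * Taken together, the two helpers above decay RT activity geometrically:
 * every full period that elapses halves rq->rt_avg. Purely as an example,
 * 400 ms worth of accumulated RT runtime decays to ~200 ms after one
 * period and ~100 ms after two, if nothing new is added in between.
 */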
| 1286 | |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 1287 | #else /* !CONFIG_SMP */ |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1288 | static void resched_task(struct task_struct *p) |
Ingo Molnar | c24d20d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1289 | { |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1290 | assert_raw_spin_locked(&task_rq(p)->lock); |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1291 | set_tsk_need_resched(p); |
Ingo Molnar | c24d20d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1292 | } |
Peter Zijlstra | e9e9250 | 2009-09-01 10:34:37 +0200 | [diff] [blame] | 1293 | |
| 1294 | static void sched_rt_avg_update(struct rq *rq, u64 rt_delta) |
| 1295 | { |
| 1296 | } |
Suresh Siddha | da2b71e | 2010-08-23 13:42:51 -0700 | [diff] [blame] | 1297 | |
| 1298 | static void sched_avg_update(struct rq *rq) |
| 1299 | { |
| 1300 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 1301 | #endif /* CONFIG_SMP */ |
Ingo Molnar | c24d20d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1302 | |
Ingo Molnar | 45bf76d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1303 | #if BITS_PER_LONG == 32 |
| 1304 | # define WMULT_CONST (~0UL) |
| 1305 | #else |
| 1306 | # define WMULT_CONST (1UL << 32) |
| 1307 | #endif |
| 1308 | |
| 1309 | #define WMULT_SHIFT 32 |
| 1310 | |
Ingo Molnar | 194081e | 2007-08-09 11:16:51 +0200 | [diff] [blame] | 1311 | /* |
| 1312 | * Shift right and round: |
| 1313 | */ |
Ingo Molnar | cf2ab46 | 2007-09-05 14:32:49 +0200 | [diff] [blame] | 1314 | #define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y)) |
Ingo Molnar | 194081e | 2007-08-09 11:16:51 +0200 | [diff] [blame] | 1315 | |
Peter Zijlstra | a7be37a | 2008-06-27 13:41:11 +0200 | [diff] [blame] | 1316 | /* |
| 1317 | * delta *= weight / lw |
| 1318 | */ |
Ingo Molnar | cb1c4fc | 2007-08-02 17:41:40 +0200 | [diff] [blame] | 1319 | static unsigned long |
Ingo Molnar | 45bf76d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1320 | calc_delta_mine(unsigned long delta_exec, unsigned long weight, |
| 1321 | struct load_weight *lw) |
| 1322 | { |
| 1323 | u64 tmp; |
| 1324 | |
Lai Jiangshan | 7a232e0 | 2008-06-12 16:43:07 +0800 | [diff] [blame] | 1325 | if (!lw->inv_weight) { |
| 1326 | if (BITS_PER_LONG > 32 && unlikely(lw->weight >= WMULT_CONST)) |
| 1327 | lw->inv_weight = 1; |
| 1328 | else |
| 1329 | lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2) |
| 1330 | / (lw->weight+1); |
| 1331 | } |
Ingo Molnar | 45bf76d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1332 | |
| 1333 | tmp = (u64)delta_exec * weight; |
| 1334 | /* |
| 1335 | * Check whether we'd overflow the 64-bit multiplication: |
| 1336 | */ |
Ingo Molnar | 194081e | 2007-08-09 11:16:51 +0200 | [diff] [blame] | 1337 | if (unlikely(tmp > WMULT_CONST)) |
Ingo Molnar | cf2ab46 | 2007-09-05 14:32:49 +0200 | [diff] [blame] | 1338 | tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight, |
Ingo Molnar | 194081e | 2007-08-09 11:16:51 +0200 | [diff] [blame] | 1339 | WMULT_SHIFT/2); |
| 1340 | else |
Ingo Molnar | cf2ab46 | 2007-09-05 14:32:49 +0200 | [diff] [blame] | 1341 | tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT); |
Ingo Molnar | 45bf76d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1342 | |
Ingo Molnar | ecf691d | 2007-08-02 17:41:40 +0200 | [diff] [blame] | 1343 | return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX); |
Ingo Molnar | 45bf76d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1344 | } |
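
/*
 * A rough worked example of the fixed-point math above (illustrative
 * numbers only): with delta_exec = 1000000, weight = 1024 and
 * lw->weight = 2048, inv_weight is roughly 2^32 / 2048, so
 * SRR(tmp * inv_weight, 32) comes out near 500000 -- the delta is
 * scaled by weight / lw->weight = 1/2, as intended.
 */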
| 1345 | |
Ingo Molnar | 1091985 | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 1346 | static inline void update_load_add(struct load_weight *lw, unsigned long inc) |
Ingo Molnar | 45bf76d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1347 | { |
| 1348 | lw->weight += inc; |
Ingo Molnar | e89996a | 2008-03-14 23:48:28 +0100 | [diff] [blame] | 1349 | lw->inv_weight = 0; |
Ingo Molnar | 45bf76d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1350 | } |
| 1351 | |
Ingo Molnar | 1091985 | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 1352 | static inline void update_load_sub(struct load_weight *lw, unsigned long dec) |
Ingo Molnar | 45bf76d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1353 | { |
| 1354 | lw->weight -= dec; |
Ingo Molnar | e89996a | 2008-03-14 23:48:28 +0100 | [diff] [blame] | 1355 | lw->inv_weight = 0; |
Ingo Molnar | 45bf76d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1356 | } |
| 1357 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1358 | /* |
Peter Williams | 2dd73a4 | 2006-06-27 02:54:34 -0700 | [diff] [blame] | 1359 | * To aid in avoiding the subversion of "niceness" due to uneven distribution |
| 1360 | * of tasks with abnormal "nice" values across CPUs, the contribution that
| 1361 | * each task makes to its run queue's load is weighted according to its |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 1362 | * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a |
Peter Williams | 2dd73a4 | 2006-06-27 02:54:34 -0700 | [diff] [blame] | 1363 | * scaled version of the new time slice allocation that they receive on time |
| 1364 | * slice expiry etc. |
| 1365 | */ |
| 1366 | |
Peter Zijlstra | cce7ade | 2009-01-15 14:53:37 +0100 | [diff] [blame] | 1367 | #define WEIGHT_IDLEPRIO 3 |
| 1368 | #define WMULT_IDLEPRIO 1431655765 |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1369 | |
| 1370 | /* |
| 1371 | * Nice levels are multiplicative, with a gentle 10% change for every |
| 1372 | * nice level changed. I.e. when a CPU-bound task goes from nice 0 to |
| 1373 | * nice 1, it will get ~10% less CPU time than another CPU-bound task |
| 1374 | * that remained on nice 0. |
| 1375 | * |
| 1376 | * The "10% effect" is relative and cumulative: from _any_ nice level, |
| 1377 | * if you go up 1 level, it's -10% CPU usage, if you go down 1 level |
Ingo Molnar | f9153ee | 2007-07-16 09:46:30 +0200 | [diff] [blame] | 1378 | * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25. |
| 1379 | * If a task goes up by ~10% and another task goes down by ~10% then |
| 1380 | * the relative distance between them is ~25%.) |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1381 | */ |
| 1382 | static const int prio_to_weight[40] = { |
Ingo Molnar | 254753d | 2007-08-09 11:16:51 +0200 | [diff] [blame] | 1383 | /* -20 */ 88761, 71755, 56483, 46273, 36291, |
| 1384 | /* -15 */ 29154, 23254, 18705, 14949, 11916, |
| 1385 | /* -10 */ 9548, 7620, 6100, 4904, 3906, |
| 1386 | /* -5 */ 3121, 2501, 1991, 1586, 1277, |
| 1387 | /* 0 */ 1024, 820, 655, 526, 423, |
| 1388 | /* 5 */ 335, 272, 215, 172, 137, |
| 1389 | /* 10 */ 110, 87, 70, 56, 45, |
| 1390 | /* 15 */ 36, 29, 23, 18, 15, |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1391 | }; |
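
/*
 * Concrete example of the ~10% rule above: two CPU-bound tasks at
 * nice 0 and nice 1 have weights 1024 and 820, so they get roughly
 * 1024/1844 ~= 55% and 820/1844 ~= 45% of the CPU -- about a 25%
 * relative difference.
 */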
| 1392 | |
Ingo Molnar | 5714d2d | 2007-07-16 09:46:31 +0200 | [diff] [blame] | 1393 | /* |
| 1394 | * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated. |
| 1395 | * |
| 1396 | * In cases where the weight does not change often, we can use the |
| 1397 | * precalculated inverse to speed up arithmetic by turning divisions
| 1398 | * into multiplications: |
| 1399 | */ |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1400 | static const u32 prio_to_wmult[40] = { |
Ingo Molnar | 254753d | 2007-08-09 11:16:51 +0200 | [diff] [blame] | 1401 | /* -20 */ 48388, 59856, 76040, 92818, 118348, |
| 1402 | /* -15 */ 147320, 184698, 229616, 287308, 360437, |
| 1403 | /* -10 */ 449829, 563644, 704093, 875809, 1099582, |
| 1404 | /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326, |
| 1405 | /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587, |
| 1406 | /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126, |
| 1407 | /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717, |
| 1408 | /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153, |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1409 | }; |
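
/*
 * Quick sanity check of how the table above relates to prio_to_weight[]
 * (rounded values): 2^32 / 1024 = 4194304 (the nice 0 entry) and
 * 2^32 / 820 ~= 5237765 (the nice 1 entry).
 */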
Peter Williams | 2dd73a4 | 2006-06-27 02:54:34 -0700 | [diff] [blame] | 1410 | |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 1411 | /* Time spent by the tasks of the cpu accounting group executing in ... */ |
| 1412 | enum cpuacct_stat_index { |
| 1413 | CPUACCT_STAT_USER, /* ... user mode */ |
| 1414 | CPUACCT_STAT_SYSTEM, /* ... kernel mode */ |
| 1415 | |
| 1416 | CPUACCT_STAT_NSTATS, |
| 1417 | }; |
| 1418 | |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 1419 | #ifdef CONFIG_CGROUP_CPUACCT |
| 1420 | static void cpuacct_charge(struct task_struct *tsk, u64 cputime); |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 1421 | static void cpuacct_update_stats(struct task_struct *tsk, |
| 1422 | enum cpuacct_stat_index idx, cputime_t val); |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 1423 | #else |
| 1424 | static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {} |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 1425 | static inline void cpuacct_update_stats(struct task_struct *tsk, |
| 1426 | enum cpuacct_stat_index idx, cputime_t val) {} |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 1427 | #endif |
| 1428 | |
Peter Zijlstra | 18d95a2 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 1429 | static inline void inc_cpu_load(struct rq *rq, unsigned long load) |
| 1430 | { |
| 1431 | update_load_add(&rq->load, load); |
| 1432 | } |
| 1433 | |
| 1434 | static inline void dec_cpu_load(struct rq *rq, unsigned long load) |
| 1435 | { |
| 1436 | update_load_sub(&rq->load, load); |
| 1437 | } |
| 1438 | |
Ingo Molnar | 7940ca3 | 2008-08-19 13:40:47 +0200 | [diff] [blame] | 1439 | #if (defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)) || defined(CONFIG_RT_GROUP_SCHED) |
Peter Zijlstra | eb75580 | 2008-08-19 12:33:05 +0200 | [diff] [blame] | 1440 | typedef int (*tg_visitor)(struct task_group *, void *); |
| 1441 | |
| 1442 | /* |
| 1443 | * Iterate the full tree, calling @down when first entering a node and @up when |
| 1444 | * leaving it for the final time. |
| 1445 | */ |
| 1446 | static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data) |
| 1447 | { |
| 1448 | struct task_group *parent, *child; |
| 1449 | int ret; |
| 1450 | |
| 1451 | rcu_read_lock(); |
| 1452 | parent = &root_task_group; |
| 1453 | down: |
| 1454 | ret = (*down)(parent, data); |
| 1455 | if (ret) |
| 1456 | goto out_unlock; |
| 1457 | list_for_each_entry_rcu(child, &parent->children, siblings) { |
| 1458 | parent = child; |
| 1459 | goto down; |
| 1460 | |
| 1461 | up: |
| 1462 | continue; |
| 1463 | } |
| 1464 | ret = (*up)(parent, data); |
| 1465 | if (ret) |
| 1466 | goto out_unlock; |
| 1467 | |
| 1468 | child = parent; |
| 1469 | parent = parent->parent; |
| 1470 | if (parent) |
| 1471 | goto up; |
| 1472 | out_unlock: |
| 1473 | rcu_read_unlock(); |
| 1474 | |
| 1475 | return ret; |
| 1476 | } |
| 1477 | |
| 1478 | static int tg_nop(struct task_group *tg, void *data) |
| 1479 | { |
| 1480 | return 0; |
| 1481 | } |
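
/*
 * Typical usage (see update_shares() and update_h_load() further down):
 * pass tg_nop for whichever direction the caller does not care about,
 * e.g. walk_tg_tree(tg_nop, tg_shares_up, sd) to run tg_shares_up()
 * bottom-up over the whole hierarchy.
 */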
| 1482 | #endif |
| 1483 | |
Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1484 | #ifdef CONFIG_SMP |
Peter Zijlstra | f5f08f3 | 2009-09-10 13:35:28 +0200 | [diff] [blame] | 1485 | /* Used instead of source_load when we know the type == 0 */ |
| 1486 | static unsigned long weighted_cpuload(const int cpu) |
| 1487 | { |
| 1488 | return cpu_rq(cpu)->load.weight; |
| 1489 | } |
| 1490 | |
| 1491 | /* |
| 1492 | * Return a low guess at the load of a migration-source cpu weighted |
| 1493 | * according to the scheduling class and "nice" value. |
| 1494 | * |
| 1495 | * We want to under-estimate the load of migration sources, to |
| 1496 | * balance conservatively. |
| 1497 | */ |
| 1498 | static unsigned long source_load(int cpu, int type) |
| 1499 | { |
| 1500 | struct rq *rq = cpu_rq(cpu); |
| 1501 | unsigned long total = weighted_cpuload(cpu); |
| 1502 | |
| 1503 | if (type == 0 || !sched_feat(LB_BIAS)) |
| 1504 | return total; |
| 1505 | |
| 1506 | return min(rq->cpu_load[type-1], total); |
| 1507 | } |
| 1508 | |
| 1509 | /* |
| 1510 | * Return a high guess at the load of a migration-target cpu weighted |
| 1511 | * according to the scheduling class and "nice" value. |
| 1512 | */ |
| 1513 | static unsigned long target_load(int cpu, int type) |
| 1514 | { |
| 1515 | struct rq *rq = cpu_rq(cpu); |
| 1516 | unsigned long total = weighted_cpuload(cpu); |
| 1517 | |
| 1518 | if (type == 0 || !sched_feat(LB_BIAS)) |
| 1519 | return total; |
| 1520 | |
| 1521 | return max(rq->cpu_load[type-1], total); |
| 1522 | } |
| 1523 | |
Peter Zijlstra | ae154be | 2009-09-10 14:40:57 +0200 | [diff] [blame] | 1524 | static unsigned long power_of(int cpu) |
| 1525 | { |
Peter Zijlstra | e51fd5e | 2010-05-31 12:37:30 +0200 | [diff] [blame] | 1526 | return cpu_rq(cpu)->cpu_power; |
Peter Zijlstra | ae154be | 2009-09-10 14:40:57 +0200 | [diff] [blame] | 1527 | } |
| 1528 | |
Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1529 | static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd); |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1530 | |
Peter Zijlstra | a8a51d5 | 2008-06-27 13:41:26 +0200 | [diff] [blame] | 1531 | static unsigned long cpu_avg_load_per_task(int cpu) |
| 1532 | { |
| 1533 | struct rq *rq = cpu_rq(cpu); |
Ingo Molnar | af6d596 | 2008-11-29 20:45:15 +0100 | [diff] [blame] | 1534 | unsigned long nr_running = ACCESS_ONCE(rq->nr_running); |
Peter Zijlstra | a8a51d5 | 2008-06-27 13:41:26 +0200 | [diff] [blame] | 1535 | |
Steven Rostedt | 4cd4262 | 2008-11-26 21:04:24 -0500 | [diff] [blame] | 1536 | if (nr_running) |
| 1537 | rq->avg_load_per_task = rq->load.weight / nr_running; |
Balbir Singh | a2d4777 | 2008-11-12 16:19:00 +0530 | [diff] [blame] | 1538 | else |
| 1539 | rq->avg_load_per_task = 0; |
Peter Zijlstra | a8a51d5 | 2008-06-27 13:41:26 +0200 | [diff] [blame] | 1540 | |
| 1541 | return rq->avg_load_per_task; |
| 1542 | } |
| 1543 | |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1544 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 1545 | |
Tejun Heo | 43cf38e | 2010-02-02 14:38:57 +0900 | [diff] [blame] | 1546 | static __read_mostly unsigned long __percpu *update_shares_data; |
Peter Zijlstra | 34d76c4 | 2009-08-27 13:08:56 +0200 | [diff] [blame] | 1547 | |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1548 | static void __set_se_shares(struct sched_entity *se, unsigned long shares); |
| 1549 | |
| 1550 | /* |
| 1551 | * Calculate and set the cpu's group shares. |
| 1552 | */ |
Peter Zijlstra | 34d76c4 | 2009-08-27 13:08:56 +0200 | [diff] [blame] | 1553 | static void update_group_shares_cpu(struct task_group *tg, int cpu, |
| 1554 | unsigned long sd_shares, |
| 1555 | unsigned long sd_rq_weight, |
Jiri Kosina | 4a6cc4b | 2009-10-29 00:26:00 +0900 | [diff] [blame] | 1556 | unsigned long *usd_rq_weight) |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1557 | { |
Peter Zijlstra | 34d76c4 | 2009-08-27 13:08:56 +0200 | [diff] [blame] | 1558 | unsigned long shares, rq_weight; |
Peter Zijlstra | a500427 | 2009-07-27 14:04:49 +0200 | [diff] [blame] | 1559 | int boost = 0; |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1560 | |
Jiri Kosina | 4a6cc4b | 2009-10-29 00:26:00 +0900 | [diff] [blame] | 1561 | rq_weight = usd_rq_weight[cpu]; |
Peter Zijlstra | a500427 | 2009-07-27 14:04:49 +0200 | [diff] [blame] | 1562 | if (!rq_weight) { |
| 1563 | boost = 1; |
| 1564 | rq_weight = NICE_0_LOAD; |
| 1565 | } |
Peter Zijlstra | c8cba85 | 2008-06-27 13:41:23 +0200 | [diff] [blame] | 1566 | |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1567 | /* |
Peter Zijlstra | a8af724 | 2009-08-21 13:58:54 +0200 | [diff] [blame] | 1568 | * \Sum_j shares_j * rq_weight_i |
| 1569 | * shares_i = ----------------------------- |
| 1570 | * \Sum_j rq_weight_j |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1571 | */ |
Ken Chen | ec4e0e2 | 2008-11-18 22:41:57 -0800 | [diff] [blame] | 1572 | shares = (sd_shares * rq_weight) / sd_rq_weight; |
Peter Zijlstra | ffda12a | 2008-10-17 19:27:02 +0200 | [diff] [blame] | 1573 | shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES); |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1574 | |
Peter Zijlstra | ffda12a | 2008-10-17 19:27:02 +0200 | [diff] [blame] | 1575 | if (abs(shares - tg->se[cpu]->load.weight) > |
| 1576 | sysctl_sched_shares_thresh) { |
| 1577 | struct rq *rq = cpu_rq(cpu); |
| 1578 | unsigned long flags; |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1579 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1580 | raw_spin_lock_irqsave(&rq->lock, flags); |
Peter Zijlstra | 34d76c4 | 2009-08-27 13:08:56 +0200 | [diff] [blame] | 1581 | tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight; |
Peter Zijlstra | a500427 | 2009-07-27 14:04:49 +0200 | [diff] [blame] | 1582 | tg->cfs_rq[cpu]->shares = boost ? 0 : shares; |
Peter Zijlstra | ffda12a | 2008-10-17 19:27:02 +0200 | [diff] [blame] | 1583 | __set_se_shares(tg->se[cpu], shares); |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1584 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
Peter Zijlstra | ffda12a | 2008-10-17 19:27:02 +0200 | [diff] [blame] | 1585 | } |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1586 | } |
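
/*
 * Worked example of the shares formula above (made-up numbers): with
 * sd_shares = 1024 spread over two cpus whose rq weights are 2048 and
 * 1024 (sd_rq_weight = 3072), the per-cpu shares come out as
 * 1024 * 2048 / 3072 = 682 and 1024 * 1024 / 3072 = 341 respectively.
 */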
| 1587 | |
| 1588 | /* |
Peter Zijlstra | c8cba85 | 2008-06-27 13:41:23 +0200 | [diff] [blame] | 1589 | * Re-compute each task group's per-cpu shares over the given domain.
| 1590 | * This needs to be done in a bottom-up fashion because the rq weight of a |
| 1591 | * parent group depends on the shares of its child groups. |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1592 | */ |
Peter Zijlstra | eb75580 | 2008-08-19 12:33:05 +0200 | [diff] [blame] | 1593 | static int tg_shares_up(struct task_group *tg, void *data) |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1594 | { |
Peter Zijlstra | cd8ad40 | 2009-12-03 18:00:07 +0100 | [diff] [blame] | 1595 | unsigned long weight, rq_weight = 0, sum_weight = 0, shares = 0; |
Jiri Kosina | 4a6cc4b | 2009-10-29 00:26:00 +0900 | [diff] [blame] | 1596 | unsigned long *usd_rq_weight; |
Peter Zijlstra | eb75580 | 2008-08-19 12:33:05 +0200 | [diff] [blame] | 1597 | struct sched_domain *sd = data; |
Peter Zijlstra | 34d76c4 | 2009-08-27 13:08:56 +0200 | [diff] [blame] | 1598 | unsigned long flags; |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1599 | int i; |
| 1600 | |
Peter Zijlstra | 34d76c4 | 2009-08-27 13:08:56 +0200 | [diff] [blame] | 1601 | if (!tg->se[0]) |
| 1602 | return 0; |
| 1603 | |
| 1604 | local_irq_save(flags); |
Jiri Kosina | 4a6cc4b | 2009-10-29 00:26:00 +0900 | [diff] [blame] | 1605 | usd_rq_weight = per_cpu_ptr(update_shares_data, smp_processor_id()); |
Peter Zijlstra | 34d76c4 | 2009-08-27 13:08:56 +0200 | [diff] [blame] | 1606 | |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 1607 | for_each_cpu(i, sched_domain_span(sd)) { |
Peter Zijlstra | 34d76c4 | 2009-08-27 13:08:56 +0200 | [diff] [blame] | 1608 | weight = tg->cfs_rq[i]->load.weight; |
Jiri Kosina | 4a6cc4b | 2009-10-29 00:26:00 +0900 | [diff] [blame] | 1609 | usd_rq_weight[i] = weight; |
Peter Zijlstra | 34d76c4 | 2009-08-27 13:08:56 +0200 | [diff] [blame] | 1610 | |
Peter Zijlstra | cd8ad40 | 2009-12-03 18:00:07 +0100 | [diff] [blame] | 1611 | rq_weight += weight; |
Ken Chen | ec4e0e2 | 2008-11-18 22:41:57 -0800 | [diff] [blame] | 1612 | /* |
| 1613 | * If there are currently no tasks on the cpu, pretend there
| 1614 | * is one of average load so that when a new task gets to |
| 1615 | * run here it will not get delayed by group starvation. |
| 1616 | */ |
Ken Chen | ec4e0e2 | 2008-11-18 22:41:57 -0800 | [diff] [blame] | 1617 | if (!weight) |
| 1618 | weight = NICE_0_LOAD; |
| 1619 | |
Peter Zijlstra | cd8ad40 | 2009-12-03 18:00:07 +0100 | [diff] [blame] | 1620 | sum_weight += weight; |
Peter Zijlstra | c8cba85 | 2008-06-27 13:41:23 +0200 | [diff] [blame] | 1621 | shares += tg->cfs_rq[i]->shares; |
| 1622 | } |
| 1623 | |
Peter Zijlstra | cd8ad40 | 2009-12-03 18:00:07 +0100 | [diff] [blame] | 1624 | if (!rq_weight) |
| 1625 | rq_weight = sum_weight; |
| 1626 | |
Peter Zijlstra | c8cba85 | 2008-06-27 13:41:23 +0200 | [diff] [blame] | 1627 | if ((!shares && rq_weight) || shares > tg->shares) |
| 1628 | shares = tg->shares; |
| 1629 | |
| 1630 | if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE)) |
| 1631 | shares = tg->shares; |
| 1632 | |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 1633 | for_each_cpu(i, sched_domain_span(sd)) |
Jiri Kosina | 4a6cc4b | 2009-10-29 00:26:00 +0900 | [diff] [blame] | 1634 | update_group_shares_cpu(tg, i, shares, rq_weight, usd_rq_weight); |
Peter Zijlstra | 34d76c4 | 2009-08-27 13:08:56 +0200 | [diff] [blame] | 1635 | |
| 1636 | local_irq_restore(flags); |
Peter Zijlstra | eb75580 | 2008-08-19 12:33:05 +0200 | [diff] [blame] | 1637 | |
| 1638 | return 0; |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1639 | } |
| 1640 | |
| 1641 | /* |
Peter Zijlstra | c8cba85 | 2008-06-27 13:41:23 +0200 | [diff] [blame] | 1642 | * Compute the cpu's hierarchical load factor for each task group. |
| 1643 | * This needs to be done in a top-down fashion because the load of a child |
| 1644 | * group is a fraction of its parent's load.
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1645 | */ |
Peter Zijlstra | eb75580 | 2008-08-19 12:33:05 +0200 | [diff] [blame] | 1646 | static int tg_load_down(struct task_group *tg, void *data) |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1647 | { |
Peter Zijlstra | c8cba85 | 2008-06-27 13:41:23 +0200 | [diff] [blame] | 1648 | unsigned long load; |
Peter Zijlstra | eb75580 | 2008-08-19 12:33:05 +0200 | [diff] [blame] | 1649 | long cpu = (long)data; |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1650 | |
Peter Zijlstra | c8cba85 | 2008-06-27 13:41:23 +0200 | [diff] [blame] | 1651 | if (!tg->parent) { |
| 1652 | load = cpu_rq(cpu)->load.weight; |
| 1653 | } else { |
| 1654 | load = tg->parent->cfs_rq[cpu]->h_load; |
| 1655 | load *= tg->cfs_rq[cpu]->shares; |
| 1656 | load /= tg->parent->cfs_rq[cpu]->load.weight + 1; |
| 1657 | } |
| 1658 | |
| 1659 | tg->cfs_rq[cpu]->h_load = load; |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1660 | |
Peter Zijlstra | eb75580 | 2008-08-19 12:33:05 +0200 | [diff] [blame] | 1661 | return 0; |
Peter Zijlstra | 4d8d595 | 2008-06-27 13:41:19 +0200 | [diff] [blame] | 1662 | } |
| 1663 | |
Peter Zijlstra | c8cba85 | 2008-06-27 13:41:23 +0200 | [diff] [blame] | 1664 | static void update_shares(struct sched_domain *sd) |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1665 | { |
Peter Zijlstra | e709715 | 2009-06-03 15:41:20 +0200 | [diff] [blame] | 1666 | s64 elapsed; |
| 1667 | u64 now; |
| 1668 | |
| 1669 | if (root_task_group_empty()) |
| 1670 | return; |
| 1671 | |
Peter Zijlstra | c676329 | 2010-05-25 10:48:51 +0200 | [diff] [blame] | 1672 | now = local_clock(); |
Peter Zijlstra | e709715 | 2009-06-03 15:41:20 +0200 | [diff] [blame] | 1673 | elapsed = now - sd->last_update; |
Peter Zijlstra | 2398f2c | 2008-06-27 13:41:35 +0200 | [diff] [blame] | 1674 | |
| 1675 | if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) { |
| 1676 | sd->last_update = now; |
Peter Zijlstra | eb75580 | 2008-08-19 12:33:05 +0200 | [diff] [blame] | 1677 | walk_tg_tree(tg_nop, tg_shares_up, sd); |
Peter Zijlstra | 2398f2c | 2008-06-27 13:41:35 +0200 | [diff] [blame] | 1678 | } |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1679 | } |
| 1680 | |
Peter Zijlstra | eb75580 | 2008-08-19 12:33:05 +0200 | [diff] [blame] | 1681 | static void update_h_load(long cpu) |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1682 | { |
Peter Zijlstra | eb75580 | 2008-08-19 12:33:05 +0200 | [diff] [blame] | 1683 | walk_tg_tree(tg_load_down, tg_nop, (void *)cpu); |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1684 | } |
| 1685 | |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1686 | #else |
| 1687 | |
Peter Zijlstra | c8cba85 | 2008-06-27 13:41:23 +0200 | [diff] [blame] | 1688 | static inline void update_shares(struct sched_domain *sd) |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1689 | { |
| 1690 | } |
| 1691 | |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1692 | #endif |
| 1693 | |
Gregory Haskins | 8f45e2b | 2008-12-29 09:39:51 -0500 | [diff] [blame] | 1694 | #ifdef CONFIG_PREEMPT |
| 1695 | |
Peter Zijlstra | b78bb86 | 2009-09-15 14:23:18 +0200 | [diff] [blame] | 1696 | static void double_rq_lock(struct rq *rq1, struct rq *rq2); |
| 1697 | |
Alexey Dobriyan | 70574a9 | 2008-11-28 22:08:00 +0300 | [diff] [blame] | 1698 | /* |
Gregory Haskins | 8f45e2b | 2008-12-29 09:39:51 -0500 | [diff] [blame] | 1699 | * fair double_lock_balance: Safely acquires both rq->locks in a fair |
| 1700 | * way at the expense of forcing extra atomic operations in all |
| 1701 | * invocations. This assures that the double_lock is acquired using the |
| 1702 | * same underlying policy as the spinlock_t on this architecture, which |
| 1703 | * reduces latency compared to the unfair variant below. However, it |
| 1704 | * also adds more overhead and therefore may reduce throughput. |
Alexey Dobriyan | 70574a9 | 2008-11-28 22:08:00 +0300 | [diff] [blame] | 1705 | */ |
Gregory Haskins | 8f45e2b | 2008-12-29 09:39:51 -0500 | [diff] [blame] | 1706 | static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) |
| 1707 | __releases(this_rq->lock) |
| 1708 | __acquires(busiest->lock) |
| 1709 | __acquires(this_rq->lock) |
| 1710 | { |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1711 | raw_spin_unlock(&this_rq->lock); |
Gregory Haskins | 8f45e2b | 2008-12-29 09:39:51 -0500 | [diff] [blame] | 1712 | double_rq_lock(this_rq, busiest); |
| 1713 | |
| 1714 | return 1; |
| 1715 | } |
| 1716 | |
| 1717 | #else |
| 1718 | /* |
| 1719 | * Unfair double_lock_balance: Optimizes throughput at the expense of |
| 1720 | * latency by eliminating extra atomic operations when the locks are |
| 1721 | * already in proper order on entry. This favors lower cpu-ids and will |
| 1722 | * grant the double lock to lower cpus over higher ids under contention, |
| 1723 | * regardless of entry order into the function. |
| 1724 | */ |
| 1725 | static int _double_lock_balance(struct rq *this_rq, struct rq *busiest) |
Alexey Dobriyan | 70574a9 | 2008-11-28 22:08:00 +0300 | [diff] [blame] | 1726 | __releases(this_rq->lock) |
| 1727 | __acquires(busiest->lock) |
| 1728 | __acquires(this_rq->lock) |
| 1729 | { |
| 1730 | int ret = 0; |
| 1731 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1732 | if (unlikely(!raw_spin_trylock(&busiest->lock))) { |
Alexey Dobriyan | 70574a9 | 2008-11-28 22:08:00 +0300 | [diff] [blame] | 1733 | if (busiest < this_rq) { |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1734 | raw_spin_unlock(&this_rq->lock); |
| 1735 | raw_spin_lock(&busiest->lock); |
| 1736 | raw_spin_lock_nested(&this_rq->lock, |
| 1737 | SINGLE_DEPTH_NESTING); |
Alexey Dobriyan | 70574a9 | 2008-11-28 22:08:00 +0300 | [diff] [blame] | 1738 | ret = 1; |
| 1739 | } else |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1740 | raw_spin_lock_nested(&busiest->lock, |
| 1741 | SINGLE_DEPTH_NESTING); |
Alexey Dobriyan | 70574a9 | 2008-11-28 22:08:00 +0300 | [diff] [blame] | 1742 | } |
| 1743 | return ret; |
| 1744 | } |
| 1745 | |
Gregory Haskins | 8f45e2b | 2008-12-29 09:39:51 -0500 | [diff] [blame] | 1746 | #endif /* CONFIG_PREEMPT */ |
| 1747 | |
| 1748 | /* |
| 1749 | * double_lock_balance - lock the busiest runqueue, this_rq is locked already. |
| 1750 | */ |
| 1751 | static int double_lock_balance(struct rq *this_rq, struct rq *busiest) |
| 1752 | { |
| 1753 | if (unlikely(!irqs_disabled())) { |
| 1754 | /* printk() doesn't work well under rq->lock */
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1755 | raw_spin_unlock(&this_rq->lock); |
Gregory Haskins | 8f45e2b | 2008-12-29 09:39:51 -0500 | [diff] [blame] | 1756 | BUG_ON(1); |
| 1757 | } |
| 1758 | |
| 1759 | return _double_lock_balance(this_rq, busiest); |
| 1760 | } |
| 1761 | |
Alexey Dobriyan | 70574a9 | 2008-11-28 22:08:00 +0300 | [diff] [blame] | 1762 | static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) |
| 1763 | __releases(busiest->lock) |
| 1764 | { |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1765 | raw_spin_unlock(&busiest->lock); |
Alexey Dobriyan | 70574a9 | 2008-11-28 22:08:00 +0300 | [diff] [blame] | 1766 | lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); |
| 1767 | } |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 1768 | |
| 1769 | /* |
| 1770 | * double_rq_lock - safely lock two runqueues |
| 1771 | * |
| 1772 | * Note this does not disable interrupts like task_rq_lock;
| 1773 | * you need to do so manually before calling. |
| 1774 | */ |
| 1775 | static void double_rq_lock(struct rq *rq1, struct rq *rq2) |
| 1776 | __acquires(rq1->lock) |
| 1777 | __acquires(rq2->lock) |
| 1778 | { |
| 1779 | BUG_ON(!irqs_disabled()); |
| 1780 | if (rq1 == rq2) { |
| 1781 | raw_spin_lock(&rq1->lock); |
| 1782 | __acquire(rq2->lock); /* Fake it out ;) */ |
| 1783 | } else { |
| 1784 | if (rq1 < rq2) { |
| 1785 | raw_spin_lock(&rq1->lock); |
| 1786 | raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); |
| 1787 | } else { |
| 1788 | raw_spin_lock(&rq2->lock); |
| 1789 | raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); |
| 1790 | } |
| 1791 | } |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 1792 | } |
| 1793 | |
| 1794 | /* |
| 1795 | * double_rq_unlock - safely unlock two runqueues |
| 1796 | * |
| 1797 | * Note this does not restore interrupts like task_rq_unlock;
| 1798 | * you need to do so manually after calling. |
| 1799 | */ |
| 1800 | static void double_rq_unlock(struct rq *rq1, struct rq *rq2) |
| 1801 | __releases(rq1->lock) |
| 1802 | __releases(rq2->lock) |
| 1803 | { |
| 1804 | raw_spin_unlock(&rq1->lock); |
| 1805 | if (rq1 != rq2) |
| 1806 | raw_spin_unlock(&rq2->lock); |
| 1807 | else |
| 1808 | __release(rq2->lock); |
| 1809 | } |
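
/*
 * Illustrative only -- a minimal sketch of how the pair above is used;
 * as noted in the comments, interrupt state is the caller's problem:
 *
 *	local_irq_disable();
 *	double_rq_lock(rq1, rq2);
 *	... move load/tasks between the two runqueues ...
 *	double_rq_unlock(rq1, rq2);
 *	local_irq_enable();
 */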
| 1810 | |
Peter Zijlstra | 18d95a2 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 1811 | #endif |
Peter Zijlstra | 18d95a2 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 1812 | |
| 1813 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 1814 | static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares) |
| 1815 | { |
Vegard Nossum | 3043209 | 2008-06-27 21:35:50 +0200 | [diff] [blame] | 1816 | #ifdef CONFIG_SMP |
Ingo Molnar | 34e83e8 | 2008-06-27 15:42:36 +0200 | [diff] [blame] | 1817 | cfs_rq->shares = shares; |
| 1818 | #endif |
Peter Zijlstra | 18d95a2 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 1819 | } |
| 1820 | #endif |
| 1821 | |
Peter Zijlstra | 74f5187 | 2010-04-22 21:50:19 +0200 | [diff] [blame] | 1822 | static void calc_load_account_idle(struct rq *this_rq); |
Christian Ehrhardt | 0bcdcf2 | 2009-11-30 12:16:46 +0100 | [diff] [blame] | 1823 | static void update_sysctl(void); |
Christian Ehrhardt | acb4a84 | 2009-11-30 12:16:48 +0100 | [diff] [blame] | 1824 | static int get_update_sysctl_factor(void); |
Venkatesh Pallipadi | fdf3e95 | 2010-05-17 18:14:43 -0700 | [diff] [blame] | 1825 | static void update_cpu_load(struct rq *this_rq); |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 1826 | |
Peter Zijlstra | cd29fe6 | 2009-11-27 17:32:46 +0100 | [diff] [blame] | 1827 | static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) |
| 1828 | { |
| 1829 | set_task_rq(p, cpu); |
| 1830 | #ifdef CONFIG_SMP |
| 1831 | /* |
| 1832 | * After ->cpu is set to a new value, task_rq_lock(p, ...) can be
| 1833 | * successfully executed on another CPU. We must ensure that updates of
| 1834 | * per-task data have been completed by this moment. |
| 1835 | */ |
| 1836 | smp_wmb(); |
| 1837 | task_thread_info(p)->cpu = cpu; |
| 1838 | #endif |
| 1839 | } |
Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1840 | |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 1841 | static const struct sched_class rt_sched_class; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1842 | |
Peter Zijlstra | 34f971f | 2010-09-22 13:53:15 +0200 | [diff] [blame] | 1843 | #define sched_class_highest (&stop_sched_class) |
Gregory Haskins | 1f11eb6a | 2008-06-04 15:04:05 -0400 | [diff] [blame] | 1844 | #define for_each_class(class) \ |
| 1845 | for (class = sched_class_highest; class; class = class->next) |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1846 | |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 1847 | #include "sched_stats.h" |
| 1848 | |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1849 | static void inc_nr_running(struct rq *rq) |
Ingo Molnar | 6363ca5 | 2008-05-29 11:28:57 +0200 | [diff] [blame] | 1850 | { |
| 1851 | rq->nr_running++; |
Ingo Molnar | 6363ca5 | 2008-05-29 11:28:57 +0200 | [diff] [blame] | 1852 | } |
| 1853 | |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1854 | static void dec_nr_running(struct rq *rq) |
Ingo Molnar | 9c21724 | 2007-08-02 17:41:40 +0200 | [diff] [blame] | 1855 | { |
| 1856 | rq->nr_running--; |
Ingo Molnar | 9c21724 | 2007-08-02 17:41:40 +0200 | [diff] [blame] | 1857 | } |
| 1858 | |
Ingo Molnar | 45bf76d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1859 | static void set_load_weight(struct task_struct *p) |
| 1860 | { |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1861 | /* |
| 1862 | * SCHED_IDLE tasks get minimal weight: |
| 1863 | */ |
| 1864 | if (p->policy == SCHED_IDLE) { |
| 1865 | p->se.load.weight = WEIGHT_IDLEPRIO; |
| 1866 | p->se.load.inv_weight = WMULT_IDLEPRIO; |
| 1867 | return; |
| 1868 | } |
| 1869 | |
| 1870 | p->se.load.weight = prio_to_weight[p->static_prio - MAX_RT_PRIO]; |
| 1871 | p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO]; |
Ingo Molnar | 45bf76d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1872 | } |
| 1873 | |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1874 | static void enqueue_task(struct rq *rq, struct task_struct *p, int flags) |
Gregory Haskins | 2087a1a | 2008-06-27 14:30:00 -0600 | [diff] [blame] | 1875 | { |
Mike Galbraith | a64692a | 2010-03-11 17:16:20 +0100 | [diff] [blame] | 1876 | update_rq_clock(rq); |
Ingo Molnar | 71f8bd4 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1877 | sched_info_queued(p); |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1878 | p->sched_class->enqueue_task(rq, p, flags); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1879 | p->se.on_rq = 1; |
| 1880 | } |
| 1881 | |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1882 | static void dequeue_task(struct rq *rq, struct task_struct *p, int flags) |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1883 | { |
Mike Galbraith | a64692a | 2010-03-11 17:16:20 +0100 | [diff] [blame] | 1884 | update_rq_clock(rq); |
Ankita Garg | 46ac22b | 2008-07-01 14:30:06 +0530 | [diff] [blame] | 1885 | sched_info_dequeued(p); |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1886 | p->sched_class->dequeue_task(rq, p, flags); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1887 | p->se.on_rq = 0; |
Ingo Molnar | 71f8bd4 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1888 | } |
| 1889 | |
| 1890 | /* |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 1891 | * activate_task - move a task to the runqueue. |
| 1892 | */ |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1893 | static void activate_task(struct rq *rq, struct task_struct *p, int flags) |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 1894 | { |
| 1895 | if (task_contributes_to_load(p)) |
| 1896 | rq->nr_uninterruptible--; |
| 1897 | |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1898 | enqueue_task(rq, p, flags); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 1899 | inc_nr_running(rq); |
| 1900 | } |
| 1901 | |
| 1902 | /* |
| 1903 | * deactivate_task - remove a task from the runqueue. |
| 1904 | */ |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1905 | static void deactivate_task(struct rq *rq, struct task_struct *p, int flags) |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 1906 | { |
| 1907 | if (task_contributes_to_load(p)) |
| 1908 | rq->nr_uninterruptible++; |
| 1909 | |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1910 | dequeue_task(rq, p, flags); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 1911 | dec_nr_running(rq); |
| 1912 | } |
| 1913 | |
Venkatesh Pallipadi | b52bfee | 2010-10-04 17:03:19 -0700 | [diff] [blame] | 1914 | #ifdef CONFIG_IRQ_TIME_ACCOUNTING |
| 1915 | |
Venkatesh Pallipadi | 305e683 | 2010-10-04 17:03:21 -0700 | [diff] [blame] | 1916 | /* |
| 1917 | * There are no locks covering percpu hardirq/softirq time. |
 | 1918 |  * They are only modified in account_system_vtime, on the corresponding CPU |
 | 1919 |  * with interrupts disabled. So, writes are safe. |
 | 1920 |  * They are read and saved off onto struct rq in update_rq_clock(). |
 | 1921 |  * This may result in another CPU reading this CPU's irq time and racing |
 | 1922 |  * with irq/account_system_vtime on this CPU. We would either get the old |
 | 1923 |  * or the new value (or a semi-updated value on 32 bit) with the side effect |
 | 1924 |  * of accounting a slice of irq time to the wrong task when an irq is in |
 | 1925 |  * progress while we read rq->clock. That is a worthy compromise in place |
 | 1926 |  * of having locks on each irq in account_system_time. |
| 1927 | */ |
Venkatesh Pallipadi | b52bfee | 2010-10-04 17:03:19 -0700 | [diff] [blame] | 1928 | static DEFINE_PER_CPU(u64, cpu_hardirq_time); |
| 1929 | static DEFINE_PER_CPU(u64, cpu_softirq_time); |
| 1930 | |
| 1931 | static DEFINE_PER_CPU(u64, irq_start_time); |
| 1932 | static int sched_clock_irqtime; |
| 1933 | |
| 1934 | void enable_sched_clock_irqtime(void) |
| 1935 | { |
| 1936 | sched_clock_irqtime = 1; |
| 1937 | } |
| 1938 | |
| 1939 | void disable_sched_clock_irqtime(void) |
| 1940 | { |
| 1941 | sched_clock_irqtime = 0; |
| 1942 | } |
| 1943 | |
Peter Zijlstra | fe44d62 | 2010-12-09 14:15:34 +0100 | [diff] [blame^] | 1944 | static inline u64 irq_time_cpu(int cpu) |
Venkatesh Pallipadi | 305e683 | 2010-10-04 17:03:21 -0700 | [diff] [blame] | 1945 | { |
Venkatesh Pallipadi | 305e683 | 2010-10-04 17:03:21 -0700 | [diff] [blame] | 1946 | return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu); |
| 1947 | } |
| 1948 | |
Peter Zijlstra | fe44d62 | 2010-12-09 14:15:34 +0100 | [diff] [blame^] | 1949 | /* |
| 1950 | * Called before incrementing preempt_count on {soft,}irq_enter |
| 1951 | * and before decrementing preempt_count on {soft,}irq_exit. |
| 1952 | */ |
Venkatesh Pallipadi | b52bfee | 2010-10-04 17:03:19 -0700 | [diff] [blame] | 1953 | void account_system_vtime(struct task_struct *curr) |
| 1954 | { |
| 1955 | unsigned long flags; |
Peter Zijlstra | fe44d62 | 2010-12-09 14:15:34 +0100 | [diff] [blame^] | 1956 | s64 delta; |
Venkatesh Pallipadi | b52bfee | 2010-10-04 17:03:19 -0700 | [diff] [blame] | 1957 | int cpu; |
Venkatesh Pallipadi | b52bfee | 2010-10-04 17:03:19 -0700 | [diff] [blame] | 1958 | |
| 1959 | if (!sched_clock_irqtime) |
| 1960 | return; |
| 1961 | |
| 1962 | local_irq_save(flags); |
| 1963 | |
Venkatesh Pallipadi | b52bfee | 2010-10-04 17:03:19 -0700 | [diff] [blame] | 1964 | cpu = smp_processor_id(); |
Peter Zijlstra | fe44d62 | 2010-12-09 14:15:34 +0100 | [diff] [blame^] | 1965 | delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time); |
| 1966 | __this_cpu_add(irq_start_time, delta); |
| 1967 | |
Venkatesh Pallipadi | b52bfee | 2010-10-04 17:03:19 -0700 | [diff] [blame] | 1968 | /* |
| 1969 | * We do not account for softirq time from ksoftirqd here. |
 | 1970 |  * We want to continue accounting softirq time to the ksoftirqd thread |
 | 1971 |  * in that case, so as not to confuse the scheduler with a special task |
 | 1972 |  * that does not consume any time but still wants to run. |
| 1973 | */ |
| 1974 | if (hardirq_count()) |
Peter Zijlstra | fe44d62 | 2010-12-09 14:15:34 +0100 | [diff] [blame^] | 1975 | __this_cpu_add(cpu_hardirq_time, delta); |
Venkatesh Pallipadi | b52bfee | 2010-10-04 17:03:19 -0700 | [diff] [blame] | 1976 | else if (in_serving_softirq() && !(curr->flags & PF_KSOFTIRQD)) |
Peter Zijlstra | fe44d62 | 2010-12-09 14:15:34 +0100 | [diff] [blame^] | 1977 | __this_cpu_add(cpu_softirq_time, delta); |
Venkatesh Pallipadi | b52bfee | 2010-10-04 17:03:19 -0700 | [diff] [blame] | 1978 | |
| 1979 | local_irq_restore(flags); |
| 1980 | } |
Ingo Molnar | b7dadc3 | 2010-10-18 20:00:37 +0200 | [diff] [blame] | 1981 | EXPORT_SYMBOL_GPL(account_system_vtime); |
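The accounting pattern above — read a clock, compute the delta since the last snapshot, advance the snapshot, and credit the delta to the bucket matching the current context — can be shown in plain C. Everything below (the clock source, the bucket names, the in_hardirq flag) is illustrative only, not the kernel's API:

#include <stdio.h>
#include <time.h>

/* Illustrative per-"CPU" buckets; the kernel uses percpu u64 counters. */
static unsigned long long hardirq_time, softirq_time, irq_start_time;

static unsigned long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long long)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* in_hardirq stands in for the hardirq_count()/in_serving_softirq() tests. */
static void account_irq_time(int in_hardirq)
{
	unsigned long long now = now_ns();
	unsigned long long delta = now - irq_start_time;

	irq_start_time += delta;	/* advance the snapshot */
	if (in_hardirq)
		hardirq_time += delta;
	else
		softirq_time += delta;
}

int main(void)
{
	irq_start_time = now_ns();
	account_irq_time(1);
	account_irq_time(0);
	printf("hardirq=%llu ns softirq=%llu ns\n", hardirq_time, softirq_time);
	return 0;
}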
Venkatesh Pallipadi | b52bfee | 2010-10-04 17:03:19 -0700 | [diff] [blame] | 1982 | |
Peter Zijlstra | fe44d62 | 2010-12-09 14:15:34 +0100 | [diff] [blame^] | 1983 | static void update_rq_clock_task(struct rq *rq, s64 delta) |
Venkatesh Pallipadi | aa48380 | 2010-10-04 17:03:22 -0700 | [diff] [blame] | 1984 | { |
Peter Zijlstra | fe44d62 | 2010-12-09 14:15:34 +0100 | [diff] [blame^] | 1985 | s64 irq_delta; |
| 1986 | |
| 1987 | irq_delta = irq_time_cpu(cpu_of(rq)) - rq->prev_irq_time; |
| 1988 | |
| 1989 | /* |
| 1990 | * Since irq_time is only updated on {soft,}irq_exit, we might run into |
| 1991 | * this case when a previous update_rq_clock() happened inside a |
| 1992 | * {soft,}irq region. |
| 1993 | * |
| 1994 | * When this happens, we stop ->clock_task and only update the |
| 1995 | * prev_irq_time stamp to account for the part that fit, so that a next |
| 1996 | * update will consume the rest. This ensures ->clock_task is |
| 1997 | * monotonic. |
| 1998 | * |
 | 1999 |  * It does, however, cause some slight misattribution of {soft,}irq |
| 2000 | * time, a more accurate solution would be to update the irq_time using |
| 2001 | * the current rq->clock timestamp, except that would require using |
| 2002 | * atomic ops. |
| 2003 | */ |
| 2004 | if (irq_delta > delta) |
| 2005 | irq_delta = delta; |
| 2006 | |
| 2007 | rq->prev_irq_time += irq_delta; |
| 2008 | delta -= irq_delta; |
| 2009 | rq->clock_task += delta; |
| 2010 | |
| 2011 | if (irq_delta && sched_feat(NONIRQ_POWER)) |
| 2012 | sched_rt_avg_update(rq, irq_delta); |
Venkatesh Pallipadi | aa48380 | 2010-10-04 17:03:22 -0700 | [diff] [blame] | 2013 | } |
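The clamping described in the comment above can be exercised in isolation: if the irq-time delta observed since the last update exceeds the wall-clock delta being applied, only part of it is consumed now and the remainder is picked up by the next update, keeping clock_task monotonic. A minimal sketch with made-up numbers (names are illustrative, not the rq fields themselves):

#include <stdio.h>

static long long clock_task, prev_irq_time;

/* Apply a wall-clock delta, subtracting irq time but never going backwards. */
static void update_clock_task(long long delta, long long irq_time_total)
{
	long long irq_delta = irq_time_total - prev_irq_time;

	if (irq_delta > delta)		/* never let clock_task move backwards */
		irq_delta = delta;

	prev_irq_time += irq_delta;	/* consume only what fits */
	delta -= irq_delta;
	clock_task += delta;
}

int main(void)
{
	/* 10us wall delta, but 15us of irq time piled up: clamp to 10us. */
	update_clock_task(10000, 15000);
	printf("clock_task=%lld prev_irq_time=%lld\n", clock_task, prev_irq_time);

	/* The next 10us wall delta consumes the remaining 5us of irq time. */
	update_clock_task(10000, 15000);
	printf("clock_task=%lld prev_irq_time=%lld\n", clock_task, prev_irq_time);
	return 0;
}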
| 2014 | |
Peter Zijlstra | fe44d62 | 2010-12-09 14:15:34 +0100 | [diff] [blame^] | 2015 | #else /* CONFIG_IRQ_TIME_ACCOUNTING */ |
Venkatesh Pallipadi | 305e683 | 2010-10-04 17:03:21 -0700 | [diff] [blame] | 2016 | |
Peter Zijlstra | fe44d62 | 2010-12-09 14:15:34 +0100 | [diff] [blame^] | 2017 | static void update_rq_clock_task(struct rq *rq, s64 delta) |
Venkatesh Pallipadi | 305e683 | 2010-10-04 17:03:21 -0700 | [diff] [blame] | 2018 | { |
Peter Zijlstra | fe44d62 | 2010-12-09 14:15:34 +0100 | [diff] [blame^] | 2019 | rq->clock_task += delta; |
Venkatesh Pallipadi | 305e683 | 2010-10-04 17:03:21 -0700 | [diff] [blame] | 2020 | } |
| 2021 | |
Peter Zijlstra | fe44d62 | 2010-12-09 14:15:34 +0100 | [diff] [blame^] | 2022 | #endif /* CONFIG_IRQ_TIME_ACCOUNTING */ |
Venkatesh Pallipadi | b52bfee | 2010-10-04 17:03:19 -0700 | [diff] [blame] | 2023 | |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2024 | #include "sched_idletask.c" |
| 2025 | #include "sched_fair.c" |
| 2026 | #include "sched_rt.c" |
Peter Zijlstra | 34f971f | 2010-09-22 13:53:15 +0200 | [diff] [blame] | 2027 | #include "sched_stoptask.c" |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2028 | #ifdef CONFIG_SCHED_DEBUG |
| 2029 | # include "sched_debug.c" |
| 2030 | #endif |
| 2031 | |
Peter Zijlstra | 34f971f | 2010-09-22 13:53:15 +0200 | [diff] [blame] | 2032 | void sched_set_stop_task(int cpu, struct task_struct *stop) |
| 2033 | { |
| 2034 | struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; |
| 2035 | struct task_struct *old_stop = cpu_rq(cpu)->stop; |
| 2036 | |
| 2037 | if (stop) { |
| 2038 | /* |
 | 2039 |  * Make it appear like a SCHED_FIFO task; it's something |
 | 2040 |  * userspace knows about and won't get confused by. |
| 2041 | * |
| 2042 | * Also, it will make PI more or less work without too |
| 2043 | * much confusion -- but then, stop work should not |
| 2044 | * rely on PI working anyway. |
| 2045 | */ |
| 2046 | sched_setscheduler_nocheck(stop, SCHED_FIFO, ¶m); |
| 2047 | |
| 2048 | stop->sched_class = &stop_sched_class; |
| 2049 | } |
| 2050 | |
| 2051 | cpu_rq(cpu)->stop = stop; |
| 2052 | |
| 2053 | if (old_stop) { |
| 2054 | /* |
| 2055 | * Reset it back to a normal scheduling class so that |
 | 2056 |  * it can die in peace. |
| 2057 | */ |
| 2058 | old_stop->sched_class = &rt_sched_class; |
| 2059 | } |
| 2060 | } |
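The stop task above is dressed up as SCHED_FIFO at the highest realtime priority via sched_setscheduler_nocheck(). The userspace counterpart of that knob is the POSIX sched_setscheduler() call; a small example (requires appropriate privileges, e.g. CAP_SYS_NICE or root):

#include <sched.h>
#include <stdio.h>

int main(void)
{
	/* Ask for SCHED_FIFO at the highest priority the policy allows. */
	struct sched_param param = {
		.sched_priority = sched_get_priority_max(SCHED_FIFO),
	};

	if (sched_setscheduler(0 /* calling process */, SCHED_FIFO, &param) == -1) {
		perror("sched_setscheduler");
		return 1;
	}
	printf("now SCHED_FIFO, priority %d\n", param.sched_priority);
	return 0;
}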
| 2061 | |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2062 | /* |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2063 | * __normal_prio - return the priority that is based on the static prio |
Ingo Molnar | 71f8bd4 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2064 | */ |
Ingo Molnar | 1453118 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2065 | static inline int __normal_prio(struct task_struct *p) |
| 2066 | { |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2067 | return p->static_prio; |
Ingo Molnar | 1453118 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2068 | } |
| 2069 | |
| 2070 | /* |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 2071 | * Calculate the expected normal priority: i.e. priority |
| 2072 | * without taking RT-inheritance into account. Might be |
| 2073 | * boosted by interactivity modifiers. Changes upon fork, |
| 2074 | * setprio syscalls, and whenever the interactivity |
| 2075 | * estimator recalculates. |
| 2076 | */ |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 2077 | static inline int normal_prio(struct task_struct *p) |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 2078 | { |
| 2079 | int prio; |
| 2080 | |
Ingo Molnar | e05606d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2081 | if (task_has_rt_policy(p)) |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 2082 | prio = MAX_RT_PRIO-1 - p->rt_priority; |
| 2083 | else |
| 2084 | prio = __normal_prio(p); |
| 2085 | return prio; |
| 2086 | } |
| 2087 | |
| 2088 | /* |
| 2089 | * Calculate the current priority, i.e. the priority |
| 2090 | * taken into account by the scheduler. This value might |
| 2091 | * be boosted by RT tasks, or might be boosted by |
| 2092 | * interactivity modifiers. Will be RT if the task got |
| 2093 | * RT-boosted. If not then it returns p->normal_prio. |
| 2094 | */ |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 2095 | static int effective_prio(struct task_struct *p) |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 2096 | { |
| 2097 | p->normal_prio = normal_prio(p); |
| 2098 | /* |
| 2099 | * If we are RT tasks or we were boosted to RT priority, |
| 2100 | * keep the priority unchanged. Otherwise, update priority |
| 2101 | * to the normal priority: |
| 2102 | */ |
| 2103 | if (!rt_prio(p->prio)) |
| 2104 | return p->normal_prio; |
| 2105 | return p->prio; |
| 2106 | } |
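Taken together, the helpers above collapse a task's attributes into the single prio value the scheduler compares: realtime tasks map their rt_priority into the low range (higher rt_priority means a numerically lower, more important prio), everything else keeps its nice-derived static_prio. A standalone sketch; the MAX_RT_PRIO and nice-offset constants mirror the usual kernel layout but are assumptions here, not quoted from this file:

#include <stdio.h>

#define MAX_RT_PRIO	100			/* assumed: realtime prios occupy 0..99 */
#define NICE_TO_PRIO(n)	(MAX_RT_PRIO + (n) + 20)

/* rt_policy: nonzero for SCHED_FIFO/SCHED_RR tasks. */
static int normal_prio(int rt_policy, int rt_priority, int static_prio)
{
	if (rt_policy)
		return MAX_RT_PRIO - 1 - rt_priority;	/* higher rt_priority -> lower prio */
	return static_prio;
}

int main(void)
{
	printf("SCHED_FIFO rt_priority 99 -> prio %d\n", normal_prio(1, 99, 0));
	printf("SCHED_FIFO rt_priority 1  -> prio %d\n", normal_prio(1, 1, 0));
	printf("SCHED_NORMAL nice 0       -> prio %d\n", normal_prio(0, 0, NICE_TO_PRIO(0)));
	printf("SCHED_NORMAL nice -5      -> prio %d\n", normal_prio(0, 0, NICE_TO_PRIO(-5)));
	return 0;
}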
| 2107 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2108 | /** |
| 2109 | * task_curr - is this task currently executing on a CPU? |
| 2110 | * @p: the task in question. |
| 2111 | */ |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 2112 | inline int task_curr(const struct task_struct *p) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2113 | { |
| 2114 | return cpu_curr(task_cpu(p)) == p; |
| 2115 | } |
| 2116 | |
Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 2117 | static inline void check_class_changed(struct rq *rq, struct task_struct *p, |
| 2118 | const struct sched_class *prev_class, |
| 2119 | int oldprio, int running) |
| 2120 | { |
| 2121 | if (prev_class != p->sched_class) { |
| 2122 | if (prev_class->switched_from) |
| 2123 | prev_class->switched_from(rq, p, running); |
| 2124 | p->sched_class->switched_to(rq, p, running); |
| 2125 | } else |
| 2126 | p->sched_class->prio_changed(rq, p, oldprio, running); |
| 2127 | } |
| 2128 | |
Peter Zijlstra | 1e5a740 | 2010-10-31 12:37:04 +0100 | [diff] [blame] | 2129 | static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) |
| 2130 | { |
| 2131 | const struct sched_class *class; |
| 2132 | |
| 2133 | if (p->sched_class == rq->curr->sched_class) { |
| 2134 | rq->curr->sched_class->check_preempt_curr(rq, p, flags); |
| 2135 | } else { |
| 2136 | for_each_class(class) { |
| 2137 | if (class == rq->curr->sched_class) |
| 2138 | break; |
| 2139 | if (class == p->sched_class) { |
| 2140 | resched_task(rq->curr); |
| 2141 | break; |
| 2142 | } |
| 2143 | } |
| 2144 | } |
| 2145 | |
| 2146 | /* |
| 2147 | * A queue event has occurred, and we're going to schedule. In |
 | 2148 |  * this case, we can save a useless back-to-back clock update. |
| 2149 | */ |
Mike Galbraith | f26f9af | 2010-12-08 11:05:42 +0100 | [diff] [blame] | 2150 | if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr)) |
Peter Zijlstra | 1e5a740 | 2010-10-31 12:37:04 +0100 | [diff] [blame] | 2151 | rq->skip_clock_update = 1; |
| 2152 | } |
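The loop above walks the scheduling classes from highest to lowest and reschedules as soon as it meets the waking task's class before the current task's, i.e. a task of a strictly higher class always preempts. A toy illustration of that ordering walk; the class names are placeholders for the class list built elsewhere in this file:

#include <stdio.h>
#include <string.h>

/* Classes listed from highest to lowest, mirroring for_each_class(). */
static const char *classes[] = { "stop", "rt", "fair", "idle" };

/* Return 1 if a waking task of class @wakee should preempt one of class @curr. */
static int should_resched(const char *wakee, const char *curr)
{
	size_t i;

	for (i = 0; i < sizeof(classes) / sizeof(classes[0]); i++) {
		if (strcmp(classes[i], curr) == 0)
			return 0;	/* met current's class first: no preemption */
		if (strcmp(classes[i], wakee) == 0)
			return 1;	/* wakee's class is higher: reschedule */
	}
	return 0;
}

int main(void)
{
	printf("rt wakes while fair runs: resched=%d\n", should_resched("rt", "fair"));
	printf("fair wakes while rt runs: resched=%d\n", should_resched("fair", "rt"));
	return 0;
}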
| 2153 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2154 | #ifdef CONFIG_SMP |
Ingo Molnar | cc36773 | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 2155 | /* |
| 2156 | * Is this task likely cache-hot: |
| 2157 | */ |
Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 2158 | static int |
Ingo Molnar | cc36773 | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 2159 | task_hot(struct task_struct *p, u64 now, struct sched_domain *sd) |
| 2160 | { |
| 2161 | s64 delta; |
| 2162 | |
Peter Zijlstra | e6c8fba | 2009-12-16 18:04:33 +0100 | [diff] [blame] | 2163 | if (p->sched_class != &fair_sched_class) |
| 2164 | return 0; |
| 2165 | |
Nikhil Rao | ef8002f | 2010-10-13 12:09:35 -0700 | [diff] [blame] | 2166 | if (unlikely(p->policy == SCHED_IDLE)) |
| 2167 | return 0; |
| 2168 | |
Ingo Molnar | f540a60 | 2008-03-15 17:10:34 +0100 | [diff] [blame] | 2169 | /* |
| 2170 | * Buddy candidates are cache hot: |
| 2171 | */ |
Mike Galbraith | f685cea | 2009-10-23 23:09:22 +0200 | [diff] [blame] | 2172 | if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running && |
Peter Zijlstra | 4793241 | 2008-11-04 21:25:09 +0100 | [diff] [blame] | 2173 | (&p->se == cfs_rq_of(&p->se)->next || |
| 2174 | &p->se == cfs_rq_of(&p->se)->last)) |
Ingo Molnar | f540a60 | 2008-03-15 17:10:34 +0100 | [diff] [blame] | 2175 | return 1; |
| 2176 | |
Ingo Molnar | 6bc1665 | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 2177 | if (sysctl_sched_migration_cost == -1) |
| 2178 | return 1; |
| 2179 | if (sysctl_sched_migration_cost == 0) |
| 2180 | return 0; |
| 2181 | |
Ingo Molnar | cc36773 | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 2182 | delta = now - p->se.exec_start; |
| 2183 | |
| 2184 | return delta < (s64)sysctl_sched_migration_cost; |
| 2185 | } |
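The final test above is just a recency check: a task whose last execution ended within sysctl_sched_migration_cost nanoseconds is assumed to still have warm caches. A tiny sketch; the threshold value used here is an assumption about the default, and the tunable is adjustable on a real system:

#include <stdio.h>

/* Assumed default threshold, in nanoseconds. */
static long long migration_cost_ns = 500000;

/* A task is "cache hot" if it ran more recently than the threshold. */
static int task_hot(long long now_ns, long long exec_start_ns)
{
	return (now_ns - exec_start_ns) < migration_cost_ns;
}

int main(void)
{
	printf("ran 0.1ms ago: hot=%d\n", task_hot(1000000, 900000));
	printf("ran 2ms ago:   hot=%d\n", task_hot(3000000, 1000000));
	return 0;
}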
| 2186 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2187 | void set_task_cpu(struct task_struct *p, unsigned int new_cpu) |
Ingo Molnar | c65cc87 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2188 | { |
Peter Zijlstra | e291200 | 2009-12-16 18:04:36 +0100 | [diff] [blame] | 2189 | #ifdef CONFIG_SCHED_DEBUG |
| 2190 | /* |
 | 2191 |  * We should never call set_task_cpu() on a blocked task; |
| 2192 | * ttwu() will sort out the placement. |
| 2193 | */ |
Peter Zijlstra | 077614e | 2009-12-17 13:16:31 +0100 | [diff] [blame] | 2194 | WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING && |
| 2195 | !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)); |
Peter Zijlstra | e291200 | 2009-12-16 18:04:36 +0100 | [diff] [blame] | 2196 | #endif |
| 2197 | |
Mathieu Desnoyers | de1d728 | 2009-05-05 16:49:59 +0800 | [diff] [blame] | 2198 | trace_sched_migrate_task(p, new_cpu); |
Peter Zijlstra | cbc34ed | 2008-12-10 08:08:22 +0100 | [diff] [blame] | 2199 | |
Peter Zijlstra | 0c69774 | 2009-12-22 15:43:19 +0100 | [diff] [blame] | 2200 | if (task_cpu(p) != new_cpu) { |
| 2201 | p->se.nr_migrations++; |
| 2202 | perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0); |
| 2203 | } |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2204 | |
| 2205 | __set_task_cpu(p, new_cpu); |
Ingo Molnar | c65cc87 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2206 | } |
| 2207 | |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 2208 | struct migration_arg { |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 2209 | struct task_struct *task; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2210 | int dest_cpu; |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 2211 | }; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2212 | |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 2213 | static int migration_cpu_stop(void *data); |
| 2214 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2215 | /* |
| 2216 | * The task's runqueue lock must be held. |
 | 2217 |  * Returns true if you have to wait for the migration thread. |
| 2218 | */ |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 2219 | static bool migrate_task(struct task_struct *p, int dest_cpu) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2220 | { |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 2221 | struct rq *rq = task_rq(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2222 | |
| 2223 | /* |
| 2224 | * If the task is not on a runqueue (and not running), then |
Peter Zijlstra | e291200 | 2009-12-16 18:04:36 +0100 | [diff] [blame] | 2225 | * the next wake-up will properly place the task. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2226 | */ |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 2227 | return p->se.on_rq || task_running(rq, p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2228 | } |
| 2229 | |
| 2230 | /* |
| 2231 | * wait_task_inactive - wait for a thread to unschedule. |
| 2232 | * |
Roland McGrath | 85ba2d8 | 2008-07-25 19:45:58 -0700 | [diff] [blame] | 2233 | * If @match_state is nonzero, it's the @p->state value just checked and |
| 2234 | * not expected to change. If it changes, i.e. @p might have woken up, |
| 2235 | * then return zero. When we succeed in waiting for @p to be off its CPU, |
| 2236 | * we return a positive number (its total switch count). If a second call |
| 2237 | * a short while later returns the same number, the caller can be sure that |
| 2238 | * @p has remained unscheduled the whole time. |
| 2239 | * |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2240 | * The caller must ensure that the task *will* unschedule sometime soon, |
| 2241 | * else this function might spin for a *long* time. This function can't |
| 2242 | * be called with interrupts off, or it may introduce deadlock with |
| 2243 | * smp_call_function() if an IPI is sent by the same process we are |
| 2244 | * waiting to become inactive. |
| 2245 | */ |
Roland McGrath | 85ba2d8 | 2008-07-25 19:45:58 -0700 | [diff] [blame] | 2246 | unsigned long wait_task_inactive(struct task_struct *p, long match_state) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2247 | { |
| 2248 | unsigned long flags; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2249 | int running, on_rq; |
Roland McGrath | 85ba2d8 | 2008-07-25 19:45:58 -0700 | [diff] [blame] | 2250 | unsigned long ncsw; |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 2251 | struct rq *rq; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2252 | |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 2253 | for (;;) { |
| 2254 | /* |
| 2255 | * We do the initial early heuristics without holding |
| 2256 | * any task-queue locks at all. We'll only try to get |
| 2257 | * the runqueue lock when things look like they will |
| 2258 | * work out! |
| 2259 | */ |
| 2260 | rq = task_rq(p); |
Linus Torvalds | fa490cf | 2007-06-18 09:34:40 -0700 | [diff] [blame] | 2261 | |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 2262 | /* |
| 2263 | * If the task is actively running on another CPU |
| 2264 | * still, just relax and busy-wait without holding |
| 2265 | * any locks. |
| 2266 | * |
| 2267 | * NOTE! Since we don't hold any locks, it's not |
 | 2268 |  * even guaranteed that "rq" stays the right runqueue! |
| 2269 | * But we don't care, since "task_running()" will |
| 2270 | * return false if the runqueue has changed and p |
| 2271 | * is actually now running somewhere else! |
| 2272 | */ |
Roland McGrath | 85ba2d8 | 2008-07-25 19:45:58 -0700 | [diff] [blame] | 2273 | while (task_running(rq, p)) { |
| 2274 | if (match_state && unlikely(p->state != match_state)) |
| 2275 | return 0; |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 2276 | cpu_relax(); |
Roland McGrath | 85ba2d8 | 2008-07-25 19:45:58 -0700 | [diff] [blame] | 2277 | } |
Linus Torvalds | fa490cf | 2007-06-18 09:34:40 -0700 | [diff] [blame] | 2278 | |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 2279 | /* |
| 2280 | * Ok, time to look more closely! We need the rq |
| 2281 | * lock now, to be *sure*. If we're wrong, we'll |
| 2282 | * just go back and repeat. |
| 2283 | */ |
| 2284 | rq = task_rq_lock(p, &flags); |
Peter Zijlstra | 27a9da6 | 2010-05-04 20:36:56 +0200 | [diff] [blame] | 2285 | trace_sched_wait_task(p); |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 2286 | running = task_running(rq, p); |
| 2287 | on_rq = p->se.on_rq; |
Roland McGrath | 85ba2d8 | 2008-07-25 19:45:58 -0700 | [diff] [blame] | 2288 | ncsw = 0; |
Oleg Nesterov | f31e11d | 2008-08-20 16:54:44 -0700 | [diff] [blame] | 2289 | if (!match_state || p->state == match_state) |
Oleg Nesterov | 93dcf55 | 2008-08-20 16:54:44 -0700 | [diff] [blame] | 2290 | ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 2291 | task_rq_unlock(rq, &flags); |
Linus Torvalds | fa490cf | 2007-06-18 09:34:40 -0700 | [diff] [blame] | 2292 | |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 2293 | /* |
Roland McGrath | 85ba2d8 | 2008-07-25 19:45:58 -0700 | [diff] [blame] | 2294 | * If it changed from the expected state, bail out now. |
| 2295 | */ |
| 2296 | if (unlikely(!ncsw)) |
| 2297 | break; |
| 2298 | |
| 2299 | /* |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 2300 | * Was it really running after all now that we |
| 2301 | * checked with the proper locks actually held? |
| 2302 | * |
 | 2303 |  * Oops. Go back and try again. |
| 2304 | */ |
| 2305 | if (unlikely(running)) { |
| 2306 | cpu_relax(); |
| 2307 | continue; |
| 2308 | } |
| 2309 | |
| 2310 | /* |
| 2311 | * It's not enough that it's not actively running, |
| 2312 | * it must be off the runqueue _entirely_, and not |
| 2313 | * preempted! |
| 2314 | * |
Luis Henriques | 80dd99b | 2009-03-16 19:58:09 +0000 | [diff] [blame] | 2315 | * So if it was still runnable (but just not actively |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 2316 | * running right now), it's preempted, and we should |
| 2317 | * yield - it could be a while. |
| 2318 | */ |
| 2319 | if (unlikely(on_rq)) { |
| 2320 | schedule_timeout_uninterruptible(1); |
| 2321 | continue; |
| 2322 | } |
| 2323 | |
| 2324 | /* |
| 2325 | * Ahh, all good. It wasn't running, and it wasn't |
| 2326 | * runnable, which means that it will never become |
| 2327 | * running in the future either. We're all done! |
| 2328 | */ |
| 2329 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2330 | } |
Roland McGrath | 85ba2d8 | 2008-07-25 19:45:58 -0700 | [diff] [blame] | 2331 | |
| 2332 | return ncsw; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2333 | } |
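The "ncsw = p->nvcsw | LONG_MIN" line above packs two facts into one return value: the low bits carry the voluntary context-switch count, and the forced-on sign bit guarantees the result is nonzero even when that count is zero, so callers can treat a return of 0 as "state mismatch, bailed out". A standalone sketch of the encoding:

#include <stdio.h>
#include <limits.h>

/* Encode a switch count so the result is never 0 (0 means "bailed out"). */
static unsigned long encode_ncsw(unsigned long nvcsw)
{
	return nvcsw | LONG_MIN;	/* sets the MSB */
}

int main(void)
{
	unsigned long a = encode_ncsw(0);	/* still nonzero */
	unsigned long b = encode_ncsw(42);
	unsigned long c = encode_ncsw(42);

	printf("count 0 encodes to %#lx (nonzero: %d)\n", a, a != 0);
	/* Two equal samples => no voluntary context switch in between. */
	printf("samples equal: %d\n", b == c);
	return 0;
}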
| 2334 | |
| 2335 | /*** |
| 2336 | * kick_process - kick a running thread to enter/exit the kernel |
| 2337 | * @p: the to-be-kicked thread |
| 2338 | * |
| 2339 | * Cause a process which is running on another CPU to enter |
 | 2340 |  * kernel-mode, without any delay (to get signals handled). |
| 2341 | * |
 | 2342 |  * NOTE: this function doesn't have to take the runqueue lock, |
| 2343 | * because all it wants to ensure is that the remote task enters |
| 2344 | * the kernel. If the IPI races and the task has been migrated |
| 2345 | * to another CPU then no harm is done and the purpose has been |
| 2346 | * achieved as well. |
| 2347 | */ |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 2348 | void kick_process(struct task_struct *p) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2349 | { |
| 2350 | int cpu; |
| 2351 | |
| 2352 | preempt_disable(); |
| 2353 | cpu = task_cpu(p); |
| 2354 | if ((cpu != smp_processor_id()) && task_curr(p)) |
| 2355 | smp_send_reschedule(cpu); |
| 2356 | preempt_enable(); |
| 2357 | } |
Rusty Russell | b43e352 | 2009-06-12 22:27:00 -0600 | [diff] [blame] | 2358 | EXPORT_SYMBOL_GPL(kick_process); |
Nick Piggin | 476d139 | 2005-06-25 14:57:29 -0700 | [diff] [blame] | 2359 | #endif /* CONFIG_SMP */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2360 | |
Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 2361 | /** |
| 2362 | * task_oncpu_function_call - call a function on the cpu on which a task runs |
| 2363 | * @p: the task to evaluate |
| 2364 | * @func: the function to be called |
| 2365 | * @info: the function call argument |
| 2366 | * |
| 2367 | * Calls the function @func when the task is currently running. This might |
 | 2368 |  * be on the current CPU, in which case the function is called directly. |
| 2369 | */ |
| 2370 | void task_oncpu_function_call(struct task_struct *p, |
| 2371 | void (*func) (void *info), void *info) |
| 2372 | { |
| 2373 | int cpu; |
| 2374 | |
| 2375 | preempt_disable(); |
| 2376 | cpu = task_cpu(p); |
| 2377 | if (task_curr(p)) |
| 2378 | smp_call_function_single(cpu, func, info, 1); |
| 2379 | preempt_enable(); |
| 2380 | } |
| 2381 | |
Peter Zijlstra | 970b13b | 2009-11-25 13:31:39 +0100 | [diff] [blame] | 2382 | #ifdef CONFIG_SMP |
Oleg Nesterov | 30da688 | 2010-03-15 10:10:19 +0100 | [diff] [blame] | 2383 | /* |
| 2384 | * ->cpus_allowed is protected by either TASK_WAKING or rq->lock held. |
| 2385 | */ |
Peter Zijlstra | 5da9a0f | 2009-12-16 18:04:38 +0100 | [diff] [blame] | 2386 | static int select_fallback_rq(int cpu, struct task_struct *p) |
| 2387 | { |
| 2388 | int dest_cpu; |
| 2389 | const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu)); |
| 2390 | |
| 2391 | /* Look for allowed, online CPU in same node. */ |
| 2392 | for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask) |
| 2393 | if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) |
| 2394 | return dest_cpu; |
| 2395 | |
| 2396 | /* Any allowed, online CPU? */ |
| 2397 | dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask); |
| 2398 | if (dest_cpu < nr_cpu_ids) |
| 2399 | return dest_cpu; |
| 2400 | |
| 2401 | /* No more Mr. Nice Guy. */ |
Oleg Nesterov | 897f0b3 | 2010-03-15 10:10:03 +0100 | [diff] [blame] | 2402 | if (unlikely(dest_cpu >= nr_cpu_ids)) { |
Oleg Nesterov | 9084bb8 | 2010-03-15 10:10:27 +0100 | [diff] [blame] | 2403 | dest_cpu = cpuset_cpus_allowed_fallback(p); |
Peter Zijlstra | 5da9a0f | 2009-12-16 18:04:38 +0100 | [diff] [blame] | 2404 | /* |
| 2405 | * Don't tell them about moving exiting tasks or |
| 2406 | * kernel threads (both mm NULL), since they never |
 | 2407 |  * leave the kernel. |
| 2408 | */ |
| 2409 | if (p->mm && printk_ratelimit()) { |
| 2410 | printk(KERN_INFO "process %d (%s) no " |
| 2411 | "longer affine to cpu%d\n", |
| 2412 | task_pid_nr(p), p->comm, cpu); |
| 2413 | } |
| 2414 | } |
| 2415 | |
| 2416 | return dest_cpu; |
| 2417 | } |
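select_fallback_rq() above tries progressively weaker constraints: an allowed online CPU in the same node, then any allowed online CPU, and finally the cpuset fallback. The same cascade over plain bitmasks, purely as an illustration — the kernel uses cpumask_t and the cpuset code, not these helpers, and step three here just grabs any online CPU:

#include <stdio.h>

/* Each bit is a CPU; a tiny stand-in for cpumask_t. */
typedef unsigned int mask_t;

static int first_cpu(mask_t m)
{
	int cpu;

	for (cpu = 0; cpu < 32; cpu++)
		if (m & (1u << cpu))
			return cpu;
	return -1;	/* like cpu >= nr_cpu_ids */
}

static int select_fallback_cpu(mask_t allowed, mask_t online, mask_t same_node)
{
	int cpu;

	/* 1) allowed, online CPU in the same node */
	cpu = first_cpu(allowed & online & same_node);
	if (cpu >= 0)
		return cpu;
	/* 2) any allowed, online CPU */
	cpu = first_cpu(allowed & online);
	if (cpu >= 0)
		return cpu;
	/* 3) no more Mr. Nice Guy: take any online CPU */
	return first_cpu(online);
}

int main(void)
{
	/* allowed = {1,2}, online = {0,2,3}, same node = {0,1} -> picks 2 */
	printf("fallback cpu = %d\n", select_fallback_cpu(0x6, 0xd, 0x3));
	return 0;
}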
| 2418 | |
Peter Zijlstra | e291200 | 2009-12-16 18:04:36 +0100 | [diff] [blame] | 2419 | /* |
Oleg Nesterov | 30da688 | 2010-03-15 10:10:19 +0100 | [diff] [blame] | 2420 | * The caller (fork, wakeup) owns TASK_WAKING, ->cpus_allowed is stable. |
Peter Zijlstra | e291200 | 2009-12-16 18:04:36 +0100 | [diff] [blame] | 2421 | */ |
Peter Zijlstra | 970b13b | 2009-11-25 13:31:39 +0100 | [diff] [blame] | 2422 | static inline |
Peter Zijlstra | 0017d73 | 2010-03-24 18:34:10 +0100 | [diff] [blame] | 2423 | int select_task_rq(struct rq *rq, struct task_struct *p, int sd_flags, int wake_flags) |
Peter Zijlstra | 970b13b | 2009-11-25 13:31:39 +0100 | [diff] [blame] | 2424 | { |
Peter Zijlstra | 0017d73 | 2010-03-24 18:34:10 +0100 | [diff] [blame] | 2425 | int cpu = p->sched_class->select_task_rq(rq, p, sd_flags, wake_flags); |
Peter Zijlstra | e291200 | 2009-12-16 18:04:36 +0100 | [diff] [blame] | 2426 | |
| 2427 | /* |
| 2428 | * In order not to call set_task_cpu() on a blocking task we need |
| 2429 | * to rely on ttwu() to place the task on a valid ->cpus_allowed |
| 2430 | * cpu. |
| 2431 | * |
| 2432 | * Since this is common to all placement strategies, this lives here. |
| 2433 | * |
| 2434 | * [ this allows ->select_task() to simply return task_cpu(p) and |
| 2435 | * not worry about this generic constraint ] |
| 2436 | */ |
| 2437 | if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) || |
Peter Zijlstra | 70f1120 | 2009-12-20 17:36:27 +0100 | [diff] [blame] | 2438 | !cpu_online(cpu))) |
Peter Zijlstra | 5da9a0f | 2009-12-16 18:04:38 +0100 | [diff] [blame] | 2439 | cpu = select_fallback_rq(task_cpu(p), p); |
Peter Zijlstra | e291200 | 2009-12-16 18:04:36 +0100 | [diff] [blame] | 2440 | |
| 2441 | return cpu; |
Peter Zijlstra | 970b13b | 2009-11-25 13:31:39 +0100 | [diff] [blame] | 2442 | } |
Mike Galbraith | 09a40af | 2010-04-15 07:29:59 +0200 | [diff] [blame] | 2443 | |
| 2444 | static void update_avg(u64 *avg, u64 sample) |
| 2445 | { |
| 2446 | s64 diff = sample - *avg; |
| 2447 | *avg += diff >> 3; |
| 2448 | } |
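update_avg() above is an exponential moving average with weight 1/8: each sample moves the average one eighth of the remaining distance toward itself, done in integer arithmetic with a signed shift. The same helper exercised standalone (it is used a little further down to smooth rq->avg_idle):

#include <stdio.h>
#include <stdint.h>

/* Integer EMA: avg += (sample - avg) / 8, matching update_avg() above. */
static void update_avg(uint64_t *avg, uint64_t sample)
{
	int64_t diff = sample - *avg;

	*avg += diff >> 3;
}

int main(void)
{
	uint64_t avg = 0;
	int i;

	/* Feed a constant sample: the average converges toward it. */
	for (i = 0; i < 40; i++)
		update_avg(&avg, 1000);
	printf("avg after 40 samples of 1000: %llu\n",
	       (unsigned long long)avg);
	return 0;
}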
Peter Zijlstra | 970b13b | 2009-11-25 13:31:39 +0100 | [diff] [blame] | 2449 | #endif |
| 2450 | |
Tejun Heo | 9ed3811 | 2009-12-03 15:08:03 +0900 | [diff] [blame] | 2451 | static inline void ttwu_activate(struct task_struct *p, struct rq *rq, |
| 2452 | bool is_sync, bool is_migrate, bool is_local, |
| 2453 | unsigned long en_flags) |
| 2454 | { |
| 2455 | schedstat_inc(p, se.statistics.nr_wakeups); |
| 2456 | if (is_sync) |
| 2457 | schedstat_inc(p, se.statistics.nr_wakeups_sync); |
| 2458 | if (is_migrate) |
| 2459 | schedstat_inc(p, se.statistics.nr_wakeups_migrate); |
| 2460 | if (is_local) |
| 2461 | schedstat_inc(p, se.statistics.nr_wakeups_local); |
| 2462 | else |
| 2463 | schedstat_inc(p, se.statistics.nr_wakeups_remote); |
| 2464 | |
| 2465 | activate_task(rq, p, en_flags); |
| 2466 | } |
| 2467 | |
| 2468 | static inline void ttwu_post_activation(struct task_struct *p, struct rq *rq, |
| 2469 | int wake_flags, bool success) |
| 2470 | { |
| 2471 | trace_sched_wakeup(p, success); |
| 2472 | check_preempt_curr(rq, p, wake_flags); |
| 2473 | |
| 2474 | p->state = TASK_RUNNING; |
| 2475 | #ifdef CONFIG_SMP |
| 2476 | if (p->sched_class->task_woken) |
| 2477 | p->sched_class->task_woken(rq, p); |
| 2478 | |
| 2479 | if (unlikely(rq->idle_stamp)) { |
| 2480 | u64 delta = rq->clock - rq->idle_stamp; |
| 2481 | u64 max = 2*sysctl_sched_migration_cost; |
| 2482 | |
| 2483 | if (delta > max) |
| 2484 | rq->avg_idle = max; |
| 2485 | else |
| 2486 | update_avg(&rq->avg_idle, delta); |
| 2487 | rq->idle_stamp = 0; |
| 2488 | } |
| 2489 | #endif |
Tejun Heo | 21aa9af | 2010-06-08 21:40:37 +0200 | [diff] [blame] | 2490 | /* if a worker is waking up, notify workqueue */ |
| 2491 | if ((p->flags & PF_WQ_WORKER) && success) |
| 2492 | wq_worker_waking_up(p, cpu_of(rq)); |
Tejun Heo | 9ed3811 | 2009-12-03 15:08:03 +0900 | [diff] [blame] | 2493 | } |
| 2494 | |
| 2495 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2496 | * try_to_wake_up - wake up a thread |
Tejun Heo | 9ed3811 | 2009-12-03 15:08:03 +0900 | [diff] [blame] | 2497 | * @p: the thread to be awakened |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2498 | * @state: the mask of task states that can be woken |
Tejun Heo | 9ed3811 | 2009-12-03 15:08:03 +0900 | [diff] [blame] | 2499 | * @wake_flags: wake modifier flags (WF_*) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2500 | * |
| 2501 | * Put it on the run-queue if it's not already there. The "current" |
| 2502 | * thread is always on the run-queue (except when the actual |
| 2503 | * re-schedule is in progress), and as such you're allowed to do |
| 2504 | * the simpler "current->state = TASK_RUNNING" to mark yourself |
| 2505 | * runnable without the overhead of this. |
| 2506 | * |
Tejun Heo | 9ed3811 | 2009-12-03 15:08:03 +0900 | [diff] [blame] | 2507 | * Returns %true if @p was woken up, %false if it was already running |
| 2508 | * or @state didn't match @p's state. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2509 | */ |
Peter Zijlstra | 7d47872 | 2009-09-14 19:55:44 +0200 | [diff] [blame] | 2510 | static int try_to_wake_up(struct task_struct *p, unsigned int state, |
| 2511 | int wake_flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2512 | { |
Ingo Molnar | cc36773 | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 2513 | int cpu, orig_cpu, this_cpu, success = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2514 | unsigned long flags; |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 2515 | unsigned long en_flags = ENQUEUE_WAKEUP; |
Dan Carpenter | ab3b3aa | 2010-03-06 14:17:52 +0300 | [diff] [blame] | 2516 | struct rq *rq; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2517 | |
Peter Zijlstra | e9c8431 | 2009-09-15 14:43:03 +0200 | [diff] [blame] | 2518 | this_cpu = get_cpu(); |
Peter Zijlstra | 2398f2c | 2008-06-27 13:41:35 +0200 | [diff] [blame] | 2519 | |
Linus Torvalds | 04e2f17 | 2008-02-23 18:05:03 -0800 | [diff] [blame] | 2520 | smp_wmb(); |
Dan Carpenter | ab3b3aa | 2010-03-06 14:17:52 +0300 | [diff] [blame] | 2521 | rq = task_rq_lock(p, &flags); |
Peter Zijlstra | e9c8431 | 2009-09-15 14:43:03 +0200 | [diff] [blame] | 2522 | if (!(p->state & state)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2523 | goto out; |
| 2524 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2525 | if (p->se.on_rq) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2526 | goto out_running; |
| 2527 | |
| 2528 | cpu = task_cpu(p); |
Ingo Molnar | cc36773 | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 2529 | orig_cpu = cpu; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2530 | |
| 2531 | #ifdef CONFIG_SMP |
| 2532 | if (unlikely(task_running(rq, p))) |
| 2533 | goto out_activate; |
| 2534 | |
Peter Zijlstra | e9c8431 | 2009-09-15 14:43:03 +0200 | [diff] [blame] | 2535 | /* |
| 2536 | * In order to handle concurrent wakeups and release the rq->lock |
| 2537 | * we put the task in TASK_WAKING state. |
Ingo Molnar | eb24073 | 2009-09-16 21:09:13 +0200 | [diff] [blame] | 2538 | * |
| 2539 | * First fix up the nr_uninterruptible count: |
Peter Zijlstra | e9c8431 | 2009-09-15 14:43:03 +0200 | [diff] [blame] | 2540 | */ |
Peter Zijlstra | cc87f76 | 2010-03-26 12:22:14 +0100 | [diff] [blame] | 2541 | if (task_contributes_to_load(p)) { |
| 2542 | if (likely(cpu_online(orig_cpu))) |
| 2543 | rq->nr_uninterruptible--; |
| 2544 | else |
| 2545 | this_rq()->nr_uninterruptible--; |
| 2546 | } |
Peter Zijlstra | e9c8431 | 2009-09-15 14:43:03 +0200 | [diff] [blame] | 2547 | p->state = TASK_WAKING; |
Peter Zijlstra | efbbd05 | 2009-12-16 18:04:40 +0100 | [diff] [blame] | 2548 | |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 2549 | if (p->sched_class->task_waking) { |
Peter Zijlstra | efbbd05 | 2009-12-16 18:04:40 +0100 | [diff] [blame] | 2550 | p->sched_class->task_waking(rq, p); |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 2551 | en_flags |= ENQUEUE_WAKING; |
Peter Zijlstra | 0970d29 | 2010-02-15 14:45:54 +0100 | [diff] [blame] | 2552 | } |
Peter Zijlstra | ab19cb2 | 2009-11-27 15:44:43 +0100 | [diff] [blame] | 2553 | |
Peter Zijlstra | 0017d73 | 2010-03-24 18:34:10 +0100 | [diff] [blame] | 2554 | cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags); |
| 2555 | if (cpu != orig_cpu) |
Mike Galbraith | f5dc375 | 2009-10-09 08:35:03 +0200 | [diff] [blame] | 2556 | set_task_cpu(p, cpu); |
Peter Zijlstra | 0017d73 | 2010-03-24 18:34:10 +0100 | [diff] [blame] | 2557 | __task_rq_unlock(rq); |
Peter Zijlstra | ab19cb2 | 2009-11-27 15:44:43 +0100 | [diff] [blame] | 2558 | |
Peter Zijlstra | 0970d29 | 2010-02-15 14:45:54 +0100 | [diff] [blame] | 2559 | rq = cpu_rq(cpu); |
| 2560 | raw_spin_lock(&rq->lock); |
Mike Galbraith | f5dc375 | 2009-10-09 08:35:03 +0200 | [diff] [blame] | 2561 | |
Peter Zijlstra | 0970d29 | 2010-02-15 14:45:54 +0100 | [diff] [blame] | 2562 | /* |
 | 2563 |  * We migrated the task without holding either rq->lock; however, |
 | 2564 |  * since the task is not on the task list itself, nobody else |
 | 2565 |  * will try to migrate the task, hence the rq should match the |
| 2566 | * cpu we just moved it to. |
| 2567 | */ |
| 2568 | WARN_ON(task_cpu(p) != cpu); |
Peter Zijlstra | e9c8431 | 2009-09-15 14:43:03 +0200 | [diff] [blame] | 2569 | WARN_ON(p->state != TASK_WAKING); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2570 | |
Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 2571 | #ifdef CONFIG_SCHEDSTATS |
| 2572 | schedstat_inc(rq, ttwu_count); |
| 2573 | if (cpu == this_cpu) |
| 2574 | schedstat_inc(rq, ttwu_local); |
| 2575 | else { |
| 2576 | struct sched_domain *sd; |
| 2577 | for_each_domain(this_cpu, sd) { |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 2578 | if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { |
Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 2579 | schedstat_inc(sd, ttwu_wake_remote); |
| 2580 | break; |
| 2581 | } |
| 2582 | } |
| 2583 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 2584 | #endif /* CONFIG_SCHEDSTATS */ |
Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 2585 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2586 | out_activate: |
| 2587 | #endif /* CONFIG_SMP */ |
Tejun Heo | 9ed3811 | 2009-12-03 15:08:03 +0900 | [diff] [blame] | 2588 | ttwu_activate(p, rq, wake_flags & WF_SYNC, orig_cpu != cpu, |
| 2589 | cpu == this_cpu, en_flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2590 | success = 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2591 | out_running: |
Tejun Heo | 9ed3811 | 2009-12-03 15:08:03 +0900 | [diff] [blame] | 2592 | ttwu_post_activation(p, rq, wake_flags, success); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2593 | out: |
| 2594 | task_rq_unlock(rq, &flags); |
Peter Zijlstra | e9c8431 | 2009-09-15 14:43:03 +0200 | [diff] [blame] | 2595 | put_cpu(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2596 | |
| 2597 | return success; |
| 2598 | } |
| 2599 | |
David Howells | 50fa610 | 2009-04-28 15:01:38 +0100 | [diff] [blame] | 2600 | /** |
Tejun Heo | 21aa9af | 2010-06-08 21:40:37 +0200 | [diff] [blame] | 2601 | * try_to_wake_up_local - try to wake up a local task with rq lock held |
| 2602 | * @p: the thread to be awakened |
| 2603 | * |
 | 2604 |  * Put @p on the run-queue if it's not already there. The caller must |
| 2605 | * ensure that this_rq() is locked, @p is bound to this_rq() and not |
| 2606 | * the current task. this_rq() stays locked over invocation. |
| 2607 | */ |
| 2608 | static void try_to_wake_up_local(struct task_struct *p) |
| 2609 | { |
| 2610 | struct rq *rq = task_rq(p); |
| 2611 | bool success = false; |
| 2612 | |
| 2613 | BUG_ON(rq != this_rq()); |
| 2614 | BUG_ON(p == current); |
| 2615 | lockdep_assert_held(&rq->lock); |
| 2616 | |
| 2617 | if (!(p->state & TASK_NORMAL)) |
| 2618 | return; |
| 2619 | |
| 2620 | if (!p->se.on_rq) { |
| 2621 | if (likely(!task_running(rq, p))) { |
| 2622 | schedstat_inc(rq, ttwu_count); |
| 2623 | schedstat_inc(rq, ttwu_local); |
| 2624 | } |
| 2625 | ttwu_activate(p, rq, false, false, true, ENQUEUE_WAKEUP); |
| 2626 | success = true; |
| 2627 | } |
| 2628 | ttwu_post_activation(p, rq, 0, success); |
| 2629 | } |
| 2630 | |
| 2631 | /** |
David Howells | 50fa610 | 2009-04-28 15:01:38 +0100 | [diff] [blame] | 2632 | * wake_up_process - Wake up a specific process |
| 2633 | * @p: The process to be woken up. |
| 2634 | * |
| 2635 | * Attempt to wake up the nominated process and move it to the set of runnable |
| 2636 | * processes. Returns 1 if the process was woken up, 0 if it was already |
| 2637 | * running. |
| 2638 | * |
| 2639 | * It may be assumed that this function implies a write memory barrier before |
| 2640 | * changing the task state if and only if any tasks are woken up. |
| 2641 | */ |
Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 2642 | int wake_up_process(struct task_struct *p) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2643 | { |
Matthew Wilcox | d9514f6 | 2007-12-06 11:07:07 -0500 | [diff] [blame] | 2644 | return try_to_wake_up(p, TASK_ALL, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2645 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2646 | EXPORT_SYMBOL(wake_up_process); |
| 2647 | |
Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 2648 | int wake_up_state(struct task_struct *p, unsigned int state) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2649 | { |
| 2650 | return try_to_wake_up(p, state, 0); |
| 2651 | } |
| 2652 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2653 | /* |
| 2654 | * Perform scheduler related setup for a newly forked process p. |
| 2655 | * p is forked by current. |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2656 | * |
| 2657 | * __sched_fork() is basic setup used by init_idle() too: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2658 | */ |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2659 | static void __sched_fork(struct task_struct *p) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2660 | { |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2661 | p->se.exec_start = 0; |
| 2662 | p->se.sum_exec_runtime = 0; |
Ingo Molnar | f6cf891 | 2007-08-28 12:53:24 +0200 | [diff] [blame] | 2663 | p->se.prev_sum_exec_runtime = 0; |
Ingo Molnar | 6c594c2 | 2008-12-14 12:34:15 +0100 | [diff] [blame] | 2664 | p->se.nr_migrations = 0; |
Ingo Molnar | 6cfb0d5 | 2007-08-02 17:41:40 +0200 | [diff] [blame] | 2665 | |
| 2666 | #ifdef CONFIG_SCHEDSTATS |
Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 2667 | memset(&p->se.statistics, 0, sizeof(p->se.statistics)); |
Ingo Molnar | 6cfb0d5 | 2007-08-02 17:41:40 +0200 | [diff] [blame] | 2668 | #endif |
Nick Piggin | 476d139 | 2005-06-25 14:57:29 -0700 | [diff] [blame] | 2669 | |
Peter Zijlstra | fa71706 | 2008-01-25 21:08:27 +0100 | [diff] [blame] | 2670 | INIT_LIST_HEAD(&p->rt.run_list); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2671 | p->se.on_rq = 0; |
Peter Zijlstra | 4a55bd5 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 2672 | INIT_LIST_HEAD(&p->se.group_node); |
Nick Piggin | 476d139 | 2005-06-25 14:57:29 -0700 | [diff] [blame] | 2673 | |
Avi Kivity | e107be3 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 2674 | #ifdef CONFIG_PREEMPT_NOTIFIERS |
| 2675 | INIT_HLIST_HEAD(&p->preempt_notifiers); |
| 2676 | #endif |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2677 | } |
| 2678 | |
| 2679 | /* |
| 2680 | * fork()/clone()-time setup: |
| 2681 | */ |
| 2682 | void sched_fork(struct task_struct *p, int clone_flags) |
| 2683 | { |
| 2684 | int cpu = get_cpu(); |
| 2685 | |
| 2686 | __sched_fork(p); |
Peter Zijlstra | 06b83b5 | 2009-12-16 18:04:35 +0100 | [diff] [blame] | 2687 | /* |
Peter Zijlstra | 0017d73 | 2010-03-24 18:34:10 +0100 | [diff] [blame] | 2688 | * We mark the process as running here. This guarantees that |
Peter Zijlstra | 06b83b5 | 2009-12-16 18:04:35 +0100 | [diff] [blame] | 2689 | * nobody will actually run it, and a signal or other external |
| 2690 | * event cannot wake it up and insert it on the runqueue either. |
| 2691 | */ |
Peter Zijlstra | 0017d73 | 2010-03-24 18:34:10 +0100 | [diff] [blame] | 2692 | p->state = TASK_RUNNING; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2693 | |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 2694 | /* |
Mike Galbraith | b9dc29e | 2009-06-17 10:46:01 +0200 | [diff] [blame] | 2695 | * Revert to default priority/policy on fork if requested. |
| 2696 | */ |
| 2697 | if (unlikely(p->sched_reset_on_fork)) { |
Peter Williams | f83f9ac | 2009-09-24 06:47:10 +0000 | [diff] [blame] | 2698 | if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) { |
Mike Galbraith | b9dc29e | 2009-06-17 10:46:01 +0200 | [diff] [blame] | 2699 | p->policy = SCHED_NORMAL; |
Peter Williams | f83f9ac | 2009-09-24 06:47:10 +0000 | [diff] [blame] | 2700 | p->normal_prio = p->static_prio; |
| 2701 | } |
Mike Galbraith | b9dc29e | 2009-06-17 10:46:01 +0200 | [diff] [blame] | 2702 | |
Mike Galbraith | 6c697bd | 2009-06-17 10:48:02 +0200 | [diff] [blame] | 2703 | if (PRIO_TO_NICE(p->static_prio) < 0) { |
| 2704 | p->static_prio = NICE_TO_PRIO(0); |
Peter Williams | f83f9ac | 2009-09-24 06:47:10 +0000 | [diff] [blame] | 2705 | p->normal_prio = p->static_prio; |
Mike Galbraith | 6c697bd | 2009-06-17 10:48:02 +0200 | [diff] [blame] | 2706 | set_load_weight(p); |
| 2707 | } |
| 2708 | |
Mike Galbraith | b9dc29e | 2009-06-17 10:46:01 +0200 | [diff] [blame] | 2709 | /* |
| 2710 | * We don't need the reset flag anymore after the fork. It has |
| 2711 | * fulfilled its duty: |
| 2712 | */ |
| 2713 | p->sched_reset_on_fork = 0; |
| 2714 | } |
Lennart Poettering | ca94c44 | 2009-06-15 17:17:47 +0200 | [diff] [blame] | 2715 | |
Peter Williams | f83f9ac | 2009-09-24 06:47:10 +0000 | [diff] [blame] | 2716 | /* |
| 2717 | * Make sure we do not leak PI boosting priority to the child. |
| 2718 | */ |
| 2719 | p->prio = current->normal_prio; |
| 2720 | |
Hiroshi Shimamoto | 2ddbf95 | 2007-10-15 17:00:11 +0200 | [diff] [blame] | 2721 | if (!rt_prio(p->prio)) |
| 2722 | p->sched_class = &fair_sched_class; |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 2723 | |
Peter Zijlstra | cd29fe6 | 2009-11-27 17:32:46 +0100 | [diff] [blame] | 2724 | if (p->sched_class->task_fork) |
| 2725 | p->sched_class->task_fork(p); |
| 2726 | |
Peter Zijlstra | 8695159 | 2010-06-22 11:44:53 +0200 | [diff] [blame] | 2727 | /* |
| 2728 | * The child is not yet in the pid-hash so no cgroup attach races, |
 | 2729 |  * and the cgroup is pinned to this child because cgroup_fork() |
 | 2730 |  * is run before sched_fork(). |
| 2731 | * |
| 2732 | * Silence PROVE_RCU. |
| 2733 | */ |
| 2734 | rcu_read_lock(); |
Peter Zijlstra | 5f3edc1 | 2009-09-10 13:42:00 +0200 | [diff] [blame] | 2735 | set_task_cpu(p, cpu); |
Peter Zijlstra | 8695159 | 2010-06-22 11:44:53 +0200 | [diff] [blame] | 2736 | rcu_read_unlock(); |
Peter Zijlstra | 5f3edc1 | 2009-09-10 13:42:00 +0200 | [diff] [blame] | 2737 | |
Chandra Seetharaman | 52f17b6 | 2006-07-14 00:24:38 -0700 | [diff] [blame] | 2738 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2739 | if (likely(sched_info_on())) |
Chandra Seetharaman | 52f17b6 | 2006-07-14 00:24:38 -0700 | [diff] [blame] | 2740 | memset(&p->sched_info, 0, sizeof(p->sched_info)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2741 | #endif |
Chen, Kenneth W | d6077cb | 2006-02-14 13:53:10 -0800 | [diff] [blame] | 2742 | #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 2743 | p->oncpu = 0; |
| 2744 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2745 | #ifdef CONFIG_PREEMPT |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 2746 | /* Want to start with kernel preemption disabled. */ |
Al Viro | a1261f54 | 2005-11-13 16:06:55 -0800 | [diff] [blame] | 2747 | task_thread_info(p)->preempt_count = 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2748 | #endif |
Gregory Haskins | 917b627 | 2008-12-29 09:39:53 -0500 | [diff] [blame] | 2749 | plist_node_init(&p->pushable_tasks, MAX_PRIO); |
| 2750 | |
Nick Piggin | 476d139 | 2005-06-25 14:57:29 -0700 | [diff] [blame] | 2751 | put_cpu(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2752 | } |
| 2753 | |
| 2754 | /* |
| 2755 | * wake_up_new_task - wake up a newly created task for the first time. |
| 2756 | * |
| 2757 | * This function will do some initial scheduler statistics housekeeping |
| 2758 | * that must be done for every newly created context, then puts the task |
| 2759 | * on the runqueue and wakes it. |
| 2760 | */ |
Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 2761 | void wake_up_new_task(struct task_struct *p, unsigned long clone_flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2762 | { |
| 2763 | unsigned long flags; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2764 | struct rq *rq; |
Andrew Morton | c890692 | 2010-03-11 14:08:43 -0800 | [diff] [blame] | 2765 | int cpu __maybe_unused = get_cpu(); |
Peter Zijlstra | fabf318 | 2010-01-21 21:04:57 +0100 | [diff] [blame] | 2766 | |
| 2767 | #ifdef CONFIG_SMP |
Peter Zijlstra | 0017d73 | 2010-03-24 18:34:10 +0100 | [diff] [blame] | 2768 | rq = task_rq_lock(p, &flags); |
| 2769 | p->state = TASK_WAKING; |
| 2770 | |
Peter Zijlstra | fabf318 | 2010-01-21 21:04:57 +0100 | [diff] [blame] | 2771 | /* |
| 2772 | * Fork balancing, do it here and not earlier because: |
| 2773 | * - cpus_allowed can change in the fork path |
| 2774 | * - any previously selected cpu might disappear through hotplug |
| 2775 | * |
Peter Zijlstra | 0017d73 | 2010-03-24 18:34:10 +0100 | [diff] [blame] | 2776 | * We set TASK_WAKING so that select_task_rq() can drop rq->lock |
| 2777 | * without people poking at ->cpus_allowed. |
Peter Zijlstra | fabf318 | 2010-01-21 21:04:57 +0100 | [diff] [blame] | 2778 | */ |
Peter Zijlstra | 0017d73 | 2010-03-24 18:34:10 +0100 | [diff] [blame] | 2779 | cpu = select_task_rq(rq, p, SD_BALANCE_FORK, 0); |
Peter Zijlstra | fabf318 | 2010-01-21 21:04:57 +0100 | [diff] [blame] | 2780 | set_task_cpu(p, cpu); |
Peter Zijlstra | 0017d73 | 2010-03-24 18:34:10 +0100 | [diff] [blame] | 2781 | |
| 2782 | p->state = TASK_RUNNING; |
| 2783 | task_rq_unlock(rq, &flags); |
Peter Zijlstra | fabf318 | 2010-01-21 21:04:57 +0100 | [diff] [blame] | 2784 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2785 | |
Peter Zijlstra | 0017d73 | 2010-03-24 18:34:10 +0100 | [diff] [blame] | 2786 | rq = task_rq_lock(p, &flags); |
Peter Zijlstra | cd29fe6 | 2009-11-27 17:32:46 +0100 | [diff] [blame] | 2787 | activate_task(rq, p, 0); |
Peter Zijlstra | 27a9da6 | 2010-05-04 20:36:56 +0200 | [diff] [blame] | 2788 | trace_sched_wakeup_new(p, 1); |
Peter Zijlstra | a7558e0 | 2009-09-14 20:02:34 +0200 | [diff] [blame] | 2789 | check_preempt_curr(rq, p, WF_FORK); |
Steven Rostedt | 9a897c5 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 2790 | #ifdef CONFIG_SMP |
Peter Zijlstra | efbbd05 | 2009-12-16 18:04:40 +0100 | [diff] [blame] | 2791 | if (p->sched_class->task_woken) |
| 2792 | p->sched_class->task_woken(rq, p); |
Steven Rostedt | 9a897c5 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 2793 | #endif |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2794 | task_rq_unlock(rq, &flags); |
Peter Zijlstra | fabf318 | 2010-01-21 21:04:57 +0100 | [diff] [blame] | 2795 | put_cpu(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2796 | } |
| 2797 | |
Avi Kivity | e107be3 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 2798 | #ifdef CONFIG_PREEMPT_NOTIFIERS |
| 2799 | |
| 2800 | /** |
Luis Henriques | 80dd99b | 2009-03-16 19:58:09 +0000 | [diff] [blame] | 2801 | * preempt_notifier_register - tell me when current is being preempted & rescheduled |
Randy Dunlap | 421cee2 | 2007-07-31 00:37:50 -0700 | [diff] [blame] | 2802 | * @notifier: notifier struct to register |
Avi Kivity | e107be3 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 2803 | */ |
| 2804 | void preempt_notifier_register(struct preempt_notifier *notifier) |
| 2805 | { |
| 2806 | hlist_add_head(¬ifier->link, ¤t->preempt_notifiers); |
| 2807 | } |
| 2808 | EXPORT_SYMBOL_GPL(preempt_notifier_register); |
| 2809 | |
| 2810 | /** |
| 2811 | * preempt_notifier_unregister - no longer interested in preemption notifications |
Randy Dunlap | 421cee2 | 2007-07-31 00:37:50 -0700 | [diff] [blame] | 2812 | * @notifier: notifier struct to unregister |
Avi Kivity | e107be3 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 2813 | * |
| 2814 | * This is safe to call from within a preemption notifier. |
| 2815 | */ |
| 2816 | void preempt_notifier_unregister(struct preempt_notifier *notifier) |
| 2817 | { |
| 2818 | hlist_del(¬ifier->link); |
| 2819 | } |
| 2820 | EXPORT_SYMBOL_GPL(preempt_notifier_unregister); |
| 2821 | |
| 2822 | static void fire_sched_in_preempt_notifiers(struct task_struct *curr) |
| 2823 | { |
| 2824 | struct preempt_notifier *notifier; |
| 2825 | struct hlist_node *node; |
| 2826 | |
| 2827 | hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link) |
| 2828 | notifier->ops->sched_in(notifier, raw_smp_processor_id()); |
| 2829 | } |
| 2830 | |
| 2831 | static void |
| 2832 | fire_sched_out_preempt_notifiers(struct task_struct *curr, |
| 2833 | struct task_struct *next) |
| 2834 | { |
| 2835 | struct preempt_notifier *notifier; |
| 2836 | struct hlist_node *node; |
| 2837 | |
| 2838 | hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link) |
| 2839 | notifier->ops->sched_out(notifier, next); |
| 2840 | } |
| 2841 | |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 2842 | #else /* !CONFIG_PREEMPT_NOTIFIERS */ |
Avi Kivity | e107be3 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 2843 | |
| 2844 | static void fire_sched_in_preempt_notifiers(struct task_struct *curr) |
| 2845 | { |
| 2846 | } |
| 2847 | |
| 2848 | static void |
| 2849 | fire_sched_out_preempt_notifiers(struct task_struct *curr, |
| 2850 | struct task_struct *next) |
| 2851 | { |
| 2852 | } |
| 2853 | |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 2854 | #endif /* CONFIG_PREEMPT_NOTIFIERS */ |
Avi Kivity | e107be3 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 2855 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2856 | /** |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 2857 | * prepare_task_switch - prepare to switch tasks |
| 2858 | * @rq: the runqueue preparing to switch |
Randy Dunlap | 421cee2 | 2007-07-31 00:37:50 -0700 | [diff] [blame] | 2859 | * @prev: the current task that is being switched out |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 2860 | * @next: the task we are going to switch to. |
| 2861 | * |
| 2862 | * This is called with the rq lock held and interrupts off. It must |
| 2863 | * be paired with a subsequent finish_task_switch after the context |
| 2864 | * switch. |
| 2865 | * |
| 2866 | * prepare_task_switch sets up locking and calls architecture specific |
| 2867 | * hooks. |
| 2868 | */ |
Avi Kivity | e107be3 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 2869 | static inline void |
| 2870 | prepare_task_switch(struct rq *rq, struct task_struct *prev, |
| 2871 | struct task_struct *next) |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 2872 | { |
Avi Kivity | e107be3 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 2873 | fire_sched_out_preempt_notifiers(prev, next); |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 2874 | prepare_lock_switch(rq, next); |
| 2875 | prepare_arch_switch(next); |
| 2876 | } |
| 2877 | |
| 2878 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2879 | * finish_task_switch - clean up after a task-switch |
Jeff Garzik | 344baba | 2005-09-07 01:15:17 -0400 | [diff] [blame] | 2880 | * @rq: runqueue associated with task-switch |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2881 | * @prev: the thread we just switched away from. |
| 2882 | * |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 2883 | * finish_task_switch must be called after the context switch, paired |
| 2884 | * with a prepare_task_switch call before the context switch. |
| 2885 | * finish_task_switch will reconcile locking set up by prepare_task_switch, |
| 2886 | * and do any other architecture-specific cleanup actions. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2887 | * |
| 2888 | * Note that we may have delayed dropping an mm in context_switch(). If |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 2889 | * so, we finish that here outside of the runqueue lock. (Doing it |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2890 | * with the lock held can cause deadlocks; see schedule() for |
| 2891 | * details.) |
| 2892 | */ |
Alexey Dobriyan | a995744 | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 2893 | static void finish_task_switch(struct rq *rq, struct task_struct *prev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2894 | __releases(rq->lock) |
| 2895 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2896 | struct mm_struct *mm = rq->prev_mm; |
Oleg Nesterov | 55a101f | 2006-09-29 02:01:10 -0700 | [diff] [blame] | 2897 | long prev_state; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2898 | |
| 2899 | rq->prev_mm = NULL; |
| 2900 | |
| 2901 | /* |
| 2902 | * A task struct has one reference for the use as "current". |
Oleg Nesterov | c394cc9 | 2006-09-29 02:01:11 -0700 | [diff] [blame] | 2903 | * If a task dies, then it sets TASK_DEAD in tsk->state and calls |
Oleg Nesterov | 55a101f | 2006-09-29 02:01:10 -0700 | [diff] [blame] | 2904 | * schedule one last time. The schedule call will never return, and |
| 2905 | * the scheduled task must drop that reference. |
Oleg Nesterov | c394cc9 | 2006-09-29 02:01:11 -0700 | [diff] [blame] | 2906 | * The test for TASK_DEAD must occur while the runqueue locks are |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2907 | * still held, otherwise prev could be scheduled on another cpu, die |
| 2908 | * there before we look at prev->state, and then the reference would |
| 2909 | * be dropped twice. |
| 2910 | * Manfred Spraul <manfred@colorfullife.com> |
| 2911 | */ |
Oleg Nesterov | 55a101f | 2006-09-29 02:01:10 -0700 | [diff] [blame] | 2912 | prev_state = prev->state; |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 2913 | finish_arch_switch(prev); |
Jamie Iles | 8381f65 | 2010-01-08 15:27:33 +0000 | [diff] [blame] | 2914 | #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW |
| 2915 | local_irq_disable(); |
| 2916 | #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ |
Peter Zijlstra | 49f4743 | 2009-12-27 11:51:52 +0100 | [diff] [blame] | 2917 | perf_event_task_sched_in(current); |
Jamie Iles | 8381f65 | 2010-01-08 15:27:33 +0000 | [diff] [blame] | 2918 | #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW |
| 2919 | local_irq_enable(); |
| 2920 | #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 2921 | finish_lock_switch(rq, prev); |
Steven Rostedt | e8fa136 | 2008-01-25 21:08:05 +0100 | [diff] [blame] | 2922 | |
Avi Kivity | e107be3 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 2923 | fire_sched_in_preempt_notifiers(current); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2924 | if (mm) |
| 2925 | mmdrop(mm); |
Oleg Nesterov | c394cc9 | 2006-09-29 02:01:11 -0700 | [diff] [blame] | 2926 | if (unlikely(prev_state == TASK_DEAD)) { |
bibo mao | c6fd91f | 2006-03-26 01:38:20 -0800 | [diff] [blame] | 2927 | /* |
| 2928 | * Remove function-return probe instances associated with this |
| 2929 | * task and put them back on the free list. |
Ingo Molnar | 9761eea | 2007-07-09 18:52:00 +0200 | [diff] [blame] | 2930 | */ |
bibo mao | c6fd91f | 2006-03-26 01:38:20 -0800 | [diff] [blame] | 2931 | kprobe_flush_task(prev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2932 | put_task_struct(prev); |
bibo mao | c6fd91f | 2006-03-26 01:38:20 -0800 | [diff] [blame] | 2933 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2934 | } |
| 2935 | |
Gregory Haskins | 3f029d3 | 2009-07-29 11:08:47 -0400 | [diff] [blame] | 2936 | #ifdef CONFIG_SMP |
| 2937 | |
| 2938 | /* assumes rq->lock is held */ |
| 2939 | static inline void pre_schedule(struct rq *rq, struct task_struct *prev) |
| 2940 | { |
| 2941 | if (prev->sched_class->pre_schedule) |
| 2942 | prev->sched_class->pre_schedule(rq, prev); |
| 2943 | } |
| 2944 | |
| 2945 | /* rq->lock is NOT held, but preemption is disabled */ |
| 2946 | static inline void post_schedule(struct rq *rq) |
| 2947 | { |
| 2948 | if (rq->post_schedule) { |
| 2949 | unsigned long flags; |
| 2950 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 2951 | raw_spin_lock_irqsave(&rq->lock, flags); |
Gregory Haskins | 3f029d3 | 2009-07-29 11:08:47 -0400 | [diff] [blame] | 2952 | if (rq->curr->sched_class->post_schedule) |
| 2953 | rq->curr->sched_class->post_schedule(rq); |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 2954 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
Gregory Haskins | 3f029d3 | 2009-07-29 11:08:47 -0400 | [diff] [blame] | 2955 | |
| 2956 | rq->post_schedule = 0; |
| 2957 | } |
| 2958 | } |
| 2959 | |
| 2960 | #else |
| 2961 | |
| 2962 | static inline void pre_schedule(struct rq *rq, struct task_struct *p) |
| 2963 | { |
| 2964 | } |
| 2965 | |
| 2966 | static inline void post_schedule(struct rq *rq) |
| 2967 | { |
| 2968 | } |
| 2969 | |
| 2970 | #endif |
| 2971 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2972 | /** |
| 2973 | * schedule_tail - first thing a freshly forked thread must call. |
| 2974 | * @prev: the thread we just switched away from. |
| 2975 | */ |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 2976 | asmlinkage void schedule_tail(struct task_struct *prev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2977 | __releases(rq->lock) |
| 2978 | { |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 2979 | struct rq *rq = this_rq(); |
| 2980 | |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 2981 | finish_task_switch(rq, prev); |
Steven Rostedt | da19ab5 | 2009-07-29 00:21:22 -0400 | [diff] [blame] | 2982 | |
Gregory Haskins | 3f029d3 | 2009-07-29 11:08:47 -0400 | [diff] [blame] | 2983 | /* |
| 2984 | * FIXME: do we need to worry about rq being invalidated by the |
| 2985 | * task_switch? |
| 2986 | */ |
| 2987 | post_schedule(rq); |
Steven Rostedt | da19ab5 | 2009-07-29 00:21:22 -0400 | [diff] [blame] | 2988 | |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 2989 | #ifdef __ARCH_WANT_UNLOCKED_CTXSW |
| 2990 | /* In this case, finish_task_switch does not reenable preemption */ |
| 2991 | preempt_enable(); |
| 2992 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2993 | if (current->set_child_tid) |
Pavel Emelyanov | b488893 | 2007-10-18 23:40:14 -0700 | [diff] [blame] | 2994 | put_user(task_pid_vnr(current), current->set_child_tid); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2995 | } |
| 2996 | |
| 2997 | /* |
| 2998 | * context_switch - switch to the new MM and the new |
| 2999 | * thread's register state. |
| 3000 | */ |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3001 | static inline void |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 3002 | context_switch(struct rq *rq, struct task_struct *prev, |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 3003 | struct task_struct *next) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3004 | { |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3005 | struct mm_struct *mm, *oldmm; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3006 | |
Avi Kivity | e107be3 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 3007 | prepare_task_switch(rq, prev, next); |
Peter Zijlstra | 27a9da6 | 2010-05-04 20:36:56 +0200 | [diff] [blame] | 3008 | trace_sched_switch(prev, next); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3009 | mm = next->mm; |
| 3010 | oldmm = prev->active_mm; |
Zachary Amsden | 9226d12 | 2007-02-13 13:26:21 +0100 | [diff] [blame] | 3011 | /* |
| 3012 | * For paravirt, this is coupled with an exit in switch_to to |
| 3013 | * combine the page table reload and the switch backend into |
| 3014 | * one hypercall. |
| 3015 | */ |
Jeremy Fitzhardinge | 224101e | 2009-02-18 11:18:57 -0800 | [diff] [blame] | 3016 | arch_start_context_switch(prev); |
Zachary Amsden | 9226d12 | 2007-02-13 13:26:21 +0100 | [diff] [blame] | 3017 | |
Heiko Carstens | 31915ab | 2010-09-16 14:42:25 +0200 | [diff] [blame] | 3018 | if (!mm) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3019 | next->active_mm = oldmm; |
| 3020 | atomic_inc(&oldmm->mm_count); |
| 3021 | enter_lazy_tlb(oldmm, next); |
| 3022 | } else |
| 3023 | switch_mm(oldmm, mm, next); |
| 3024 | |
Heiko Carstens | 31915ab | 2010-09-16 14:42:25 +0200 | [diff] [blame] | 3025 | if (!prev->mm) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3026 | prev->active_mm = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3027 | rq->prev_mm = oldmm; |
| 3028 | } |
Ingo Molnar | 3a5f5e4 | 2006-07-14 00:24:27 -0700 | [diff] [blame] | 3029 | /* |
| 3030 | * The runqueue lock will be released by the next |
| 3031 | * task (which is an invalid locking op but in the case |
| 3032 | * of the scheduler it's an obvious special-case), so we |
| 3033 | * do an early lockdep release here: |
| 3034 | */ |
| 3035 | #ifndef __ARCH_WANT_UNLOCKED_CTXSW |
Ingo Molnar | 8a25d5d | 2006-07-03 00:24:54 -0700 | [diff] [blame] | 3036 | spin_release(&rq->lock.dep_map, 1, _THIS_IP_); |
Ingo Molnar | 3a5f5e4 | 2006-07-14 00:24:27 -0700 | [diff] [blame] | 3037 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3038 | |
| 3039 | /* Here we just switch the register state and the stack. */ |
| 3040 | switch_to(prev, next, prev); |
| 3041 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3042 | barrier(); |
| 3043 | /* |
| 3044 | * this_rq must be evaluated again because prev may have moved |
| 3045 | * CPUs since it called schedule(), thus the 'rq' on its stack |
| 3046 | * frame will be invalid. |
| 3047 | */ |
| 3048 | finish_task_switch(this_rq(), prev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3049 | } |
| 3050 | |
| 3051 | /* |
| 3052 | * nr_running, nr_uninterruptible and nr_context_switches: |
| 3053 | * |
| 3054 | * externally visible scheduler statistics: current number of runnable |
| 3055 | * threads, current number of uninterruptible-sleeping threads, total |
| 3056 | * number of context switches performed since bootup. |
| 3057 | */ |
| 3058 | unsigned long nr_running(void) |
| 3059 | { |
| 3060 | unsigned long i, sum = 0; |
| 3061 | |
| 3062 | for_each_online_cpu(i) |
| 3063 | sum += cpu_rq(i)->nr_running; |
| 3064 | |
| 3065 | return sum; |
| 3066 | } |
| 3067 | |
| 3068 | unsigned long nr_uninterruptible(void) |
| 3069 | { |
| 3070 | unsigned long i, sum = 0; |
| 3071 | |
KAMEZAWA Hiroyuki | 0a94502 | 2006-03-28 01:56:37 -0800 | [diff] [blame] | 3072 | for_each_possible_cpu(i) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3073 | sum += cpu_rq(i)->nr_uninterruptible; |
| 3074 | |
| 3075 | /* |
| 3076 | * Since we read the counters without locking, the sum might be |
| 3077 | * slightly inaccurate. Do not allow it to go below zero though: |
| 3078 | */ |
| 3079 | if (unlikely((long)sum < 0)) |
| 3080 | sum = 0; |
| 3081 | |
| 3082 | return sum; |
| 3083 | } |
| 3084 | |
| 3085 | unsigned long long nr_context_switches(void) |
| 3086 | { |
Steven Rostedt | cc94abf | 2006-06-27 02:54:31 -0700 | [diff] [blame] | 3087 | int i; |
| 3088 | unsigned long long sum = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3089 | |
KAMEZAWA Hiroyuki | 0a94502 | 2006-03-28 01:56:37 -0800 | [diff] [blame] | 3090 | for_each_possible_cpu(i) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3091 | sum += cpu_rq(i)->nr_switches; |
| 3092 | |
| 3093 | return sum; |
| 3094 | } |
| 3095 | |
| 3096 | unsigned long nr_iowait(void) |
| 3097 | { |
| 3098 | unsigned long i, sum = 0; |
| 3099 | |
KAMEZAWA Hiroyuki | 0a94502 | 2006-03-28 01:56:37 -0800 | [diff] [blame] | 3100 | for_each_possible_cpu(i) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3101 | sum += atomic_read(&cpu_rq(i)->nr_iowait); |
| 3102 | |
| 3103 | return sum; |
| 3104 | } |
| 3105 | |
Peter Zijlstra | 8c215bd | 2010-07-01 09:07:17 +0200 | [diff] [blame] | 3106 | unsigned long nr_iowait_cpu(int cpu) |
Arjan van de Ven | 69d2587 | 2009-09-21 17:04:08 -0700 | [diff] [blame] | 3107 | { |
Peter Zijlstra | 8c215bd | 2010-07-01 09:07:17 +0200 | [diff] [blame] | 3108 | struct rq *this = cpu_rq(cpu); |
Arjan van de Ven | 69d2587 | 2009-09-21 17:04:08 -0700 | [diff] [blame] | 3109 | return atomic_read(&this->nr_iowait); |
| 3110 | } |
| 3111 | |
| 3112 | unsigned long this_cpu_load(void) |
| 3113 | { |
| 3114 | struct rq *this = this_rq(); |
| 3115 | return this->cpu_load[0]; |
| 3116 | } |
| 3117 | |
| 3118 | |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 3119 | /* Variables and functions for calc_load */ |
| 3120 | static atomic_long_t calc_load_tasks; |
| 3121 | static unsigned long calc_load_update; |
| 3122 | unsigned long avenrun[3]; |
| 3123 | EXPORT_SYMBOL(avenrun); |
| 3124 | |
Peter Zijlstra | 74f5187 | 2010-04-22 21:50:19 +0200 | [diff] [blame] | 3125 | static long calc_load_fold_active(struct rq *this_rq) |
| 3126 | { |
| 3127 | long nr_active, delta = 0; |
| 3128 | |
| 3129 | nr_active = this_rq->nr_running; |
| 3130 | nr_active += (long) this_rq->nr_uninterruptible; |
| 3131 | |
| 3132 | if (nr_active != this_rq->calc_load_active) { |
| 3133 | delta = nr_active - this_rq->calc_load_active; |
| 3134 | this_rq->calc_load_active = nr_active; |
| 3135 | } |
| 3136 | |
| 3137 | return delta; |
| 3138 | } |
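/*
 * Editor's illustration: if this runqueue has nr_running = 2 and
 * nr_uninterruptible = 1 while calc_load_active still holds 2 from the
 * previous fold, calc_load_fold_active() returns a delta of +1 and
 * records calc_load_active = 3, so the global calc_load_tasks counter
 * only ever sees per-cpu changes, never absolute values.
 */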
| 3139 | |
Peter Zijlstra | 0f004f5 | 2010-11-30 19:48:45 +0100 | [diff] [blame] | 3140 | static unsigned long |
| 3141 | calc_load(unsigned long load, unsigned long exp, unsigned long active) |
| 3142 | { |
| 3143 | load *= exp; |
| 3144 | load += active * (FIXED_1 - exp); |
| 3145 | load += 1UL << (FSHIFT - 1); |
| 3146 | return load >> FSHIFT; |
| 3147 | } |
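/*
 * Worked example (editor's illustration): with FSHIFT = 11 the fixed
 * point one is FIXED_1 = 2048 and the 1-minute decay factor is
 * EXP_1 = 1884.  Starting from load = 0 with one runnable task
 * (active = 1 * FIXED_1 = 2048), a single LOAD_FREQ update gives
 *
 *	load = (0 * 1884 + 2048 * (2048 - 1884) + 1024) >> 11
 *	     = 336896 >> 11 = 164,
 *
 * i.e. a 1-minute load average of 164/2048 ~= 0.08 after the first
 * 5-second LOAD_FREQ interval.
 */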
| 3148 | |
Peter Zijlstra | 74f5187 | 2010-04-22 21:50:19 +0200 | [diff] [blame] | 3149 | #ifdef CONFIG_NO_HZ |
| 3150 | /* |
| 3151 | * For NO_HZ we delay the active fold to the next LOAD_FREQ update. |
| 3152 | * |
| 3153 | * When making the ILB scale, we should try to pull this in as well. |
| 3154 | */ |
| 3155 | static atomic_long_t calc_load_tasks_idle; |
| 3156 | |
| 3157 | static void calc_load_account_idle(struct rq *this_rq) |
| 3158 | { |
| 3159 | long delta; |
| 3160 | |
| 3161 | delta = calc_load_fold_active(this_rq); |
| 3162 | if (delta) |
| 3163 | atomic_long_add(delta, &calc_load_tasks_idle); |
| 3164 | } |
| 3165 | |
| 3166 | static long calc_load_fold_idle(void) |
| 3167 | { |
| 3168 | long delta = 0; |
| 3169 | |
| 3170 | /* |
| 3171 | * It's got a race; we don't care... |
| 3172 | */ |
| 3173 | if (atomic_long_read(&calc_load_tasks_idle)) |
| 3174 | delta = atomic_long_xchg(&calc_load_tasks_idle, 0); |
| 3175 | |
| 3176 | return delta; |
| 3177 | } |
Peter Zijlstra | 0f004f5 | 2010-11-30 19:48:45 +0100 | [diff] [blame] | 3178 | |
| 3179 | /** |
| 3180 | * fixed_power_int - compute: x^n, in O(log n) time |
| 3181 | * |
| 3182 | * @x: base of the power |
| 3183 | * @frac_bits: fractional bits of @x |
| 3184 | * @n: power to raise @x to. |
| 3185 | * |
| 3186 | * By exploiting the relation between the definition of the natural power |
| 3187 | * function: x^n := x*x*...*x (x multiplied by itself for n times), and |
| 3188 | * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i, |
| 3189 | * (where: n_i \elem {0, 1}, the binary vector representing n), |
| 3190 | * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is |
| 3191 | * of course trivially computable in O(log_2 n), the length of our binary |
| 3192 | * vector. |
| 3193 | */ |
| 3194 | static unsigned long |
| 3195 | fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n) |
| 3196 | { |
| 3197 | unsigned long result = 1UL << frac_bits; |
| 3198 | |
| 3199 | if (n) for (;;) { |
| 3200 | if (n & 1) { |
| 3201 | result *= x; |
| 3202 | result += 1UL << (frac_bits - 1); |
| 3203 | result >>= frac_bits; |
| 3204 | } |
| 3205 | n >>= 1; |
| 3206 | if (!n) |
| 3207 | break; |
| 3208 | x *= x; |
| 3209 | x += 1UL << (frac_bits - 1); |
| 3210 | x >>= frac_bits; |
| 3211 | } |
| 3212 | |
| 3213 | return result; |
| 3214 | } |
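/*
 * Worked example (editor's illustration): for n = 5 (binary 101) the
 * loop runs three times, once per bit of n.  The result is multiplied
 * by x on the iterations where the current low bit is set, while x is
 * squared each round, so result ends up as x^1 * x^4 = x^5, with every
 * intermediate product rounded and renormalized by frac_bits.
 */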
| 3215 | |
| 3216 | /* |
| 3217 | * a1 = a0 * e + a * (1 - e) |
| 3218 | * |
| 3219 | * a2 = a1 * e + a * (1 - e) |
| 3220 | * = (a0 * e + a * (1 - e)) * e + a * (1 - e) |
| 3221 | * = a0 * e^2 + a * (1 - e) * (1 + e) |
| 3222 | * |
| 3223 | * a3 = a2 * e + a * (1 - e) |
| 3224 | * = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e) |
| 3225 | * = a0 * e^3 + a * (1 - e) * (1 + e + e^2) |
| 3226 | * |
| 3227 | * ... |
| 3228 | * |
| 3229 | * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1] |
| 3230 | * = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e) |
| 3231 | * = a0 * e^n + a * (1 - e^n) |
| 3232 | * |
| 3233 | * [1] application of the geometric series: |
| 3234 | * |
| 3235 | * n 1 - x^(n+1) |
| 3236 | * S_n := \Sum x^i = ------------- |
| 3237 | * i=0 1 - x |
| 3238 | */ |
| 3239 | static unsigned long |
| 3240 | calc_load_n(unsigned long load, unsigned long exp, |
| 3241 | unsigned long active, unsigned int n) |
| 3242 | { |
| 3243 | |
| 3244 | return calc_load(load, fixed_power_int(exp, FSHIFT, n), active); |
| 3245 | } |
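/*
 * Sanity check (editor's note): fixed_power_int(exp, FSHIFT, 1) is just
 * exp, so calc_load_n(load, exp, active, 1) reduces to
 * calc_load(load, exp, active), i.e. one ordinary LOAD_FREQ update,
 * which matches the n = 1 case of the recurrence derived above.
 */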
| 3246 | |
| 3247 | /* |
| 3248 | * NO_HZ can leave us missing all per-cpu ticks calling |
| 3249 | * calc_load_account_active(), but since an idle CPU folds its delta into |
| 3250 | * calc_load_tasks_idle per calc_load_account_idle(), all we need to do is fold |
| 3251 | * in the pending idle delta if our idle period crossed a load cycle boundary. |
| 3252 | * |
| 3253 | * Once we've updated the global active value, we need to apply the exponential |
| 3254 | * weights adjusted to the number of cycles missed. |
| 3255 | */ |
| 3256 | static void calc_global_nohz(unsigned long ticks) |
| 3257 | { |
| 3258 | long delta, active, n; |
| 3259 | |
| 3260 | if (time_before(jiffies, calc_load_update)) |
| 3261 | return; |
| 3262 | |
| 3263 | /* |
| 3264 | * If we crossed a calc_load_update boundary, make sure to fold |
| 3265 | * any pending idle changes; the respective CPUs might have |
| 3266 | * missed the tick-driven calc_load_account_active() update |
| 3267 | * due to NO_HZ. |
| 3268 | */ |
| 3269 | delta = calc_load_fold_idle(); |
| 3270 | if (delta) |
| 3271 | atomic_long_add(delta, &calc_load_tasks); |
| 3272 | |
| 3273 | /* |
| 3274 | * If we were idle for multiple load cycles, apply them. |
| 3275 | */ |
| 3276 | if (ticks >= LOAD_FREQ) { |
| 3277 | n = ticks / LOAD_FREQ; |
| 3278 | |
| 3279 | active = atomic_long_read(&calc_load_tasks); |
| 3280 | active = active > 0 ? active * FIXED_1 : 0; |
| 3281 | |
| 3282 | avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n); |
| 3283 | avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n); |
| 3284 | avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n); |
| 3285 | |
| 3286 | calc_load_update += n * LOAD_FREQ; |
| 3287 | } |
| 3288 | |
| 3289 | /* |
| 3290 | * It's possible the remainder of the above division also crosses |
| 3291 | * a LOAD_FREQ period; the regular check in calc_global_load() |
| 3292 | * which comes after this will take care of that. |
| 3293 | * |
| 3294 | * Consider us being 11 ticks before a cycle completion, and us |
| 3295 | * sleeping for 4*LOAD_FREQ + 22 ticks, then the above code will |
| 3296 | * age us 4 cycles, and the test in calc_global_load() will |
| 3297 | * pick up the final one. |
| 3298 | */ |
| 3299 | } |
Peter Zijlstra | 74f5187 | 2010-04-22 21:50:19 +0200 | [diff] [blame] | 3300 | #else |
| 3301 | static void calc_load_account_idle(struct rq *this_rq) |
| 3302 | { |
| 3303 | } |
| 3304 | |
| 3305 | static inline long calc_load_fold_idle(void) |
| 3306 | { |
| 3307 | return 0; |
| 3308 | } |
Peter Zijlstra | 0f004f5 | 2010-11-30 19:48:45 +0100 | [diff] [blame] | 3309 | |
| 3310 | static void calc_global_nohz(unsigned long ticks) |
| 3311 | { |
| 3312 | } |
Peter Zijlstra | 74f5187 | 2010-04-22 21:50:19 +0200 | [diff] [blame] | 3313 | #endif |
| 3314 | |
Thomas Gleixner | 2d02494 | 2009-05-02 20:08:52 +0200 | [diff] [blame] | 3315 | /** |
| 3316 | * get_avenrun - get the load average array |
| 3317 | * @loads: pointer to dest load array |
| 3318 | * @offset: offset to add |
| 3319 | * @shift: shift count to shift the result left |
| 3320 | * |
| 3321 | * These values are estimates at best, so no need for locking. |
| 3322 | */ |
| 3323 | void get_avenrun(unsigned long *loads, unsigned long offset, int shift) |
| 3324 | { |
| 3325 | loads[0] = (avenrun[0] + offset) << shift; |
| 3326 | loads[1] = (avenrun[1] + offset) << shift; |
| 3327 | loads[2] = (avenrun[2] + offset) << shift; |
| 3328 | } |
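/*
 * Editor's note: the canonical caller is /proc/loadavg, which (in
 * fs/proc/loadavg.c) does get_avenrun(avnrun, FIXED_1/200, 0) and then
 * formats each entry with LOAD_INT()/LOAD_FRAC(); the FIXED_1/200
 * offset rounds the value to the two decimal places shown to userspace.
 */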
| 3329 | |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 3330 | /* |
| 3331 | * calc_load - update the avenrun load estimates 10 ticks after the |
| 3332 | * CPUs have updated calc_load_tasks. |
| 3333 | */ |
Peter Zijlstra | 0f004f5 | 2010-11-30 19:48:45 +0100 | [diff] [blame] | 3334 | void calc_global_load(unsigned long ticks) |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 3335 | { |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 3336 | long active; |
| 3337 | |
Peter Zijlstra | 0f004f5 | 2010-11-30 19:48:45 +0100 | [diff] [blame] | 3338 | calc_global_nohz(ticks); |
| 3339 | |
| 3340 | if (time_before(jiffies, calc_load_update + 10)) |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 3341 | return; |
| 3342 | |
| 3343 | active = atomic_long_read(&calc_load_tasks); |
| 3344 | active = active > 0 ? active * FIXED_1 : 0; |
| 3345 | |
| 3346 | avenrun[0] = calc_load(avenrun[0], EXP_1, active); |
| 3347 | avenrun[1] = calc_load(avenrun[1], EXP_5, active); |
| 3348 | avenrun[2] = calc_load(avenrun[2], EXP_15, active); |
| 3349 | |
| 3350 | calc_load_update += LOAD_FREQ; |
| 3351 | } |
| 3352 | |
| 3353 | /* |
Peter Zijlstra | 74f5187 | 2010-04-22 21:50:19 +0200 | [diff] [blame] | 3354 | * Called from update_cpu_load() to periodically update this CPU's |
| 3355 | * active count. |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 3356 | */ |
| 3357 | static void calc_load_account_active(struct rq *this_rq) |
| 3358 | { |
Peter Zijlstra | 74f5187 | 2010-04-22 21:50:19 +0200 | [diff] [blame] | 3359 | long delta; |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 3360 | |
Peter Zijlstra | 74f5187 | 2010-04-22 21:50:19 +0200 | [diff] [blame] | 3361 | if (time_before(jiffies, this_rq->calc_load_update)) |
| 3362 | return; |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 3363 | |
Peter Zijlstra | 74f5187 | 2010-04-22 21:50:19 +0200 | [diff] [blame] | 3364 | delta = calc_load_fold_active(this_rq); |
| 3365 | delta += calc_load_fold_idle(); |
| 3366 | if (delta) |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 3367 | atomic_long_add(delta, &calc_load_tasks); |
Peter Zijlstra | 74f5187 | 2010-04-22 21:50:19 +0200 | [diff] [blame] | 3368 | |
| 3369 | this_rq->calc_load_update += LOAD_FREQ; |
Jack Steiner | db1b1fe | 2006-03-31 02:31:21 -0800 | [diff] [blame] | 3370 | } |
| 3371 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3372 | /* |
Venkatesh Pallipadi | fdf3e95 | 2010-05-17 18:14:43 -0700 | [diff] [blame] | 3373 | * The exact cpuload at various idx values, calculated at every tick would be |
| 3374 | * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load |
| 3375 | * |
| 3376 | * If a cpu misses updates for n-1 ticks (as it was idle) and update gets called |
| 3377 | * on the nth tick when the cpu may be busy, then we have: |
| 3378 | * load = ((2^idx - 1) / 2^idx)^(n-1) * load |
| 3379 | * load = ((2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load |
| 3380 | * |
| 3381 | * decay_load_missed() below does efficient calculation of |
| 3382 | * load = ((2^idx - 1) / 2^idx)^(n-1) * load |
| 3383 | * avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load |
| 3384 | * |
| 3385 | * The calculation is approximated on a 128 point scale. |
| 3386 | * degrade_zero_ticks is the number of ticks after which load at any |
| 3387 | * particular idx is approximated to be zero. |
| 3388 | * degrade_factor is a precomputed table, a row for each load idx. |
| 3389 | * Each column corresponds to degradation factor for a power of two ticks, |
| 3390 | * based on 128 point scale. |
| 3391 | * Example: |
| 3392 | * row 2, col 3 (=12) says that the degradation at load idx 2 after |
| 3393 | * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8). |
| 3394 | * |
| 3395 | * With this power of 2 load factors, we can degrade the load n times |
| 3396 | * by looking at 1 bits in n and doing as many mult/shift instead of |
| 3397 | * n mult/shifts needed by the exact degradation. |
| 3398 | */ |
| 3399 | #define DEGRADE_SHIFT 7 |
| 3400 | static const unsigned char |
| 3401 | degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128}; |
| 3402 | static const unsigned char |
| 3403 | degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = { |
| 3404 | {0, 0, 0, 0, 0, 0, 0, 0}, |
| 3405 | {64, 32, 8, 0, 0, 0, 0, 0}, |
| 3406 | {96, 72, 40, 12, 1, 0, 0}, |
| 3407 | {112, 98, 75, 43, 15, 1, 0}, |
| 3408 | {120, 112, 98, 76, 45, 16, 2} }; |
| 3409 | |
| 3410 | /* |
| 3411 | * Update cpu_load for any missed ticks due to tickless idle. The backlog |
| 3412 | * case is when the CPU was idle, so we just decay the old load without |
| 3413 | * adding any new load. |
| 3414 | */ |
| 3415 | static unsigned long |
| 3416 | decay_load_missed(unsigned long load, unsigned long missed_updates, int idx) |
| 3417 | { |
| 3418 | int j = 0; |
| 3419 | |
| 3420 | if (!missed_updates) |
| 3421 | return load; |
| 3422 | |
| 3423 | if (missed_updates >= degrade_zero_ticks[idx]) |
| 3424 | return 0; |
| 3425 | |
| 3426 | if (idx == 1) |
| 3427 | return load >> missed_updates; |
| 3428 | |
| 3429 | while (missed_updates) { |
| 3430 | if (missed_updates % 2) |
| 3431 | load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT; |
| 3432 | |
| 3433 | missed_updates >>= 1; |
| 3434 | j++; |
| 3435 | } |
| 3436 | return load; |
| 3437 | } |
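/*
 * Worked example (editor's illustration): idx = 2, missed_updates = 8
 * (binary 1000).  The loop shifts past the three clear low bits and,
 * on the set bit, applies degrade_factor[2][3] = 12, i.e.
 *
 *	load = load * 12 >> DEGRADE_SHIFT = load * 12 / 128,
 *
 * the approximation of (3/4)^8 quoted in the table comment above.
 */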
| 3438 | |
| 3439 | /* |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3440 | * Update rq->cpu_load[] statistics. This function is usually called every |
Venkatesh Pallipadi | fdf3e95 | 2010-05-17 18:14:43 -0700 | [diff] [blame] | 3441 | * scheduler tick (TICK_NSEC). With tickless idle this will not be called |
| 3442 | * every tick. We fix it up based on jiffies. |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 3443 | */ |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3444 | static void update_cpu_load(struct rq *this_rq) |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 3445 | { |
Dmitry Adamushko | 495eca4 | 2007-10-15 17:00:06 +0200 | [diff] [blame] | 3446 | unsigned long this_load = this_rq->load.weight; |
Venkatesh Pallipadi | fdf3e95 | 2010-05-17 18:14:43 -0700 | [diff] [blame] | 3447 | unsigned long curr_jiffies = jiffies; |
| 3448 | unsigned long pending_updates; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3449 | int i, scale; |
| 3450 | |
| 3451 | this_rq->nr_load_updates++; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3452 | |
Venkatesh Pallipadi | fdf3e95 | 2010-05-17 18:14:43 -0700 | [diff] [blame] | 3453 | /* Avoid repeated calls on same jiffy, when moving in and out of idle */ |
| 3454 | if (curr_jiffies == this_rq->last_load_update_tick) |
| 3455 | return; |
| 3456 | |
| 3457 | pending_updates = curr_jiffies - this_rq->last_load_update_tick; |
| 3458 | this_rq->last_load_update_tick = curr_jiffies; |
| 3459 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3460 | /* Update our load: */ |
Venkatesh Pallipadi | fdf3e95 | 2010-05-17 18:14:43 -0700 | [diff] [blame] | 3461 | this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */ |
| 3462 | for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) { |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3463 | unsigned long old_load, new_load; |
| 3464 | |
| 3465 | /* scale is effectively 1 << i now, and >> i divides by scale */ |
| 3466 | |
| 3467 | old_load = this_rq->cpu_load[i]; |
Venkatesh Pallipadi | fdf3e95 | 2010-05-17 18:14:43 -0700 | [diff] [blame] | 3468 | old_load = decay_load_missed(old_load, pending_updates - 1, i); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3469 | new_load = this_load; |
Ingo Molnar | a25707f | 2007-10-15 17:00:03 +0200 | [diff] [blame] | 3470 | /* |
| 3471 | * Round up the averaging division if load is increasing. This |
| 3472 | * prevents us from getting stuck on 9 if the load is 10, for |
| 3473 | * example. |
| 3474 | */ |
| 3475 | if (new_load > old_load) |
Venkatesh Pallipadi | fdf3e95 | 2010-05-17 18:14:43 -0700 | [diff] [blame] | 3476 | new_load += scale - 1; |
| 3477 | |
| 3478 | this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3479 | } |
Suresh Siddha | da2b71e | 2010-08-23 13:42:51 -0700 | [diff] [blame] | 3480 | |
| 3481 | sched_avg_update(this_rq); |
Venkatesh Pallipadi | fdf3e95 | 2010-05-17 18:14:43 -0700 | [diff] [blame] | 3482 | } |
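/*
 * Editor's illustration: with scale = 2^i the per-index filters are
 *
 *	cpu_load[1] = (old + new) / 2
 *	cpu_load[2] = (3 * old + new) / 4
 *	cpu_load[3] = (7 * old + new) / 8
 *	cpu_load[4] = (15 * old + new) / 16
 *
 * so higher indices react more slowly; the "scale - 1" rounding term
 * only kicks in when the load is increasing, as noted above.
 */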
| 3483 | |
| 3484 | static void update_cpu_load_active(struct rq *this_rq) |
| 3485 | { |
| 3486 | update_cpu_load(this_rq); |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 3487 | |
Peter Zijlstra | 74f5187 | 2010-04-22 21:50:19 +0200 | [diff] [blame] | 3488 | calc_load_account_active(this_rq); |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 3489 | } |
| 3490 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3491 | #ifdef CONFIG_SMP |
| 3492 | |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 3493 | /* |
Peter Zijlstra | 3802290 | 2009-12-16 18:04:37 +0100 | [diff] [blame] | 3494 | * sched_exec - execve() is a valuable balancing opportunity, because at |
| 3495 | * this point the task has the smallest effective memory and cache footprint. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3496 | */ |
Peter Zijlstra | 3802290 | 2009-12-16 18:04:37 +0100 | [diff] [blame] | 3497 | void sched_exec(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3498 | { |
Peter Zijlstra | 3802290 | 2009-12-16 18:04:37 +0100 | [diff] [blame] | 3499 | struct task_struct *p = current; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3500 | unsigned long flags; |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 3501 | struct rq *rq; |
Peter Zijlstra | 0017d73 | 2010-03-24 18:34:10 +0100 | [diff] [blame] | 3502 | int dest_cpu; |
Peter Zijlstra | 3802290 | 2009-12-16 18:04:37 +0100 | [diff] [blame] | 3503 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3504 | rq = task_rq_lock(p, &flags); |
Peter Zijlstra | 0017d73 | 2010-03-24 18:34:10 +0100 | [diff] [blame] | 3505 | dest_cpu = p->sched_class->select_task_rq(rq, p, SD_BALANCE_EXEC, 0); |
| 3506 | if (dest_cpu == smp_processor_id()) |
| 3507 | goto unlock; |
Peter Zijlstra | 3802290 | 2009-12-16 18:04:37 +0100 | [diff] [blame] | 3508 | |
| 3509 | /* |
| 3510 | * select_task_rq() can race against ->cpus_allowed |
| 3511 | */ |
Oleg Nesterov | 30da688 | 2010-03-15 10:10:19 +0100 | [diff] [blame] | 3512 | if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) && |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 3513 | likely(cpu_active(dest_cpu)) && migrate_task(p, dest_cpu)) { |
| 3514 | struct migration_arg arg = { p, dest_cpu }; |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 3515 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3516 | task_rq_unlock(rq, &flags); |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 3517 | stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3518 | return; |
| 3519 | } |
Peter Zijlstra | 0017d73 | 2010-03-24 18:34:10 +0100 | [diff] [blame] | 3520 | unlock: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3521 | task_rq_unlock(rq, &flags); |
| 3522 | } |
| 3523 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3524 | #endif |
| 3525 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3526 | DEFINE_PER_CPU(struct kernel_stat, kstat); |
| 3527 | |
| 3528 | EXPORT_PER_CPU_SYMBOL(kstat); |
| 3529 | |
| 3530 | /* |
Hidetoshi Seto | c5f8d99 | 2009-03-31 16:56:03 +0900 | [diff] [blame] | 3531 | * Return any ns on the sched_clock that have not yet been accounted in |
Frank Mayhar | f06febc | 2008-09-12 09:54:39 -0700 | [diff] [blame] | 3532 | * @p in case that task is currently running. |
Hidetoshi Seto | c5f8d99 | 2009-03-31 16:56:03 +0900 | [diff] [blame] | 3533 | * |
| 3534 | * Called with task_rq_lock() held on @rq. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3535 | */ |
Hidetoshi Seto | c5f8d99 | 2009-03-31 16:56:03 +0900 | [diff] [blame] | 3536 | static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq) |
| 3537 | { |
| 3538 | u64 ns = 0; |
| 3539 | |
| 3540 | if (task_current(rq, p)) { |
| 3541 | update_rq_clock(rq); |
Venkatesh Pallipadi | 305e683 | 2010-10-04 17:03:21 -0700 | [diff] [blame] | 3542 | ns = rq->clock_task - p->se.exec_start; |
Hidetoshi Seto | c5f8d99 | 2009-03-31 16:56:03 +0900 | [diff] [blame] | 3543 | if ((s64)ns < 0) |
| 3544 | ns = 0; |
| 3545 | } |
| 3546 | |
| 3547 | return ns; |
| 3548 | } |
| 3549 | |
Frank Mayhar | bb34d92 | 2008-09-12 09:54:39 -0700 | [diff] [blame] | 3550 | unsigned long long task_delta_exec(struct task_struct *p) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3551 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3552 | unsigned long flags; |
Ingo Molnar | 41b86e9 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 3553 | struct rq *rq; |
Frank Mayhar | bb34d92 | 2008-09-12 09:54:39 -0700 | [diff] [blame] | 3554 | u64 ns = 0; |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 3555 | |
Ingo Molnar | 41b86e9 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 3556 | rq = task_rq_lock(p, &flags); |
Hidetoshi Seto | c5f8d99 | 2009-03-31 16:56:03 +0900 | [diff] [blame] | 3557 | ns = do_task_delta_exec(p, rq); |
| 3558 | task_rq_unlock(rq, &flags); |
Ingo Molnar | 1508487 | 2008-09-30 08:28:17 +0200 | [diff] [blame] | 3559 | |
Hidetoshi Seto | c5f8d99 | 2009-03-31 16:56:03 +0900 | [diff] [blame] | 3560 | return ns; |
| 3561 | } |
Frank Mayhar | f06febc | 2008-09-12 09:54:39 -0700 | [diff] [blame] | 3562 | |
Hidetoshi Seto | c5f8d99 | 2009-03-31 16:56:03 +0900 | [diff] [blame] | 3563 | /* |
| 3564 | * Return accounted runtime for the task. |
| 3565 | * In case the task is currently running, return the runtime plus current's |
| 3566 | * pending runtime that have not been accounted yet. |
| 3567 | */ |
| 3568 | unsigned long long task_sched_runtime(struct task_struct *p) |
| 3569 | { |
| 3570 | unsigned long flags; |
| 3571 | struct rq *rq; |
| 3572 | u64 ns = 0; |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 3573 | |
Hidetoshi Seto | c5f8d99 | 2009-03-31 16:56:03 +0900 | [diff] [blame] | 3574 | rq = task_rq_lock(p, &flags); |
| 3575 | ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq); |
| 3576 | task_rq_unlock(rq, &flags); |
| 3577 | |
| 3578 | return ns; |
| 3579 | } |
| 3580 | |
| 3581 | /* |
| 3582 | * Return sum_exec_runtime for the thread group. |
| 3583 | * In case the task is currently running, return the sum plus current's |
| 3584 | * pending runtime that have not been accounted yet. |
| 3585 | * |
| 3586 | * Note that the thread group might have other running tasks as well, |
| 3587 | * so the return value does not include the pending runtime that those |
| 3588 | * other running tasks might have. |
| 3589 | */ |
| 3590 | unsigned long long thread_group_sched_runtime(struct task_struct *p) |
| 3591 | { |
| 3592 | struct task_cputime totals; |
| 3593 | unsigned long flags; |
| 3594 | struct rq *rq; |
| 3595 | u64 ns; |
| 3596 | |
| 3597 | rq = task_rq_lock(p, &flags); |
| 3598 | thread_group_cputime(p, &totals); |
| 3599 | ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3600 | task_rq_unlock(rq, &flags); |
| 3601 | |
| 3602 | return ns; |
| 3603 | } |
| 3604 | |
| 3605 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3606 | * Account user cpu time to a process. |
| 3607 | * @p: the process that the cpu time gets accounted to |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3608 | * @cputime: the cpu time spent in user space since the last update |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3609 | * @cputime_scaled: cputime scaled by cpu frequency |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3610 | */ |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3611 | void account_user_time(struct task_struct *p, cputime_t cputime, |
| 3612 | cputime_t cputime_scaled) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3613 | { |
| 3614 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
| 3615 | cputime64_t tmp; |
| 3616 | |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3617 | /* Add user time to process. */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3618 | p->utime = cputime_add(p->utime, cputime); |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3619 | p->utimescaled = cputime_add(p->utimescaled, cputime_scaled); |
Frank Mayhar | f06febc | 2008-09-12 09:54:39 -0700 | [diff] [blame] | 3620 | account_group_user_time(p, cputime); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3621 | |
| 3622 | /* Add user time to cpustat. */ |
| 3623 | tmp = cputime_to_cputime64(cputime); |
| 3624 | if (TASK_NICE(p) > 0) |
| 3625 | cpustat->nice = cputime64_add(cpustat->nice, tmp); |
| 3626 | else |
| 3627 | cpustat->user = cputime64_add(cpustat->user, tmp); |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 3628 | |
| 3629 | cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime); |
Jonathan Lim | 49b5cf3 | 2008-07-25 01:48:40 -0700 | [diff] [blame] | 3630 | /* Account for user time used */ |
| 3631 | acct_update_integrals(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3632 | } |
| 3633 | |
| 3634 | /* |
Laurent Vivier | 94886b8 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 3635 | * Account guest cpu time to a process. |
| 3636 | * @p: the process that the cpu time gets accounted to |
| 3637 | * @cputime: the cpu time spent in virtual machine since the last update |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3638 | * @cputime_scaled: cputime scaled by cpu frequency |
Laurent Vivier | 94886b8 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 3639 | */ |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3640 | static void account_guest_time(struct task_struct *p, cputime_t cputime, |
| 3641 | cputime_t cputime_scaled) |
Laurent Vivier | 94886b8 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 3642 | { |
| 3643 | cputime64_t tmp; |
| 3644 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
| 3645 | |
| 3646 | tmp = cputime_to_cputime64(cputime); |
| 3647 | |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3648 | /* Add guest time to process. */ |
Laurent Vivier | 94886b8 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 3649 | p->utime = cputime_add(p->utime, cputime); |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3650 | p->utimescaled = cputime_add(p->utimescaled, cputime_scaled); |
Frank Mayhar | f06febc | 2008-09-12 09:54:39 -0700 | [diff] [blame] | 3651 | account_group_user_time(p, cputime); |
Laurent Vivier | 94886b8 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 3652 | p->gtime = cputime_add(p->gtime, cputime); |
| 3653 | |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3654 | /* Add guest time to cpustat. */ |
Ryota Ozaki | ce0e7b2 | 2009-10-24 01:20:10 +0900 | [diff] [blame] | 3655 | if (TASK_NICE(p) > 0) { |
| 3656 | cpustat->nice = cputime64_add(cpustat->nice, tmp); |
| 3657 | cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp); |
| 3658 | } else { |
| 3659 | cpustat->user = cputime64_add(cpustat->user, tmp); |
| 3660 | cpustat->guest = cputime64_add(cpustat->guest, tmp); |
| 3661 | } |
Laurent Vivier | 94886b8 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 3662 | } |
| 3663 | |
| 3664 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3665 | * Account system cpu time to a process. |
| 3666 | * @p: the process that the cpu time gets accounted to |
| 3667 | * @hardirq_offset: the offset to subtract from hardirq_count() |
| 3668 | * @cputime: the cpu time spent in kernel space since the last update |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3669 | * @cputime_scaled: cputime scaled by cpu frequency |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3670 | */ |
| 3671 | void account_system_time(struct task_struct *p, int hardirq_offset, |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3672 | cputime_t cputime, cputime_t cputime_scaled) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3673 | { |
| 3674 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3675 | cputime64_t tmp; |
| 3676 | |
Harvey Harrison | 983ed7a | 2008-04-24 18:17:55 -0700 | [diff] [blame] | 3677 | if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) { |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3678 | account_guest_time(p, cputime, cputime_scaled); |
Harvey Harrison | 983ed7a | 2008-04-24 18:17:55 -0700 | [diff] [blame] | 3679 | return; |
| 3680 | } |
Laurent Vivier | 94886b8 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 3681 | |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3682 | /* Add system time to process. */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3683 | p->stime = cputime_add(p->stime, cputime); |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3684 | p->stimescaled = cputime_add(p->stimescaled, cputime_scaled); |
Frank Mayhar | f06febc | 2008-09-12 09:54:39 -0700 | [diff] [blame] | 3685 | account_group_system_time(p, cputime); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3686 | |
| 3687 | /* Add system time to cpustat. */ |
| 3688 | tmp = cputime_to_cputime64(cputime); |
| 3689 | if (hardirq_count() - hardirq_offset) |
| 3690 | cpustat->irq = cputime64_add(cpustat->irq, tmp); |
Venkatesh Pallipadi | 75e1056 | 2010-10-04 17:03:16 -0700 | [diff] [blame] | 3691 | else if (in_serving_softirq()) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3692 | cpustat->softirq = cputime64_add(cpustat->softirq, tmp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3693 | else |
Martin Schwidefsky | 79741dd | 2008-12-31 15:11:38 +0100 | [diff] [blame] | 3694 | cpustat->system = cputime64_add(cpustat->system, tmp); |
| 3695 | |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 3696 | cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime); |
| 3697 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3698 | /* Account for system time used */ |
| 3699 | acct_update_integrals(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3700 | } |
| 3701 | |
| 3702 | /* |
| 3703 | * Account for involuntary wait time. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3704 | * @cputime: the cpu time spent in involuntary wait |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3705 | */ |
Martin Schwidefsky | 79741dd | 2008-12-31 15:11:38 +0100 | [diff] [blame] | 3706 | void account_steal_time(cputime_t cputime) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3707 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3708 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
Martin Schwidefsky | 79741dd | 2008-12-31 15:11:38 +0100 | [diff] [blame] | 3709 | cputime64_t cputime64 = cputime_to_cputime64(cputime); |
| 3710 | |
| 3711 | cpustat->steal = cputime64_add(cpustat->steal, cputime64); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3712 | } |
| 3713 | |
Christoph Lameter | 7835b98 | 2006-12-10 02:20:22 -0800 | [diff] [blame] | 3714 | /* |
Martin Schwidefsky | 79741dd | 2008-12-31 15:11:38 +0100 | [diff] [blame] | 3715 | * Account for idle time. |
| 3716 | * @cputime: the cpu time spent in idle wait |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3717 | */ |
Martin Schwidefsky | 79741dd | 2008-12-31 15:11:38 +0100 | [diff] [blame] | 3718 | void account_idle_time(cputime_t cputime) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3719 | { |
| 3720 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
Martin Schwidefsky | 79741dd | 2008-12-31 15:11:38 +0100 | [diff] [blame] | 3721 | cputime64_t cputime64 = cputime_to_cputime64(cputime); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3722 | struct rq *rq = this_rq(); |
| 3723 | |
Martin Schwidefsky | 79741dd | 2008-12-31 15:11:38 +0100 | [diff] [blame] | 3724 | if (atomic_read(&rq->nr_iowait) > 0) |
| 3725 | cpustat->iowait = cputime64_add(cpustat->iowait, cputime64); |
| 3726 | else |
| 3727 | cpustat->idle = cputime64_add(cpustat->idle, cputime64); |
Christoph Lameter | 7835b98 | 2006-12-10 02:20:22 -0800 | [diff] [blame] | 3728 | } |
| 3729 | |
Martin Schwidefsky | 79741dd | 2008-12-31 15:11:38 +0100 | [diff] [blame] | 3730 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING |
| 3731 | |
| 3732 | /* |
| 3733 | * Account a single tick of cpu time. |
| 3734 | * @p: the process that the cpu time gets accounted to |
| 3735 | * @user_tick: indicates if the tick is a user or a system tick |
| 3736 | */ |
| 3737 | void account_process_tick(struct task_struct *p, int user_tick) |
| 3738 | { |
Stanislaw Gruszka | a42548a | 2009-07-29 12:15:29 +0200 | [diff] [blame] | 3739 | cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy); |
Martin Schwidefsky | 79741dd | 2008-12-31 15:11:38 +0100 | [diff] [blame] | 3740 | struct rq *rq = this_rq(); |
| 3741 | |
| 3742 | if (user_tick) |
Stanislaw Gruszka | a42548a | 2009-07-29 12:15:29 +0200 | [diff] [blame] | 3743 | account_user_time(p, cputime_one_jiffy, one_jiffy_scaled); |
Eric Dumazet | f5f293a | 2009-04-29 14:44:49 +0200 | [diff] [blame] | 3744 | else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET)) |
Stanislaw Gruszka | a42548a | 2009-07-29 12:15:29 +0200 | [diff] [blame] | 3745 | account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy, |
Martin Schwidefsky | 79741dd | 2008-12-31 15:11:38 +0100 | [diff] [blame] | 3746 | one_jiffy_scaled); |
| 3747 | else |
Stanislaw Gruszka | a42548a | 2009-07-29 12:15:29 +0200 | [diff] [blame] | 3748 | account_idle_time(cputime_one_jiffy); |
Martin Schwidefsky | 79741dd | 2008-12-31 15:11:38 +0100 | [diff] [blame] | 3749 | } |
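/*
 * Editor's illustration of the three-way split above: a tick that lands
 * while a task runs in user mode is charged as cputime_one_jiffy of user
 * time; a tick during a syscall or while servicing an interrupt is
 * charged as system time; and a tick on the idle task, with only the
 * timer interrupt itself on the stack, is accounted as idle (or iowait,
 * depending on rq->nr_iowait) time.
 */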
| 3750 | |
| 3751 | /* |
| 3752 | * Account multiple ticks of steal time. |
| 3754 | * @ticks: number of stolen ticks |
| 3755 | */ |
| 3756 | void account_steal_ticks(unsigned long ticks) |
| 3757 | { |
| 3758 | account_steal_time(jiffies_to_cputime(ticks)); |
| 3759 | } |
| 3760 | |
| 3761 | /* |
| 3762 | * Account multiple ticks of idle time. |
| 3763 | * @ticks: number of idle ticks |
| 3764 | */ |
| 3765 | void account_idle_ticks(unsigned long ticks) |
| 3766 | { |
| 3767 | account_idle_time(jiffies_to_cputime(ticks)); |
| 3768 | } |
| 3769 | |
| 3770 | #endif |
| 3771 | |
Christoph Lameter | 7835b98 | 2006-12-10 02:20:22 -0800 | [diff] [blame] | 3772 | /* |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 3773 | * Use precise platform statistics if available: |
| 3774 | */ |
| 3775 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
Hidetoshi Seto | d180c5b | 2009-11-26 14:48:30 +0900 | [diff] [blame] | 3776 | void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 3777 | { |
Hidetoshi Seto | d99ca3b | 2009-12-02 17:26:47 +0900 | [diff] [blame] | 3778 | *ut = p->utime; |
| 3779 | *st = p->stime; |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 3780 | } |
| 3781 | |
Hidetoshi Seto | 0cf55e1 | 2009-12-02 17:28:07 +0900 | [diff] [blame] | 3782 | void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 3783 | { |
Hidetoshi Seto | 0cf55e1 | 2009-12-02 17:28:07 +0900 | [diff] [blame] | 3784 | struct task_cputime cputime; |
| 3785 | |
| 3786 | thread_group_cputime(p, &cputime); |
| 3787 | |
| 3788 | *ut = cputime.utime; |
| 3789 | *st = cputime.stime; |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 3790 | } |
| 3791 | #else |
Hidetoshi Seto | 761b1d2 | 2009-11-12 13:33:45 +0900 | [diff] [blame] | 3792 | |
| 3793 | #ifndef nsecs_to_cputime |
Hidetoshi Seto | b7b20df9 | 2009-11-26 14:49:27 +0900 | [diff] [blame] | 3794 | # define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs) |
Hidetoshi Seto | 761b1d2 | 2009-11-12 13:33:45 +0900 | [diff] [blame] | 3795 | #endif |
| 3796 | |
Hidetoshi Seto | d180c5b | 2009-11-26 14:48:30 +0900 | [diff] [blame] | 3797 | void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 3798 | { |
Hidetoshi Seto | d99ca3b | 2009-12-02 17:26:47 +0900 | [diff] [blame] | 3799 | cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime); |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 3800 | |
| 3801 | /* |
| 3802 | * Use CFS's precise accounting: |
| 3803 | */ |
Hidetoshi Seto | d180c5b | 2009-11-26 14:48:30 +0900 | [diff] [blame] | 3804 | rtime = nsecs_to_cputime(p->se.sum_exec_runtime); |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 3805 | |
| 3806 | if (total) { |
Stanislaw Gruszka | e75e863 | 2010-09-14 16:35:14 +0200 | [diff] [blame] | 3807 | u64 temp = rtime; |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 3808 | |
Stanislaw Gruszka | e75e863 | 2010-09-14 16:35:14 +0200 | [diff] [blame] | 3809 | temp *= utime; |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 3810 | do_div(temp, total); |
Hidetoshi Seto | d180c5b | 2009-11-26 14:48:30 +0900 | [diff] [blame] | 3811 | utime = (cputime_t)temp; |
| 3812 | } else |
| 3813 | utime = rtime; |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 3814 | |
| 3815 | /* |
Hidetoshi Seto | d180c5b | 2009-11-26 14:48:30 +0900 | [diff] [blame] | 3816 | * Compare with previous values, to keep monotonicity: |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 3817 | */ |
Hidetoshi Seto | 761b1d2 | 2009-11-12 13:33:45 +0900 | [diff] [blame] | 3818 | p->prev_utime = max(p->prev_utime, utime); |
Hidetoshi Seto | d99ca3b | 2009-12-02 17:26:47 +0900 | [diff] [blame] | 3819 | p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime)); |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 3820 | |
Hidetoshi Seto | d99ca3b | 2009-12-02 17:26:47 +0900 | [diff] [blame] | 3821 | *ut = p->prev_utime; |
| 3822 | *st = p->prev_stime; |
Hidetoshi Seto | d180c5b | 2009-11-26 14:48:30 +0900 | [diff] [blame] | 3823 | } |
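/*
 * Worked example (editor's illustration): suppose tick sampling has
 * credited p with utime = 6 and stime = 4 jiffies (total = 10), while
 * CFS's sum_exec_runtime converts to rtime = 8 jiffies.  Then
 *
 *	utime = 8 * 6 / 10 = 4 (integer division)
 *
 * and, provided the previously reported values were no larger, the
 * reported split becomes prev_utime = 4, prev_stime = 8 - 4 = 4, so
 * utime + stime == rtime while the sampled user/system ratio is kept.
 */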
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 3824 | |
Hidetoshi Seto | 0cf55e1 | 2009-12-02 17:28:07 +0900 | [diff] [blame] | 3825 | /* |
| 3826 | * Must be called with siglock held. |
| 3827 | */ |
| 3828 | void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) |
| 3829 | { |
| 3830 | struct signal_struct *sig = p->signal; |
| 3831 | struct task_cputime cputime; |
| 3832 | cputime_t rtime, utime, total; |
| 3833 | |
| 3834 | thread_group_cputime(p, &cputime); |
| 3835 | |
| 3836 | total = cputime_add(cputime.utime, cputime.stime); |
| 3837 | rtime = nsecs_to_cputime(cputime.sum_exec_runtime); |
| 3838 | |
| 3839 | if (total) { |
Stanislaw Gruszka | e75e863 | 2010-09-14 16:35:14 +0200 | [diff] [blame] | 3840 | u64 temp = rtime; |
Hidetoshi Seto | 0cf55e1 | 2009-12-02 17:28:07 +0900 | [diff] [blame] | 3841 | |
Stanislaw Gruszka | e75e863 | 2010-09-14 16:35:14 +0200 | [diff] [blame] | 3842 | temp *= cputime.utime; |
Hidetoshi Seto | 0cf55e1 | 2009-12-02 17:28:07 +0900 | [diff] [blame] | 3843 | do_div(temp, total); |
| 3844 | utime = (cputime_t)temp; |
| 3845 | } else |
| 3846 | utime = rtime; |
| 3847 | |
| 3848 | sig->prev_utime = max(sig->prev_utime, utime); |
| 3849 | sig->prev_stime = max(sig->prev_stime, |
| 3850 | cputime_sub(rtime, sig->prev_utime)); |
| 3851 | |
| 3852 | *ut = sig->prev_utime; |
| 3853 | *st = sig->prev_stime; |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 3854 | } |
| 3855 | #endif |
| 3856 | |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 3857 | /* |
Christoph Lameter | 7835b98 | 2006-12-10 02:20:22 -0800 | [diff] [blame] | 3858 | * This function gets called by the timer code, with HZ frequency. |
| 3859 | * We call it with interrupts disabled. |
| 3860 | * |
| 3861 | * It also gets called by the fork code, when changing the parent's |
| 3862 | * timeslices. |
| 3863 | */ |
| 3864 | void scheduler_tick(void) |
| 3865 | { |
Christoph Lameter | 7835b98 | 2006-12-10 02:20:22 -0800 | [diff] [blame] | 3866 | int cpu = smp_processor_id(); |
| 3867 | struct rq *rq = cpu_rq(cpu); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3868 | struct task_struct *curr = rq->curr; |
Peter Zijlstra | 3e51f33 | 2008-05-03 18:29:28 +0200 | [diff] [blame] | 3869 | |
| 3870 | sched_clock_tick(); |
Christoph Lameter | 7835b98 | 2006-12-10 02:20:22 -0800 | [diff] [blame] | 3871 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 3872 | raw_spin_lock(&rq->lock); |
Peter Zijlstra | 3e51f33 | 2008-05-03 18:29:28 +0200 | [diff] [blame] | 3873 | update_rq_clock(rq); |
Venkatesh Pallipadi | fdf3e95 | 2010-05-17 18:14:43 -0700 | [diff] [blame] | 3874 | update_cpu_load_active(rq); |
Peter Zijlstra | fa85ae2 | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 3875 | curr->sched_class->task_tick(rq, curr, 0); |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 3876 | raw_spin_unlock(&rq->lock); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3877 | |
Peter Zijlstra | e9d2b06 | 2010-09-17 11:28:50 +0200 | [diff] [blame] | 3878 | perf_event_task_tick(); |
Peter Zijlstra | e220d2d | 2009-05-23 18:28:55 +0200 | [diff] [blame] | 3879 | |
Christoph Lameter | e418e1c | 2006-12-10 02:20:23 -0800 | [diff] [blame] | 3880 | #ifdef CONFIG_SMP |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3881 | rq->idle_at_tick = idle_cpu(cpu); |
| 3882 | trigger_load_balance(rq, cpu); |
Christoph Lameter | e418e1c | 2006-12-10 02:20:23 -0800 | [diff] [blame] | 3883 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3884 | } |
| 3885 | |
Lai Jiangshan | 132380a | 2009-04-02 14:18:25 +0800 | [diff] [blame] | 3886 | notrace unsigned long get_parent_ip(unsigned long addr) |
Steven Rostedt | 6cd8a4b | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3887 | { |
| 3888 | if (in_lock_functions(addr)) { |
| 3889 | addr = CALLER_ADDR2; |
| 3890 | if (in_lock_functions(addr)) |
| 3891 | addr = CALLER_ADDR3; |
| 3892 | } |
| 3893 | return addr; |
| 3894 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3895 | |
Steven Rostedt | 7e49fcc | 2009-01-22 19:01:40 -0500 | [diff] [blame] | 3896 | #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ |
| 3897 | defined(CONFIG_PREEMPT_TRACER)) |
| 3898 | |
Srinivasa Ds | 4362758 | 2008-02-23 15:24:04 -0800 | [diff] [blame] | 3899 | void __kprobes add_preempt_count(int val) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3900 | { |
Steven Rostedt | 6cd8a4b | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3901 | #ifdef CONFIG_DEBUG_PREEMPT |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3902 | /* |
| 3903 | * Underflow? |
| 3904 | */ |
Ingo Molnar | 9a11b49a | 2006-07-03 00:24:33 -0700 | [diff] [blame] | 3905 | if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) |
| 3906 | return; |
Steven Rostedt | 6cd8a4b | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3907 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3908 | preempt_count() += val; |
Steven Rostedt | 6cd8a4b | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3909 | #ifdef CONFIG_DEBUG_PREEMPT |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3910 | /* |
| 3911 | * Spinlock count overflowing soon? |
| 3912 | */ |
Miguel Ojeda Sandonis | 33859f7 | 2006-12-10 02:20:38 -0800 | [diff] [blame] | 3913 | DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= |
| 3914 | PREEMPT_MASK - 10); |
Steven Rostedt | 6cd8a4b | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3915 | #endif |
| 3916 | if (preempt_count() == val) |
| 3917 | trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3918 | } |
| 3919 | EXPORT_SYMBOL(add_preempt_count); |
| 3920 | |
Srinivasa Ds | 4362758 | 2008-02-23 15:24:04 -0800 | [diff] [blame] | 3921 | void __kprobes sub_preempt_count(int val) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3922 | { |
Steven Rostedt | 6cd8a4b | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3923 | #ifdef CONFIG_DEBUG_PREEMPT |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3924 | /* |
| 3925 | * Underflow? |
| 3926 | */ |
Ingo Molnar | 01e3eb8 | 2009-01-12 13:00:50 +0100 | [diff] [blame] | 3927 | if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) |
Ingo Molnar | 9a11b49a | 2006-07-03 00:24:33 -0700 | [diff] [blame] | 3928 | return; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3929 | /* |
| 3930 | * Is the spinlock portion underflowing? |
| 3931 | */ |
Ingo Molnar | 9a11b49a | 2006-07-03 00:24:33 -0700 | [diff] [blame] | 3932 | if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) && |
| 3933 | !(preempt_count() & PREEMPT_MASK))) |
| 3934 | return; |
Steven Rostedt | 6cd8a4b | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3935 | #endif |
Ingo Molnar | 9a11b49a | 2006-07-03 00:24:33 -0700 | [diff] [blame] | 3936 | |
Steven Rostedt | 6cd8a4b | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3937 | if (preempt_count() == val) |
| 3938 | trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3939 | preempt_count() -= val; |
| 3940 | } |
| 3941 | EXPORT_SYMBOL(sub_preempt_count); |
| 3942 | |
| 3943 | #endif |
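/*
 * Rough sketch of how these counters are normally driven (assumption:
 * based on the generic include/linux/preempt.h definitions, simplified):
 *
 *	#define preempt_disable()		\
 *	do {					\
 *		add_preempt_count(1);		\
 *		barrier();			\
 *	} while (0)
 *
 *	#define preempt_enable_no_resched()	\
 *	do {					\
 *		barrier();			\
 *		sub_preempt_count(1);		\
 *	} while (0)
 *
 * preempt_enable() additionally checks need_resched() afterwards.  The
 * out-of-line versions above exist so that DEBUG_PREEMPT/PREEMPT_TRACER
 * builds can sanity-check and trace every transition.
 */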
| 3944 | |
| 3945 | /* |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3946 | * Print scheduling while atomic bug: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3947 | */ |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3948 | static noinline void __schedule_bug(struct task_struct *prev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3949 | { |
Satyam Sharma | 838225b | 2007-10-24 18:23:50 +0200 | [diff] [blame] | 3950 | struct pt_regs *regs = get_irq_regs(); |
| 3951 | |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 3952 | printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", |
| 3953 | prev->comm, prev->pid, preempt_count()); |
Satyam Sharma | 838225b | 2007-10-24 18:23:50 +0200 | [diff] [blame] | 3954 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3955 | debug_show_held_locks(prev); |
Arjan van de Ven | e21f5b1 | 2008-05-23 09:05:58 -0700 | [diff] [blame] | 3956 | print_modules(); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3957 | if (irqs_disabled()) |
| 3958 | print_irqtrace_events(prev); |
Satyam Sharma | 838225b | 2007-10-24 18:23:50 +0200 | [diff] [blame] | 3959 | |
| 3960 | if (regs) |
| 3961 | show_regs(regs); |
| 3962 | else |
| 3963 | dump_stack(); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3964 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3965 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3966 | /* |
| 3967 | * Various schedule()-time debugging checks and statistics: |
| 3968 | */ |
| 3969 | static inline void schedule_debug(struct task_struct *prev) |
| 3970 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3971 | /* |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 3972 | * Test if we are atomic. Since do_exit() needs to call into |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3973 | * schedule() atomically, we ignore that path for now. |
| 3974 | * Otherwise, whine if we are scheduling when we should not be. |
| 3975 | */ |
Roel Kluin | 3f33a7c | 2008-05-13 23:44:11 +0200 | [diff] [blame] | 3976 | if (unlikely(in_atomic_preempt_off() && !prev->exit_state)) |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3977 | __schedule_bug(prev); |
| 3978 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3979 | profile_hit(SCHED_PROFILING, __builtin_return_address(0)); |
| 3980 | |
Ingo Molnar | 2d72376 | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 3981 | schedstat_inc(this_rq(), sched_count); |
Ingo Molnar | b8efb56 | 2007-10-15 17:00:10 +0200 | [diff] [blame] | 3982 | #ifdef CONFIG_SCHEDSTATS |
| 3983 | if (unlikely(prev->lock_depth >= 0)) { |
Ingo Molnar | 2d72376 | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 3984 | schedstat_inc(this_rq(), bkl_count); |
| 3985 | schedstat_inc(prev, sched_info.bkl_count); |
Ingo Molnar | b8efb56 | 2007-10-15 17:00:10 +0200 | [diff] [blame] | 3986 | } |
| 3987 | #endif |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3988 | } |
| 3989 | |
Peter Zijlstra | 6cecd08 | 2009-11-30 13:00:37 +0100 | [diff] [blame] | 3990 | static void put_prev_task(struct rq *rq, struct task_struct *prev) |
Mike Galbraith | df1c99d | 2009-03-10 19:08:11 +0100 | [diff] [blame] | 3991 | { |
Mike Galbraith | a64692a | 2010-03-11 17:16:20 +0100 | [diff] [blame] | 3992 | if (prev->se.on_rq) |
| 3993 | update_rq_clock(rq); |
Peter Zijlstra | 6cecd08 | 2009-11-30 13:00:37 +0100 | [diff] [blame] | 3994 | prev->sched_class->put_prev_task(rq, prev); |
Mike Galbraith | df1c99d | 2009-03-10 19:08:11 +0100 | [diff] [blame] | 3995 | } |
| 3996 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3997 | /* |
| 3998 | * Pick up the highest-prio task: |
| 3999 | */ |
| 4000 | static inline struct task_struct * |
Wang Chen | b67802e | 2009-03-02 13:55:26 +0800 | [diff] [blame] | 4001 | pick_next_task(struct rq *rq) |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4002 | { |
Ingo Molnar | 5522d5d | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 4003 | const struct sched_class *class; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4004 | struct task_struct *p; |
| 4005 | |
| 4006 | /* |
| 4007 | * Optimization: we know that if all tasks are in |
| 4008 | * the fair class we can call that function directly: |
| 4009 | */ |
| 4010 | if (likely(rq->nr_running == rq->cfs.nr_running)) { |
Ingo Molnar | fb8d472 | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 4011 | p = fair_sched_class.pick_next_task(rq); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4012 | if (likely(p)) |
| 4013 | return p; |
| 4014 | } |
| 4015 | |
Peter Zijlstra | 34f971f | 2010-09-22 13:53:15 +0200 | [diff] [blame] | 4016 | for_each_class(class) { |
Ingo Molnar | fb8d472 | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 4017 | p = class->pick_next_task(rq); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4018 | if (p) |
| 4019 | return p; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4020 | } |
Peter Zijlstra | 34f971f | 2010-09-22 13:53:15 +0200 | [diff] [blame] | 4021 | |
| 4022 | BUG(); /* the idle class will always have a runnable task */ |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4023 | } |
| 4024 | |
| 4025 | /* |
| 4026 | * schedule() is the main scheduler function. |
| 4027 | */ |
Peter Zijlstra | ff74334 | 2009-03-13 12:21:26 +0100 | [diff] [blame] | 4028 | asmlinkage void __sched schedule(void) |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4029 | { |
| 4030 | struct task_struct *prev, *next; |
Harvey Harrison | 67ca7bd | 2008-02-15 09:56:36 -0800 | [diff] [blame] | 4031 | unsigned long *switch_count; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4032 | struct rq *rq; |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 4033 | int cpu; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4034 | |
Peter Zijlstra | ff74334 | 2009-03-13 12:21:26 +0100 | [diff] [blame] | 4035 | need_resched: |
| 4036 | preempt_disable(); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4037 | cpu = smp_processor_id(); |
| 4038 | rq = cpu_rq(cpu); |
Paul E. McKenney | 25502a6 | 2010-04-01 17:37:01 -0700 | [diff] [blame] | 4039 | rcu_note_context_switch(cpu); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4040 | prev = rq->curr; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4041 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4042 | release_kernel_lock(prev); |
| 4043 | need_resched_nonpreemptible: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4044 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4045 | schedule_debug(prev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4046 | |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 4047 | if (sched_feat(HRTICK)) |
Mike Galbraith | f333fdc | 2008-05-12 21:20:55 +0200 | [diff] [blame] | 4048 | hrtick_clear(rq); |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 4049 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 4050 | raw_spin_lock_irq(&rq->lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4051 | |
Oleg Nesterov | 246d86b | 2010-05-19 14:57:11 +0200 | [diff] [blame] | 4052 | switch_count = &prev->nivcsw; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4053 | if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { |
Tejun Heo | 21aa9af | 2010-06-08 21:40:37 +0200 | [diff] [blame] | 4054 | if (unlikely(signal_pending_state(prev->state, prev))) { |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4055 | prev->state = TASK_RUNNING; |
Tejun Heo | 21aa9af | 2010-06-08 21:40:37 +0200 | [diff] [blame] | 4056 | } else { |
| 4057 | /* |
| 4058 | * If a worker is going to sleep, notify and |
| 4059 | * ask workqueue whether it wants to wake up a |
| 4060 | * task to maintain concurrency. If so, wake |
| 4061 | * up the task. |
| 4062 | */ |
| 4063 | if (prev->flags & PF_WQ_WORKER) { |
| 4064 | struct task_struct *to_wakeup; |
| 4065 | |
| 4066 | to_wakeup = wq_worker_sleeping(prev, cpu); |
| 4067 | if (to_wakeup) |
| 4068 | try_to_wake_up_local(to_wakeup); |
| 4069 | } |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 4070 | deactivate_task(rq, prev, DEQUEUE_SLEEP); |
Tejun Heo | 21aa9af | 2010-06-08 21:40:37 +0200 | [diff] [blame] | 4071 | } |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4072 | switch_count = &prev->nvcsw; |
| 4073 | } |
| 4074 | |
Gregory Haskins | 3f029d3 | 2009-07-29 11:08:47 -0400 | [diff] [blame] | 4075 | pre_schedule(rq, prev); |
Steven Rostedt | f65eda4 | 2008-01-25 21:08:07 +0100 | [diff] [blame] | 4076 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4077 | if (unlikely(!rq->nr_running)) |
| 4078 | idle_balance(cpu, rq); |
| 4079 | |
Mike Galbraith | df1c99d | 2009-03-10 19:08:11 +0100 | [diff] [blame] | 4080 | put_prev_task(rq, prev); |
Wang Chen | b67802e | 2009-03-02 13:55:26 +0800 | [diff] [blame] | 4081 | next = pick_next_task(rq); |
Mike Galbraith | f26f9af | 2010-12-08 11:05:42 +0100 | [diff] [blame] | 4082 | clear_tsk_need_resched(prev); |
| 4083 | rq->skip_clock_update = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4084 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4085 | if (likely(prev != next)) { |
David Simner | 673a90a | 2008-04-29 10:08:59 +0100 | [diff] [blame] | 4086 | sched_info_switch(prev, next); |
Peter Zijlstra | 49f4743 | 2009-12-27 11:51:52 +0100 | [diff] [blame] | 4087 | perf_event_task_sched_out(prev, next); |
David Simner | 673a90a | 2008-04-29 10:08:59 +0100 | [diff] [blame] | 4088 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4089 | rq->nr_switches++; |
| 4090 | rq->curr = next; |
| 4091 | ++*switch_count; |
Mike Galbraith | f26f9af | 2010-12-08 11:05:42 +0100 | [diff] [blame] | 4092 | WARN_ON_ONCE(test_tsk_need_resched(next)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4093 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4094 | context_switch(rq, prev, next); /* unlocks the rq */ |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 4095 | /* |
Oleg Nesterov | 246d86b | 2010-05-19 14:57:11 +0200 | [diff] [blame] | 4096 | * The context switch has flipped the stack from under us
| 4097 | * and restored the local variables which were saved when |
| 4098 | * this task called schedule() in the past. prev == current |
| 4099 | * is still correct, but it can be moved to another cpu/rq. |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 4100 | */ |
| 4101 | cpu = smp_processor_id(); |
| 4102 | rq = cpu_rq(cpu); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4103 | } else |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 4104 | raw_spin_unlock_irq(&rq->lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4105 | |
Gregory Haskins | 3f029d3 | 2009-07-29 11:08:47 -0400 | [diff] [blame] | 4106 | post_schedule(rq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4107 | |
Oleg Nesterov | 246d86b | 2010-05-19 14:57:11 +0200 | [diff] [blame] | 4108 | if (unlikely(reacquire_kernel_lock(prev))) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4109 | goto need_resched_nonpreemptible; |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 4110 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4111 | preempt_enable_no_resched(); |
Peter Zijlstra | ff74334 | 2009-03-13 12:21:26 +0100 | [diff] [blame] | 4112 | if (need_resched()) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4113 | goto need_resched; |
| 4114 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4115 | EXPORT_SYMBOL(schedule); |
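/*
 * A minimal sketch of the classic wait pattern built on top of schedule()
 * (illustrative only; wait_event() and friends wrap this for you):
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	if (!condition)
 *		schedule();
 *	__set_current_state(TASK_RUNNING);
 *
 * The prev->state test in schedule() above is what makes this race-free:
 * a wakeup that lands between the condition check and schedule() sets the
 * task back to TASK_RUNNING, so schedule() skips the deactivation and the
 * task gets picked again instead of sleeping forever.
 */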
| 4116 | |
Frederic Weisbecker | c08f782 | 2009-12-02 20:49:17 +0100 | [diff] [blame] | 4117 | #ifdef CONFIG_MUTEX_SPIN_ON_OWNER |
Peter Zijlstra | 0d66bf6 | 2009-01-12 14:01:47 +0100 | [diff] [blame] | 4118 | /* |
| 4119 | * Look out! "owner" is an entirely speculative pointer |
| 4120 | * access and not reliable. |
| 4121 | */ |
| 4122 | int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner) |
| 4123 | { |
| 4124 | unsigned int cpu; |
| 4125 | struct rq *rq; |
| 4126 | |
| 4127 | if (!sched_feat(OWNER_SPIN)) |
| 4128 | return 0; |
| 4129 | |
| 4130 | #ifdef CONFIG_DEBUG_PAGEALLOC |
| 4131 | /* |
| 4132 | * Need to access the cpu field knowing that |
| 4133 | * DEBUG_PAGEALLOC could have unmapped it if |
| 4134 | * the mutex owner just released it and exited. |
| 4135 | */ |
| 4136 | if (probe_kernel_address(&owner->cpu, cpu)) |
Benjamin Herrenschmidt | 4b40221 | 2010-04-16 23:20:00 +0200 | [diff] [blame] | 4137 | return 0; |
Peter Zijlstra | 0d66bf6 | 2009-01-12 14:01:47 +0100 | [diff] [blame] | 4138 | #else |
| 4139 | cpu = owner->cpu; |
| 4140 | #endif |
| 4141 | |
| 4142 | /* |
| 4143 | * Even if the access succeeded (likely case), |
| 4144 | * the cpu field may no longer be valid. |
| 4145 | */ |
| 4146 | if (cpu >= nr_cpumask_bits) |
Benjamin Herrenschmidt | 4b40221 | 2010-04-16 23:20:00 +0200 | [diff] [blame] | 4147 | return 0; |
Peter Zijlstra | 0d66bf6 | 2009-01-12 14:01:47 +0100 | [diff] [blame] | 4148 | |
| 4149 | /* |
| 4150 | * We need to validate that we can do a |
| 4151 | * get_cpu() and that we have the percpu area. |
| 4152 | */ |
| 4153 | if (!cpu_online(cpu)) |
Benjamin Herrenschmidt | 4b40221 | 2010-04-16 23:20:00 +0200 | [diff] [blame] | 4154 | return 0; |
Peter Zijlstra | 0d66bf6 | 2009-01-12 14:01:47 +0100 | [diff] [blame] | 4155 | |
| 4156 | rq = cpu_rq(cpu); |
| 4157 | |
| 4158 | for (;;) { |
| 4159 | /* |
| 4160 | * Owner changed, break to re-assess state. |
| 4161 | */ |
Tim Chen | 9d0f4dc | 2010-08-18 15:00:27 -0700 | [diff] [blame] | 4162 | if (lock->owner != owner) { |
| 4163 | /* |
| 4164 | * If the lock has switched to a different owner, |
| 4165 | * we likely have heavy contention. Return 0 to quit |
| 4166 | * optimistic spinning and not contend further: |
| 4167 | */ |
| 4168 | if (lock->owner) |
| 4169 | return 0; |
Peter Zijlstra | 0d66bf6 | 2009-01-12 14:01:47 +0100 | [diff] [blame] | 4170 | break; |
Tim Chen | 9d0f4dc | 2010-08-18 15:00:27 -0700 | [diff] [blame] | 4171 | } |
Peter Zijlstra | 0d66bf6 | 2009-01-12 14:01:47 +0100 | [diff] [blame] | 4172 | |
| 4173 | /* |
| 4174 | * Is that owner really running on that cpu? |
| 4175 | */ |
| 4176 | if (task_thread_info(rq->curr) != owner || need_resched()) |
| 4177 | return 0; |
| 4178 | |
| 4179 | cpu_relax(); |
| 4180 | } |
Benjamin Herrenschmidt | 4b40221 | 2010-04-16 23:20:00 +0200 | [diff] [blame] | 4181 | |
Peter Zijlstra | 0d66bf6 | 2009-01-12 14:01:47 +0100 | [diff] [blame] | 4182 | return 1; |
| 4183 | } |
| 4184 | #endif |
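/*
 * Hedged sketch of how the adaptive-spinning mutex slow path is expected
 * to use mutex_spin_on_owner() (simplified; the real code lives in
 * kernel/mutex.c and does more bookkeeping):
 *
 *	for (;;) {
 *		struct thread_info *owner = ACCESS_ONCE(lock->owner);
 *
 *		if (owner && !mutex_spin_on_owner(lock, owner))
 *			break;			(owner slept: block instead)
 *
 *		if (atomic_cmpxchg(&lock->count, 1, 0) == 1)
 *			return 0;		(acquired while spinning)
 *
 *		cpu_relax();
 *	}
 */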
| 4185 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4186 | #ifdef CONFIG_PREEMPT |
| 4187 | /* |
Andreas Mohr | 2ed6e34 | 2006-07-10 04:43:52 -0700 | [diff] [blame] | 4188 | * This is the entry point to schedule() for in-kernel preemption
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 4189 | * triggered by preempt_enable(). Kernel preemption on return from
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4190 | * interrupt instead goes through preempt_schedule_irq(), which calls schedule() directly.
| 4191 | */ |
Steven Rostedt | d1f74e2 | 2010-06-02 21:52:29 -0400 | [diff] [blame] | 4192 | asmlinkage void __sched notrace preempt_schedule(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4193 | { |
| 4194 | struct thread_info *ti = current_thread_info(); |
Ingo Molnar | 6478d88 | 2008-01-25 21:08:33 +0100 | [diff] [blame] | 4195 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4196 | /* |
| 4197 | * If there is a non-zero preempt_count or interrupts are disabled, |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 4198 | * we do not want to preempt the current task. Just return.
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4199 | */ |
Nick Piggin | beed33a | 2006-10-11 01:21:52 -0700 | [diff] [blame] | 4200 | if (likely(ti->preempt_count || irqs_disabled())) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4201 | return; |
| 4202 | |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4203 | do { |
Steven Rostedt | d1f74e2 | 2010-06-02 21:52:29 -0400 | [diff] [blame] | 4204 | add_preempt_count_notrace(PREEMPT_ACTIVE); |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4205 | schedule(); |
Steven Rostedt | d1f74e2 | 2010-06-02 21:52:29 -0400 | [diff] [blame] | 4206 | sub_preempt_count_notrace(PREEMPT_ACTIVE); |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4207 | |
| 4208 | /* |
| 4209 | * Check again in case we missed a preemption opportunity |
| 4210 | * between schedule and now. |
| 4211 | */ |
| 4212 | barrier(); |
Lai Jiangshan | 5ed0cec | 2009-03-06 19:40:20 +0800 | [diff] [blame] | 4213 | } while (need_resched()); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4214 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4215 | EXPORT_SYMBOL(preempt_schedule); |
| 4216 | |
| 4217 | /* |
Andreas Mohr | 2ed6e34 | 2006-07-10 04:43:52 -0700 | [diff] [blame] | 4218 | * This is the entry point to schedule() for kernel preemption
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4219 | * on return from irq context.
| 4220 | * Note that this is called and returns with irqs disabled, which
| 4221 | * protects us against recursive calls from irq context.
| 4222 | */ |
| 4223 | asmlinkage void __sched preempt_schedule_irq(void) |
| 4224 | { |
| 4225 | struct thread_info *ti = current_thread_info(); |
Ingo Molnar | 6478d88 | 2008-01-25 21:08:33 +0100 | [diff] [blame] | 4226 | |
Andreas Mohr | 2ed6e34 | 2006-07-10 04:43:52 -0700 | [diff] [blame] | 4227 | /* Catch callers which need to be fixed */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4228 | BUG_ON(ti->preempt_count || !irqs_disabled()); |
| 4229 | |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4230 | do { |
| 4231 | add_preempt_count(PREEMPT_ACTIVE); |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4232 | local_irq_enable(); |
| 4233 | schedule(); |
| 4234 | local_irq_disable(); |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4235 | sub_preempt_count(PREEMPT_ACTIVE); |
| 4236 | |
| 4237 | /* |
| 4238 | * Check again in case we missed a preemption opportunity |
| 4239 | * between schedule and now. |
| 4240 | */ |
| 4241 | barrier(); |
Lai Jiangshan | 5ed0cec | 2009-03-06 19:40:20 +0800 | [diff] [blame] | 4242 | } while (need_resched()); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4243 | } |
| 4244 | |
| 4245 | #endif /* CONFIG_PREEMPT */ |
| 4246 | |
Peter Zijlstra | 63859d4 | 2009-09-15 19:14:42 +0200 | [diff] [blame] | 4247 | int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags, |
Ingo Molnar | 95cdf3b | 2005-09-10 00:26:11 -0700 | [diff] [blame] | 4248 | void *key) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4249 | { |
Peter Zijlstra | 63859d4 | 2009-09-15 19:14:42 +0200 | [diff] [blame] | 4250 | return try_to_wake_up(curr->private, mode, wake_flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4251 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4252 | EXPORT_SYMBOL(default_wake_function); |
| 4253 | |
| 4254 | /* |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 4255 | * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just |
| 4256 | * wake everything up. If it's an exclusive wakeup (nr_exclusive is a small
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4257 | * positive number) then we wake all the non-exclusive tasks and one exclusive task.
| 4258 | * |
| 4259 | * There are circumstances in which we can try to wake a task which has already |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 4260 | * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4261 | * zero in this (rare) case, and we handle it by continuing to scan the queue. |
| 4262 | */ |
Johannes Weiner | 78ddb08 | 2009-04-14 16:53:05 +0200 | [diff] [blame] | 4263 | static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, |
Peter Zijlstra | 63859d4 | 2009-09-15 19:14:42 +0200 | [diff] [blame] | 4264 | int nr_exclusive, int wake_flags, void *key) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4265 | { |
Matthias Kaehlcke | 2e45874 | 2007-10-15 17:00:02 +0200 | [diff] [blame] | 4266 | wait_queue_t *curr, *next; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4267 | |
Matthias Kaehlcke | 2e45874 | 2007-10-15 17:00:02 +0200 | [diff] [blame] | 4268 | list_for_each_entry_safe(curr, next, &q->task_list, task_list) { |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 4269 | unsigned flags = curr->flags; |
| 4270 | |
Peter Zijlstra | 63859d4 | 2009-09-15 19:14:42 +0200 | [diff] [blame] | 4271 | if (curr->func(curr, mode, wake_flags, key) && |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 4272 | (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4273 | break; |
| 4274 | } |
| 4275 | } |
| 4276 | |
| 4277 | /** |
| 4278 | * __wake_up - wake up threads blocked on a waitqueue. |
| 4279 | * @q: the waitqueue |
| 4280 | * @mode: which threads |
| 4281 | * @nr_exclusive: how many wake-one or wake-many threads to wake up |
Martin Waitz | 67be2dd | 2005-05-01 08:59:26 -0700 | [diff] [blame] | 4282 | * @key: is directly passed to the wakeup function |
David Howells | 50fa610 | 2009-04-28 15:01:38 +0100 | [diff] [blame] | 4283 | * |
| 4284 | * It may be assumed that this function implies a write memory barrier before |
| 4285 | * changing the task state if and only if any tasks are woken up. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4286 | */ |
Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 4287 | void __wake_up(wait_queue_head_t *q, unsigned int mode, |
Ingo Molnar | 95cdf3b | 2005-09-10 00:26:11 -0700 | [diff] [blame] | 4288 | int nr_exclusive, void *key) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4289 | { |
| 4290 | unsigned long flags; |
| 4291 | |
| 4292 | spin_lock_irqsave(&q->lock, flags); |
| 4293 | __wake_up_common(q, mode, nr_exclusive, 0, key); |
| 4294 | spin_unlock_irqrestore(&q->lock, flags); |
| 4295 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4296 | EXPORT_SYMBOL(__wake_up); |
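/*
 * Typical pairing with the wakeup primitives above (illustrative sketch;
 * my_wq and my_cond are made-up names):
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static int my_cond;
 *
 *	sleeper:	wait_event_interruptible(my_wq, my_cond != 0);
 *
 *	waker:		my_cond = 1;
 *			wake_up(&my_wq);
 *
 * wake_up() expands to __wake_up(&my_wq, TASK_NORMAL, 1, NULL), i.e. it
 * wakes all non-exclusive waiters and at most one exclusive waiter, in
 * both interruptible and uninterruptible sleep.
 */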
| 4297 | |
| 4298 | /* |
| 4299 | * Same as __wake_up but called with the spinlock in wait_queue_head_t held. |
| 4300 | */ |
Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 4301 | void __wake_up_locked(wait_queue_head_t *q, unsigned int mode) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4302 | { |
| 4303 | __wake_up_common(q, mode, 1, 0, NULL); |
| 4304 | } |
Michal Nazarewicz | 22c43c8 | 2010-05-05 12:53:11 +0200 | [diff] [blame] | 4305 | EXPORT_SYMBOL_GPL(__wake_up_locked); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4306 | |
Davide Libenzi | 4ede816 | 2009-03-31 15:24:20 -0700 | [diff] [blame] | 4307 | void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key) |
| 4308 | { |
| 4309 | __wake_up_common(q, mode, 1, 0, key); |
| 4310 | } |
| 4311 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4312 | /** |
Davide Libenzi | 4ede816 | 2009-03-31 15:24:20 -0700 | [diff] [blame] | 4313 | * __wake_up_sync_key - wake up threads blocked on a waitqueue. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4314 | * @q: the waitqueue |
| 4315 | * @mode: which threads |
| 4316 | * @nr_exclusive: how many wake-one or wake-many threads to wake up |
Davide Libenzi | 4ede816 | 2009-03-31 15:24:20 -0700 | [diff] [blame] | 4317 | * @key: opaque value to be passed to wakeup targets |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4318 | * |
| 4319 | * The sync wakeup differs in that the waker knows that it will schedule
| 4320 | * away soon, so while the target thread will be woken up, it will not
| 4321 | * be migrated to another CPU - i.e. the two threads are 'synchronized'
| 4322 | * with each other. This can prevent needless bouncing between CPUs. |
| 4323 | * |
| 4324 | * On UP it can prevent extra preemption. |
David Howells | 50fa610 | 2009-04-28 15:01:38 +0100 | [diff] [blame] | 4325 | * |
| 4326 | * It may be assumed that this function implies a write memory barrier before |
| 4327 | * changing the task state if and only if any tasks are woken up. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4328 | */ |
Davide Libenzi | 4ede816 | 2009-03-31 15:24:20 -0700 | [diff] [blame] | 4329 | void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, |
| 4330 | int nr_exclusive, void *key) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4331 | { |
| 4332 | unsigned long flags; |
Peter Zijlstra | 7d47872 | 2009-09-14 19:55:44 +0200 | [diff] [blame] | 4333 | int wake_flags = WF_SYNC; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4334 | |
| 4335 | if (unlikely(!q)) |
| 4336 | return; |
| 4337 | |
| 4338 | if (unlikely(!nr_exclusive)) |
Peter Zijlstra | 7d47872 | 2009-09-14 19:55:44 +0200 | [diff] [blame] | 4339 | wake_flags = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4340 | |
| 4341 | spin_lock_irqsave(&q->lock, flags); |
Peter Zijlstra | 7d47872 | 2009-09-14 19:55:44 +0200 | [diff] [blame] | 4342 | __wake_up_common(q, mode, nr_exclusive, wake_flags, key); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4343 | spin_unlock_irqrestore(&q->lock, flags); |
| 4344 | } |
Davide Libenzi | 4ede816 | 2009-03-31 15:24:20 -0700 | [diff] [blame] | 4345 | EXPORT_SYMBOL_GPL(__wake_up_sync_key); |
| 4346 | |
| 4347 | /* |
| 4348 | * __wake_up_sync - see __wake_up_sync_key() |
| 4349 | */ |
| 4350 | void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) |
| 4351 | { |
| 4352 | __wake_up_sync_key(q, mode, nr_exclusive, NULL); |
| 4353 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4354 | EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */ |
| 4355 | |
Kevin Diggs | 65eb3dc | 2008-08-26 10:26:54 +0200 | [diff] [blame] | 4356 | /** |
| 4357 | * complete: - signals a single thread waiting on this completion |
| 4358 | * @x: holds the state of this particular completion |
| 4359 | * |
| 4360 | * This will wake up a single thread waiting on this completion. Threads will be |
| 4361 | * awakened in the same order in which they were queued. |
| 4362 | * |
| 4363 | * See also complete_all(), wait_for_completion() and related routines. |
David Howells | 50fa610 | 2009-04-28 15:01:38 +0100 | [diff] [blame] | 4364 | * |
| 4365 | * It may be assumed that this function implies a write memory barrier before |
| 4366 | * changing the task state if and only if any tasks are woken up. |
Kevin Diggs | 65eb3dc | 2008-08-26 10:26:54 +0200 | [diff] [blame] | 4367 | */ |
Ingo Molnar | b15136e | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 4368 | void complete(struct completion *x) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4369 | { |
| 4370 | unsigned long flags; |
| 4371 | |
| 4372 | spin_lock_irqsave(&x->wait.lock, flags); |
| 4373 | x->done++; |
Matthew Wilcox | d9514f6 | 2007-12-06 11:07:07 -0500 | [diff] [blame] | 4374 | __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4375 | spin_unlock_irqrestore(&x->wait.lock, flags); |
| 4376 | } |
| 4377 | EXPORT_SYMBOL(complete); |
| 4378 | |
Kevin Diggs | 65eb3dc | 2008-08-26 10:26:54 +0200 | [diff] [blame] | 4379 | /** |
| 4380 | * complete_all: - signals all threads waiting on this completion |
| 4381 | * @x: holds the state of this particular completion |
| 4382 | * |
| 4383 | * This will wake up all threads waiting on this particular completion event. |
David Howells | 50fa610 | 2009-04-28 15:01:38 +0100 | [diff] [blame] | 4384 | * |
| 4385 | * It may be assumed that this function implies a write memory barrier before |
| 4386 | * changing the task state if and only if any tasks are woken up. |
Kevin Diggs | 65eb3dc | 2008-08-26 10:26:54 +0200 | [diff] [blame] | 4387 | */ |
Ingo Molnar | b15136e | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 4388 | void complete_all(struct completion *x) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4389 | { |
| 4390 | unsigned long flags; |
| 4391 | |
| 4392 | spin_lock_irqsave(&x->wait.lock, flags); |
| 4393 | x->done += UINT_MAX/2; |
Matthew Wilcox | d9514f6 | 2007-12-06 11:07:07 -0500 | [diff] [blame] | 4394 | __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4395 | spin_unlock_irqrestore(&x->wait.lock, flags); |
| 4396 | } |
| 4397 | EXPORT_SYMBOL(complete_all); |
| 4398 | |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4399 | static inline long __sched |
| 4400 | do_wait_for_common(struct completion *x, long timeout, int state) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4401 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4402 | if (!x->done) { |
| 4403 | DECLARE_WAITQUEUE(wait, current); |
| 4404 | |
Changli Gao | a93d2f17 | 2010-05-07 14:33:26 +0800 | [diff] [blame] | 4405 | __add_wait_queue_tail_exclusive(&x->wait, &wait); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4406 | do { |
Oleg Nesterov | 94d3d82 | 2008-08-20 16:54:41 -0700 | [diff] [blame] | 4407 | if (signal_pending_state(state, current)) { |
Oleg Nesterov | ea71a54 | 2008-06-20 18:32:20 +0400 | [diff] [blame] | 4408 | timeout = -ERESTARTSYS; |
| 4409 | break; |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4410 | } |
| 4411 | __set_current_state(state); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4412 | spin_unlock_irq(&x->wait.lock); |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4413 | timeout = schedule_timeout(timeout); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4414 | spin_lock_irq(&x->wait.lock); |
Oleg Nesterov | ea71a54 | 2008-06-20 18:32:20 +0400 | [diff] [blame] | 4415 | } while (!x->done && timeout); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4416 | __remove_wait_queue(&x->wait, &wait); |
Oleg Nesterov | ea71a54 | 2008-06-20 18:32:20 +0400 | [diff] [blame] | 4417 | if (!x->done) |
| 4418 | return timeout; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4419 | } |
| 4420 | x->done--; |
Oleg Nesterov | ea71a54 | 2008-06-20 18:32:20 +0400 | [diff] [blame] | 4421 | return timeout ?: 1; |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4422 | } |
| 4423 | |
| 4424 | static long __sched |
| 4425 | wait_for_common(struct completion *x, long timeout, int state) |
| 4426 | { |
| 4427 | might_sleep(); |
| 4428 | |
| 4429 | spin_lock_irq(&x->wait.lock); |
| 4430 | timeout = do_wait_for_common(x, timeout, state); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4431 | spin_unlock_irq(&x->wait.lock); |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4432 | return timeout; |
| 4433 | } |
| 4434 | |
Kevin Diggs | 65eb3dc | 2008-08-26 10:26:54 +0200 | [diff] [blame] | 4435 | /** |
| 4436 | * wait_for_completion: - waits for completion of a task |
| 4437 | * @x: holds the state of this particular completion |
| 4438 | * |
| 4439 | * This waits to be signaled for completion of a specific task. It is NOT |
| 4440 | * interruptible and there is no timeout. |
| 4441 | * |
| 4442 | * See also similar routines (e.g. wait_for_completion_timeout()) with timeout
| 4443 | * and interrupt capability. Also see complete(). |
| 4444 | */ |
Ingo Molnar | b15136e | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 4445 | void __sched wait_for_completion(struct completion *x) |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4446 | { |
| 4447 | wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4448 | } |
| 4449 | EXPORT_SYMBOL(wait_for_completion); |
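/*
 * Minimal usage sketch for the completion API (illustrative; setup_done
 * is a made-up name):
 *
 *	static DECLARE_COMPLETION(setup_done);
 *
 *	waiter:		wait_for_completion(&setup_done);
 *
 *	signaler:	complete(&setup_done);
 *
 * complete() may be called from any context, including interrupt handlers,
 * since it only takes the wait-queue lock with irqs saved.
 */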
| 4450 | |
Kevin Diggs | 65eb3dc | 2008-08-26 10:26:54 +0200 | [diff] [blame] | 4451 | /** |
| 4452 | * wait_for_completion_timeout: - waits for completion of a task (w/timeout) |
| 4453 | * @x: holds the state of this particular completion |
| 4454 | * @timeout: timeout value in jiffies |
| 4455 | * |
| 4456 | * This waits for either a completion of a specific task to be signaled or for a |
| 4457 | * specified timeout to expire. The timeout is in jiffies. It is not |
| 4458 | * interruptible. |
| 4459 | */ |
Ingo Molnar | b15136e | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 4460 | unsigned long __sched |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4461 | wait_for_completion_timeout(struct completion *x, unsigned long timeout) |
| 4462 | { |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4463 | return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4464 | } |
| 4465 | EXPORT_SYMBOL(wait_for_completion_timeout); |
| 4466 | |
Kevin Diggs | 65eb3dc | 2008-08-26 10:26:54 +0200 | [diff] [blame] | 4467 | /** |
| 4468 | * wait_for_completion_interruptible: - waits for completion of a task (w/intr) |
| 4469 | * @x: holds the state of this particular completion |
| 4470 | * |
| 4471 | * This waits for completion of a specific task to be signaled. It is |
| 4472 | * interruptible. |
| 4473 | */ |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4474 | int __sched wait_for_completion_interruptible(struct completion *x) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4475 | { |
Andi Kleen | 51e9799 | 2007-10-18 21:32:55 +0200 | [diff] [blame] | 4476 | long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE); |
| 4477 | if (t == -ERESTARTSYS) |
| 4478 | return t; |
| 4479 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4480 | } |
| 4481 | EXPORT_SYMBOL(wait_for_completion_interruptible); |
| 4482 | |
Kevin Diggs | 65eb3dc | 2008-08-26 10:26:54 +0200 | [diff] [blame] | 4483 | /** |
| 4484 | * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr)) |
| 4485 | * @x: holds the state of this particular completion |
| 4486 | * @timeout: timeout value in jiffies |
| 4487 | * |
| 4488 | * This waits for either a completion of a specific task to be signaled or for a |
| 4489 | * specified timeout to expire. It is interruptible. The timeout is in jiffies. |
| 4490 | */ |
Ingo Molnar | b15136e | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 4491 | unsigned long __sched |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4492 | wait_for_completion_interruptible_timeout(struct completion *x, |
| 4493 | unsigned long timeout) |
| 4494 | { |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4495 | return wait_for_common(x, timeout, TASK_INTERRUPTIBLE); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4496 | } |
| 4497 | EXPORT_SYMBOL(wait_for_completion_interruptible_timeout); |
| 4498 | |
Kevin Diggs | 65eb3dc | 2008-08-26 10:26:54 +0200 | [diff] [blame] | 4499 | /** |
| 4500 | * wait_for_completion_killable: - waits for completion of a task (killable) |
| 4501 | * @x: holds the state of this particular completion |
| 4502 | * |
| 4503 | * This waits to be signaled for completion of a specific task. It can be |
| 4504 | * interrupted by a kill signal. |
| 4505 | */ |
Matthew Wilcox | 009e577 | 2007-12-06 12:29:54 -0500 | [diff] [blame] | 4506 | int __sched wait_for_completion_killable(struct completion *x) |
| 4507 | { |
| 4508 | long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE); |
| 4509 | if (t == -ERESTARTSYS) |
| 4510 | return t; |
| 4511 | return 0; |
| 4512 | } |
| 4513 | EXPORT_SYMBOL(wait_for_completion_killable); |
| 4514 | |
Dave Chinner | be4de35 | 2008-08-15 00:40:44 -0700 | [diff] [blame] | 4515 | /** |
Sage Weil | 0aa12fb | 2010-05-29 09:12:30 -0700 | [diff] [blame] | 4516 | * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable)) |
| 4517 | * @x: holds the state of this particular completion |
| 4518 | * @timeout: timeout value in jiffies |
| 4519 | * |
| 4520 | * This waits for either a completion of a specific task to be |
| 4521 | * signaled or for a specified timeout to expire. It can be |
| 4522 | * interrupted by a kill signal. The timeout is in jiffies. |
| 4523 | */ |
| 4524 | unsigned long __sched |
| 4525 | wait_for_completion_killable_timeout(struct completion *x, |
| 4526 | unsigned long timeout) |
| 4527 | { |
| 4528 | return wait_for_common(x, timeout, TASK_KILLABLE); |
| 4529 | } |
| 4530 | EXPORT_SYMBOL(wait_for_completion_killable_timeout); |
| 4531 | |
| 4532 | /** |
Dave Chinner | be4de35 | 2008-08-15 00:40:44 -0700 | [diff] [blame] | 4533 | * try_wait_for_completion - try to decrement a completion without blocking |
| 4534 | * @x: completion structure |
| 4535 | * |
| 4536 | * Returns: 0 if a decrement cannot be done without blocking |
| 4537 | * 1 if a decrement succeeded. |
| 4538 | * |
| 4539 | * If a completion is being used as a counting completion, |
| 4540 | * attempt to decrement the counter without blocking. This |
| 4541 | * enables us to avoid waiting if the resource the completion |
| 4542 | * is protecting is not available. |
| 4543 | */ |
| 4544 | bool try_wait_for_completion(struct completion *x) |
| 4545 | { |
Rafael J. Wysocki | 7539a3b | 2009-12-13 00:07:30 +0100 | [diff] [blame] | 4546 | unsigned long flags; |
Dave Chinner | be4de35 | 2008-08-15 00:40:44 -0700 | [diff] [blame] | 4547 | int ret = 1; |
| 4548 | |
Rafael J. Wysocki | 7539a3b | 2009-12-13 00:07:30 +0100 | [diff] [blame] | 4549 | spin_lock_irqsave(&x->wait.lock, flags); |
Dave Chinner | be4de35 | 2008-08-15 00:40:44 -0700 | [diff] [blame] | 4550 | if (!x->done) |
| 4551 | ret = 0; |
| 4552 | else |
| 4553 | x->done--; |
Rafael J. Wysocki | 7539a3b | 2009-12-13 00:07:30 +0100 | [diff] [blame] | 4554 | spin_unlock_irqrestore(&x->wait.lock, flags); |
Dave Chinner | be4de35 | 2008-08-15 00:40:44 -0700 | [diff] [blame] | 4555 | return ret; |
| 4556 | } |
| 4557 | EXPORT_SYMBOL(try_wait_for_completion); |
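/*
 * Sketch of the non-blocking pattern this enables (illustrative only;
 * resource_free is a made-up name for a completion used as a counting
 * gate on some resource):
 *
 *	if (!try_wait_for_completion(&resource_free))
 *		return -EBUSY;		(or fall back to wait_for_completion())
 *	... use the resource ...
 */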
| 4558 | |
| 4559 | /** |
| 4560 | * completion_done - Test to see if a completion has any waiters |
| 4561 | * @x: completion structure |
| 4562 | * |
| 4563 | * Returns: 0 if there are waiters (wait_for_completion() in progress) |
| 4564 | * 1 if there are no waiters. |
| 4565 | * |
| 4566 | */ |
| 4567 | bool completion_done(struct completion *x) |
| 4568 | { |
Rafael J. Wysocki | 7539a3b | 2009-12-13 00:07:30 +0100 | [diff] [blame] | 4569 | unsigned long flags; |
Dave Chinner | be4de35 | 2008-08-15 00:40:44 -0700 | [diff] [blame] | 4570 | int ret = 1; |
| 4571 | |
Rafael J. Wysocki | 7539a3b | 2009-12-13 00:07:30 +0100 | [diff] [blame] | 4572 | spin_lock_irqsave(&x->wait.lock, flags); |
Dave Chinner | be4de35 | 2008-08-15 00:40:44 -0700 | [diff] [blame] | 4573 | if (!x->done) |
| 4574 | ret = 0; |
Rafael J. Wysocki | 7539a3b | 2009-12-13 00:07:30 +0100 | [diff] [blame] | 4575 | spin_unlock_irqrestore(&x->wait.lock, flags); |
Dave Chinner | be4de35 | 2008-08-15 00:40:44 -0700 | [diff] [blame] | 4576 | return ret; |
| 4577 | } |
| 4578 | EXPORT_SYMBOL(completion_done); |
| 4579 | |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4580 | static long __sched |
| 4581 | sleep_on_common(wait_queue_head_t *q, int state, long timeout) |
Ingo Molnar | 0fec171 | 2007-07-09 18:52:01 +0200 | [diff] [blame] | 4582 | { |
| 4583 | unsigned long flags; |
| 4584 | wait_queue_t wait; |
| 4585 | |
| 4586 | init_waitqueue_entry(&wait, current); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4587 | |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4588 | __set_current_state(state); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4589 | |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4590 | spin_lock_irqsave(&q->lock, flags); |
| 4591 | __add_wait_queue(q, &wait); |
| 4592 | spin_unlock(&q->lock); |
| 4593 | timeout = schedule_timeout(timeout); |
| 4594 | spin_lock_irq(&q->lock); |
| 4595 | __remove_wait_queue(q, &wait); |
| 4596 | spin_unlock_irqrestore(&q->lock, flags); |
| 4597 | |
| 4598 | return timeout; |
| 4599 | } |
| 4600 | |
| 4601 | void __sched interruptible_sleep_on(wait_queue_head_t *q) |
| 4602 | { |
| 4603 | sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4604 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4605 | EXPORT_SYMBOL(interruptible_sleep_on); |
| 4606 | |
Ingo Molnar | 0fec171 | 2007-07-09 18:52:01 +0200 | [diff] [blame] | 4607 | long __sched |
Ingo Molnar | 95cdf3b | 2005-09-10 00:26:11 -0700 | [diff] [blame] | 4608 | interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4609 | { |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4610 | return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4611 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4612 | EXPORT_SYMBOL(interruptible_sleep_on_timeout); |
| 4613 | |
Ingo Molnar | 0fec171 | 2007-07-09 18:52:01 +0200 | [diff] [blame] | 4614 | void __sched sleep_on(wait_queue_head_t *q) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4615 | { |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4616 | sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4617 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4618 | EXPORT_SYMBOL(sleep_on); |
| 4619 | |
Ingo Molnar | 0fec171 | 2007-07-09 18:52:01 +0200 | [diff] [blame] | 4620 | long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4621 | { |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4622 | return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4623 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4624 | EXPORT_SYMBOL(sleep_on_timeout); |
| 4625 | |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 4626 | #ifdef CONFIG_RT_MUTEXES |
| 4627 | |
| 4628 | /* |
| 4629 | * rt_mutex_setprio - set the current priority of a task |
| 4630 | * @p: task |
| 4631 | * @prio: prio value (kernel-internal form) |
| 4632 | * |
| 4633 | * This function changes the 'effective' priority of a task. It does |
| 4634 | * not touch ->normal_prio like __setscheduler(). |
| 4635 | * |
| 4636 | * Used by the rt_mutex code to implement priority inheritance logic. |
| 4637 | */ |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 4638 | void rt_mutex_setprio(struct task_struct *p, int prio) |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 4639 | { |
| 4640 | unsigned long flags; |
Srivatsa Vaddagiri | 83b699e | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 4641 | int oldprio, on_rq, running; |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 4642 | struct rq *rq; |
Thomas Gleixner | 83ab0aa | 2010-02-17 09:05:48 +0100 | [diff] [blame] | 4643 | const struct sched_class *prev_class; |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 4644 | |
| 4645 | BUG_ON(prio < 0 || prio > MAX_PRIO); |
| 4646 | |
| 4647 | rq = task_rq_lock(p, &flags); |
| 4648 | |
Steven Rostedt | a802707 | 2010-09-20 15:13:34 -0400 | [diff] [blame] | 4649 | trace_sched_pi_setprio(p, prio); |
Andrew Morton | d5f9f94 | 2007-05-08 20:27:06 -0700 | [diff] [blame] | 4650 | oldprio = p->prio; |
Thomas Gleixner | 83ab0aa | 2010-02-17 09:05:48 +0100 | [diff] [blame] | 4651 | prev_class = p->sched_class; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4652 | on_rq = p->se.on_rq; |
Dmitry Adamushko | 051a1d1 | 2007-12-18 15:21:13 +0100 | [diff] [blame] | 4653 | running = task_current(rq, p); |
Hiroshi Shimamoto | 0e1f348 | 2008-03-10 11:01:20 -0700 | [diff] [blame] | 4654 | if (on_rq) |
Ingo Molnar | 69be72c | 2007-08-09 11:16:49 +0200 | [diff] [blame] | 4655 | dequeue_task(rq, p, 0); |
Hiroshi Shimamoto | 0e1f348 | 2008-03-10 11:01:20 -0700 | [diff] [blame] | 4656 | if (running) |
| 4657 | p->sched_class->put_prev_task(rq, p); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4658 | |
| 4659 | if (rt_prio(prio)) |
| 4660 | p->sched_class = &rt_sched_class; |
| 4661 | else |
| 4662 | p->sched_class = &fair_sched_class; |
| 4663 | |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 4664 | p->prio = prio; |
| 4665 | |
Hiroshi Shimamoto | 0e1f348 | 2008-03-10 11:01:20 -0700 | [diff] [blame] | 4666 | if (running) |
| 4667 | p->sched_class->set_curr_task(rq); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4668 | if (on_rq) { |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 4669 | enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0); |
Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 4670 | |
| 4671 | check_class_changed(rq, p, prev_class, oldprio, running); |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 4672 | } |
| 4673 | task_rq_unlock(rq, &flags); |
| 4674 | } |
| 4675 | |
| 4676 | #endif |
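/*
 * Priority-inheritance example for the above (descriptive, not code from
 * this file): if a SCHED_NORMAL owner (prio around 120) holds an rt_mutex
 * that a SCHED_FIFO waiter (prio below MAX_RT_PRIO) blocks on, the rt-mutex
 * code boosts the owner with rt_mutex_setprio(owner, waiter_prio) so it can
 * run and release the lock, then restores the owner's normal_prio on unlock.
 */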
| 4677 | |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 4678 | void set_user_nice(struct task_struct *p, long nice) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4679 | { |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4680 | int old_prio, delta, on_rq; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4681 | unsigned long flags; |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 4682 | struct rq *rq; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4683 | |
| 4684 | if (TASK_NICE(p) == nice || nice < -20 || nice > 19) |
| 4685 | return; |
| 4686 | /* |
| 4687 | * We have to be careful, if called from sys_setpriority(), |
| 4688 | * the task might be in the middle of scheduling on another CPU. |
| 4689 | */ |
| 4690 | rq = task_rq_lock(p, &flags); |
| 4691 | /* |
| 4692 | * The RT priorities are set via sched_setscheduler(), but we still |
| 4693 | * allow the 'normal' nice value to be set - but as expected |
| 4694 | * it won't have any effect on scheduling as long as the task is
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4695 | * SCHED_FIFO/SCHED_RR: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4696 | */ |
Ingo Molnar | e05606d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4697 | if (task_has_rt_policy(p)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4698 | p->static_prio = NICE_TO_PRIO(nice); |
| 4699 | goto out_unlock; |
| 4700 | } |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4701 | on_rq = p->se.on_rq; |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 4702 | if (on_rq) |
Ingo Molnar | 69be72c | 2007-08-09 11:16:49 +0200 | [diff] [blame] | 4703 | dequeue_task(rq, p, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4704 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4705 | p->static_prio = NICE_TO_PRIO(nice); |
Peter Williams | 2dd73a4 | 2006-06-27 02:54:34 -0700 | [diff] [blame] | 4706 | set_load_weight(p); |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 4707 | old_prio = p->prio; |
| 4708 | p->prio = effective_prio(p); |
| 4709 | delta = p->prio - old_prio; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4710 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4711 | if (on_rq) { |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 4712 | enqueue_task(rq, p, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4713 | /* |
Andrew Morton | d5f9f94 | 2007-05-08 20:27:06 -0700 | [diff] [blame] | 4714 | * If the task increased its priority or is running and |
| 4715 | * lowered its priority, then reschedule its CPU: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4716 | */ |
Andrew Morton | d5f9f94 | 2007-05-08 20:27:06 -0700 | [diff] [blame] | 4717 | if (delta < 0 || (delta > 0 && task_running(rq, p))) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4718 | resched_task(rq->curr); |
| 4719 | } |
| 4720 | out_unlock: |
| 4721 | task_rq_unlock(rq, &flags); |
| 4722 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4723 | EXPORT_SYMBOL(set_user_nice); |
| 4724 | |
Matt Mackall | e43379f | 2005-05-01 08:59:00 -0700 | [diff] [blame] | 4725 | /* |
| 4726 | * can_nice - check if a task can reduce its nice value |
| 4727 | * @p: task |
| 4728 | * @nice: nice value |
| 4729 | */ |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 4730 | int can_nice(const struct task_struct *p, const int nice) |
Matt Mackall | e43379f | 2005-05-01 08:59:00 -0700 | [diff] [blame] | 4731 | { |
Matt Mackall | 024f474 | 2005-08-18 11:24:19 -0700 | [diff] [blame] | 4732 | /* convert nice value [19,-20] to rlimit style value [1,40] */ |
| 4733 | int nice_rlim = 20 - nice; |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 4734 | |
Jiri Slaby | 78d7d40 | 2010-03-05 13:42:54 -0800 | [diff] [blame] | 4735 | return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || |
Matt Mackall | e43379f | 2005-05-01 08:59:00 -0700 | [diff] [blame] | 4736 | capable(CAP_SYS_NICE)); |
| 4737 | } |
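| | /* |
| | * Illustrative worked example (not part of the original source): a |
| | * request for nice = -5 gives nice_rlim = 20 - (-5) = 25, so it is |
| | * allowed only if RLIMIT_NICE is at least 25 or the caller has |
| | * CAP_SYS_NICE. |
| | */ |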
| 4738 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4739 | #ifdef __ARCH_WANT_SYS_NICE |
| 4740 | |
| 4741 | /* |
| 4742 | * sys_nice - change the priority of the current process. |
| 4743 | * @increment: priority increment |
| 4744 | * |
| 4745 | * sys_setpriority is a more generic, but much slower function that |
| 4746 | * does similar things. |
| 4747 | */ |
Heiko Carstens | 5add95d | 2009-01-14 14:14:08 +0100 | [diff] [blame] | 4748 | SYSCALL_DEFINE1(nice, int, increment) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4749 | { |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 4750 | long nice, retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4751 | |
| 4752 | /* |
| 4753 | * Setpriority might change our priority at the same moment. |
| 4754 | * We don't have to worry. Conceptually one call occurs first |
| 4755 | * and we have a single winner. |
| 4756 | */ |
Matt Mackall | e43379f | 2005-05-01 08:59:00 -0700 | [diff] [blame] | 4757 | if (increment < -40) |
| 4758 | increment = -40; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4759 | if (increment > 40) |
| 4760 | increment = 40; |
| 4761 | |
Américo Wang | 2b8f836 | 2009-02-16 18:54:21 +0800 | [diff] [blame] | 4762 | nice = TASK_NICE(current) + increment; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4763 | if (nice < -20) |
| 4764 | nice = -20; |
| 4765 | if (nice > 19) |
| 4766 | nice = 19; |
| 4767 | |
Matt Mackall | e43379f | 2005-05-01 08:59:00 -0700 | [diff] [blame] | 4768 | if (increment < 0 && !can_nice(current, nice)) |
| 4769 | return -EPERM; |
| 4770 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4771 | retval = security_task_setnice(current, nice); |
| 4772 | if (retval) |
| 4773 | return retval; |
| 4774 | |
| 4775 | set_user_nice(current, nice); |
| 4776 | return 0; |
| 4777 | } |
| 4778 | |
| 4779 | #endif |
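| | /* |
| | * Illustrative userspace usage (an assumption, not part of the original |
| | * source): the libc wrapper for this syscall is nice(2); note that -1 |
| | * can be a legitimate return value, hence the errno check: |
| | * |
| | *	#include <errno.h> |
| | *	#include <stdio.h> |
| | *	#include <unistd.h> |
| | * |
| | *	errno = 0; |
| | *	if (nice(5) == -1 && errno) |
| | *		perror("nice"); |
| | */ |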
| 4780 | |
| 4781 | /** |
| 4782 | * task_prio - return the priority value of a given task. |
| 4783 | * @p: the task in question. |
| 4784 | * |
| 4785 | * This is the priority value as seen by users in /proc. |
| 4786 | * RT tasks return negative values in [-100, -2]. Normal tasks |
| 4787 | * return values in [0, 39], corresponding to nice -20..19. |
| 4788 | */ |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 4789 | int task_prio(const struct task_struct *p) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4790 | { |
| 4791 | return p->prio - MAX_RT_PRIO; |
| 4792 | } |
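| | /* |
| | * Illustrative worked example (not part of the original source): a |
| | * SCHED_NORMAL task at nice 0 has p->prio == 120, so task_prio() |
| | * returns 20; a SCHED_FIFO task at rt_priority 50 has p->prio == 49, |
| | * so task_prio() returns -51. |
| | */ |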
| 4793 | |
| 4794 | /** |
| 4795 | * task_nice - return the nice value of a given task. |
| 4796 | * @p: the task in question. |
| 4797 | */ |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 4798 | int task_nice(const struct task_struct *p) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4799 | { |
| 4800 | return TASK_NICE(p); |
| 4801 | } |
Pavel Roskin | 150d8be | 2008-03-05 16:56:37 -0500 | [diff] [blame] | 4802 | EXPORT_SYMBOL(task_nice); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4803 | |
| 4804 | /** |
| 4805 | * idle_cpu - is a given cpu idle currently? |
| 4806 | * @cpu: the processor in question. |
| 4807 | */ |
| 4808 | int idle_cpu(int cpu) |
| 4809 | { |
| 4810 | return cpu_curr(cpu) == cpu_rq(cpu)->idle; |
| 4811 | } |
| 4812 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4813 | /** |
| 4814 | * idle_task - return the idle task for a given cpu. |
| 4815 | * @cpu: the processor in question. |
| 4816 | */ |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 4817 | struct task_struct *idle_task(int cpu) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4818 | { |
| 4819 | return cpu_rq(cpu)->idle; |
| 4820 | } |
| 4821 | |
| 4822 | /** |
| 4823 | * find_process_by_pid - find a process with a matching PID value. |
| 4824 | * @pid: the pid in question. |
| 4825 | */ |
Alexey Dobriyan | a995744 | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 4826 | static struct task_struct *find_process_by_pid(pid_t pid) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4827 | { |
Pavel Emelyanov | 228ebcb | 2007-10-18 23:40:16 -0700 | [diff] [blame] | 4828 | return pid ? find_task_by_vpid(pid) : current; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4829 | } |
| 4830 | |
| 4831 | /* Actually do priority change: must hold rq lock. */ |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4832 | static void |
| 4833 | __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4834 | { |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4835 | BUG_ON(p->se.on_rq); |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 4836 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4837 | p->policy = policy; |
| 4838 | p->rt_priority = prio; |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 4839 | p->normal_prio = normal_prio(p); |
| 4840 | /* we are holding p->pi_lock already */ |
| 4841 | p->prio = rt_mutex_getprio(p); |
Peter Zijlstra | ffd44db | 2009-11-10 20:12:01 +0100 | [diff] [blame] | 4842 | if (rt_prio(p->prio)) |
| 4843 | p->sched_class = &rt_sched_class; |
| 4844 | else |
| 4845 | p->sched_class = &fair_sched_class; |
Peter Williams | 2dd73a4 | 2006-06-27 02:54:34 -0700 | [diff] [blame] | 4846 | set_load_weight(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4847 | } |
| 4848 | |
David Howells | c69e8d9 | 2008-11-14 10:39:19 +1100 | [diff] [blame] | 4849 | /* |
| 4850 | * check whether the target process has a UID that matches the current process's |
| 4851 | */ |
| 4852 | static bool check_same_owner(struct task_struct *p) |
| 4853 | { |
| 4854 | const struct cred *cred = current_cred(), *pcred; |
| 4855 | bool match; |
| 4856 | |
| 4857 | rcu_read_lock(); |
| 4858 | pcred = __task_cred(p); |
| 4859 | match = (cred->euid == pcred->euid || |
| 4860 | cred->euid == pcred->uid); |
| 4861 | rcu_read_unlock(); |
| 4862 | return match; |
| 4863 | } |
| 4864 | |
Rusty Russell | 961ccdd | 2008-06-23 13:55:38 +1000 | [diff] [blame] | 4865 | static int __sched_setscheduler(struct task_struct *p, int policy, |
| 4866 | struct sched_param *param, bool user) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4867 | { |
Srivatsa Vaddagiri | 83b699e | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 4868 | int retval, oldprio, oldpolicy = -1, on_rq, running; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4869 | unsigned long flags; |
Thomas Gleixner | 83ab0aa | 2010-02-17 09:05:48 +0100 | [diff] [blame] | 4870 | const struct sched_class *prev_class; |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 4871 | struct rq *rq; |
Lennart Poettering | ca94c44 | 2009-06-15 17:17:47 +0200 | [diff] [blame] | 4872 | int reset_on_fork; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4873 | |
Steven Rostedt | 66e5393 | 2006-06-27 02:54:44 -0700 | [diff] [blame] | 4874 | /* may grab non-irq protected spin_locks */ |
| 4875 | BUG_ON(in_interrupt()); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4876 | recheck: |
| 4877 | /* double check policy once rq lock held */ |
Lennart Poettering | ca94c44 | 2009-06-15 17:17:47 +0200 | [diff] [blame] | 4878 | if (policy < 0) { |
| 4879 | reset_on_fork = p->sched_reset_on_fork; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4880 | policy = oldpolicy = p->policy; |
Lennart Poettering | ca94c44 | 2009-06-15 17:17:47 +0200 | [diff] [blame] | 4881 | } else { |
| 4882 | reset_on_fork = !!(policy & SCHED_RESET_ON_FORK); |
| 4883 | policy &= ~SCHED_RESET_ON_FORK; |
| 4884 | |
| 4885 | if (policy != SCHED_FIFO && policy != SCHED_RR && |
| 4886 | policy != SCHED_NORMAL && policy != SCHED_BATCH && |
| 4887 | policy != SCHED_IDLE) |
| 4888 | return -EINVAL; |
| 4889 | } |
| 4890 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4891 | /* |
| 4892 | * Valid priorities for SCHED_FIFO and SCHED_RR are |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4893 | * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL, |
| 4894 | * SCHED_BATCH and SCHED_IDLE is 0. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4895 | */ |
| 4896 | if (param->sched_priority < 0 || |
Ingo Molnar | 95cdf3b | 2005-09-10 00:26:11 -0700 | [diff] [blame] | 4897 | (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) || |
Steven Rostedt | d46523e | 2005-07-25 16:28:39 -0400 | [diff] [blame] | 4898 | (!p->mm && param->sched_priority > MAX_RT_PRIO-1)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4899 | return -EINVAL; |
Ingo Molnar | e05606d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4900 | if (rt_policy(policy) != (param->sched_priority != 0)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4901 | return -EINVAL; |
| 4902 | |
Olivier Croquette | 37e4ab3 | 2005-06-25 14:57:32 -0700 | [diff] [blame] | 4903 | /* |
| 4904 | * Allow unprivileged RT tasks to decrease priority: |
| 4905 | */ |
Rusty Russell | 961ccdd | 2008-06-23 13:55:38 +1000 | [diff] [blame] | 4906 | if (user && !capable(CAP_SYS_NICE)) { |
Ingo Molnar | e05606d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4907 | if (rt_policy(policy)) { |
Oleg Nesterov | a44702e8 | 2010-06-11 01:09:44 +0200 | [diff] [blame] | 4908 | unsigned long rlim_rtprio = |
| 4909 | task_rlimit(p, RLIMIT_RTPRIO); |
Oleg Nesterov | 5fe1d75 | 2006-09-29 02:00:48 -0700 | [diff] [blame] | 4910 | |
Oleg Nesterov | 8dc3e90 | 2006-09-29 02:00:50 -0700 | [diff] [blame] | 4911 | /* can't set/change the rt policy */ |
| 4912 | if (policy != p->policy && !rlim_rtprio) |
| 4913 | return -EPERM; |
| 4914 | |
| 4915 | /* can't increase priority */ |
| 4916 | if (param->sched_priority > p->rt_priority && |
| 4917 | param->sched_priority > rlim_rtprio) |
| 4918 | return -EPERM; |
| 4919 | } |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4920 | /* |
| 4921 | * Like positive nice levels, don't allow tasks to |
| 4922 | * move out of SCHED_IDLE either: |
| 4923 | */ |
| 4924 | if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) |
| 4925 | return -EPERM; |
Oleg Nesterov | 8dc3e90 | 2006-09-29 02:00:50 -0700 | [diff] [blame] | 4926 | |
Olivier Croquette | 37e4ab3 | 2005-06-25 14:57:32 -0700 | [diff] [blame] | 4927 | /* can't change other user's priorities */ |
David Howells | c69e8d9 | 2008-11-14 10:39:19 +1100 | [diff] [blame] | 4928 | if (!check_same_owner(p)) |
Olivier Croquette | 37e4ab3 | 2005-06-25 14:57:32 -0700 | [diff] [blame] | 4929 | return -EPERM; |
Lennart Poettering | ca94c44 | 2009-06-15 17:17:47 +0200 | [diff] [blame] | 4930 | |
| 4931 | /* Normal users shall not reset the sched_reset_on_fork flag */ |
| 4932 | if (p->sched_reset_on_fork && !reset_on_fork) |
| 4933 | return -EPERM; |
Olivier Croquette | 37e4ab3 | 2005-06-25 14:57:32 -0700 | [diff] [blame] | 4934 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4935 | |
Jeremy Fitzhardinge | 725aad2 | 2008-08-03 09:33:03 -0700 | [diff] [blame] | 4936 | if (user) { |
KOSAKI Motohiro | b0ae198 | 2010-10-15 04:21:18 +0900 | [diff] [blame] | 4937 | retval = security_task_setscheduler(p); |
Jeremy Fitzhardinge | 725aad2 | 2008-08-03 09:33:03 -0700 | [diff] [blame] | 4938 | if (retval) |
| 4939 | return retval; |
| 4940 | } |
| 4941 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4942 | /* |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 4943 | * make sure no PI-waiters arrive (or leave) while we are |
| 4944 | * changing the priority of the task: |
| 4945 | */ |
Thomas Gleixner | 1d61548 | 2009-11-17 14:54:03 +0100 | [diff] [blame] | 4946 | raw_spin_lock_irqsave(&p->pi_lock, flags); |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 4947 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4948 | * To be able to change p->policy safely, the apropriate |
| 4949 | * runqueue lock must be held. |
| 4950 | */ |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 4951 | rq = __task_rq_lock(p); |
Peter Zijlstra | dc61b1d | 2010-06-08 11:40:42 +0200 | [diff] [blame] | 4952 | |
Peter Zijlstra | 34f971f | 2010-09-22 13:53:15 +0200 | [diff] [blame] | 4953 | /* |
| 4954 | * Changing the policy of the stop threads is a very bad idea |
| 4955 | */ |
| 4956 | if (p == rq->stop) { |
| 4957 | __task_rq_unlock(rq); |
| 4958 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
| 4959 | return -EINVAL; |
| 4960 | } |
| 4961 | |
Peter Zijlstra | dc61b1d | 2010-06-08 11:40:42 +0200 | [diff] [blame] | 4962 | #ifdef CONFIG_RT_GROUP_SCHED |
| 4963 | if (user) { |
| 4964 | /* |
| 4965 | * Do not allow realtime tasks into groups that have no runtime |
| 4966 | * assigned. |
| 4967 | */ |
| 4968 | if (rt_bandwidth_enabled() && rt_policy(policy) && |
| 4969 | task_group(p)->rt_bandwidth.rt_runtime == 0) { |
| 4970 | __task_rq_unlock(rq); |
| 4971 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
| 4972 | return -EPERM; |
| 4973 | } |
| 4974 | } |
| 4975 | #endif |
| 4976 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4977 | /* recheck policy now with rq lock held */ |
| 4978 | if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { |
| 4979 | policy = oldpolicy = -1; |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 4980 | __task_rq_unlock(rq); |
Thomas Gleixner | 1d61548 | 2009-11-17 14:54:03 +0100 | [diff] [blame] | 4981 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4982 | goto recheck; |
| 4983 | } |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4984 | on_rq = p->se.on_rq; |
Dmitry Adamushko | 051a1d1 | 2007-12-18 15:21:13 +0100 | [diff] [blame] | 4985 | running = task_current(rq, p); |
Hiroshi Shimamoto | 0e1f348 | 2008-03-10 11:01:20 -0700 | [diff] [blame] | 4986 | if (on_rq) |
Ingo Molnar | 2e1cb74 | 2007-08-09 11:16:49 +0200 | [diff] [blame] | 4987 | deactivate_task(rq, p, 0); |
Hiroshi Shimamoto | 0e1f348 | 2008-03-10 11:01:20 -0700 | [diff] [blame] | 4988 | if (running) |
| 4989 | p->sched_class->put_prev_task(rq, p); |
Dmitry Adamushko | f6b53205 | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 4990 | |
Lennart Poettering | ca94c44 | 2009-06-15 17:17:47 +0200 | [diff] [blame] | 4991 | p->sched_reset_on_fork = reset_on_fork; |
| 4992 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4993 | oldprio = p->prio; |
Thomas Gleixner | 83ab0aa | 2010-02-17 09:05:48 +0100 | [diff] [blame] | 4994 | prev_class = p->sched_class; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4995 | __setscheduler(rq, p, policy, param->sched_priority); |
Dmitry Adamushko | f6b53205 | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 4996 | |
Hiroshi Shimamoto | 0e1f348 | 2008-03-10 11:01:20 -0700 | [diff] [blame] | 4997 | if (running) |
| 4998 | p->sched_class->set_curr_task(rq); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4999 | if (on_rq) { |
| 5000 | activate_task(rq, p, 0); |
Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 5001 | |
| 5002 | check_class_changed(rq, p, prev_class, oldprio, running); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5003 | } |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 5004 | __task_rq_unlock(rq); |
Thomas Gleixner | 1d61548 | 2009-11-17 14:54:03 +0100 | [diff] [blame] | 5005 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 5006 | |
Thomas Gleixner | 95e02ca | 2006-06-27 02:55:02 -0700 | [diff] [blame] | 5007 | rt_mutex_adjust_pi(p); |
| 5008 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5009 | return 0; |
| 5010 | } |
Rusty Russell | 961ccdd | 2008-06-23 13:55:38 +1000 | [diff] [blame] | 5011 | |
| 5012 | /** |
| 5013 | * sched_setscheduler - change the scheduling policy and/or RT priority of a thread. |
| 5014 | * @p: the task in question. |
| 5015 | * @policy: new policy. |
| 5016 | * @param: structure containing the new RT priority. |
| 5017 | * |
| 5018 | * NOTE that the task may already be dead. |
| 5019 | */ |
| 5020 | int sched_setscheduler(struct task_struct *p, int policy, |
| 5021 | struct sched_param *param) |
| 5022 | { |
| 5023 | return __sched_setscheduler(p, policy, param, true); |
| 5024 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5025 | EXPORT_SYMBOL_GPL(sched_setscheduler); |
| 5026 | |
Rusty Russell | 961ccdd | 2008-06-23 13:55:38 +1000 | [diff] [blame] | 5027 | /** |
| 5028 | * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace. |
| 5029 | * @p: the task in question. |
| 5030 | * @policy: new policy. |
| 5031 | * @param: structure containing the new RT priority. |
| 5032 | * |
| 5033 | * Just like sched_setscheduler, only don't bother checking if the |
| 5034 | * current context has permission. For example, this is needed in |
| 5035 | * stop_machine(): we create temporary high priority worker threads, |
| 5036 | * but our caller might not have that capability. |
| 5037 | */ |
| 5038 | int sched_setscheduler_nocheck(struct task_struct *p, int policy, |
| 5039 | struct sched_param *param) |
| 5040 | { |
| 5041 | return __sched_setscheduler(p, policy, param, false); |
| 5042 | } |
| 5043 | |
Ingo Molnar | 95cdf3b | 2005-09-10 00:26:11 -0700 | [diff] [blame] | 5044 | static int |
| 5045 | do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5046 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5047 | struct sched_param lparam; |
| 5048 | struct task_struct *p; |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 5049 | int retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5050 | |
| 5051 | if (!param || pid < 0) |
| 5052 | return -EINVAL; |
| 5053 | if (copy_from_user(&lparam, param, sizeof(struct sched_param))) |
| 5054 | return -EFAULT; |
Oleg Nesterov | 5fe1d75 | 2006-09-29 02:00:48 -0700 | [diff] [blame] | 5055 | |
| 5056 | rcu_read_lock(); |
| 5057 | retval = -ESRCH; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5058 | p = find_process_by_pid(pid); |
Oleg Nesterov | 5fe1d75 | 2006-09-29 02:00:48 -0700 | [diff] [blame] | 5059 | if (p != NULL) |
| 5060 | retval = sched_setscheduler(p, policy, &lparam); |
| 5061 | rcu_read_unlock(); |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 5062 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5063 | return retval; |
| 5064 | } |
| 5065 | |
| 5066 | /** |
| 5067 | * sys_sched_setscheduler - set/change the scheduler policy and RT priority |
| 5068 | * @pid: the pid in question. |
| 5069 | * @policy: new policy. |
| 5070 | * @param: structure containing the new RT priority. |
| 5071 | */ |
Heiko Carstens | 5add95d | 2009-01-14 14:14:08 +0100 | [diff] [blame] | 5072 | SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, |
| 5073 | struct sched_param __user *, param) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5074 | { |
Jason Baron | c21761f | 2006-01-18 17:43:03 -0800 | [diff] [blame] | 5075 | /* negative values for policy are not valid */ |
| 5076 | if (policy < 0) |
| 5077 | return -EINVAL; |
| 5078 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5079 | return do_sched_setscheduler(pid, policy, param); |
| 5080 | } |
| 5081 | |
| 5082 | /** |
| 5083 | * sys_sched_setparam - set/change the RT priority of a thread |
| 5084 | * @pid: the pid in question. |
| 5085 | * @param: structure containing the new RT priority. |
| 5086 | */ |
Heiko Carstens | 5add95d | 2009-01-14 14:14:08 +0100 | [diff] [blame] | 5087 | SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5088 | { |
| 5089 | return do_sched_setscheduler(pid, -1, param); |
| 5090 | } |
| 5091 | |
| 5092 | /** |
| 5093 | * sys_sched_getscheduler - get the policy (scheduling class) of a thread |
| 5094 | * @pid: the pid in question. |
| 5095 | */ |
Heiko Carstens | 5add95d | 2009-01-14 14:14:08 +0100 | [diff] [blame] | 5096 | SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5097 | { |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 5098 | struct task_struct *p; |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 5099 | int retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5100 | |
| 5101 | if (pid < 0) |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 5102 | return -EINVAL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5103 | |
| 5104 | retval = -ESRCH; |
Thomas Gleixner | 5fe85be | 2009-12-09 10:14:58 +0000 | [diff] [blame] | 5105 | rcu_read_lock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5106 | p = find_process_by_pid(pid); |
| 5107 | if (p) { |
| 5108 | retval = security_task_getscheduler(p); |
| 5109 | if (!retval) |
Lennart Poettering | ca94c44 | 2009-06-15 17:17:47 +0200 | [diff] [blame] | 5110 | retval = p->policy |
| 5111 | | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5112 | } |
Thomas Gleixner | 5fe85be | 2009-12-09 10:14:58 +0000 | [diff] [blame] | 5113 | rcu_read_unlock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5114 | return retval; |
| 5115 | } |
| 5116 | |
| 5117 | /** |
Lennart Poettering | ca94c44 | 2009-06-15 17:17:47 +0200 | [diff] [blame] | 5118 | * sys_sched_getparam - get the RT priority of a thread |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5119 | * @pid: the pid in question. |
| 5120 | * @param: structure containing the RT priority. |
| 5121 | */ |
Heiko Carstens | 5add95d | 2009-01-14 14:14:08 +0100 | [diff] [blame] | 5122 | SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5123 | { |
| 5124 | struct sched_param lp; |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 5125 | struct task_struct *p; |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 5126 | int retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5127 | |
| 5128 | if (!param || pid < 0) |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 5129 | return -EINVAL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5130 | |
Thomas Gleixner | 5fe85be | 2009-12-09 10:14:58 +0000 | [diff] [blame] | 5131 | rcu_read_lock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5132 | p = find_process_by_pid(pid); |
| 5133 | retval = -ESRCH; |
| 5134 | if (!p) |
| 5135 | goto out_unlock; |
| 5136 | |
| 5137 | retval = security_task_getscheduler(p); |
| 5138 | if (retval) |
| 5139 | goto out_unlock; |
| 5140 | |
| 5141 | lp.sched_priority = p->rt_priority; |
Thomas Gleixner | 5fe85be | 2009-12-09 10:14:58 +0000 | [diff] [blame] | 5142 | rcu_read_unlock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5143 | |
| 5144 | /* |
| 5145 | * This one might sleep, we cannot do it with a spinlock held ... |
| 5146 | */ |
| 5147 | retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0; |
| 5148 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5149 | return retval; |
| 5150 | |
| 5151 | out_unlock: |
Thomas Gleixner | 5fe85be | 2009-12-09 10:14:58 +0000 | [diff] [blame] | 5152 | rcu_read_unlock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5153 | return retval; |
| 5154 | } |
| 5155 | |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 5156 | long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5157 | { |
Rusty Russell | 5a16f3d | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 5158 | cpumask_var_t cpus_allowed, new_mask; |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 5159 | struct task_struct *p; |
| 5160 | int retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5161 | |
Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 5162 | get_online_cpus(); |
Thomas Gleixner | 23f5d14 | 2009-12-09 10:15:01 +0000 | [diff] [blame] | 5163 | rcu_read_lock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5164 | |
| 5165 | p = find_process_by_pid(pid); |
| 5166 | if (!p) { |
Thomas Gleixner | 23f5d14 | 2009-12-09 10:15:01 +0000 | [diff] [blame] | 5167 | rcu_read_unlock(); |
Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 5168 | put_online_cpus(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5169 | return -ESRCH; |
| 5170 | } |
| 5171 | |
Thomas Gleixner | 23f5d14 | 2009-12-09 10:15:01 +0000 | [diff] [blame] | 5172 | /* Prevent p going away */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5173 | get_task_struct(p); |
Thomas Gleixner | 23f5d14 | 2009-12-09 10:15:01 +0000 | [diff] [blame] | 5174 | rcu_read_unlock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5175 | |
Rusty Russell | 5a16f3d | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 5176 | if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { |
| 5177 | retval = -ENOMEM; |
| 5178 | goto out_put_task; |
| 5179 | } |
| 5180 | if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { |
| 5181 | retval = -ENOMEM; |
| 5182 | goto out_free_cpus_allowed; |
| 5183 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5184 | retval = -EPERM; |
David Howells | c69e8d9 | 2008-11-14 10:39:19 +1100 | [diff] [blame] | 5185 | if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5186 | goto out_unlock; |
| 5187 | |
KOSAKI Motohiro | b0ae198 | 2010-10-15 04:21:18 +0900 | [diff] [blame] | 5188 | retval = security_task_setscheduler(p); |
David Quigley | e7834f8 | 2006-06-23 02:03:59 -0700 | [diff] [blame] | 5189 | if (retval) |
| 5190 | goto out_unlock; |
| 5191 | |
Rusty Russell | 5a16f3d | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 5192 | cpuset_cpus_allowed(p, cpus_allowed); |
| 5193 | cpumask_and(new_mask, in_mask, cpus_allowed); |
Peter Zijlstra | 4924627 | 2010-10-17 21:46:10 +0200 | [diff] [blame] | 5194 | again: |
Rusty Russell | 5a16f3d | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 5195 | retval = set_cpus_allowed_ptr(p, new_mask); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5196 | |
Paul Menage | 8707d8b | 2007-10-18 23:40:22 -0700 | [diff] [blame] | 5197 | if (!retval) { |
Rusty Russell | 5a16f3d | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 5198 | cpuset_cpus_allowed(p, cpus_allowed); |
| 5199 | if (!cpumask_subset(new_mask, cpus_allowed)) { |
Paul Menage | 8707d8b | 2007-10-18 23:40:22 -0700 | [diff] [blame] | 5200 | /* |
| 5201 | * We must have raced with a concurrent cpuset |
| 5202 | * update. Just reset the cpus_allowed to the |
| 5203 | * cpuset's cpus_allowed |
| 5204 | */ |
Rusty Russell | 5a16f3d | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 5205 | cpumask_copy(new_mask, cpus_allowed); |
Paul Menage | 8707d8b | 2007-10-18 23:40:22 -0700 | [diff] [blame] | 5206 | goto again; |
| 5207 | } |
| 5208 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5209 | out_unlock: |
Rusty Russell | 5a16f3d | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 5210 | free_cpumask_var(new_mask); |
| 5211 | out_free_cpus_allowed: |
| 5212 | free_cpumask_var(cpus_allowed); |
| 5213 | out_put_task: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5214 | put_task_struct(p); |
Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 5215 | put_online_cpus(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5216 | return retval; |
| 5217 | } |
| 5218 | |
| 5219 | static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 5220 | struct cpumask *new_mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5221 | { |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 5222 | if (len < cpumask_size()) |
| 5223 | cpumask_clear(new_mask); |
| 5224 | else if (len > cpumask_size()) |
| 5225 | len = cpumask_size(); |
| 5226 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5227 | return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; |
| 5228 | } |
| 5229 | |
| 5230 | /** |
| 5231 | * sys_sched_setaffinity - set the cpu affinity of a process |
| 5232 | * @pid: pid of the process |
| 5233 | * @len: length in bytes of the bitmask pointed to by user_mask_ptr |
| 5234 | * @user_mask_ptr: user-space pointer to the new cpu mask |
| 5235 | */ |
Heiko Carstens | 5add95d | 2009-01-14 14:14:08 +0100 | [diff] [blame] | 5236 | SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, |
| 5237 | unsigned long __user *, user_mask_ptr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5238 | { |
Rusty Russell | 5a16f3d | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 5239 | cpumask_var_t new_mask; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5240 | int retval; |
| 5241 | |
Rusty Russell | 5a16f3d | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 5242 | if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) |
| 5243 | return -ENOMEM; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5244 | |
Rusty Russell | 5a16f3d | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 5245 | retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); |
| 5246 | if (retval == 0) |
| 5247 | retval = sched_setaffinity(pid, new_mask); |
| 5248 | free_cpumask_var(new_mask); |
| 5249 | return retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5250 | } |
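| | /* |
| | * Illustrative userspace usage (an assumption, not part of the original |
| | * source): pinning the calling thread to CPU 0 via the libc wrapper: |
| | * |
| | *	#define _GNU_SOURCE |
| | *	#include <sched.h> |
| | *	#include <stdio.h> |
| | * |
| | *	cpu_set_t set; |
| | * |
| | *	CPU_ZERO(&set); |
| | *	CPU_SET(0, &set); |
| | *	if (sched_setaffinity(0, sizeof(set), &set) == -1) |
| | *		perror("sched_setaffinity"); |
| | */ |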
| 5251 | |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 5252 | long sched_getaffinity(pid_t pid, struct cpumask *mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5253 | { |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 5254 | struct task_struct *p; |
Thomas Gleixner | 3160568 | 2009-12-08 20:24:16 +0000 | [diff] [blame] | 5255 | unsigned long flags; |
| 5256 | struct rq *rq; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5257 | int retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5258 | |
Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 5259 | get_online_cpus(); |
Thomas Gleixner | 23f5d14 | 2009-12-09 10:15:01 +0000 | [diff] [blame] | 5260 | rcu_read_lock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5261 | |
| 5262 | retval = -ESRCH; |
| 5263 | p = find_process_by_pid(pid); |
| 5264 | if (!p) |
| 5265 | goto out_unlock; |
| 5266 | |
David Quigley | e7834f8 | 2006-06-23 02:03:59 -0700 | [diff] [blame] | 5267 | retval = security_task_getscheduler(p); |
| 5268 | if (retval) |
| 5269 | goto out_unlock; |
| 5270 | |
Thomas Gleixner | 3160568 | 2009-12-08 20:24:16 +0000 | [diff] [blame] | 5271 | rq = task_rq_lock(p, &flags); |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 5272 | cpumask_and(mask, &p->cpus_allowed, cpu_online_mask); |
Thomas Gleixner | 3160568 | 2009-12-08 20:24:16 +0000 | [diff] [blame] | 5273 | task_rq_unlock(rq, &flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5274 | |
| 5275 | out_unlock: |
Thomas Gleixner | 23f5d14 | 2009-12-09 10:15:01 +0000 | [diff] [blame] | 5276 | rcu_read_unlock(); |
Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 5277 | put_online_cpus(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5278 | |
Ulrich Drepper | 9531b62 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 5279 | return retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5280 | } |
| 5281 | |
| 5282 | /** |
| 5283 | * sys_sched_getaffinity - get the cpu affinity of a process |
| 5284 | * @pid: pid of the process |
| 5285 | * @len: length in bytes of the bitmask pointed to by user_mask_ptr |
| 5286 | * @user_mask_ptr: user-space pointer to hold the current cpu mask |
| 5287 | */ |
Heiko Carstens | 5add95d | 2009-01-14 14:14:08 +0100 | [diff] [blame] | 5288 | SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, |
| 5289 | unsigned long __user *, user_mask_ptr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5290 | { |
| 5291 | int ret; |
Rusty Russell | f17c860 | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 5292 | cpumask_var_t mask; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5293 | |
Anton Blanchard | 84fba5e | 2010-04-06 17:02:19 +1000 | [diff] [blame] | 5294 | if ((len * BITS_PER_BYTE) < nr_cpu_ids) |
KOSAKI Motohiro | cd3d803 | 2010-03-12 16:15:36 +0900 | [diff] [blame] | 5295 | return -EINVAL; |
| 5296 | if (len & (sizeof(unsigned long)-1)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5297 | return -EINVAL; |
| 5298 | |
Rusty Russell | f17c860 | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 5299 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) |
| 5300 | return -ENOMEM; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5301 | |
Rusty Russell | f17c860 | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 5302 | ret = sched_getaffinity(pid, mask); |
| 5303 | if (ret == 0) { |
KOSAKI Motohiro | 8bc037f | 2010-03-17 09:36:58 +0900 | [diff] [blame] | 5304 | size_t retlen = min_t(size_t, len, cpumask_size()); |
KOSAKI Motohiro | cd3d803 | 2010-03-12 16:15:36 +0900 | [diff] [blame] | 5305 | |
| 5306 | if (copy_to_user(user_mask_ptr, mask, retlen)) |
Rusty Russell | f17c860 | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 5307 | ret = -EFAULT; |
| 5308 | else |
KOSAKI Motohiro | cd3d803 | 2010-03-12 16:15:36 +0900 | [diff] [blame] | 5309 | ret = retlen; |
Rusty Russell | f17c860 | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 5310 | } |
| 5311 | free_cpumask_var(mask); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5312 | |
Rusty Russell | f17c860 | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 5313 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5314 | } |
| 5315 | |
| 5316 | /** |
| 5317 | * sys_sched_yield - yield the current processor to other threads. |
| 5318 | * |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 5319 | * This function yields the current CPU to other tasks. If there are no |
| 5320 | * other threads running on this CPU then this function will return. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5321 | */ |
Heiko Carstens | 5add95d | 2009-01-14 14:14:08 +0100 | [diff] [blame] | 5322 | SYSCALL_DEFINE0(sched_yield) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5323 | { |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 5324 | struct rq *rq = this_rq_lock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5325 | |
Ingo Molnar | 2d72376 | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 5326 | schedstat_inc(rq, yld_count); |
Dmitry Adamushko | 4530d7a | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 5327 | current->sched_class->yield_task(rq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5328 | |
| 5329 | /* |
| 5330 | * Since we are going to call schedule() anyway, there's |
| 5331 | * no need to preempt or enable interrupts: |
| 5332 | */ |
| 5333 | __release(rq->lock); |
Ingo Molnar | 8a25d5d | 2006-07-03 00:24:54 -0700 | [diff] [blame] | 5334 | spin_release(&rq->lock.dep_map, 1, _THIS_IP_); |
Thomas Gleixner | 9828ea9 | 2009-12-03 20:55:53 +0100 | [diff] [blame] | 5335 | do_raw_spin_unlock(&rq->lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5336 | preempt_enable_no_resched(); |
| 5337 | |
| 5338 | schedule(); |
| 5339 | |
| 5340 | return 0; |
| 5341 | } |
| 5342 | |
Peter Zijlstra | d86ee48 | 2009-07-10 14:57:57 +0200 | [diff] [blame] | 5343 | static inline int should_resched(void) |
| 5344 | { |
| 5345 | return need_resched() && !(preempt_count() & PREEMPT_ACTIVE); |
| 5346 | } |
| 5347 | |
Andrew Morton | e7b3840 | 2006-06-30 01:56:00 -0700 | [diff] [blame] | 5348 | static void __cond_resched(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5349 | { |
Frederic Weisbecker | e7aaaa6 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 5350 | add_preempt_count(PREEMPT_ACTIVE); |
| 5351 | schedule(); |
| 5352 | sub_preempt_count(PREEMPT_ACTIVE); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5353 | } |
| 5354 | |
Herbert Xu | 02b67cc3 | 2008-01-25 21:08:28 +0100 | [diff] [blame] | 5355 | int __sched _cond_resched(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5356 | { |
Peter Zijlstra | d86ee48 | 2009-07-10 14:57:57 +0200 | [diff] [blame] | 5357 | if (should_resched()) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5358 | __cond_resched(); |
| 5359 | return 1; |
| 5360 | } |
| 5361 | return 0; |
| 5362 | } |
Herbert Xu | 02b67cc3 | 2008-01-25 21:08:28 +0100 | [diff] [blame] | 5363 | EXPORT_SYMBOL(_cond_resched); |
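| | /* |
| | * Illustrative in-kernel usage (a sketch, not part of the original |
| | * source): long-running loops typically call cond_resched() once per |
| | * iteration so a pending reschedule is honoured even without preemption: |
| | * |
| | *	for (i = 0; i < nr_items; i++) { |
| | *		process_item(i);	-- hypothetical per-item work |
| | *		cond_resched(); |
| | *	} |
| | */ |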
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5364 | |
| 5365 | /* |
Frederic Weisbecker | 613afbf | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 5366 | * __cond_resched_lock() - if a reschedule is pending, drop the given lock, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5367 | * call schedule, and on return reacquire the lock. |
| 5368 | * |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 5369 | * This works OK both with and without CONFIG_PREEMPT. We do strange low-level |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5370 | * operations here to prevent schedule() from being called twice (once via |
| 5371 | * spin_unlock(), once by hand). |
| 5372 | */ |
Frederic Weisbecker | 613afbf | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 5373 | int __cond_resched_lock(spinlock_t *lock) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5374 | { |
Peter Zijlstra | d86ee48 | 2009-07-10 14:57:57 +0200 | [diff] [blame] | 5375 | int resched = should_resched(); |
Jan Kara | 6df3cec | 2005-06-13 15:52:32 -0700 | [diff] [blame] | 5376 | int ret = 0; |
| 5377 | |
Peter Zijlstra | f607c66 | 2009-07-20 19:16:29 +0200 | [diff] [blame] | 5378 | lockdep_assert_held(lock); |
| 5379 | |
Nick Piggin | 95c354f | 2008-01-30 13:31:20 +0100 | [diff] [blame] | 5380 | if (spin_needbreak(lock) || resched) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5381 | spin_unlock(lock); |
Peter Zijlstra | d86ee48 | 2009-07-10 14:57:57 +0200 | [diff] [blame] | 5382 | if (resched) |
Nick Piggin | 95c354f | 2008-01-30 13:31:20 +0100 | [diff] [blame] | 5383 | __cond_resched(); |
| 5384 | else |
| 5385 | cpu_relax(); |
Jan Kara | 6df3cec | 2005-06-13 15:52:32 -0700 | [diff] [blame] | 5386 | ret = 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5387 | spin_lock(lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5388 | } |
Jan Kara | 6df3cec | 2005-06-13 15:52:32 -0700 | [diff] [blame] | 5389 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5390 | } |
Frederic Weisbecker | 613afbf | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 5391 | EXPORT_SYMBOL(__cond_resched_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5392 | |
Frederic Weisbecker | 613afbf | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 5393 | int __sched __cond_resched_softirq(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5394 | { |
| 5395 | BUG_ON(!in_softirq()); |
| 5396 | |
Peter Zijlstra | d86ee48 | 2009-07-10 14:57:57 +0200 | [diff] [blame] | 5397 | if (should_resched()) { |
Thomas Gleixner | 98d82567 | 2007-05-23 13:58:18 -0700 | [diff] [blame] | 5398 | local_bh_enable(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5399 | __cond_resched(); |
| 5400 | local_bh_disable(); |
| 5401 | return 1; |
| 5402 | } |
| 5403 | return 0; |
| 5404 | } |
Frederic Weisbecker | 613afbf | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 5405 | EXPORT_SYMBOL(__cond_resched_softirq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5406 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5407 | /** |
| 5408 | * yield - yield the current processor to other threads. |
| 5409 | * |
Robert P. J. Day | 72fd4a3 | 2007-02-10 01:45:59 -0800 | [diff] [blame] | 5410 | * This is a shortcut for kernel-space yielding - it marks the |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5411 | * thread runnable and calls sys_sched_yield(). |
| 5412 | */ |
| 5413 | void __sched yield(void) |
| 5414 | { |
| 5415 | set_current_state(TASK_RUNNING); |
| 5416 | sys_sched_yield(); |
| 5417 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5418 | EXPORT_SYMBOL(yield); |
| 5419 | |
| 5420 | /* |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 5421 | * This task is about to go to sleep on IO. Increment rq->nr_iowait so |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5422 | * that process accounting knows that this is a task in IO wait state. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5423 | */ |
| 5424 | void __sched io_schedule(void) |
| 5425 | { |
Hitoshi Mitake | 54d35f2 | 2009-06-29 14:44:57 +0900 | [diff] [blame] | 5426 | struct rq *rq = raw_rq(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5427 | |
Shailabh Nagar | 0ff9224 | 2006-07-14 00:24:37 -0700 | [diff] [blame] | 5428 | delayacct_blkio_start(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5429 | atomic_inc(&rq->nr_iowait); |
Arjan van de Ven | 8f0dfc3 | 2009-07-20 11:26:58 -0700 | [diff] [blame] | 5430 | current->in_iowait = 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5431 | schedule(); |
Arjan van de Ven | 8f0dfc3 | 2009-07-20 11:26:58 -0700 | [diff] [blame] | 5432 | current->in_iowait = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5433 | atomic_dec(&rq->nr_iowait); |
Shailabh Nagar | 0ff9224 | 2006-07-14 00:24:37 -0700 | [diff] [blame] | 5434 | delayacct_blkio_end(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5435 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5436 | EXPORT_SYMBOL(io_schedule); |
| 5437 | |
| 5438 | long __sched io_schedule_timeout(long timeout) |
| 5439 | { |
Hitoshi Mitake | 54d35f2 | 2009-06-29 14:44:57 +0900 | [diff] [blame] | 5440 | struct rq *rq = raw_rq(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5441 | long ret; |
| 5442 | |
Shailabh Nagar | 0ff9224 | 2006-07-14 00:24:37 -0700 | [diff] [blame] | 5443 | delayacct_blkio_start(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5444 | atomic_inc(&rq->nr_iowait); |
Arjan van de Ven | 8f0dfc3 | 2009-07-20 11:26:58 -0700 | [diff] [blame] | 5445 | current->in_iowait = 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5446 | ret = schedule_timeout(timeout); |
Arjan van de Ven | 8f0dfc3 | 2009-07-20 11:26:58 -0700 | [diff] [blame] | 5447 | current->in_iowait = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5448 | atomic_dec(&rq->nr_iowait); |
Shailabh Nagar | 0ff9224 | 2006-07-14 00:24:37 -0700 | [diff] [blame] | 5449 | delayacct_blkio_end(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5450 | return ret; |
| 5451 | } |
| 5452 | |
| 5453 | /** |
| 5454 | * sys_sched_get_priority_max - return maximum RT priority. |
| 5455 | * @policy: scheduling class. |
| 5456 | * |
| 5457 | * this syscall returns the maximum rt_priority that can be used |
| 5458 | * by a given scheduling class. |
| 5459 | */ |
Heiko Carstens | 5add95d | 2009-01-14 14:14:08 +0100 | [diff] [blame] | 5460 | SYSCALL_DEFINE1(sched_get_priority_max, int, policy) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5461 | { |
| 5462 | int ret = -EINVAL; |
| 5463 | |
| 5464 | switch (policy) { |
| 5465 | case SCHED_FIFO: |
| 5466 | case SCHED_RR: |
| 5467 | ret = MAX_USER_RT_PRIO-1; |
| 5468 | break; |
| 5469 | case SCHED_NORMAL: |
Ingo Molnar | b0a9499 | 2006-01-14 13:20:41 -0800 | [diff] [blame] | 5470 | case SCHED_BATCH: |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 5471 | case SCHED_IDLE: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5472 | ret = 0; |
| 5473 | break; |
| 5474 | } |
| 5475 | return ret; |
| 5476 | } |
| 5477 | |
| 5478 | /** |
| 5479 | * sys_sched_get_priority_min - return minimum RT priority. |
| 5480 | * @policy: scheduling class. |
| 5481 | * |
| 5482 | * this syscall returns the minimum rt_priority that can be used |
| 5483 | * by a given scheduling class. |
| 5484 | */ |
Heiko Carstens | 5add95d | 2009-01-14 14:14:08 +0100 | [diff] [blame] | 5485 | SYSCALL_DEFINE1(sched_get_priority_min, int, policy) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5486 | { |
| 5487 | int ret = -EINVAL; |
| 5488 | |
| 5489 | switch (policy) { |
| 5490 | case SCHED_FIFO: |
| 5491 | case SCHED_RR: |
| 5492 | ret = 1; |
| 5493 | break; |
| 5494 | case SCHED_NORMAL: |
Ingo Molnar | b0a9499 | 2006-01-14 13:20:41 -0800 | [diff] [blame] | 5495 | case SCHED_BATCH: |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 5496 | case SCHED_IDLE: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5497 | ret = 0; |
| 5498 | } |
| 5499 | return ret; |
| 5500 | } |
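| | /* |
| | * Illustrative values (not part of the original source): with the |
| | * default MAX_USER_RT_PRIO of 100, sched_get_priority_max(SCHED_FIFO) |
| | * returns 99 and sched_get_priority_min(SCHED_FIFO) returns 1; both |
| | * return 0 for SCHED_NORMAL, SCHED_BATCH and SCHED_IDLE. |
| | */ |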
| 5501 | |
| 5502 | /** |
| 5503 | * sys_sched_rr_get_interval - return the default timeslice of a process. |
| 5504 | * @pid: pid of the process. |
| 5505 | * @interval: userspace pointer to the timeslice value. |
| 5506 | * |
| 5507 | * this syscall writes the default timeslice value of a given process |
| 5508 | * into the user-space timespec buffer. A value of '0' means infinity. |
| 5509 | */ |
Heiko Carstens | 17da2bd | 2009-01-14 14:14:10 +0100 | [diff] [blame] | 5510 | SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, |
Heiko Carstens | 754fe8d | 2009-01-14 14:14:09 +0100 | [diff] [blame] | 5511 | struct timespec __user *, interval) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5512 | { |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 5513 | struct task_struct *p; |
Dmitry Adamushko | a4ec24b | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 5514 | unsigned int time_slice; |
Thomas Gleixner | dba091b | 2009-12-09 09:32:03 +0100 | [diff] [blame] | 5515 | unsigned long flags; |
| 5516 | struct rq *rq; |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 5517 | int retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5518 | struct timespec t; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5519 | |
| 5520 | if (pid < 0) |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 5521 | return -EINVAL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5522 | |
| 5523 | retval = -ESRCH; |
Thomas Gleixner | 1a551ae | 2009-12-09 10:15:11 +0000 | [diff] [blame] | 5524 | rcu_read_lock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5525 | p = find_process_by_pid(pid); |
| 5526 | if (!p) |
| 5527 | goto out_unlock; |
| 5528 | |
| 5529 | retval = security_task_getscheduler(p); |
| 5530 | if (retval) |
| 5531 | goto out_unlock; |
| 5532 | |
Thomas Gleixner | dba091b | 2009-12-09 09:32:03 +0100 | [diff] [blame] | 5533 | rq = task_rq_lock(p, &flags); |
| 5534 | time_slice = p->sched_class->get_rr_interval(rq, p); |
| 5535 | task_rq_unlock(rq, &flags); |
Dmitry Adamushko | a4ec24b | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 5536 | |
Thomas Gleixner | 1a551ae | 2009-12-09 10:15:11 +0000 | [diff] [blame] | 5537 | rcu_read_unlock(); |
Dmitry Adamushko | a4ec24b | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 5538 | jiffies_to_timespec(time_slice, &t); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5539 | retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5540 | return retval; |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 5541 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5542 | out_unlock: |
Thomas Gleixner | 1a551ae | 2009-12-09 10:15:11 +0000 | [diff] [blame] | 5543 | rcu_read_unlock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5544 | return retval; |
| 5545 | } |
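| | /* |
| | * Illustrative userspace usage (an assumption, not part of the original |
| | * source): querying the current thread's round-robin timeslice: |
| | * |
| | *	#include <sched.h> |
| | *	#include <stdio.h> |
| | *	#include <time.h> |
| | * |
| | *	struct timespec ts; |
| | * |
| | *	if (sched_rr_get_interval(0, &ts) == 0) |
| | *		printf("timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec); |
| | */ |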
| 5546 | |
Steven Rostedt | 7c731e0 | 2008-05-12 21:20:41 +0200 | [diff] [blame] | 5547 | static const char stat_nam[] = TASK_STATE_TO_CHAR_STR; |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 5548 | |
Ingo Molnar | 82a1fcb | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 5549 | void sched_show_task(struct task_struct *p) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5550 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5551 | unsigned long free = 0; |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 5552 | unsigned state; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5553 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5554 | state = p->state ? __ffs(p->state) + 1 : 0; |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 5555 | printk(KERN_INFO "%-13.13s %c", p->comm, |
Andreas Mohr | 2ed6e34 | 2006-07-10 04:43:52 -0700 | [diff] [blame] | 5556 | state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?'); |
Ingo Molnar | 4bd7732 | 2007-07-11 21:21:47 +0200 | [diff] [blame] | 5557 | #if BITS_PER_LONG == 32 |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5558 | if (state == TASK_RUNNING) |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 5559 | printk(KERN_CONT " running "); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5560 | else |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 5561 | printk(KERN_CONT " %08lx ", thread_saved_pc(p)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5562 | #else |
| 5563 | if (state == TASK_RUNNING) |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 5564 | printk(KERN_CONT " running task "); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5565 | else |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 5566 | printk(KERN_CONT " %016lx ", thread_saved_pc(p)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5567 | #endif |
| 5568 | #ifdef CONFIG_DEBUG_STACK_USAGE |
Eric Sandeen | 7c9f886 | 2008-04-22 16:38:23 -0500 | [diff] [blame] | 5569 | free = stack_not_used(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5570 | #endif |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 5571 | printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free, |
David Rientjes | aa47b7e | 2009-05-04 01:38:05 -0700 | [diff] [blame] | 5572 | task_pid_nr(p), task_pid_nr(p->real_parent), |
| 5573 | (unsigned long)task_thread_info(p)->flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5574 | |
Nick Piggin | 5fb5e6d | 2008-01-25 21:08:34 +0100 | [diff] [blame] | 5575 | show_stack(p, NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5576 | } |
| 5577 | |
Ingo Molnar | e59e2ae | 2006-12-06 20:35:59 -0800 | [diff] [blame] | 5578 | void show_state_filter(unsigned long state_filter) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5579 | { |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 5580 | struct task_struct *g, *p; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5581 | |
Ingo Molnar | 4bd7732 | 2007-07-11 21:21:47 +0200 | [diff] [blame] | 5582 | #if BITS_PER_LONG == 32 |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 5583 | printk(KERN_INFO |
| 5584 | " task PC stack pid father\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5585 | #else |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 5586 | printk(KERN_INFO |
| 5587 | " task PC stack pid father\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5588 | #endif |
| 5589 | read_lock(&tasklist_lock); |
| 5590 | do_each_thread(g, p) { |
| 5591 | /* |
| 5592 | * reset the NMI-timeout, listing all tasks on a slow |
| 5593 | * console might take a lot of time: |
| 5594 | */ |
| 5595 | touch_nmi_watchdog(); |
Ingo Molnar | 39bc89f | 2007-04-25 20:50:03 -0700 | [diff] [blame] | 5596 | if (!state_filter || (p->state & state_filter)) |
Ingo Molnar | 82a1fcb | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 5597 | sched_show_task(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5598 | } while_each_thread(g, p); |
| 5599 | |
Jeremy Fitzhardinge | 04c9167 | 2007-05-08 00:28:05 -0700 | [diff] [blame] | 5600 | touch_all_softlockup_watchdogs(); |
| 5601 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 5602 | #ifdef CONFIG_SCHED_DEBUG |
| 5603 | sysrq_sched_debug_show(); |
| 5604 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5605 | read_unlock(&tasklist_lock); |
Ingo Molnar | e59e2ae | 2006-12-06 20:35:59 -0800 | [diff] [blame] | 5606 | /* |
| 5607 | * Only show locks if all tasks are dumped: |
| 5608 | */ |
Shmulik Ladkani | 93335a2 | 2009-11-25 15:23:41 +0200 | [diff] [blame] | 5609 | if (!state_filter) |
Ingo Molnar | e59e2ae | 2006-12-06 20:35:59 -0800 | [diff] [blame] | 5610 | debug_show_all_locks(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5611 | } |
| 5612 | |
Ingo Molnar | 1df2105 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5613 | void __cpuinit init_idle_bootup_task(struct task_struct *idle) |
| 5614 | { |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 5615 | idle->sched_class = &idle_sched_class; |
Ingo Molnar | 1df2105 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5616 | } |
| 5617 | |
Ingo Molnar | f340c0d | 2005-06-28 16:40:42 +0200 | [diff] [blame] | 5618 | /** |
| 5619 | * init_idle - set up an idle thread for a given CPU |
| 5620 | * @idle: task in question |
| 5621 | * @cpu: cpu the idle task belongs to |
| 5622 | * |
| 5623 | * NOTE: this function does not set the idle thread's NEED_RESCHED |
| 5624 | * flag, to make booting more robust. |
| 5625 | */ |
Nick Piggin | 5c1e176 | 2006-10-03 01:14:04 -0700 | [diff] [blame] | 5626 | void __cpuinit init_idle(struct task_struct *idle, int cpu) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5627 | { |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 5628 | struct rq *rq = cpu_rq(cpu); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5629 | unsigned long flags; |
| 5630 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 5631 | raw_spin_lock_irqsave(&rq->lock, flags); |
Ingo Molnar | 5cbd54e | 2008-11-12 20:05:50 +0100 | [diff] [blame] | 5632 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 5633 | __sched_fork(idle); |
Peter Zijlstra | 06b83b5 | 2009-12-16 18:04:35 +0100 | [diff] [blame] | 5634 | idle->state = TASK_RUNNING; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 5635 | idle->se.exec_start = sched_clock(); |
| 5636 | |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 5637 | cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu)); |
Peter Zijlstra | 6506cf6c | 2010-09-16 17:50:31 +0200 | [diff] [blame] | 5638 | /* |
| 5639 | * We have a chicken-and-egg problem: even though we are |
| 5640 | * holding rq->lock, the cpu isn't yet set to this cpu, so the |
| 5641 | * lockdep check in task_group() will fail. |
| 5642 | * |
| 5643 | * This is similar to the sched_fork() case; alternatively we could |
| 5644 | * use task_rq_lock() here and obtain the other rq->lock. |
| 5645 | * |
| 5646 | * Silence PROVE_RCU |
| 5647 | */ |
| 5648 | rcu_read_lock(); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 5649 | __set_task_cpu(idle, cpu); |
Peter Zijlstra | 6506cf6c | 2010-09-16 17:50:31 +0200 | [diff] [blame] | 5650 | rcu_read_unlock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5651 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5652 | rq->curr = rq->idle = idle; |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 5653 | #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) |
| 5654 | idle->oncpu = 1; |
| 5655 | #endif |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 5656 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5657 | |
| 5658 | /* Set the preempt count _outside_ the spinlocks! */ |
Linus Torvalds | 8e3e076 | 2008-05-10 20:58:02 -0700 | [diff] [blame] | 5659 | #if defined(CONFIG_PREEMPT) |
| 5660 | task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0); |
| 5661 | #else |
Al Viro | a1261f54 | 2005-11-13 16:06:55 -0800 | [diff] [blame] | 5662 | task_thread_info(idle)->preempt_count = 0; |
Linus Torvalds | 8e3e076 | 2008-05-10 20:58:02 -0700 | [diff] [blame] | 5663 | #endif |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 5664 | /* |
| 5665 | * The idle tasks have their own, simple scheduling class: |
| 5666 | */ |
| 5667 | idle->sched_class = &idle_sched_class; |
Frederic Weisbecker | fb52607 | 2008-11-25 21:07:04 +0100 | [diff] [blame] | 5668 | ftrace_graph_init_task(idle); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5669 | } |
| 5670 | |
| 5671 | /* |
| 5672 | * In a system that switches off the HZ timer, nohz_cpu_mask |
| 5673 | * indicates which cpus entered this state. This is used |
| 5674 | * in the RCU update to wait only for active cpus. For systems |
| 5675 | * which do not switch off the HZ timer, nohz_cpu_mask should |
Rusty Russell | 6a7b3dc | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 5676 | * always be CPU_BITS_NONE. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5677 | */ |
Rusty Russell | 6a7b3dc | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 5678 | cpumask_var_t nohz_cpu_mask; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5679 | |
Ingo Molnar | 19978ca | 2007-11-09 22:39:38 +0100 | [diff] [blame] | 5680 | /* |
| 5681 | * Increase the granularity value when there are more CPUs, |
| 5682 | * because with more CPUs the 'effective latency' as visible |
| 5683 | * to users decreases. But the relationship is not linear, |
| 5684 | * so pick a second-best guess by going with the log2 of the |
| 5685 | * number of CPUs. |
| 5686 | * |
| 5687 | * This idea comes from the SD scheduler of Con Kolivas: |
| 5688 | */ |
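| | /* |
| | * A worked example of this scaling (illustrative only): with the default |
| | * SCHED_TUNABLESCALING_LOG policy below, cpus is clamped to at most 8, so |
| | * 1 CPU gives factor 1, 2 CPUs give 2, 4 CPUs give 3, and 8 or more CPUs |
| | * give 1 + ilog2(8) = 4. Each tunable is then its normalized default |
| | * multiplied by this factor. |
| | */ |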
Christian Ehrhardt | acb4a84 | 2009-11-30 12:16:48 +0100 | [diff] [blame] | 5689 | static int get_update_sysctl_factor(void) |
Christian Ehrhardt | 0bcdcf2 | 2009-11-30 12:16:46 +0100 | [diff] [blame] | 5690 | { |
Mike Galbraith | 4ca3ef7 | 2009-12-10 09:25:53 +0100 | [diff] [blame] | 5691 | unsigned int cpus = min_t(int, num_online_cpus(), 8); |
Christian Ehrhardt | 1983a92 | 2009-11-30 12:16:47 +0100 | [diff] [blame] | 5692 | unsigned int factor; |
| 5693 | |
| 5694 | switch (sysctl_sched_tunable_scaling) { |
| 5695 | case SCHED_TUNABLESCALING_NONE: |
| 5696 | factor = 1; |
| 5697 | break; |
| 5698 | case SCHED_TUNABLESCALING_LINEAR: |
| 5699 | factor = cpus; |
| 5700 | break; |
| 5701 | case SCHED_TUNABLESCALING_LOG: |
| 5702 | default: |
| 5703 | factor = 1 + ilog2(cpus); |
| 5704 | break; |
| 5705 | } |
Christian Ehrhardt | 0bcdcf2 | 2009-11-30 12:16:46 +0100 | [diff] [blame] | 5706 | |
Christian Ehrhardt | acb4a84 | 2009-11-30 12:16:48 +0100 | [diff] [blame] | 5707 | return factor; |
| 5708 | } |
| 5709 | |
| 5710 | static void update_sysctl(void) |
| 5711 | { |
| 5712 | unsigned int factor = get_update_sysctl_factor(); |
| 5713 | |
Christian Ehrhardt | 0bcdcf2 | 2009-11-30 12:16:46 +0100 | [diff] [blame] | 5714 | #define SET_SYSCTL(name) \ |
| 5715 | (sysctl_##name = (factor) * normalized_sysctl_##name) |
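| | /* |
| | * For example, SET_SYSCTL(sched_latency) below expands to: |
| | * sysctl_sched_latency = (factor) * normalized_sysctl_sched_latency; |
| | */ |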
| 5716 | SET_SYSCTL(sched_min_granularity); |
| 5717 | SET_SYSCTL(sched_latency); |
| 5718 | SET_SYSCTL(sched_wakeup_granularity); |
| 5719 | SET_SYSCTL(sched_shares_ratelimit); |
| 5720 | #undef SET_SYSCTL |
| 5721 | } |
| 5722 | |
Ingo Molnar | 19978ca | 2007-11-09 22:39:38 +0100 | [diff] [blame] | 5723 | static inline void sched_init_granularity(void) |
| 5724 | { |
Christian Ehrhardt | 0bcdcf2 | 2009-11-30 12:16:46 +0100 | [diff] [blame] | 5725 | update_sysctl(); |
Ingo Molnar | 19978ca | 2007-11-09 22:39:38 +0100 | [diff] [blame] | 5726 | } |
| 5727 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5728 | #ifdef CONFIG_SMP |
| 5729 | /* |
| 5730 | * This is how migration works: |
| 5731 | * |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 5732 | * 1) we invoke migration_cpu_stop() on the target CPU using |
| 5733 | * stop_one_cpu(). |
| 5734 | * 2) stopper starts to run (implicitly forcing the migrated thread |
| 5735 | * off the CPU) |
| 5736 | * 3) it checks whether the migrated task is still in the wrong runqueue. |
| 5737 | * 4) if it's in the wrong runqueue then the stopper removes |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5738 | * it and puts it into the right queue. |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 5739 | * 5) stopper completes and stop_one_cpu() returns and the migration |
| 5740 | * is done. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5741 | */ |
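| | /* |
| | * For illustration, a typical caller (hypothetical call site, not part of |
| | * this file) that pins a task to CPU 2 would simply do: |
| | * |
| | * set_cpus_allowed_ptr(p, cpumask_of(2)); |
| | * |
| | * which only takes the stopper path (steps 1-5 above) when p's current |
| | * CPU is not in the new mask. |
| | */ |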
| 5742 | |
| 5743 | /* |
| 5744 | * Change a given task's CPU affinity. Migrate the thread to a |
| 5745 | * proper CPU and schedule it away if the CPU it's executing on |
| 5746 | * is removed from the allowed bitmask. |
| 5747 | * |
| 5748 | * NOTE: the caller must have a valid reference to the task, the |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 5749 | * task must not exit() & deallocate itself prematurely. The |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5750 | * call is not atomic; no spinlocks may be held. |
| 5751 | */ |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 5752 | int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5753 | { |
| 5754 | unsigned long flags; |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 5755 | struct rq *rq; |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 5756 | unsigned int dest_cpu; |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 5757 | int ret = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5758 | |
Peter Zijlstra | 65cc8e4 | 2010-03-25 21:05:16 +0100 | [diff] [blame] | 5759 | /* |
| 5760 | * Serialize against TASK_WAKING so that ttwu() and wake_up_new_task() can |
| 5761 | * drop the rq->lock and still rely on ->cpus_allowed. |
| 5762 | */ |
| 5763 | again: |
| 5764 | while (task_is_waking(p)) |
| 5765 | cpu_relax(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5766 | rq = task_rq_lock(p, &flags); |
Peter Zijlstra | 65cc8e4 | 2010-03-25 21:05:16 +0100 | [diff] [blame] | 5767 | if (task_is_waking(p)) { |
| 5768 | task_rq_unlock(rq, &flags); |
| 5769 | goto again; |
| 5770 | } |
Peter Zijlstra | e291200 | 2009-12-16 18:04:36 +0100 | [diff] [blame] | 5771 | |
Peter Zijlstra | 6ad4c18 | 2009-11-25 13:31:39 +0100 | [diff] [blame] | 5772 | if (!cpumask_intersects(new_mask, cpu_active_mask)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5773 | ret = -EINVAL; |
| 5774 | goto out; |
| 5775 | } |
| 5776 | |
David Rientjes | 9985b0b | 2008-06-05 12:57:11 -0700 | [diff] [blame] | 5777 | if (unlikely((p->flags & PF_THREAD_BOUND) && p != current && |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 5778 | !cpumask_equal(&p->cpus_allowed, new_mask))) { |
David Rientjes | 9985b0b | 2008-06-05 12:57:11 -0700 | [diff] [blame] | 5779 | ret = -EINVAL; |
| 5780 | goto out; |
| 5781 | } |
| 5782 | |
Gregory Haskins | 73fe6aa | 2008-01-25 21:08:07 +0100 | [diff] [blame] | 5783 | if (p->sched_class->set_cpus_allowed) |
Mike Travis | cd8ba7c | 2008-03-26 14:23:49 -0700 | [diff] [blame] | 5784 | p->sched_class->set_cpus_allowed(p, new_mask); |
Gregory Haskins | 73fe6aa | 2008-01-25 21:08:07 +0100 | [diff] [blame] | 5785 | else { |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 5786 | cpumask_copy(&p->cpus_allowed, new_mask); |
| 5787 | p->rt.nr_cpus_allowed = cpumask_weight(new_mask); |
Gregory Haskins | 73fe6aa | 2008-01-25 21:08:07 +0100 | [diff] [blame] | 5788 | } |
| 5789 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5790 | /* Can the task run on the task's current CPU? If so, we're done */ |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 5791 | if (cpumask_test_cpu(task_cpu(p), new_mask)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5792 | goto out; |
| 5793 | |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 5794 | dest_cpu = cpumask_any_and(cpu_active_mask, new_mask); |
| 5795 | if (migrate_task(p, dest_cpu)) { |
| 5796 | struct migration_arg arg = { p, dest_cpu }; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5797 | /* Need help from migration thread: drop lock and wait. */ |
| 5798 | task_rq_unlock(rq, &flags); |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 5799 | stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5800 | tlb_migrate_finish(p->mm); |
| 5801 | return 0; |
| 5802 | } |
| 5803 | out: |
| 5804 | task_rq_unlock(rq, &flags); |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 5805 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5806 | return ret; |
| 5807 | } |
Mike Travis | cd8ba7c | 2008-03-26 14:23:49 -0700 | [diff] [blame] | 5808 | EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5809 | |
| 5810 | /* |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 5811 | * Move a (non-current) task off this cpu, onto the dest cpu. We do |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5812 | * this because either it can't run here any more (set_cpus_allowed() |
| 5813 | * moved it away from this CPU, or the CPU is going down), or because |
| 5814 | * we're attempting to rebalance this task on exec (sched_exec). |
| 5815 | * |
| 5816 | * So we race with normal scheduler movements, but that's OK, as long |
| 5817 | * as the task is no longer on this CPU. |
Kirill Korotaev | efc3081 | 2006-06-27 02:54:32 -0700 | [diff] [blame] | 5818 | * |
| 5819 | * Returns non-zero if task was successfully migrated. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5820 | */ |
Kirill Korotaev | efc3081 | 2006-06-27 02:54:32 -0700 | [diff] [blame] | 5821 | static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5822 | { |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 5823 | struct rq *rq_dest, *rq_src; |
Peter Zijlstra | e291200 | 2009-12-16 18:04:36 +0100 | [diff] [blame] | 5824 | int ret = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5825 | |
Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 5826 | if (unlikely(!cpu_active(dest_cpu))) |
Kirill Korotaev | efc3081 | 2006-06-27 02:54:32 -0700 | [diff] [blame] | 5827 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5828 | |
| 5829 | rq_src = cpu_rq(src_cpu); |
| 5830 | rq_dest = cpu_rq(dest_cpu); |
| 5831 | |
| 5832 | double_rq_lock(rq_src, rq_dest); |
| 5833 | /* Already moved. */ |
| 5834 | if (task_cpu(p) != src_cpu) |
Linus Torvalds | b1e3873 | 2008-07-10 11:25:03 -0700 | [diff] [blame] | 5835 | goto done; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5836 | /* Affinity changed (again). */ |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 5837 | if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) |
Linus Torvalds | b1e3873 | 2008-07-10 11:25:03 -0700 | [diff] [blame] | 5838 | goto fail; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5839 | |
Peter Zijlstra | e291200 | 2009-12-16 18:04:36 +0100 | [diff] [blame] | 5840 | /* |
| 5841 | * If we're not on a rq, the next wake-up will ensure we're |
| 5842 | * placed properly. |
| 5843 | */ |
| 5844 | if (p->se.on_rq) { |
Ingo Molnar | 2e1cb74 | 2007-08-09 11:16:49 +0200 | [diff] [blame] | 5845 | deactivate_task(rq_src, p, 0); |
Peter Zijlstra | e291200 | 2009-12-16 18:04:36 +0100 | [diff] [blame] | 5846 | set_task_cpu(p, dest_cpu); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 5847 | activate_task(rq_dest, p, 0); |
Peter Zijlstra | 15afe09 | 2008-09-20 23:38:02 +0200 | [diff] [blame] | 5848 | check_preempt_curr(rq_dest, p, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5849 | } |
Linus Torvalds | b1e3873 | 2008-07-10 11:25:03 -0700 | [diff] [blame] | 5850 | done: |
Kirill Korotaev | efc3081 | 2006-06-27 02:54:32 -0700 | [diff] [blame] | 5851 | ret = 1; |
Linus Torvalds | b1e3873 | 2008-07-10 11:25:03 -0700 | [diff] [blame] | 5852 | fail: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5853 | double_rq_unlock(rq_src, rq_dest); |
Kirill Korotaev | efc3081 | 2006-06-27 02:54:32 -0700 | [diff] [blame] | 5854 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5855 | } |
| 5856 | |
| 5857 | /* |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 5858 | * migration_cpu_stop - this will be executed by a highprio stopper thread |
| 5859 | * and performs thread migration by bumping the thread off its CPU |
| 5860 | * and then 'pushing' it onto another runqueue. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5861 | */ |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 5862 | static int migration_cpu_stop(void *data) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5863 | { |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 5864 | struct migration_arg *arg = data; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5865 | |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 5866 | /* |
| 5867 | * The original target cpu might have gone down and we might |
| 5868 | * be on another cpu but it doesn't matter. |
| 5869 | */ |
| 5870 | local_irq_disable(); |
| 5871 | __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu); |
| 5872 | local_irq_enable(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5873 | return 0; |
| 5874 | } |
| 5875 | |
| 5876 | #ifdef CONFIG_HOTPLUG_CPU |
Kirill Korotaev | 054b910 | 2006-12-10 02:20:11 -0800 | [diff] [blame] | 5877 | /* |
Robert P. J. Day | 3a4fa0a | 2007-10-19 23:10:43 +0200 | [diff] [blame] | 5878 | * Figure out where a task on a dead CPU should go; use force if necessary. |
Kirill Korotaev | 054b910 | 2006-12-10 02:20:11 -0800 | [diff] [blame] | 5879 | */ |
Oleg Nesterov | 6a1bdc1 | 2010-03-15 10:10:23 +0100 | [diff] [blame] | 5880 | void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5881 | { |
Oleg Nesterov | 1445c08 | 2010-03-15 10:10:10 +0100 | [diff] [blame] | 5882 | struct rq *rq = cpu_rq(dead_cpu); |
| 5883 | int needs_cpu, uninitialized_var(dest_cpu); |
| 5884 | unsigned long flags; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5885 | |
Oleg Nesterov | 1445c08 | 2010-03-15 10:10:10 +0100 | [diff] [blame] | 5886 | local_irq_save(flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5887 | |
Oleg Nesterov | 1445c08 | 2010-03-15 10:10:10 +0100 | [diff] [blame] | 5888 | raw_spin_lock(&rq->lock); |
| 5889 | needs_cpu = (task_cpu(p) == dead_cpu) && (p->state != TASK_WAKING); |
| 5890 | if (needs_cpu) |
| 5891 | dest_cpu = select_fallback_rq(dead_cpu, p); |
| 5892 | raw_spin_unlock(&rq->lock); |
Oleg Nesterov | c1804d5 | 2010-03-15 10:10:14 +0100 | [diff] [blame] | 5893 | /* |
| 5894 | * It can only fail if we race with set_cpus_allowed(), |
| 5895 | * in which case the racer should migrate the task anyway. |
| 5896 | */ |
Oleg Nesterov | 1445c08 | 2010-03-15 10:10:10 +0100 | [diff] [blame] | 5897 | if (needs_cpu) |
Oleg Nesterov | c1804d5 | 2010-03-15 10:10:14 +0100 | [diff] [blame] | 5898 | __migrate_task(p, dead_cpu, dest_cpu); |
Oleg Nesterov | 1445c08 | 2010-03-15 10:10:10 +0100 | [diff] [blame] | 5899 | local_irq_restore(flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5900 | } |
| 5901 | |
| 5902 | /* |
| 5903 | * While a dead CPU has no uninterruptible tasks queued at this point, |
| 5904 | * it might still have a nonzero ->nr_uninterruptible counter, because |
| 5905 | * for performance reasons the counter is not strictly tracking tasks to |
| 5906 | * their home CPUs. So we just add the counter to another CPU's counter, |
| 5907 | * to keep the global sum constant after CPU-down: |
| 5908 | */ |
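| | /* |
| | * Illustrative numbers: if the dead CPU's runqueue still shows |
| | * nr_uninterruptible == 3, those 3 are added to some online CPU's counter, |
| | * so the system-wide sum seen by the load-average code is unchanged. |
| | */ |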
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 5909 | static void migrate_nr_uninterruptible(struct rq *rq_src) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5910 | { |
Peter Zijlstra | 6ad4c18 | 2009-11-25 13:31:39 +0100 | [diff] [blame] | 5911 | struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5912 | unsigned long flags; |
| 5913 | |
| 5914 | local_irq_save(flags); |
| 5915 | double_rq_lock(rq_src, rq_dest); |
| 5916 | rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible; |
| 5917 | rq_src->nr_uninterruptible = 0; |
| 5918 | double_rq_unlock(rq_src, rq_dest); |
| 5919 | local_irq_restore(flags); |
| 5920 | } |
| 5921 | |
| 5922 | /* Run through task list and migrate tasks from the dead cpu. */ |
| 5923 | static void migrate_live_tasks(int src_cpu) |
| 5924 | { |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 5925 | struct task_struct *p, *t; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5926 | |
Oleg Nesterov | f7b4cdd | 2007-10-16 23:30:56 -0700 | [diff] [blame] | 5927 | read_lock(&tasklist_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5928 | |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 5929 | do_each_thread(t, p) { |
| 5930 | if (p == current) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5931 | continue; |
| 5932 | |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 5933 | if (task_cpu(p) == src_cpu) |
| 5934 | move_task_off_dead_cpu(src_cpu, p); |
| 5935 | } while_each_thread(t, p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5936 | |
Oleg Nesterov | f7b4cdd | 2007-10-16 23:30:56 -0700 | [diff] [blame] | 5937 | read_unlock(&tasklist_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5938 | } |
| 5939 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 5940 | /* |
| 5941 | * Schedules idle task to be the next runnable task on current CPU. |
Dmitry Adamushko | 94bc9a7 | 2007-11-15 20:57:40 +0100 | [diff] [blame] | 5942 | * It does so by boosting its priority to the highest possible. |
| 5943 | * Used by CPU offline code. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5944 | */ |
| 5945 | void sched_idle_next(void) |
| 5946 | { |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 5947 | int this_cpu = smp_processor_id(); |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 5948 | struct rq *rq = cpu_rq(this_cpu); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5949 | struct task_struct *p = rq->idle; |
| 5950 | unsigned long flags; |
| 5951 | |
| 5952 | /* cpu has to be offline */ |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 5953 | BUG_ON(cpu_online(this_cpu)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5954 | |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 5955 | /* |
| 5956 | * Strictly not necessary since the rest of the CPUs are stopped by now |
| 5957 | * and interrupts are disabled on the current cpu. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5958 | */ |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 5959 | raw_spin_lock_irqsave(&rq->lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5960 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 5961 | __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1); |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 5962 | |
Dmitry Adamushko | 94bc9a7 | 2007-11-15 20:57:40 +0100 | [diff] [blame] | 5963 | activate_task(rq, p, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5964 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 5965 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5966 | } |
| 5967 | |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 5968 | /* |
| 5969 | * Ensures that the idle task is using init_mm right before its cpu goes |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5970 | * offline. |
| 5971 | */ |
| 5972 | void idle_task_exit(void) |
| 5973 | { |
| 5974 | struct mm_struct *mm = current->active_mm; |
| 5975 | |
| 5976 | BUG_ON(cpu_online(smp_processor_id())); |
| 5977 | |
| 5978 | if (mm != &init_mm) |
| 5979 | switch_mm(mm, &init_mm, current); |
| 5980 | mmdrop(mm); |
| 5981 | } |
| 5982 | |
Kirill Korotaev | 054b910 | 2006-12-10 02:20:11 -0800 | [diff] [blame] | 5983 | /* called under rq->lock with disabled interrupts */ |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 5984 | static void migrate_dead(unsigned int dead_cpu, struct task_struct *p) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5985 | { |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 5986 | struct rq *rq = cpu_rq(dead_cpu); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5987 | |
| 5988 | /* Must be exiting, otherwise would be on tasklist. */ |
Eugene Teo | 270f722 | 2007-10-18 23:40:38 -0700 | [diff] [blame] | 5989 | BUG_ON(!p->exit_state); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5990 | |
| 5991 | /* Cannot have done final schedule yet: would have vanished. */ |
Oleg Nesterov | c394cc9 | 2006-09-29 02:01:11 -0700 | [diff] [blame] | 5992 | BUG_ON(p->state == TASK_DEAD); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5993 | |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 5994 | get_task_struct(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5995 | |
| 5996 | /* |
| 5997 | * Drop lock around migration; if someone else moves it, |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 5998 | * that's OK. No task can be added to this CPU, so iteration is |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5999 | * fine. |
| 6000 | */ |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 6001 | raw_spin_unlock_irq(&rq->lock); |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 6002 | move_task_off_dead_cpu(dead_cpu, p); |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 6003 | raw_spin_lock_irq(&rq->lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6004 | |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 6005 | put_task_struct(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6006 | } |
| 6007 | |
| 6008 | /* release_task() removes task from tasklist, so we won't find dead tasks. */ |
| 6009 | static void migrate_dead_tasks(unsigned int dead_cpu) |
| 6010 | { |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 6011 | struct rq *rq = cpu_rq(dead_cpu); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 6012 | struct task_struct *next; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6013 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 6014 | for ( ; ; ) { |
| 6015 | if (!rq->nr_running) |
| 6016 | break; |
Wang Chen | b67802e | 2009-03-02 13:55:26 +0800 | [diff] [blame] | 6017 | next = pick_next_task(rq); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 6018 | if (!next) |
| 6019 | break; |
Dmitry Adamushko | 79c5379 | 2008-06-29 00:16:56 +0200 | [diff] [blame] | 6020 | next->sched_class->put_prev_task(rq, next); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 6021 | migrate_dead(dead_cpu, next); |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6022 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6023 | } |
| 6024 | } |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 6025 | |
| 6026 | /* |
| 6027 | * remove the tasks which were accounted by rq from calc_load_tasks. |
| 6028 | */ |
| 6029 | static void calc_global_load_remove(struct rq *rq) |
| 6030 | { |
| 6031 | atomic_long_sub(rq->calc_load_active, &calc_load_tasks); |
Thomas Gleixner | a468d38 | 2009-07-17 14:15:46 +0200 | [diff] [blame] | 6032 | rq->calc_load_active = 0; |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 6033 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6034 | #endif /* CONFIG_HOTPLUG_CPU */ |
| 6035 | |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6036 | #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL) |
| 6037 | |
| 6038 | static struct ctl_table sd_ctl_dir[] = { |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 6039 | { |
| 6040 | .procname = "sched_domain", |
Eric W. Biederman | c57baf1 | 2007-08-23 15:18:02 +0200 | [diff] [blame] | 6041 | .mode = 0555, |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 6042 | }, |
Eric W. Biederman | 5699230 | 2009-11-05 15:38:40 -0800 | [diff] [blame] | 6043 | {} |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6044 | }; |
| 6045 | |
| 6046 | static struct ctl_table sd_ctl_root[] = { |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 6047 | { |
| 6048 | .procname = "kernel", |
Eric W. Biederman | c57baf1 | 2007-08-23 15:18:02 +0200 | [diff] [blame] | 6049 | .mode = 0555, |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 6050 | .child = sd_ctl_dir, |
| 6051 | }, |
Eric W. Biederman | 5699230 | 2009-11-05 15:38:40 -0800 | [diff] [blame] | 6052 | {} |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6053 | }; |
| 6054 | |
| 6055 | static struct ctl_table *sd_alloc_ctl_entry(int n) |
| 6056 | { |
| 6057 | struct ctl_table *entry = |
Milton Miller | 5cf9f06 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 6058 | kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL); |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6059 | |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6060 | return entry; |
| 6061 | } |
| 6062 | |
Milton Miller | 6382bc9 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 6063 | static void sd_free_ctl_entry(struct ctl_table **tablep) |
| 6064 | { |
Milton Miller | cd790076 | 2007-10-17 16:55:11 +0200 | [diff] [blame] | 6065 | struct ctl_table *entry; |
Milton Miller | 6382bc9 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 6066 | |
Milton Miller | cd790076 | 2007-10-17 16:55:11 +0200 | [diff] [blame] | 6067 | /* |
| 6068 | * In the intermediate directories, both the child directory and |
| 6069 | * procname are dynamically allocated and could fail but the mode |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 6070 | * will always be set. In the lowest directory the names are |
Milton Miller | cd790076 | 2007-10-17 16:55:11 +0200 | [diff] [blame] | 6071 | * static strings and all have proc handlers. |
| 6072 | */ |
| 6073 | for (entry = *tablep; entry->mode; entry++) { |
Milton Miller | 6382bc9 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 6074 | if (entry->child) |
| 6075 | sd_free_ctl_entry(&entry->child); |
Milton Miller | cd790076 | 2007-10-17 16:55:11 +0200 | [diff] [blame] | 6076 | if (entry->proc_handler == NULL) |
| 6077 | kfree(entry->procname); |
| 6078 | } |
Milton Miller | 6382bc9 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 6079 | |
| 6080 | kfree(*tablep); |
| 6081 | *tablep = NULL; |
| 6082 | } |
| 6083 | |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6084 | static void |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 6085 | set_table_entry(struct ctl_table *entry, |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6086 | const char *procname, void *data, int maxlen, |
| 6087 | mode_t mode, proc_handler *proc_handler) |
| 6088 | { |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6089 | entry->procname = procname; |
| 6090 | entry->data = data; |
| 6091 | entry->maxlen = maxlen; |
| 6092 | entry->mode = mode; |
| 6093 | entry->proc_handler = proc_handler; |
| 6094 | } |
| 6095 | |
| 6096 | static struct ctl_table * |
| 6097 | sd_alloc_ctl_domain_table(struct sched_domain *sd) |
| 6098 | { |
Ingo Molnar | a5d8c34 | 2008-10-09 11:35:51 +0200 | [diff] [blame] | 6099 | struct ctl_table *table = sd_alloc_ctl_entry(13); |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6100 | |
Milton Miller | ad1cdc1 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 6101 | if (table == NULL) |
| 6102 | return NULL; |
| 6103 | |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 6104 | set_table_entry(&table[0], "min_interval", &sd->min_interval, |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6105 | sizeof(long), 0644, proc_doulongvec_minmax); |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 6106 | set_table_entry(&table[1], "max_interval", &sd->max_interval, |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6107 | sizeof(long), 0644, proc_doulongvec_minmax); |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 6108 | set_table_entry(&table[2], "busy_idx", &sd->busy_idx, |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6109 | sizeof(int), 0644, proc_dointvec_minmax); |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 6110 | set_table_entry(&table[3], "idle_idx", &sd->idle_idx, |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6111 | sizeof(int), 0644, proc_dointvec_minmax); |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 6112 | set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx, |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6113 | sizeof(int), 0644, proc_dointvec_minmax); |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 6114 | set_table_entry(&table[5], "wake_idx", &sd->wake_idx, |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6115 | sizeof(int), 0644, proc_dointvec_minmax); |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 6116 | set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx, |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6117 | sizeof(int), 0644, proc_dointvec_minmax); |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 6118 | set_table_entry(&table[7], "busy_factor", &sd->busy_factor, |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6119 | sizeof(int), 0644, proc_dointvec_minmax); |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 6120 | set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct, |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6121 | sizeof(int), 0644, proc_dointvec_minmax); |
Zou Nan hai | ace8b3d | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 6122 | set_table_entry(&table[9], "cache_nice_tries", |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6123 | &sd->cache_nice_tries, |
| 6124 | sizeof(int), 0644, proc_dointvec_minmax); |
Zou Nan hai | ace8b3d | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 6125 | set_table_entry(&table[10], "flags", &sd->flags, |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6126 | sizeof(int), 0644, proc_dointvec_minmax); |
Ingo Molnar | a5d8c34 | 2008-10-09 11:35:51 +0200 | [diff] [blame] | 6127 | set_table_entry(&table[11], "name", sd->name, |
| 6128 | CORENAME_MAX_SIZE, 0444, proc_dostring); |
| 6129 | /* &table[12] is terminator */ |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6130 | |
| 6131 | return table; |
| 6132 | } |
| 6133 | |
Ingo Molnar | 9a4e715 | 2007-11-28 15:52:56 +0100 | [diff] [blame] | 6134 | static ctl_table *sd_alloc_ctl_cpu_table(int cpu) |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6135 | { |
| 6136 | struct ctl_table *entry, *table; |
| 6137 | struct sched_domain *sd; |
| 6138 | int domain_num = 0, i; |
| 6139 | char buf[32]; |
| 6140 | |
| 6141 | for_each_domain(cpu, sd) |
| 6142 | domain_num++; |
| 6143 | entry = table = sd_alloc_ctl_entry(domain_num + 1); |
Milton Miller | ad1cdc1 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 6144 | if (table == NULL) |
| 6145 | return NULL; |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6146 | |
| 6147 | i = 0; |
| 6148 | for_each_domain(cpu, sd) { |
| 6149 | snprintf(buf, 32, "domain%d", i); |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6150 | entry->procname = kstrdup(buf, GFP_KERNEL); |
Eric W. Biederman | c57baf1 | 2007-08-23 15:18:02 +0200 | [diff] [blame] | 6151 | entry->mode = 0555; |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6152 | entry->child = sd_alloc_ctl_domain_table(sd); |
| 6153 | entry++; |
| 6154 | i++; |
| 6155 | } |
| 6156 | return table; |
| 6157 | } |
| 6158 | |
| 6159 | static struct ctl_table_header *sd_sysctl_header; |
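| | /* |
| | * The tables built above appear under /proc/sys/kernel/sched_domain/, one |
| | * directory per CPU and one sub-directory per scheduling domain, e.g. |
| | * (sketch of the resulting layout, entry names taken from the table above): |
| | * |
| | * /proc/sys/kernel/sched_domain/cpu0/domain0/min_interval |
| | * /proc/sys/kernel/sched_domain/cpu0/domain0/flags |
| | * /proc/sys/kernel/sched_domain/cpu0/domain1/... |
| | */ |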
Milton Miller | 6382bc9 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 6160 | static void register_sched_domain_sysctl(void) |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6161 | { |
Peter Zijlstra | 6ad4c18 | 2009-11-25 13:31:39 +0100 | [diff] [blame] | 6162 | int i, cpu_num = num_possible_cpus(); |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6163 | struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1); |
| 6164 | char buf[32]; |
| 6165 | |
Milton Miller | 7378547 | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6166 | WARN_ON(sd_ctl_dir[0].child); |
| 6167 | sd_ctl_dir[0].child = entry; |
| 6168 | |
Milton Miller | ad1cdc1 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 6169 | if (entry == NULL) |
| 6170 | return; |
| 6171 | |
Peter Zijlstra | 6ad4c18 | 2009-11-25 13:31:39 +0100 | [diff] [blame] | 6172 | for_each_possible_cpu(i) { |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6173 | snprintf(buf, 32, "cpu%d", i); |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6174 | entry->procname = kstrdup(buf, GFP_KERNEL); |
Eric W. Biederman | c57baf1 | 2007-08-23 15:18:02 +0200 | [diff] [blame] | 6175 | entry->mode = 0555; |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6176 | entry->child = sd_alloc_ctl_cpu_table(i); |
Milton Miller | 97b6ea7 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 6177 | entry++; |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6178 | } |
Milton Miller | 7378547 | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6179 | |
| 6180 | WARN_ON(sd_sysctl_header); |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6181 | sd_sysctl_header = register_sysctl_table(sd_ctl_root); |
| 6182 | } |
Milton Miller | 6382bc9 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 6183 | |
Milton Miller | 7378547 | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6184 | /* may be called multiple times per register */ |
Milton Miller | 6382bc9 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 6185 | static void unregister_sched_domain_sysctl(void) |
| 6186 | { |
Milton Miller | 7378547 | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6187 | if (sd_sysctl_header) |
| 6188 | unregister_sysctl_table(sd_sysctl_header); |
Milton Miller | 6382bc9 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 6189 | sd_sysctl_header = NULL; |
Milton Miller | 7378547 | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6190 | if (sd_ctl_dir[0].child) |
| 6191 | sd_free_ctl_entry(&sd_ctl_dir[0].child); |
Milton Miller | 6382bc9 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 6192 | } |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6193 | #else |
Milton Miller | 6382bc9 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 6194 | static void register_sched_domain_sysctl(void) |
| 6195 | { |
| 6196 | } |
| 6197 | static void unregister_sched_domain_sysctl(void) |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6198 | { |
| 6199 | } |
| 6200 | #endif |
| 6201 | |
Gregory Haskins | 1f11eb6a | 2008-06-04 15:04:05 -0400 | [diff] [blame] | 6202 | static void set_rq_online(struct rq *rq) |
| 6203 | { |
| 6204 | if (!rq->online) { |
| 6205 | const struct sched_class *class; |
| 6206 | |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6207 | cpumask_set_cpu(rq->cpu, rq->rd->online); |
Gregory Haskins | 1f11eb6a | 2008-06-04 15:04:05 -0400 | [diff] [blame] | 6208 | rq->online = 1; |
| 6209 | |
| 6210 | for_each_class(class) { |
| 6211 | if (class->rq_online) |
| 6212 | class->rq_online(rq); |
| 6213 | } |
| 6214 | } |
| 6215 | } |
| 6216 | |
| 6217 | static void set_rq_offline(struct rq *rq) |
| 6218 | { |
| 6219 | if (rq->online) { |
| 6220 | const struct sched_class *class; |
| 6221 | |
| 6222 | for_each_class(class) { |
| 6223 | if (class->rq_offline) |
| 6224 | class->rq_offline(rq); |
| 6225 | } |
| 6226 | |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6227 | cpumask_clear_cpu(rq->cpu, rq->rd->online); |
Gregory Haskins | 1f11eb6a | 2008-06-04 15:04:05 -0400 | [diff] [blame] | 6228 | rq->online = 0; |
| 6229 | } |
| 6230 | } |
| 6231 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6232 | /* |
| 6233 | * migration_call - callback that gets triggered when a CPU is added. |
| 6234 | * Here we can do any per-runqueue setup the new CPU needs. |
| 6235 | */ |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 6236 | static int __cpuinit |
| 6237 | migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6238 | { |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 6239 | int cpu = (long)hcpu; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6240 | unsigned long flags; |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 6241 | struct rq *rq = cpu_rq(cpu); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6242 | |
| 6243 | switch (action) { |
Gautham R Shenoy | 5be9361 | 2007-05-09 02:34:04 -0700 | [diff] [blame] | 6244 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6245 | case CPU_UP_PREPARE: |
Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 6246 | case CPU_UP_PREPARE_FROZEN: |
Thomas Gleixner | a468d38 | 2009-07-17 14:15:46 +0200 | [diff] [blame] | 6247 | rq->calc_load_update = calc_load_update; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6248 | break; |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 6249 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6250 | case CPU_ONLINE: |
Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 6251 | case CPU_ONLINE_FROZEN: |
Gregory Haskins | 1f94ef5 | 2008-03-10 16:52:41 -0400 | [diff] [blame] | 6252 | /* Update our root-domain */ |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 6253 | raw_spin_lock_irqsave(&rq->lock, flags); |
Gregory Haskins | 1f94ef5 | 2008-03-10 16:52:41 -0400 | [diff] [blame] | 6254 | if (rq->rd) { |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6255 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); |
Gregory Haskins | 1f11eb6a | 2008-06-04 15:04:05 -0400 | [diff] [blame] | 6256 | |
| 6257 | set_rq_online(rq); |
Gregory Haskins | 1f94ef5 | 2008-03-10 16:52:41 -0400 | [diff] [blame] | 6258 | } |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 6259 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6260 | break; |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 6261 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6262 | #ifdef CONFIG_HOTPLUG_CPU |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6263 | case CPU_DEAD: |
Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 6264 | case CPU_DEAD_FROZEN: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6265 | migrate_live_tasks(cpu); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6266 | /* Idle task back to normal (off runqueue, low prio) */ |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 6267 | raw_spin_lock_irq(&rq->lock); |
Ingo Molnar | 2e1cb74 | 2007-08-09 11:16:49 +0200 | [diff] [blame] | 6268 | deactivate_task(rq, rq->idle, 0); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 6269 | __setscheduler(rq, rq->idle, SCHED_NORMAL, 0); |
| 6270 | rq->idle->sched_class = &idle_sched_class; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6271 | migrate_dead_tasks(cpu); |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 6272 | raw_spin_unlock_irq(&rq->lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6273 | migrate_nr_uninterruptible(rq); |
| 6274 | BUG_ON(rq->nr_running != 0); |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 6275 | calc_global_load_remove(rq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6276 | break; |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6277 | |
Gregory Haskins | 08f503b | 2008-03-10 17:59:11 -0400 | [diff] [blame] | 6278 | case CPU_DYING: |
| 6279 | case CPU_DYING_FROZEN: |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6280 | /* Update our root-domain */ |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 6281 | raw_spin_lock_irqsave(&rq->lock, flags); |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6282 | if (rq->rd) { |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6283 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); |
Gregory Haskins | 1f11eb6a | 2008-06-04 15:04:05 -0400 | [diff] [blame] | 6284 | set_rq_offline(rq); |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6285 | } |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 6286 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6287 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6288 | #endif |
| 6289 | } |
| 6290 | return NOTIFY_OK; |
| 6291 | } |
| 6292 | |
Paul Mackerras | f38b082 | 2009-06-02 21:05:16 +1000 | [diff] [blame] | 6293 | /* |
| 6294 | * Register at high priority so that task migration (migrate_live_tasks) |
| 6295 | * happens before everything else. This has to be lower priority than |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6296 | * the notifier in the perf_event subsystem, though. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6297 | */ |
Chandra Seetharaman | 26c2143 | 2006-06-27 02:54:10 -0700 | [diff] [blame] | 6298 | static struct notifier_block __cpuinitdata migration_notifier = { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6299 | .notifier_call = migration_call, |
Tejun Heo | 50a323b | 2010-06-08 21:40:36 +0200 | [diff] [blame] | 6300 | .priority = CPU_PRI_MIGRATION, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6301 | }; |
| 6302 | |
Tejun Heo | 3a101d0 | 2010-06-08 21:40:36 +0200 | [diff] [blame] | 6303 | static int __cpuinit sched_cpu_active(struct notifier_block *nfb, |
| 6304 | unsigned long action, void *hcpu) |
| 6305 | { |
| 6306 | switch (action & ~CPU_TASKS_FROZEN) { |
| 6307 | case CPU_ONLINE: |
| 6308 | case CPU_DOWN_FAILED: |
| 6309 | set_cpu_active((long)hcpu, true); |
| 6310 | return NOTIFY_OK; |
| 6311 | default: |
| 6312 | return NOTIFY_DONE; |
| 6313 | } |
| 6314 | } |
| 6315 | |
| 6316 | static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb, |
| 6317 | unsigned long action, void *hcpu) |
| 6318 | { |
| 6319 | switch (action & ~CPU_TASKS_FROZEN) { |
| 6320 | case CPU_DOWN_PREPARE: |
| 6321 | set_cpu_active((long)hcpu, false); |
| 6322 | return NOTIFY_OK; |
| 6323 | default: |
| 6324 | return NOTIFY_DONE; |
| 6325 | } |
| 6326 | } |
| 6327 | |
Eduard - Gabriel Munteanu | 7babe8d | 2008-07-25 19:45:11 -0700 | [diff] [blame] | 6328 | static int __init migration_init(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6329 | { |
| 6330 | void *cpu = (void *)(long)smp_processor_id(); |
Akinobu Mita | 07dccf3 | 2006-09-29 02:00:22 -0700 | [diff] [blame] | 6331 | int err; |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 6332 | |
Tejun Heo | 3a101d0 | 2010-06-08 21:40:36 +0200 | [diff] [blame] | 6333 | /* Initialize migration for the boot CPU */ |
Akinobu Mita | 07dccf3 | 2006-09-29 02:00:22 -0700 | [diff] [blame] | 6334 | err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu); |
| 6335 | BUG_ON(err == NOTIFY_BAD); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6336 | migration_call(&migration_notifier, CPU_ONLINE, cpu); |
| 6337 | register_cpu_notifier(&migration_notifier); |
Eduard - Gabriel Munteanu | 7babe8d | 2008-07-25 19:45:11 -0700 | [diff] [blame] | 6338 | |
Tejun Heo | 3a101d0 | 2010-06-08 21:40:36 +0200 | [diff] [blame] | 6339 | /* Register cpu active notifiers */ |
| 6340 | cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE); |
| 6341 | cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE); |
| 6342 | |
Thomas Gleixner | a004cd4 | 2009-07-21 09:54:05 +0200 | [diff] [blame] | 6343 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6344 | } |
Eduard - Gabriel Munteanu | 7babe8d | 2008-07-25 19:45:11 -0700 | [diff] [blame] | 6345 | early_initcall(migration_init); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6346 | #endif |
| 6347 | |
| 6348 | #ifdef CONFIG_SMP |
Christoph Lameter | 476f353 | 2007-05-06 14:48:58 -0700 | [diff] [blame] | 6349 | |
Ingo Molnar | 3e9830d | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 6350 | #ifdef CONFIG_SCHED_DEBUG |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6351 | |
Mike Travis | f663011 | 2009-11-17 18:22:15 -0600 | [diff] [blame] | 6352 | static __read_mostly int sched_domain_debug_enabled; |
| 6353 | |
| 6354 | static int __init sched_domain_debug_setup(char *str) |
| 6355 | { |
| 6356 | sched_domain_debug_enabled = 1; |
| 6357 | |
| 6358 | return 0; |
| 6359 | } |
| 6360 | early_param("sched_debug", sched_domain_debug_setup); |
| 6361 | |
Mike Travis | 7c16ec5 | 2008-04-04 18:11:11 -0700 | [diff] [blame] | 6362 | static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6363 | struct cpumask *groupmask) |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6364 | { |
| 6365 | struct sched_group *group = sd->groups; |
Mike Travis | 434d53b | 2008-04-04 18:11:04 -0700 | [diff] [blame] | 6366 | char str[256]; |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6367 | |
Rusty Russell | 968ea6d | 2008-12-13 21:55:51 +1030 | [diff] [blame] | 6368 | cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd)); |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6369 | cpumask_clear(groupmask); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6370 | |
| 6371 | printk(KERN_DEBUG "%*s domain %d: ", level, "", level); |
| 6372 | |
| 6373 | if (!(sd->flags & SD_LOAD_BALANCE)) { |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6374 | printk("does not load-balance\n"); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6375 | if (sd->parent) |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6376 | printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain" |
| 6377 | " has parent"); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6378 | return -1; |
| 6379 | } |
| 6380 | |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6381 | printk(KERN_CONT "span %s level %s\n", str, sd->name); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6382 | |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6383 | if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6384 | printk(KERN_ERR "ERROR: domain->span does not contain " |
| 6385 | "CPU%d\n", cpu); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6386 | } |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6387 | if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) { |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6388 | printk(KERN_ERR "ERROR: domain->groups does not contain" |
| 6389 | " CPU%d\n", cpu); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6390 | } |
| 6391 | |
| 6392 | printk(KERN_DEBUG "%*s groups:", level + 1, ""); |
| 6393 | do { |
| 6394 | if (!group) { |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6395 | printk("\n"); |
| 6396 | printk(KERN_ERR "ERROR: group is NULL\n"); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6397 | break; |
| 6398 | } |
| 6399 | |
Peter Zijlstra | 18a3885 | 2009-09-01 10:34:39 +0200 | [diff] [blame] | 6400 | if (!group->cpu_power) { |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6401 | printk(KERN_CONT "\n"); |
| 6402 | printk(KERN_ERR "ERROR: domain->cpu_power not " |
| 6403 | "set\n"); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6404 | break; |
| 6405 | } |
| 6406 | |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6407 | if (!cpumask_weight(sched_group_cpus(group))) { |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6408 | printk(KERN_CONT "\n"); |
| 6409 | printk(KERN_ERR "ERROR: empty group\n"); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6410 | break; |
| 6411 | } |
| 6412 | |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6413 | if (cpumask_intersects(groupmask, sched_group_cpus(group))) { |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6414 | printk(KERN_CONT "\n"); |
| 6415 | printk(KERN_ERR "ERROR: repeated CPUs\n"); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6416 | break; |
| 6417 | } |
| 6418 | |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6419 | cpumask_or(groupmask, groupmask, sched_group_cpus(group)); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6420 | |
Rusty Russell | 968ea6d | 2008-12-13 21:55:51 +1030 | [diff] [blame] | 6421 | cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group)); |
Gautham R Shenoy | 381512c | 2009-04-14 09:09:36 +0530 | [diff] [blame] | 6422 | |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6423 | printk(KERN_CONT " %s", str); |
Peter Zijlstra | 18a3885 | 2009-09-01 10:34:39 +0200 | [diff] [blame] | 6424 | if (group->cpu_power != SCHED_LOAD_SCALE) { |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6425 | printk(KERN_CONT " (cpu_power = %d)", |
| 6426 | group->cpu_power); |
Gautham R Shenoy | 381512c | 2009-04-14 09:09:36 +0530 | [diff] [blame] | 6427 | } |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6428 | |
| 6429 | group = group->next; |
| 6430 | } while (group != sd->groups); |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6431 | printk(KERN_CONT "\n"); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6432 | |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6433 | if (!cpumask_equal(sched_domain_span(sd), groupmask)) |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6434 | printk(KERN_ERR "ERROR: groups don't span domain->span\n"); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6435 | |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6436 | if (sd->parent && |
| 6437 | !cpumask_subset(groupmask, sched_domain_span(sd->parent))) |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6438 | printk(KERN_ERR "ERROR: parent span is not a superset " |
| 6439 | "of domain->span\n"); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6440 | return 0; |
| 6441 | } |
| 6442 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6443 | static void sched_domain_debug(struct sched_domain *sd, int cpu) |
| 6444 | { |
Rusty Russell | d5dd3db | 2008-11-25 02:35:12 +1030 | [diff] [blame] | 6445 | cpumask_var_t groupmask; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6446 | int level = 0; |
| 6447 | |
Mike Travis | f663011 | 2009-11-17 18:22:15 -0600 | [diff] [blame] | 6448 | if (!sched_domain_debug_enabled) |
| 6449 | return; |
| 6450 | |
Nick Piggin | 41c7ce9 | 2005-06-25 14:57:24 -0700 | [diff] [blame] | 6451 | if (!sd) { |
| 6452 | printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); |
| 6453 | return; |
| 6454 | } |
| 6455 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6456 | printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); |
| 6457 | |
Rusty Russell | d5dd3db | 2008-11-25 02:35:12 +1030 | [diff] [blame] | 6458 | if (!alloc_cpumask_var(&groupmask, GFP_KERNEL)) { |
Mike Travis | 7c16ec5 | 2008-04-04 18:11:11 -0700 | [diff] [blame] | 6459 | printk(KERN_DEBUG "Cannot load-balance (out of memory)\n"); |
| 6460 | return; |
| 6461 | } |
| 6462 | |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6463 | for (;;) { |
Mike Travis | 7c16ec5 | 2008-04-04 18:11:11 -0700 | [diff] [blame] | 6464 | if (sched_domain_debug_one(sd, cpu, level, groupmask)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6465 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6466 | level++; |
| 6467 | sd = sd->parent; |
Miguel Ojeda Sandonis | 33859f7 | 2006-12-10 02:20:38 -0800 | [diff] [blame] | 6468 | if (!sd) |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6469 | break; |
| 6470 | } |
Rusty Russell | d5dd3db | 2008-11-25 02:35:12 +1030 | [diff] [blame] | 6471 | free_cpumask_var(groupmask); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6472 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 6473 | #else /* !CONFIG_SCHED_DEBUG */ |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 6474 | # define sched_domain_debug(sd, cpu) do { } while (0) |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 6475 | #endif /* CONFIG_SCHED_DEBUG */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6476 | |
Dinakar Guniguntala | 1a20ff2 | 2005-06-25 14:57:33 -0700 | [diff] [blame] | 6477 | static int sd_degenerate(struct sched_domain *sd) |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 6478 | { |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6479 | if (cpumask_weight(sched_domain_span(sd)) == 1) |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 6480 | return 1; |
| 6481 | |
| 6482 | /* Following flags need at least 2 groups */ |
| 6483 | if (sd->flags & (SD_LOAD_BALANCE | |
| 6484 | SD_BALANCE_NEWIDLE | |
| 6485 | SD_BALANCE_FORK | |
Siddha, Suresh B | 89c4710 | 2006-10-03 01:14:09 -0700 | [diff] [blame] | 6486 | SD_BALANCE_EXEC | |
| 6487 | SD_SHARE_CPUPOWER | |
| 6488 | SD_SHARE_PKG_RESOURCES)) { |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 6489 | if (sd->groups != sd->groups->next) |
| 6490 | return 0; |
| 6491 | } |
| 6492 | |
| 6493 | /* Following flags don't use groups */ |
Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 6494 | if (sd->flags & (SD_WAKE_AFFINE)) |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 6495 | return 0; |
| 6496 | |
| 6497 | return 1; |
| 6498 | } |
| 6499 | |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 6500 | static int |
| 6501 | sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 6502 | { |
| 6503 | unsigned long cflags = sd->flags, pflags = parent->flags; |
| 6504 | |
| 6505 | if (sd_degenerate(parent)) |
| 6506 | return 1; |
| 6507 | |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6508 | if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent))) |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 6509 | return 0; |
| 6510 | |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 6511 | /* Flags needing groups don't count if only 1 group in parent */ |
| 6512 | if (parent->groups == parent->groups->next) { |
| 6513 | pflags &= ~(SD_LOAD_BALANCE | |
| 6514 | SD_BALANCE_NEWIDLE | |
| 6515 | SD_BALANCE_FORK | |
Siddha, Suresh B | 89c4710 | 2006-10-03 01:14:09 -0700 | [diff] [blame] | 6516 | SD_BALANCE_EXEC | |
| 6517 | SD_SHARE_CPUPOWER | |
| 6518 | SD_SHARE_PKG_RESOURCES); |
Ken Chen | 5436499 | 2008-12-07 18:47:37 -0800 | [diff] [blame] | 6519 | if (nr_node_ids == 1) |
| 6520 | pflags &= ~SD_SERIALIZE; |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 6521 | } |
| 6522 | if (~cflags & pflags) |
| 6523 | return 0; |
| 6524 | |
| 6525 | return 1; |
| 6526 | } |
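/*
 * Editorial illustration (not part of the original source): on a hypothetical
 * single-socket, non-SMT 4-CPU box, the MC domain and its parent CPU domain
 * may span the same four CPUs. The parent then has only one group, so all the
 * group-requiring flags are masked out of pflags above; if nothing remains in
 * the parent that the child lacks (~cflags & pflags == 0),
 * sd_parent_degenerate() returns 1 and the redundant parent level can be
 * spliced out by cpu_attach_domain() below.
 */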
| 6527 | |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6528 | static void free_rootdomain(struct root_domain *rd) |
| 6529 | { |
Peter Zijlstra | 047106a | 2009-11-16 10:28:09 +0100 | [diff] [blame] | 6530 | synchronize_sched(); |
| 6531 | |
Rusty Russell | 68e7456 | 2008-11-25 02:35:13 +1030 | [diff] [blame] | 6532 | cpupri_cleanup(&rd->cpupri); |
| 6533 | |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6534 | free_cpumask_var(rd->rto_mask); |
| 6535 | free_cpumask_var(rd->online); |
| 6536 | free_cpumask_var(rd->span); |
| 6537 | kfree(rd); |
| 6538 | } |
| 6539 | |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6540 | static void rq_attach_root(struct rq *rq, struct root_domain *rd) |
| 6541 | { |
Ingo Molnar | a0490fa | 2009-02-12 11:35:40 +0100 | [diff] [blame] | 6542 | struct root_domain *old_rd = NULL; |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6543 | unsigned long flags; |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6544 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 6545 | raw_spin_lock_irqsave(&rq->lock, flags); |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6546 | |
| 6547 | if (rq->rd) { |
Ingo Molnar | a0490fa | 2009-02-12 11:35:40 +0100 | [diff] [blame] | 6548 | old_rd = rq->rd; |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6549 | |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6550 | if (cpumask_test_cpu(rq->cpu, old_rd->online)) |
Gregory Haskins | 1f11eb6a | 2008-06-04 15:04:05 -0400 | [diff] [blame] | 6551 | set_rq_offline(rq); |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6552 | |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6553 | cpumask_clear_cpu(rq->cpu, old_rd->span); |
Gregory Haskins | dc93852 | 2008-01-25 21:08:26 +0100 | [diff] [blame] | 6554 | |
Ingo Molnar | a0490fa | 2009-02-12 11:35:40 +0100 | [diff] [blame] | 6555 | /* |
| 6556 | * If we don't want to free the old_rd yet then |

| 6557 | * set old_rd to NULL to skip the freeing later |
| 6558 | * in this function: |
| 6559 | */ |
| 6560 | if (!atomic_dec_and_test(&old_rd->refcount)) |
| 6561 | old_rd = NULL; |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6562 | } |
| 6563 | |
| 6564 | atomic_inc(&rd->refcount); |
| 6565 | rq->rd = rd; |
| 6566 | |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6567 | cpumask_set_cpu(rq->cpu, rd->span); |
Gregory Haskins | 00aec93 | 2009-07-30 10:57:23 -0400 | [diff] [blame] | 6568 | if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) |
Gregory Haskins | 1f11eb6a | 2008-06-04 15:04:05 -0400 | [diff] [blame] | 6569 | set_rq_online(rq); |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6570 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 6571 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
Ingo Molnar | a0490fa | 2009-02-12 11:35:40 +0100 | [diff] [blame] | 6572 | |
| 6573 | if (old_rd) |
| 6574 | free_rootdomain(old_rd); |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6575 | } |
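/*
 * Editorial walk-through of the hand-off above (a reading of the code, not an
 * addition to it): when a CPU moves from an old root domain to a new one, the
 * runqueue is set offline in the old domain (if it was online), removed from
 * the old span, and the old refcount is dropped; it is then added to the new
 * span and set online if the CPU is active. The old root domain is freed only
 * after rq->lock is released, and only if its refcount reached zero;
 * free_rootdomain() additionally waits out a grace period via
 * synchronize_sched() before tearing the structures down.
 */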
| 6576 | |
Pekka Enberg | 68c38fc | 2010-07-15 23:18:22 +0300 | [diff] [blame] | 6577 | static int init_rootdomain(struct root_domain *rd) |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6578 | { |
| 6579 | memset(rd, 0, sizeof(*rd)); |
| 6580 | |
Pekka Enberg | 68c38fc | 2010-07-15 23:18:22 +0300 | [diff] [blame] | 6581 | if (!alloc_cpumask_var(&rd->span, GFP_KERNEL)) |
Li Zefan | 0c910d2 | 2009-01-06 17:39:06 +0800 | [diff] [blame] | 6582 | goto out; |
Pekka Enberg | 68c38fc | 2010-07-15 23:18:22 +0300 | [diff] [blame] | 6583 | if (!alloc_cpumask_var(&rd->online, GFP_KERNEL)) |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6584 | goto free_span; |
Pekka Enberg | 68c38fc | 2010-07-15 23:18:22 +0300 | [diff] [blame] | 6585 | if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6586 | goto free_online; |
Gregory Haskins | 6e0534f | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 6587 | |
Pekka Enberg | 68c38fc | 2010-07-15 23:18:22 +0300 | [diff] [blame] | 6588 | if (cpupri_init(&rd->cpupri) != 0) |
Rusty Russell | 68e7456 | 2008-11-25 02:35:13 +1030 | [diff] [blame] | 6589 | goto free_rto_mask; |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6590 | return 0; |
| 6591 | |
Rusty Russell | 68e7456 | 2008-11-25 02:35:13 +1030 | [diff] [blame] | 6592 | free_rto_mask: |
| 6593 | free_cpumask_var(rd->rto_mask); |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6594 | free_online: |
| 6595 | free_cpumask_var(rd->online); |
| 6596 | free_span: |
| 6597 | free_cpumask_var(rd->span); |
Li Zefan | 0c910d2 | 2009-01-06 17:39:06 +0800 | [diff] [blame] | 6598 | out: |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6599 | return -ENOMEM; |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6600 | } |
| 6601 | |
| 6602 | static void init_defrootdomain(void) |
| 6603 | { |
Pekka Enberg | 68c38fc | 2010-07-15 23:18:22 +0300 | [diff] [blame] | 6604 | init_rootdomain(&def_root_domain); |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6605 | |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6606 | atomic_set(&def_root_domain.refcount, 1); |
| 6607 | } |
| 6608 | |
Gregory Haskins | dc93852 | 2008-01-25 21:08:26 +0100 | [diff] [blame] | 6609 | static struct root_domain *alloc_rootdomain(void) |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6610 | { |
| 6611 | struct root_domain *rd; |
| 6612 | |
| 6613 | rd = kmalloc(sizeof(*rd), GFP_KERNEL); |
| 6614 | if (!rd) |
| 6615 | return NULL; |
| 6616 | |
Pekka Enberg | 68c38fc | 2010-07-15 23:18:22 +0300 | [diff] [blame] | 6617 | if (init_rootdomain(rd) != 0) { |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6618 | kfree(rd); |
| 6619 | return NULL; |
| 6620 | } |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6621 | |
| 6622 | return rd; |
| 6623 | } |
| 6624 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6625 | /* |
Ingo Molnar | 0eab914 | 2008-01-25 21:08:19 +0100 | [diff] [blame] | 6626 | * Attach the domain 'sd' to 'cpu' as its base domain. Callers must |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6627 | * hold the hotplug lock. |
| 6628 | */ |
Ingo Molnar | 0eab914 | 2008-01-25 21:08:19 +0100 | [diff] [blame] | 6629 | static void |
| 6630 | cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6631 | { |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 6632 | struct rq *rq = cpu_rq(cpu); |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 6633 | struct sched_domain *tmp; |
| 6634 | |
Peter Zijlstra | 669c55e | 2010-04-16 14:59:29 +0200 | [diff] [blame] | 6635 | for (tmp = sd; tmp; tmp = tmp->parent) |
| 6636 | tmp->span_weight = cpumask_weight(sched_domain_span(tmp)); |
| 6637 | |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 6638 | /* Remove the sched domains which do not contribute to scheduling. */ |
Li Zefan | f29c9b1 | 2008-11-06 09:45:16 +0800 | [diff] [blame] | 6639 | for (tmp = sd; tmp; ) { |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 6640 | struct sched_domain *parent = tmp->parent; |
| 6641 | if (!parent) |
| 6642 | break; |
Li Zefan | f29c9b1 | 2008-11-06 09:45:16 +0800 | [diff] [blame] | 6643 | |
Siddha, Suresh B | 1a84887 | 2006-10-03 01:14:08 -0700 | [diff] [blame] | 6644 | if (sd_parent_degenerate(tmp, parent)) { |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 6645 | tmp->parent = parent->parent; |
Siddha, Suresh B | 1a84887 | 2006-10-03 01:14:08 -0700 | [diff] [blame] | 6646 | if (parent->parent) |
| 6647 | parent->parent->child = tmp; |
Li Zefan | f29c9b1 | 2008-11-06 09:45:16 +0800 | [diff] [blame] | 6648 | } else |
| 6649 | tmp = tmp->parent; |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 6650 | } |
| 6651 | |
Siddha, Suresh B | 1a84887 | 2006-10-03 01:14:08 -0700 | [diff] [blame] | 6652 | if (sd && sd_degenerate(sd)) { |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 6653 | sd = sd->parent; |
Siddha, Suresh B | 1a84887 | 2006-10-03 01:14:08 -0700 | [diff] [blame] | 6654 | if (sd) |
| 6655 | sd->child = NULL; |
| 6656 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6657 | |
| 6658 | sched_domain_debug(sd, cpu); |
| 6659 | |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6660 | rq_attach_root(rq, rd); |
Nick Piggin | 674311d | 2005-06-25 14:57:27 -0700 | [diff] [blame] | 6661 | rcu_assign_pointer(rq->sd, sd); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6662 | } |
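/*
 * Worked example (editorial, hypothetical topology): on a CPU without
 * hyperthreading but with CONFIG_SCHED_SMT built in, the bottom SIBLING
 * domain spans just that one CPU, so sd_degenerate() is true for it; the
 * final check above advances sd to its parent (e.g. the MC or CPU level),
 * which then becomes the base domain published via
 * rcu_assign_pointer(rq->sd, sd). Degenerate intermediate levels are removed
 * the same way by the re-parenting loop.
 */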
| 6663 | |
| 6664 | /* cpus with isolated domains */ |
Rusty Russell | dcc30a3 | 2008-11-25 02:35:12 +1030 | [diff] [blame] | 6665 | static cpumask_var_t cpu_isolated_map; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6666 | |
| 6667 | /* Setup the mask of cpus configured for isolated domains */ |
| 6668 | static int __init isolated_cpu_setup(char *str) |
| 6669 | { |
Rusty Russell | bdddd29 | 2009-12-02 14:09:16 +1030 | [diff] [blame] | 6670 | alloc_bootmem_cpumask_var(&cpu_isolated_map); |
Rusty Russell | 968ea6d | 2008-12-13 21:55:51 +1030 | [diff] [blame] | 6671 | cpulist_parse(str, cpu_isolated_map); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6672 | return 1; |
| 6673 | } |
| 6674 | |
Ingo Molnar | 8927f49 | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 6675 | __setup("isolcpus=", isolated_cpu_setup); |
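/*
 * Usage note (editorial): booting with e.g. "isolcpus=2,3" parses that list
 * into cpu_isolated_map via cpulist_parse() above; those CPUs are later left
 * out when the sched domains are built, so the scheduler will not
 * load-balance tasks onto them.
 */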
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6676 | |
| 6677 | /* |
Siddha, Suresh B | 6711cab | 2006-12-10 02:20:07 -0800 | [diff] [blame] | 6678 | * init_sched_build_groups takes the cpumask we wish to span, and a pointer |
| 6679 | * to a function which identifies what group (along with the sched group) a CPU |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6680 | * belongs to. The return value of group_fn must be >= 0 and < nr_cpu_ids |
| 6681 | * (because we keep track of the groups covered with a struct cpumask). |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6682 | * |
| 6683 | * init_sched_build_groups will build a circular linked list of the groups |
| 6684 | * covered by the given span, and will set each group's ->cpumask correctly, |
| 6685 | * and ->cpu_power to 0. |
| 6686 | */ |
Siddha, Suresh B | a616058 | 2006-10-03 01:14:06 -0700 | [diff] [blame] | 6687 | static void |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6688 | init_sched_build_groups(const struct cpumask *span, |
| 6689 | const struct cpumask *cpu_map, |
| 6690 | int (*group_fn)(int cpu, const struct cpumask *cpu_map, |
Mike Travis | 7c16ec5 | 2008-04-04 18:11:11 -0700 | [diff] [blame] | 6691 | struct sched_group **sg, |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6692 | struct cpumask *tmpmask), |
| 6693 | struct cpumask *covered, struct cpumask *tmpmask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6694 | { |
| 6695 | struct sched_group *first = NULL, *last = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6696 | int i; |
| 6697 | |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6698 | cpumask_clear(covered); |
Mike Travis | 7c16ec5 | 2008-04-04 18:11:11 -0700 | [diff] [blame] | 6699 | |
Rusty Russell | abcd083 | 2008-11-25 02:35:02 +1030 | [diff] [blame] | 6700 | for_each_cpu(i, span) { |
Siddha, Suresh B | 6711cab | 2006-12-10 02:20:07 -0800 | [diff] [blame] | 6701 | struct sched_group *sg; |
Mike Travis | 7c16ec5 | 2008-04-04 18:11:11 -0700 | [diff] [blame] | 6702 | int group = group_fn(i, cpu_map, &sg, tmpmask); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6703 | int j; |
| 6704 | |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6705 | if (cpumask_test_cpu(i, covered)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6706 | continue; |
| 6707 | |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6708 | cpumask_clear(sched_group_cpus(sg)); |
Peter Zijlstra | 18a3885 | 2009-09-01 10:34:39 +0200 | [diff] [blame] | 6709 | sg->cpu_power = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6710 | |
Rusty Russell | abcd083 | 2008-11-25 02:35:02 +1030 | [diff] [blame] | 6711 | for_each_cpu(j, span) { |
Mike Travis | 7c16ec5 | 2008-04-04 18:11:11 -0700 | [diff] [blame] | 6712 | if (group_fn(j, cpu_map, NULL, tmpmask) != group) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6713 | continue; |
| 6714 | |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6715 | cpumask_set_cpu(j, covered); |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6716 | cpumask_set_cpu(j, sched_group_cpus(sg)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6717 | } |
| 6718 | if (!first) |
| 6719 | first = sg; |
| 6720 | if (last) |
| 6721 | last->next = sg; |
| 6722 | last = sg; |
| 6723 | } |
| 6724 | last->next = first; |
| 6725 | } |
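/*
 * Worked example (editorial illustration): with span = {0,1,2,3} and a
 * group_fn that maps each CPU to the first CPU of its 2-thread core, the loop
 * above builds two groups covering {0,1} and {2,3}, each with cpu_power
 * initialised to 0, and links them into the circular list
 * {0,1} -> {2,3} -> {0,1} via the final last->next = first.
 */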
| 6726 | |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6727 | #define SD_NODES_PER_DOMAIN 16 |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6728 | |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6729 | #ifdef CONFIG_NUMA |
akpm@osdl.org | 198e2f1 | 2006-01-12 01:05:30 -0800 | [diff] [blame] | 6730 | |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6731 | /** |
| 6732 | * find_next_best_node - find the next node to include in a sched_domain |
| 6733 | * @node: node whose sched_domain we're building |
| 6734 | * @used_nodes: nodes already in the sched_domain |
| 6735 | * |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 6736 | * Find the next node to include in a given scheduling domain. Simply |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6737 | * finds the closest node not already in the @used_nodes map. |
| 6738 | * |
| 6739 | * Should use nodemask_t. |
| 6740 | */ |
Mike Travis | c5f59f0 | 2008-04-04 18:11:10 -0700 | [diff] [blame] | 6741 | static int find_next_best_node(int node, nodemask_t *used_nodes) |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6742 | { |
| 6743 | int i, n, val, min_val, best_node = 0; |
| 6744 | |
| 6745 | min_val = INT_MAX; |
| 6746 | |
Mike Travis | 076ac2a | 2008-05-12 21:21:12 +0200 | [diff] [blame] | 6747 | for (i = 0; i < nr_node_ids; i++) { |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6748 | /* Start at @node */ |
Mike Travis | 076ac2a | 2008-05-12 21:21:12 +0200 | [diff] [blame] | 6749 | n = (node + i) % nr_node_ids; |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6750 | |
| 6751 | if (!nr_cpus_node(n)) |
| 6752 | continue; |
| 6753 | |
| 6754 | /* Skip already used nodes */ |
Mike Travis | c5f59f0 | 2008-04-04 18:11:10 -0700 | [diff] [blame] | 6755 | if (node_isset(n, *used_nodes)) |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6756 | continue; |
| 6757 | |
| 6758 | /* Simple min distance search */ |
| 6759 | val = node_distance(node, n); |
| 6760 | |
| 6761 | if (val < min_val) { |
| 6762 | min_val = val; |
| 6763 | best_node = n; |
| 6764 | } |
| 6765 | } |
| 6766 | |
Mike Travis | c5f59f0 | 2008-04-04 18:11:10 -0700 | [diff] [blame] | 6767 | node_set(best_node, *used_nodes); |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6768 | return best_node; |
| 6769 | } |
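/*
 * Worked example (editorial, hypothetical distances): for node 0 with
 * node_distance(0, {0,1,2,3}) = {10, 20, 20, 40} and used_nodes = {0}, the
 * scan above skips node 0 (already used) and picks node 1, the first node at
 * the minimum remaining distance 20; a second call with used_nodes = {0,1}
 * would return node 2, and a third would return node 3.
 */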
| 6770 | |
| 6771 | /** |
| 6772 | * sched_domain_node_span - get a cpumask for a node's sched_domain |
| 6773 | * @node: node whose cpumask we're constructing |
Randy Dunlap | 7348672 | 2008-04-22 10:07:22 -0700 | [diff] [blame] | 6774 | * @span: resulting cpumask |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6775 | * |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 6776 | * Given a node, construct a good cpumask for its sched_domain to span. It |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6777 | * should be one that prevents unnecessary balancing, but also spreads tasks |
| 6778 | * out optimally. |
| 6779 | */ |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6780 | static void sched_domain_node_span(int node, struct cpumask *span) |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6781 | { |
Mike Travis | c5f59f0 | 2008-04-04 18:11:10 -0700 | [diff] [blame] | 6782 | nodemask_t used_nodes; |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 6783 | int i; |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6784 | |
Mike Travis | 6ca09df | 2008-12-31 18:08:45 -0800 | [diff] [blame] | 6785 | cpumask_clear(span); |
Mike Travis | c5f59f0 | 2008-04-04 18:11:10 -0700 | [diff] [blame] | 6786 | nodes_clear(used_nodes); |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6787 | |
Mike Travis | 6ca09df | 2008-12-31 18:08:45 -0800 | [diff] [blame] | 6788 | cpumask_or(span, span, cpumask_of_node(node)); |
Mike Travis | c5f59f0 | 2008-04-04 18:11:10 -0700 | [diff] [blame] | 6789 | node_set(node, used_nodes); |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6790 | |
| 6791 | for (i = 1; i < SD_NODES_PER_DOMAIN; i++) { |
Mike Travis | c5f59f0 | 2008-04-04 18:11:10 -0700 | [diff] [blame] | 6792 | int next_node = find_next_best_node(node, &used_nodes); |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 6793 | |
Mike Travis | 6ca09df | 2008-12-31 18:08:45 -0800 | [diff] [blame] | 6794 | cpumask_or(span, span, cpumask_of_node(next_node)); |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6795 | } |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6796 | } |
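/*
 * Editorial note: the resulting span contains the CPUs of @node plus those of
 * the (SD_NODES_PER_DOMAIN - 1) = 15 next-best nodes as ranked by
 * find_next_best_node(); callers that need it intersect the span with
 * cpu_map afterwards.
 */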
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 6797 | #endif /* CONFIG_NUMA */ |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6798 | |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 6799 | int sched_smt_power_savings = 0, sched_mc_power_savings = 0; |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 6800 | |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6801 | /* |
Rusty Russell | 6c99e9a | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6802 | * The cpus mask in sched_group and sched_domain hangs off the end. |
Ingo Molnar | 4200efd | 2009-05-19 09:22:19 +0200 | [diff] [blame] | 6803 | * |
| 6804 | * (See the comments in include/linux/sched.h:struct sched_group |
| 6805 | * and struct sched_domain.) |
Rusty Russell | 6c99e9a | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6806 | */ |
| 6807 | struct static_sched_group { |
| 6808 | struct sched_group sg; |
| 6809 | DECLARE_BITMAP(cpus, CONFIG_NR_CPUS); |
| 6810 | }; |
| 6811 | |
| 6812 | struct static_sched_domain { |
| 6813 | struct sched_domain sd; |
| 6814 | DECLARE_BITMAP(span, CONFIG_NR_CPUS); |
| 6815 | }; |
| 6816 | |
Andreas Herrmann | 49a02c5 | 2009-08-18 12:51:52 +0200 | [diff] [blame] | 6817 | struct s_data { |
| 6818 | #ifdef CONFIG_NUMA |
| 6819 | int sd_allnodes; |
| 6820 | cpumask_var_t domainspan; |
| 6821 | cpumask_var_t covered; |
| 6822 | cpumask_var_t notcovered; |
| 6823 | #endif |
| 6824 | cpumask_var_t nodemask; |
| 6825 | cpumask_var_t this_sibling_map; |
| 6826 | cpumask_var_t this_core_map; |
Heiko Carstens | 01a0854 | 2010-08-31 10:28:16 +0200 | [diff] [blame] | 6827 | cpumask_var_t this_book_map; |
Andreas Herrmann | 49a02c5 | 2009-08-18 12:51:52 +0200 | [diff] [blame] | 6828 | cpumask_var_t send_covered; |
| 6829 | cpumask_var_t tmpmask; |
| 6830 | struct sched_group **sched_group_nodes; |
| 6831 | struct root_domain *rd; |
| 6832 | }; |
| 6833 | |
Andreas Herrmann | 2109b99 | 2009-08-18 12:53:00 +0200 | [diff] [blame] | 6834 | enum s_alloc { |
| 6835 | sa_sched_groups = 0, |
| 6836 | sa_rootdomain, |
| 6837 | sa_tmpmask, |
| 6838 | sa_send_covered, |
Heiko Carstens | 01a0854 | 2010-08-31 10:28:16 +0200 | [diff] [blame] | 6839 | sa_this_book_map, |
Andreas Herrmann | 2109b99 | 2009-08-18 12:53:00 +0200 | [diff] [blame] | 6840 | sa_this_core_map, |
| 6841 | sa_this_sibling_map, |
| 6842 | sa_nodemask, |
| 6843 | sa_sched_group_nodes, |
| 6844 | #ifdef CONFIG_NUMA |
| 6845 | sa_notcovered, |
| 6846 | sa_covered, |
| 6847 | sa_domainspan, |
| 6848 | #endif |
| 6849 | sa_none, |
| 6850 | }; |
| 6851 | |
Rusty Russell | 6c99e9a | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6852 | /* |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 6853 | * SMT sched-domains: |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6854 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6855 | #ifdef CONFIG_SCHED_SMT |
Rusty Russell | 6c99e9a | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6856 | static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains); |
Tejun Heo | 1871e52 | 2009-10-29 22:34:13 +0900 | [diff] [blame] | 6857 | static DEFINE_PER_CPU(struct static_sched_group, sched_groups); |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 6858 | |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 6859 | static int |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6860 | cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map, |
| 6861 | struct sched_group **sg, struct cpumask *unused) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6862 | { |
Siddha, Suresh B | 6711cab | 2006-12-10 02:20:07 -0800 | [diff] [blame] | 6863 | if (sg) |
Tejun Heo | 1871e52 | 2009-10-29 22:34:13 +0900 | [diff] [blame] | 6864 | *sg = &per_cpu(sched_groups, cpu).sg; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6865 | return cpu; |
| 6866 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 6867 | #endif /* CONFIG_SCHED_SMT */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6868 | |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 6869 | /* |
| 6870 | * multi-core sched-domains: |
| 6871 | */ |
Siddha, Suresh B | 1e9f28f | 2006-03-27 01:15:22 -0800 | [diff] [blame] | 6872 | #ifdef CONFIG_SCHED_MC |
Rusty Russell | 6c99e9a | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6873 | static DEFINE_PER_CPU(struct static_sched_domain, core_domains); |
| 6874 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_core); |
Siddha, Suresh B | 1e9f28f | 2006-03-27 01:15:22 -0800 | [diff] [blame] | 6875 | |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 6876 | static int |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6877 | cpu_to_core_group(int cpu, const struct cpumask *cpu_map, |
| 6878 | struct sched_group **sg, struct cpumask *mask) |
Siddha, Suresh B | 1e9f28f | 2006-03-27 01:15:22 -0800 | [diff] [blame] | 6879 | { |
Siddha, Suresh B | 6711cab | 2006-12-10 02:20:07 -0800 | [diff] [blame] | 6880 | int group; |
Heiko Carstens | f269893 | 2010-08-31 10:28:15 +0200 | [diff] [blame] | 6881 | #ifdef CONFIG_SCHED_SMT |
Rusty Russell | c69fc56 | 2009-03-13 14:49:46 +1030 | [diff] [blame] | 6882 | cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map); |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6883 | group = cpumask_first(mask); |
Heiko Carstens | f269893 | 2010-08-31 10:28:15 +0200 | [diff] [blame] | 6884 | #else |
| 6885 | group = cpu; |
| 6886 | #endif |
Siddha, Suresh B | 6711cab | 2006-12-10 02:20:07 -0800 | [diff] [blame] | 6887 | if (sg) |
Rusty Russell | 6c99e9a | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6888 | *sg = &per_cpu(sched_group_core, group).sg; |
Siddha, Suresh B | 6711cab | 2006-12-10 02:20:07 -0800 | [diff] [blame] | 6889 | return group; |
Siddha, Suresh B | 1e9f28f | 2006-03-27 01:15:22 -0800 | [diff] [blame] | 6890 | } |
Heiko Carstens | f269893 | 2010-08-31 10:28:15 +0200 | [diff] [blame] | 6891 | #endif /* CONFIG_SCHED_MC */ |
Siddha, Suresh B | 1e9f28f | 2006-03-27 01:15:22 -0800 | [diff] [blame] | 6892 | |
Heiko Carstens | 01a0854 | 2010-08-31 10:28:16 +0200 | [diff] [blame] | 6893 | /* |
| 6894 | * book sched-domains: |
| 6895 | */ |
| 6896 | #ifdef CONFIG_SCHED_BOOK |
| 6897 | static DEFINE_PER_CPU(struct static_sched_domain, book_domains); |
| 6898 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_book); |
| 6899 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6900 | static int |
Heiko Carstens | 01a0854 | 2010-08-31 10:28:16 +0200 | [diff] [blame] | 6901 | cpu_to_book_group(int cpu, const struct cpumask *cpu_map, |
| 6902 | struct sched_group **sg, struct cpumask *mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6903 | { |
Heiko Carstens | 01a0854 | 2010-08-31 10:28:16 +0200 | [diff] [blame] | 6904 | int group = cpu; |
| 6905 | #ifdef CONFIG_SCHED_MC |
| 6906 | cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map); |
| 6907 | group = cpumask_first(mask); |
| 6908 | #elif defined(CONFIG_SCHED_SMT) |
| 6909 | cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map); |
| 6910 | group = cpumask_first(mask); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6911 | #endif |
Heiko Carstens | 01a0854 | 2010-08-31 10:28:16 +0200 | [diff] [blame] | 6912 | if (sg) |
| 6913 | *sg = &per_cpu(sched_group_book, group).sg; |
| 6914 | return group; |
| 6915 | } |
| 6916 | #endif /* CONFIG_SCHED_BOOK */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6917 | |
Rusty Russell | 6c99e9a | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6918 | static DEFINE_PER_CPU(struct static_sched_domain, phys_domains); |
| 6919 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys); |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 6920 | |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 6921 | static int |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6922 | cpu_to_phys_group(int cpu, const struct cpumask *cpu_map, |
| 6923 | struct sched_group **sg, struct cpumask *mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6924 | { |
Siddha, Suresh B | 6711cab | 2006-12-10 02:20:07 -0800 | [diff] [blame] | 6925 | int group; |
Heiko Carstens | 01a0854 | 2010-08-31 10:28:16 +0200 | [diff] [blame] | 6926 | #ifdef CONFIG_SCHED_BOOK |
| 6927 | cpumask_and(mask, cpu_book_mask(cpu), cpu_map); |
| 6928 | group = cpumask_first(mask); |
| 6929 | #elif defined(CONFIG_SCHED_MC) |
Mike Travis | 6ca09df | 2008-12-31 18:08:45 -0800 | [diff] [blame] | 6930 | cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map); |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6931 | group = cpumask_first(mask); |
Siddha, Suresh B | 1e9f28f | 2006-03-27 01:15:22 -0800 | [diff] [blame] | 6932 | #elif defined(CONFIG_SCHED_SMT) |
Rusty Russell | c69fc56 | 2009-03-13 14:49:46 +1030 | [diff] [blame] | 6933 | cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map); |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6934 | group = cpumask_first(mask); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6935 | #else |
Siddha, Suresh B | 6711cab | 2006-12-10 02:20:07 -0800 | [diff] [blame] | 6936 | group = cpu; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6937 | #endif |
Siddha, Suresh B | 6711cab | 2006-12-10 02:20:07 -0800 | [diff] [blame] | 6938 | if (sg) |
Rusty Russell | 6c99e9a | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6939 | *sg = &per_cpu(sched_group_phys, group).sg; |
Siddha, Suresh B | 6711cab | 2006-12-10 02:20:07 -0800 | [diff] [blame] | 6940 | return group; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6941 | } |
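/*
 * Editorial illustration: the cpu_to_*_group() helpers all follow the same
 * pattern -- take the next-lower topology mask, intersect it with cpu_map,
 * and use its first CPU as the group representative. For example, with
 * CONFIG_SCHED_MC set (and CONFIG_SCHED_BOOK not set) and cores {0,1} and
 * {2,3}, cpu_to_phys_group(3, ...) computes mask = {2,3} & cpu_map and
 * returns 2, so CPUs 2 and 3 share one sched_group_phys entry at the
 * physical level.
 */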
| 6942 | |
| 6943 | #ifdef CONFIG_NUMA |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6944 | /* |
| 6945 | * init_sched_build_groups() can't handle what we want to do with node |
| 6946 | * groups, so roll our own. Now each node has its own list of groups which |
| 6947 | * gets dynamically allocated. |
| 6948 | */ |
Rusty Russell | 62ea9ce | 2009-01-11 01:04:16 +0100 | [diff] [blame] | 6949 | static DEFINE_PER_CPU(struct static_sched_domain, node_domains); |
Mike Travis | 434d53b | 2008-04-04 18:11:04 -0700 | [diff] [blame] | 6950 | static struct sched_group ***sched_group_nodes_bycpu; |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6951 | |
Rusty Russell | 62ea9ce | 2009-01-11 01:04:16 +0100 | [diff] [blame] | 6952 | static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains); |
Rusty Russell | 6c99e9a | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6953 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes); |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6954 | |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6955 | static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map, |
| 6956 | struct sched_group **sg, |
| 6957 | struct cpumask *nodemask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6958 | { |
Siddha, Suresh B | 6711cab | 2006-12-10 02:20:07 -0800 | [diff] [blame] | 6959 | int group; |
| 6960 | |
Mike Travis | 6ca09df | 2008-12-31 18:08:45 -0800 | [diff] [blame] | 6961 | cpumask_and(nodemask, cpumask_of_node(cpu_to_node(cpu)), cpu_map); |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6962 | group = cpumask_first(nodemask); |
Siddha, Suresh B | 6711cab | 2006-12-10 02:20:07 -0800 | [diff] [blame] | 6963 | |
| 6964 | if (sg) |
Rusty Russell | 6c99e9a | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6965 | *sg = &per_cpu(sched_group_allnodes, group).sg; |
Siddha, Suresh B | 6711cab | 2006-12-10 02:20:07 -0800 | [diff] [blame] | 6966 | return group; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6967 | } |
Siddha, Suresh B | 6711cab | 2006-12-10 02:20:07 -0800 | [diff] [blame] | 6968 | |
Siddha, Suresh B | 0806903 | 2006-03-27 01:15:23 -0800 | [diff] [blame] | 6969 | static void init_numa_sched_groups_power(struct sched_group *group_head) |
| 6970 | { |
| 6971 | struct sched_group *sg = group_head; |
| 6972 | int j; |
| 6973 | |
| 6974 | if (!sg) |
| 6975 | return; |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 6976 | do { |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6977 | for_each_cpu(j, sched_group_cpus(sg)) { |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 6978 | struct sched_domain *sd; |
Siddha, Suresh B | 0806903 | 2006-03-27 01:15:23 -0800 | [diff] [blame] | 6979 | |
Rusty Russell | 6c99e9a | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6980 | sd = &per_cpu(phys_domains, j).sd; |
Miao Xie | 13318a7 | 2009-04-15 09:59:10 +0800 | [diff] [blame] | 6981 | if (j != group_first_cpu(sd->groups)) { |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 6982 | /* |
| 6983 | * Only add "power" once for each |
| 6984 | * physical package. |
| 6985 | */ |
| 6986 | continue; |
| 6987 | } |
| 6988 | |
Peter Zijlstra | 18a3885 | 2009-09-01 10:34:39 +0200 | [diff] [blame] | 6989 | sg->cpu_power += sd->groups->cpu_power; |
Siddha, Suresh B | 0806903 | 2006-03-27 01:15:23 -0800 | [diff] [blame] | 6990 | } |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 6991 | sg = sg->next; |
| 6992 | } while (sg != group_head); |
Siddha, Suresh B | 0806903 | 2006-03-27 01:15:23 -0800 | [diff] [blame] | 6993 | } |
Andreas Herrmann | 0601a88 | 2009-08-18 13:01:11 +0200 | [diff] [blame] | 6994 | |
| 6995 | static int build_numa_sched_groups(struct s_data *d, |
| 6996 | const struct cpumask *cpu_map, int num) |
| 6997 | { |
| 6998 | struct sched_domain *sd; |
| 6999 | struct sched_group *sg, *prev; |
| 7000 | int n, j; |
| 7001 | |
| 7002 | cpumask_clear(d->covered); |
| 7003 | cpumask_and(d->nodemask, cpumask_of_node(num), cpu_map); |
| 7004 | if (cpumask_empty(d->nodemask)) { |
| 7005 | d->sched_group_nodes[num] = NULL; |
| 7006 | goto out; |
| 7007 | } |
| 7008 | |
| 7009 | sched_domain_node_span(num, d->domainspan); |
| 7010 | cpumask_and(d->domainspan, d->domainspan, cpu_map); |
| 7011 | |
| 7012 | sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(), |
| 7013 | GFP_KERNEL, num); |
| 7014 | if (!sg) { |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 7015 | printk(KERN_WARNING "Can not alloc domain group for node %d\n", |
| 7016 | num); |
Andreas Herrmann | 0601a88 | 2009-08-18 13:01:11 +0200 | [diff] [blame] | 7017 | return -ENOMEM; |
| 7018 | } |
| 7019 | d->sched_group_nodes[num] = sg; |
| 7020 | |
| 7021 | for_each_cpu(j, d->nodemask) { |
| 7022 | sd = &per_cpu(node_domains, j).sd; |
| 7023 | sd->groups = sg; |
| 7024 | } |
| 7025 | |
Peter Zijlstra | 18a3885 | 2009-09-01 10:34:39 +0200 | [diff] [blame] | 7026 | sg->cpu_power = 0; |
Andreas Herrmann | 0601a88 | 2009-08-18 13:01:11 +0200 | [diff] [blame] | 7027 | cpumask_copy(sched_group_cpus(sg), d->nodemask); |
| 7028 | sg->next = sg; |
| 7029 | cpumask_or(d->covered, d->covered, d->nodemask); |
| 7030 | |
| 7031 | prev = sg; |
| 7032 | for (j = 0; j < nr_node_ids; j++) { |
| 7033 | n = (num + j) % nr_node_ids; |
| 7034 | cpumask_complement(d->notcovered, d->covered); |
| 7035 | cpumask_and(d->tmpmask, d->notcovered, cpu_map); |
| 7036 | cpumask_and(d->tmpmask, d->tmpmask, d->domainspan); |
| 7037 | if (cpumask_empty(d->tmpmask)) |
| 7038 | break; |
| 7039 | cpumask_and(d->tmpmask, d->tmpmask, cpumask_of_node(n)); |
| 7040 | if (cpumask_empty(d->tmpmask)) |
| 7041 | continue; |
| 7042 | sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(), |
| 7043 | GFP_KERNEL, num); |
| 7044 | if (!sg) { |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 7045 | printk(KERN_WARNING |
| 7046 | "Can not alloc domain group for node %d\n", j); |
Andreas Herrmann | 0601a88 | 2009-08-18 13:01:11 +0200 | [diff] [blame] | 7047 | return -ENOMEM; |
| 7048 | } |
Peter Zijlstra | 18a3885 | 2009-09-01 10:34:39 +0200 | [diff] [blame] | 7049 | sg->cpu_power = 0; |
Andreas Herrmann | 0601a88 | 2009-08-18 13:01:11 +0200 | [diff] [blame] | 7050 | cpumask_copy(sched_group_cpus(sg), d->tmpmask); |
| 7051 | sg->next = prev->next; |
| 7052 | cpumask_or(d->covered, d->covered, d->tmpmask); |
| 7053 | prev->next = sg; |
| 7054 | prev = sg; |
| 7055 | } |
| 7056 | out: |
| 7057 | return 0; |
| 7058 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 7059 | #endif /* CONFIG_NUMA */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7060 | |
Siddha, Suresh B | a616058 | 2006-10-03 01:14:06 -0700 | [diff] [blame] | 7061 | #ifdef CONFIG_NUMA |
Srivatsa Vaddagiri | 51888ca | 2006-06-27 02:54:38 -0700 | [diff] [blame] | 7062 | /* Free memory allocated for various sched_group structures */ |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 7063 | static void free_sched_groups(const struct cpumask *cpu_map, |
| 7064 | struct cpumask *nodemask) |
Srivatsa Vaddagiri | 51888ca | 2006-06-27 02:54:38 -0700 | [diff] [blame] | 7065 | { |
Siddha, Suresh B | a616058 | 2006-10-03 01:14:06 -0700 | [diff] [blame] | 7066 | int cpu, i; |
Srivatsa Vaddagiri | 51888ca | 2006-06-27 02:54:38 -0700 | [diff] [blame] | 7067 | |
Rusty Russell | abcd083 | 2008-11-25 02:35:02 +1030 | [diff] [blame] | 7068 | for_each_cpu(cpu, cpu_map) { |
Srivatsa Vaddagiri | 51888ca | 2006-06-27 02:54:38 -0700 | [diff] [blame] | 7069 | struct sched_group **sched_group_nodes |
| 7070 | = sched_group_nodes_bycpu[cpu]; |
| 7071 | |
Srivatsa Vaddagiri | 51888ca | 2006-06-27 02:54:38 -0700 | [diff] [blame] | 7072 | if (!sched_group_nodes) |
| 7073 | continue; |
| 7074 | |
Mike Travis | 076ac2a | 2008-05-12 21:21:12 +0200 | [diff] [blame] | 7075 | for (i = 0; i < nr_node_ids; i++) { |
Srivatsa Vaddagiri | 51888ca | 2006-06-27 02:54:38 -0700 | [diff] [blame] | 7076 | struct sched_group *oldsg, *sg = sched_group_nodes[i]; |
| 7077 | |
Mike Travis | 6ca09df | 2008-12-31 18:08:45 -0800 | [diff] [blame] | 7078 | cpumask_and(nodemask, cpumask_of_node(i), cpu_map); |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 7079 | if (cpumask_empty(nodemask)) |
Srivatsa Vaddagiri | 51888ca | 2006-06-27 02:54:38 -0700 | [diff] [blame] | 7080 | continue; |
| 7081 | |
| 7082 | if (sg == NULL) |
| 7083 | continue; |
| 7084 | sg = sg->next; |
| 7085 | next_sg: |
| 7086 | oldsg = sg; |
| 7087 | sg = sg->next; |
| 7088 | kfree(oldsg); |
| 7089 | if (oldsg != sched_group_nodes[i]) |
| 7090 | goto next_sg; |
| 7091 | } |
| 7092 | kfree(sched_group_nodes); |
| 7093 | sched_group_nodes_bycpu[cpu] = NULL; |
| 7094 | } |
Srivatsa Vaddagiri | 51888ca | 2006-06-27 02:54:38 -0700 | [diff] [blame] | 7095 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 7096 | #else /* !CONFIG_NUMA */ |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 7097 | static void free_sched_groups(const struct cpumask *cpu_map, |
| 7098 | struct cpumask *nodemask) |
Siddha, Suresh B | a616058 | 2006-10-03 01:14:06 -0700 | [diff] [blame] | 7099 | { |
| 7100 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 7101 | #endif /* CONFIG_NUMA */ |
Srivatsa Vaddagiri | 51888ca | 2006-06-27 02:54:38 -0700 | [diff] [blame] | 7102 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7103 | /* |
Siddha, Suresh B | 89c4710 | 2006-10-03 01:14:09 -0700 | [diff] [blame] | 7104 | * Initialize sched groups cpu_power. |
| 7105 | * |
| 7106 | * cpu_power indicates the capacity of a sched group, which is used while |
| 7107 | * distributing the load between different sched groups in a sched domain. |
| 7108 | * Typically cpu_power for all the groups in a sched domain will be the same |
| 7109 | * unless there are asymmetries in the topology. If there are asymmetries, the |
| 7110 | * group with more cpu_power will pick up more load than the group with |
| 7111 | * less cpu_power. |
Siddha, Suresh B | 89c4710 | 2006-10-03 01:14:09 -0700 | [diff] [blame] | 7112 | */ |
| 7113 | static void init_sched_groups_power(int cpu, struct sched_domain *sd) |
| 7114 | { |
| 7115 | struct sched_domain *child; |
| 7116 | struct sched_group *group; |
Peter Zijlstra | f93e65c | 2009-09-01 10:34:32 +0200 | [diff] [blame] | 7117 | long power; |
| 7118 | int weight; |
Siddha, Suresh B | 89c4710 | 2006-10-03 01:14:09 -0700 | [diff] [blame] | 7119 | |
| 7120 | WARN_ON(!sd || !sd->groups); |
| 7121 | |
Miao Xie | 13318a7 | 2009-04-15 09:59:10 +0800 | [diff] [blame] | 7122 | if (cpu != group_first_cpu(sd->groups)) |
Siddha, Suresh B | 89c4710 | 2006-10-03 01:14:09 -0700 | [diff] [blame] | 7123 | return; |
| 7124 | |
Suresh Siddha | aae6d3d | 2010-09-17 15:02:32 -0700 | [diff] [blame] | 7125 | sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups)); |
| 7126 | |
Siddha, Suresh B | 89c4710 | 2006-10-03 01:14:09 -0700 | [diff] [blame] | 7127 | child = sd->child; |
| 7128 | |
Peter Zijlstra | 18a3885 | 2009-09-01 10:34:39 +0200 | [diff] [blame] | 7129 | sd->groups->cpu_power = 0; |
Eric Dumazet | 5517d86 | 2007-05-08 00:32:57 -0700 | [diff] [blame] | 7130 | |
Peter Zijlstra | f93e65c | 2009-09-01 10:34:32 +0200 | [diff] [blame] | 7131 | if (!child) { |
| 7132 | power = SCHED_LOAD_SCALE; |
| 7133 | weight = cpumask_weight(sched_domain_span(sd)); |
| 7134 | /* |
| 7135 | * SMT siblings share the power of a single core. |
Peter Zijlstra | a52bfd73 | 2009-09-01 10:34:35 +0200 | [diff] [blame] | 7136 | * Usually multiple threads get a better yield out of |
| 7137 | * that one core than a single thread would have; |
| 7138 | * reflect that in sd->smt_gain. |
Peter Zijlstra | f93e65c | 2009-09-01 10:34:32 +0200 | [diff] [blame] | 7139 | */ |
Peter Zijlstra | a52bfd73 | 2009-09-01 10:34:35 +0200 | [diff] [blame] | 7140 | if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) { |
| 7141 | power *= sd->smt_gain; |
Peter Zijlstra | f93e65c | 2009-09-01 10:34:32 +0200 | [diff] [blame] | 7142 | power /= weight; |
Peter Zijlstra | a52bfd73 | 2009-09-01 10:34:35 +0200 | [diff] [blame] | 7143 | power >>= SCHED_LOAD_SHIFT; |
| 7144 | } |
Peter Zijlstra | 18a3885 | 2009-09-01 10:34:39 +0200 | [diff] [blame] | 7145 | sd->groups->cpu_power += power; |
Siddha, Suresh B | 89c4710 | 2006-10-03 01:14:09 -0700 | [diff] [blame] | 7146 | return; |
| 7147 | } |
| 7148 | |
Siddha, Suresh B | 89c4710 | 2006-10-03 01:14:09 -0700 | [diff] [blame] | 7149 | /* |
Peter Zijlstra | f93e65c | 2009-09-01 10:34:32 +0200 | [diff] [blame] | 7150 | * Add the cpu_power of each child group to this group's cpu_power. |
Siddha, Suresh B | 89c4710 | 2006-10-03 01:14:09 -0700 | [diff] [blame] | 7151 | */ |
| 7152 | group = child->groups; |
| 7153 | do { |
Peter Zijlstra | 18a3885 | 2009-09-01 10:34:39 +0200 | [diff] [blame] | 7154 | sd->groups->cpu_power += group->cpu_power; |
Siddha, Suresh B | 89c4710 | 2006-10-03 01:14:09 -0700 | [diff] [blame] | 7155 | group = group->next; |
| 7156 | } while (group != child->groups); |
| 7157 | } |
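/*
 * Worked example (editorial; assumes SCHED_LOAD_SCALE = 1024 and a default
 * smt_gain of about 1178 -- typical values, but check the running kernel):
 * for a 2-thread SMT sibling domain, the bottom-level path above computes
 *
 *     power = (1024 * 1178 / 2) >> SCHED_LOAD_SHIFT  ~=  589
 *
 * per sibling group, so the two hardware threads together advertise roughly
 * 1178 units of cpu_power -- more than one full core (1024) but far less
 * than two, which is exactly what smt_gain is meant to express.
 */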
| 7158 | |
| 7159 | /* |
Mike Travis | 7c16ec5 | 2008-04-04 18:11:11 -0700 | [diff] [blame] | 7160 | * Initializers for schedule domains |
| 7161 | * Non-inlined to reduce accumulated stack pressure in build_sched_domains() |
| 7162 | */ |
| 7163 | |
Ingo Molnar | a5d8c34 | 2008-10-09 11:35:51 +0200 | [diff] [blame] | 7164 | #ifdef CONFIG_SCHED_DEBUG |
| 7165 | # define SD_INIT_NAME(sd, type) sd->name = #type |
| 7166 | #else |
| 7167 | # define SD_INIT_NAME(sd, type) do { } while (0) |
| 7168 | #endif |
| 7169 | |
Mike Travis | 7c16ec5 | 2008-04-04 18:11:11 -0700 | [diff] [blame] | 7170 | #define SD_INIT(sd, type) sd_init_##type(sd) |
Ingo Molnar | a5d8c34 | 2008-10-09 11:35:51 +0200 | [diff] [blame] | 7171 | |
Mike Travis | 7c16ec5 | 2008-04-04 18:11:11 -0700 | [diff] [blame] | 7172 | #define SD_INIT_FUNC(type) \ |
| 7173 | static noinline void sd_init_##type(struct sched_domain *sd) \ |
| 7174 | { \ |
| 7175 | memset(sd, 0, sizeof(*sd)); \ |
| 7176 | *sd = SD_##type##_INIT; \ |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 7177 | sd->level = SD_LV_##type; \ |
Ingo Molnar | a5d8c34 | 2008-10-09 11:35:51 +0200 | [diff] [blame] | 7178 | SD_INIT_NAME(sd, type); \ |
Mike Travis | 7c16ec5 | 2008-04-04 18:11:11 -0700 | [diff] [blame] | 7179 | } |
| 7180 | |
| 7181 | SD_INIT_FUNC(CPU) |
| 7182 | #ifdef CONFIG_NUMA |
| 7183 | SD_INIT_FUNC(ALLNODES) |
| 7184 | SD_INIT_FUNC(NODE) |
| 7185 | #endif |
| 7186 | #ifdef CONFIG_SCHED_SMT |
| 7187 | SD_INIT_FUNC(SIBLING) |
| 7188 | #endif |
| 7189 | #ifdef CONFIG_SCHED_MC |
| 7190 | SD_INIT_FUNC(MC) |
| 7191 | #endif |
Heiko Carstens | 01a0854 | 2010-08-31 10:28:16 +0200 | [diff] [blame] | 7192 | #ifdef CONFIG_SCHED_BOOK |
| 7193 | SD_INIT_FUNC(BOOK) |
| 7194 | #endif |
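/*
 * Editorial illustration of the macro machinery above: SD_INIT(sd, MC)
 * expands to sd_init_MC(sd), which zeroes *sd, copies in the SD_MC_INIT
 * template from the topology headers, sets sd->level = SD_LV_MC and, under
 * CONFIG_SCHED_DEBUG, names the domain "MC".
 */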
Mike Travis | 7c16ec5 | 2008-04-04 18:11:11 -0700 | [diff] [blame] | 7195 | |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 7196 | static int default_relax_domain_level = -1; |
| 7197 | |
| 7198 | static int __init setup_relax_domain_level(char *str) |
| 7199 | { |
Li Zefan | 30e0e17 | 2008-05-13 10:27:17 +0800 | [diff] [blame] | 7200 | unsigned long val; |
| 7201 | |
| 7202 | val = simple_strtoul(str, NULL, 0); |
| 7203 | if (val < SD_LV_MAX) |
| 7204 | default_relax_domain_level = val; |
| 7205 | |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 7206 | return 1; |
| 7207 | } |
| 7208 | __setup("relax_domain_level=", setup_relax_domain_level); |
| 7209 | |
| 7210 | static void set_domain_attribute(struct sched_domain *sd, |
| 7211 | struct sched_domain_attr *attr) |
| 7212 | { |
| 7213 | int request; |
| 7214 | |
| 7215 | if (!attr || attr->relax_domain_level < 0) { |
| 7216 | if (default_relax_domain_level < 0) |
| 7217 | return; |
| 7218 | else |
| 7219 | request = default_relax_domain_level; |
| 7220 | } else |
| 7221 | request = attr->relax_domain_level; |
| 7222 | if (request < sd->level) { |
| 7223 | /* turn off idle balance on this domain */ |
Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 7224 | sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 7225 | } else { |
| 7226 | /* turn on idle balance on this domain */ |
Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 7227 | sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 7228 | } |
| 7229 | } |
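/*
 * Usage note (editorial): the boot parameter parsed by
 * setup_relax_domain_level() -- or a cpuset's relax_domain_level attribute --
 * supplies the request compared against sd->level above. For example, a
 * request of 1 keeps SD_BALANCE_WAKE and SD_BALANCE_NEWIDLE enabled on
 * domains at level <= 1 and clears them on every higher level, confining
 * wake/newly-idle balancing to the lowest domain(s).
 */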
| 7230 | |
Andreas Herrmann | 2109b99 | 2009-08-18 12:53:00 +0200 | [diff] [blame] | 7231 | static void __free_domain_allocs(struct s_data *d, enum s_alloc what, |
| 7232 | const struct cpumask *cpu_map) |
| 7233 | { |
| 7234 | switch (what) { |
| 7235 | case sa_sched_groups: |
| 7236 | free_sched_groups(cpu_map, d->tmpmask); /* fall through */ |
| 7237 | d->sched_group_nodes = NULL; |
| 7238 | case sa_rootdomain: |
| 7239 | free_rootdomain(d->rd); /* fall through */ |
| 7240 | case sa_tmpmask: |
| 7241 | free_cpumask_var(d->tmpmask); /* fall through */ |
| 7242 | case sa_send_covered: |
| 7243 | free_cpumask_var(d->send_covered); /* fall through */ |
Heiko Carstens | 01a0854 | 2010-08-31 10:28:16 +0200 | [diff] [blame] | 7244 | case sa_this_book_map: |
| 7245 | free_cpumask_var(d->this_book_map); /* fall through */ |
Andreas Herrmann | 2109b99 | 2009-08-18 12:53:00 +0200 | [diff] [blame] | 7246 | case sa_this_core_map: |
| 7247 | free_cpumask_var(d->this_core_map); /* fall through */ |
| 7248 | case sa_this_sibling_map: |
| 7249 | free_cpumask_var(d->this_sibling_map); /* fall through */ |
| 7250 | case sa_nodemask: |
| 7251 | free_cpumask_var(d->nodemask); /* fall through */ |
| 7252 | case sa_sched_group_nodes: |
| 7253 | #ifdef CONFIG_NUMA |
| 7254 | kfree(d->sched_group_nodes); /* fall through */ |
| 7255 | case sa_notcovered: |
| 7256 | free_cpumask_var(d->notcovered); /* fall through */ |
| 7257 | case sa_covered: |
| 7258 | free_cpumask_var(d->covered); /* fall through */ |
| 7259 | case sa_domainspan: |
| 7260 | free_cpumask_var(d->domainspan); /* fall through */ |
| 7261 | #endif |
| 7262 | case sa_none: |
| 7263 | break; |
| 7264 | } |
| 7265 | } |
| 7266 | |
| 7267 | static enum s_alloc __visit_domain_allocation_hell(struct s_data *d, |
| 7268 | const struct cpumask *cpu_map) |
| 7269 | { |
| 7270 | #ifdef CONFIG_NUMA |
| 7271 | if (!alloc_cpumask_var(&d->domainspan, GFP_KERNEL)) |
| 7272 | return sa_none; |
| 7273 | if (!alloc_cpumask_var(&d->covered, GFP_KERNEL)) |
| 7274 | return sa_domainspan; |
| 7275 | if (!alloc_cpumask_var(&d->notcovered, GFP_KERNEL)) |
| 7276 | return sa_covered; |
| 7277 | /* Allocate the per-node list of sched groups */ |
| 7278 | d->sched_group_nodes = kcalloc(nr_node_ids, |
| 7279 | sizeof(struct sched_group *), GFP_KERNEL); |
| 7280 | if (!d->sched_group_nodes) { |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 7281 | printk(KERN_WARNING "Can not alloc sched group node list\n"); |
Andreas Herrmann | 2109b99 | 2009-08-18 12:53:00 +0200 | [diff] [blame] | 7282 | return sa_notcovered; |
| 7283 | } |
| 7284 | sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d->sched_group_nodes; |
| 7285 | #endif |
| 7286 | if (!alloc_cpumask_var(&d->nodemask, GFP_KERNEL)) |
| 7287 | return sa_sched_group_nodes; |
| 7288 | if (!alloc_cpumask_var(&d->this_sibling_map, GFP_KERNEL)) |
| 7289 | return sa_nodemask; |
| 7290 | if (!alloc_cpumask_var(&d->this_core_map, GFP_KERNEL)) |
| 7291 | return sa_this_sibling_map; |
Heiko Carstens | 01a0854 | 2010-08-31 10:28:16 +0200 | [diff] [blame] | 7292 | if (!alloc_cpumask_var(&d->this_book_map, GFP_KERNEL)) |
Andreas Herrmann | 2109b99 | 2009-08-18 12:53:00 +0200 | [diff] [blame] | 7293 | return sa_this_core_map; |
Heiko Carstens | 01a0854 | 2010-08-31 10:28:16 +0200 | [diff] [blame] | 7294 | if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL)) |
| 7295 | return sa_this_book_map; |
Andreas Herrmann | 2109b99 | 2009-08-18 12:53:00 +0200 | [diff] [blame] | 7296 | if (!alloc_cpumask_var(&d->tmpmask, GFP_KERNEL)) |
| 7297 | return sa_send_covered; |
| 7298 | d->rd = alloc_rootdomain(); |
| 7299 | if (!d->rd) { |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 7300 | printk(KERN_WARNING "Cannot alloc root domain\n"); |
Andreas Herrmann | 2109b99 | 2009-08-18 12:53:00 +0200 | [diff] [blame] | 7301 | return sa_tmpmask; |
| 7302 | } |
| 7303 | return sa_rootdomain; |
| 7304 | } |
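/*
 * Editorial illustration of the staged allocation/cleanup protocol: the
 * function above returns the last stage it completed, and on failure the
 * caller passes that value to __free_domain_allocs(), whose switch falls
 * through downwards. For example, if alloc_cpumask_var(&d->tmpmask, ...)
 * fails, sa_send_covered is returned and the cleanup frees send_covered,
 * this_book_map, this_core_map, this_sibling_map, nodemask and the NUMA
 * allocations, but not the never-allocated tmpmask or root domain.
 */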
| 7305 | |
Andreas Herrmann | 7f4588f | 2009-08-18 12:54:06 +0200 | [diff] [blame] | 7306 | static struct sched_domain *__build_numa_sched_domains(struct s_data *d, |
| 7307 | const struct cpumask *cpu_map, struct sched_domain_attr *attr, int i) |
| 7308 | { |
| 7309 | struct sched_domain *sd = NULL; |
| 7310 | #ifdef CONFIG_NUMA |
| 7311 | struct sched_domain *parent; |
| 7312 | |
| 7313 | d->sd_allnodes = 0; |
| 7314 | if (cpumask_weight(cpu_map) > |
| 7315 | SD_NODES_PER_DOMAIN * cpumask_weight(d->nodemask)) { |
| 7316 | sd = &per_cpu(allnodes_domains, i).sd; |
| 7317 | SD_INIT(sd, ALLNODES); |
| 7318 | set_domain_attribute(sd, attr); |
| 7319 | cpumask_copy(sched_domain_span(sd), cpu_map); |
| 7320 | cpu_to_allnodes_group(i, cpu_map, &sd->groups, d->tmpmask); |
| 7321 | d->sd_allnodes = 1; |
| 7322 | } |
| 7323 | parent = sd; |
| 7324 | |
| 7325 | sd = &per_cpu(node_domains, i).sd; |
| 7326 | SD_INIT(sd, NODE); |
| 7327 | set_domain_attribute(sd, attr); |
| 7328 | sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd)); |
| 7329 | sd->parent = parent; |
| 7330 | if (parent) |
| 7331 | parent->child = sd; |
| 7332 | cpumask_and(sched_domain_span(sd), sched_domain_span(sd), cpu_map); |
| 7333 | #endif |
| 7334 | return sd; |
| 7335 | } |
| 7336 | |
Andreas Herrmann | 87cce66 | 2009-08-18 12:54:55 +0200 | [diff] [blame] | 7337 | static struct sched_domain *__build_cpu_sched_domain(struct s_data *d, |
| 7338 | const struct cpumask *cpu_map, struct sched_domain_attr *attr, |
| 7339 | struct sched_domain *parent, int i) |
| 7340 | { |
| 7341 | struct sched_domain *sd; |
| 7342 | sd = &per_cpu(phys_domains, i).sd; |
| 7343 | SD_INIT(sd, CPU); |
| 7344 | set_domain_attribute(sd, attr); |
| 7345 | cpumask_copy(sched_domain_span(sd), d->nodemask); |
| 7346 | sd->parent = parent; |
| 7347 | if (parent) |
| 7348 | parent->child = sd; |
| 7349 | cpu_to_phys_group(i, cpu_map, &sd->groups, d->tmpmask); |
| 7350 | return sd; |
| 7351 | } |
| 7352 | |
Heiko Carstens | 01a0854 | 2010-08-31 10:28:16 +0200 | [diff] [blame] | 7353 | static struct sched_domain *__build_book_sched_domain(struct s_data *d, |
| 7354 | const struct cpumask *cpu_map, struct sched_domain_attr *attr, |
| 7355 | struct sched_domain *parent, int i) |
| 7356 | { |
| 7357 | struct sched_domain *sd = parent; |
| 7358 | #ifdef CONFIG_SCHED_BOOK |
| 7359 | sd = &per_cpu(book_domains, i).sd; |
| 7360 | SD_INIT(sd, BOOK); |
| 7361 | set_domain_attribute(sd, attr); |
| 7362 | cpumask_and(sched_domain_span(sd), cpu_map, cpu_book_mask(i)); |
| 7363 | sd->parent = parent; |
| 7364 | parent->child = sd; |
| 7365 | cpu_to_book_group(i, cpu_map, &sd->groups, d->tmpmask); |
| 7366 | #endif |
| 7367 | return sd; |
| 7368 | } |
| 7369 | |
Andreas Herrmann | 410c408 | 2009-08-18 12:56:14 +0200 | [diff] [blame] | 7370 | static struct sched_domain *__build_mc_sched_domain(struct s_data *d, |
| 7371 | const struct cpumask *cpu_map, struct sched_domain_attr *attr, |
| 7372 | struct sched_domain *parent, int i) |
| 7373 | { |
| 7374 | struct sched_domain *sd = parent; |
| 7375 | #ifdef CONFIG_SCHED_MC |
| 7376 | sd = &per_cpu(core_domains, i).sd; |
| 7377 | SD_INIT(sd, MC); |
| 7378 | set_domain_attribute(sd, attr); |
| 7379 | cpumask_and(sched_domain_span(sd), cpu_map, cpu_coregroup_mask(i)); |
| 7380 | sd->parent = parent; |
| 7381 | parent->child = sd; |
| 7382 | cpu_to_core_group(i, cpu_map, &sd->groups, d->tmpmask); |
| 7383 | #endif |
| 7384 | return sd; |
| 7385 | } |
| 7386 | |
Andreas Herrmann | d817353 | 2009-08-18 12:57:03 +0200 | [diff] [blame] | 7387 | static struct sched_domain *__build_smt_sched_domain(struct s_data *d, |
| 7388 | const struct cpumask *cpu_map, struct sched_domain_attr *attr, |
| 7389 | struct sched_domain *parent, int i) |
| 7390 | { |
| 7391 | struct sched_domain *sd = parent; |
| 7392 | #ifdef CONFIG_SCHED_SMT |
| 7393 | sd = &per_cpu(cpu_domains, i).sd; |
| 7394 | SD_INIT(sd, SIBLING); |
| 7395 | set_domain_attribute(sd, attr); |
| 7396 | cpumask_and(sched_domain_span(sd), cpu_map, topology_thread_cpumask(i)); |
| 7397 | sd->parent = parent; |
| 7398 | parent->child = sd; |
| 7399 | cpu_to_cpu_group(i, cpu_map, &sd->groups, d->tmpmask); |
| 7400 | #endif |
| 7401 | return sd; |
| 7402 | } |
| 7403 | |
Andreas Herrmann | 0e8e85c | 2009-08-18 12:57:51 +0200 | [diff] [blame] | 7404 | static void build_sched_groups(struct s_data *d, enum sched_domain_level l, |
| 7405 | const struct cpumask *cpu_map, int cpu) |
| 7406 | { |
| 7407 | switch (l) { |
| 7408 | #ifdef CONFIG_SCHED_SMT |
| 7409 | case SD_LV_SIBLING: /* set up CPU (sibling) groups */ |
| 7410 | cpumask_and(d->this_sibling_map, cpu_map, |
| 7411 | topology_thread_cpumask(cpu)); |
| 7412 | if (cpu == cpumask_first(d->this_sibling_map)) |
| 7413 | init_sched_build_groups(d->this_sibling_map, cpu_map, |
| 7414 | &cpu_to_cpu_group, |
| 7415 | d->send_covered, d->tmpmask); |
| 7416 | break; |
| 7417 | #endif |
Andreas Herrmann | a2af04c | 2009-08-18 12:58:38 +0200 | [diff] [blame] | 7418 | #ifdef CONFIG_SCHED_MC |
| 7419 | case SD_LV_MC: /* set up multi-core groups */ |
| 7420 | cpumask_and(d->this_core_map, cpu_map, cpu_coregroup_mask(cpu)); |
| 7421 | if (cpu == cpumask_first(d->this_core_map)) |
| 7422 | init_sched_build_groups(d->this_core_map, cpu_map, |
| 7423 | &cpu_to_core_group, |
| 7424 | d->send_covered, d->tmpmask); |
| 7425 | break; |
| 7426 | #endif |
Heiko Carstens | 01a0854 | 2010-08-31 10:28:16 +0200 | [diff] [blame] | 7427 | #ifdef CONFIG_SCHED_BOOK |
| 7428 | case SD_LV_BOOK: /* set up book groups */ |
| 7429 | cpumask_and(d->this_book_map, cpu_map, cpu_book_mask(cpu)); |
| 7430 | if (cpu == cpumask_first(d->this_book_map)) |
| 7431 | init_sched_build_groups(d->this_book_map, cpu_map, |
| 7432 | &cpu_to_book_group, |
| 7433 | d->send_covered, d->tmpmask); |
| 7434 | break; |
| 7435 | #endif |
Andreas Herrmann | 8654809 | 2009-08-18 12:59:28 +0200 | [diff] [blame] | 7436 | case SD_LV_CPU: /* set up physical groups */ |
| 7437 | cpumask_and(d->nodemask, cpumask_of_node(cpu), cpu_map); |
| 7438 | if (!cpumask_empty(d->nodemask)) |
| 7439 | init_sched_build_groups(d->nodemask, cpu_map, |
| 7440 | &cpu_to_phys_group, |
| 7441 | d->send_covered, d->tmpmask); |
| 7442 | break; |
Andreas Herrmann | de616e3 | 2009-08-18 13:00:13 +0200 | [diff] [blame] | 7443 | #ifdef CONFIG_NUMA |
| 7444 | case SD_LV_ALLNODES: |
| 7445 | init_sched_build_groups(cpu_map, cpu_map, &cpu_to_allnodes_group, |
| 7446 | d->send_covered, d->tmpmask); |
| 7447 | break; |
| 7448 | #endif |
Andreas Herrmann | 0e8e85c | 2009-08-18 12:57:51 +0200 | [diff] [blame] | 7449 | default: |
| 7450 | break; |
| 7451 | } |
| 7452 | } |
| 7453 | |
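/*
 * Note the idiom above: for the SMT, MC and BOOK levels only the first cpu
 * of each span calls init_sched_build_groups(), so every group is built
 * exactly once even though build_sched_groups() is invoked for every cpu in
 * the map.
 */
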
Mike Travis | 7c16ec5 | 2008-04-04 18:11:11 -0700 | [diff] [blame] | 7454 | /* |
Dinakar Guniguntala | 1a20ff2 | 2005-06-25 14:57:33 -0700 | [diff] [blame] | 7455 | * Build sched domains for a given set of cpus and attach the sched domains |
| 7456 | * to the individual cpus |
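 *
 * Roughly, this proceeds in phases: allocate the temporary cpumasks and the
 * root domain, build each cpu's domain hierarchy top-down (NUMA, CPU, BOOK,
 * MC, SMT), build the sched groups for every level, initialise group cpu
 * power, and finally attach the resulting domains to the cpus.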
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7457 | */ |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 7458 | static int __build_sched_domains(const struct cpumask *cpu_map, |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 7459 | struct sched_domain_attr *attr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7460 | { |
Andreas Herrmann | 2109b99 | 2009-08-18 12:53:00 +0200 | [diff] [blame] | 7461 | enum s_alloc alloc_state = sa_none; |
Andreas Herrmann | 49a02c5 | 2009-08-18 12:51:52 +0200 | [diff] [blame] | 7462 | struct s_data d; |
Andreas Herrmann | 294b0c9 | 2009-08-18 13:02:29 +0200 | [diff] [blame] | 7463 | struct sched_domain *sd; |
Andreas Herrmann | 2109b99 | 2009-08-18 12:53:00 +0200 | [diff] [blame] | 7464 | int i; |
John Hawkes | d1b5513 | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 7465 | #ifdef CONFIG_NUMA |
Andreas Herrmann | 49a02c5 | 2009-08-18 12:51:52 +0200 | [diff] [blame] | 7466 | d.sd_allnodes = 0; |
Rusty Russell | 3404c8d | 2008-11-25 02:35:03 +1030 | [diff] [blame] | 7467 | #endif |
| 7468 | |
Andreas Herrmann | 2109b99 | 2009-08-18 12:53:00 +0200 | [diff] [blame] | 7469 | alloc_state = __visit_domain_allocation_hell(&d, cpu_map); |
| 7470 | if (alloc_state != sa_rootdomain) |
| 7471 | goto error; |
| 7472 | alloc_state = sa_sched_groups; |
Mike Travis | 7c16ec5 | 2008-04-04 18:11:11 -0700 | [diff] [blame] | 7473 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7474 | /* |
Dinakar Guniguntala | 1a20ff2 | 2005-06-25 14:57:33 -0700 | [diff] [blame] | 7475 | * Set up domains for cpus specified by the cpu_map. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7476 | */ |
Rusty Russell | abcd083 | 2008-11-25 02:35:02 +1030 | [diff] [blame] | 7477 | for_each_cpu(i, cpu_map) { |
Andreas Herrmann | 49a02c5 | 2009-08-18 12:51:52 +0200 | [diff] [blame] | 7478 | cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)), |
| 7479 | cpu_map); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7480 | |
Andreas Herrmann | 7f4588f | 2009-08-18 12:54:06 +0200 | [diff] [blame] | 7481 | sd = __build_numa_sched_domains(&d, cpu_map, attr, i); |
Andreas Herrmann | 87cce66 | 2009-08-18 12:54:55 +0200 | [diff] [blame] | 7482 | sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i); |
Heiko Carstens | 01a0854 | 2010-08-31 10:28:16 +0200 | [diff] [blame] | 7483 | sd = __build_book_sched_domain(&d, cpu_map, attr, sd, i); |
Andreas Herrmann | 410c408 | 2009-08-18 12:56:14 +0200 | [diff] [blame] | 7484 | sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i); |
Andreas Herrmann | d817353 | 2009-08-18 12:57:03 +0200 | [diff] [blame] | 7485 | sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7486 | } |
| 7487 | |
Rusty Russell | abcd083 | 2008-11-25 02:35:02 +1030 | [diff] [blame] | 7488 | for_each_cpu(i, cpu_map) { |
Andreas Herrmann | 0e8e85c | 2009-08-18 12:57:51 +0200 | [diff] [blame] | 7489 | build_sched_groups(&d, SD_LV_SIBLING, cpu_map, i); |
Heiko Carstens | 01a0854 | 2010-08-31 10:28:16 +0200 | [diff] [blame] | 7490 | build_sched_groups(&d, SD_LV_BOOK, cpu_map, i); |
Andreas Herrmann | a2af04c | 2009-08-18 12:58:38 +0200 | [diff] [blame] | 7491 | build_sched_groups(&d, SD_LV_MC, cpu_map, i); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7492 | } |
Siddha, Suresh B | 1e9f28f | 2006-03-27 01:15:22 -0800 | [diff] [blame] | 7493 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7494 | /* Set up physical groups */ |
Andreas Herrmann | 8654809 | 2009-08-18 12:59:28 +0200 | [diff] [blame] | 7495 | for (i = 0; i < nr_node_ids; i++) |
| 7496 | build_sched_groups(&d, SD_LV_CPU, cpu_map, i); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7497 | |
| 7498 | #ifdef CONFIG_NUMA |
| 7499 | /* Set up node groups */ |
Andreas Herrmann | de616e3 | 2009-08-18 13:00:13 +0200 | [diff] [blame] | 7500 | if (d.sd_allnodes) |
| 7501 | build_sched_groups(&d, SD_LV_ALLNODES, cpu_map, 0); |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 7502 | |
Andreas Herrmann | 0601a88 | 2009-08-18 13:01:11 +0200 | [diff] [blame] | 7503 | for (i = 0; i < nr_node_ids; i++) |
| 7504 | if (build_numa_sched_groups(&d, cpu_map, i)) |
Srivatsa Vaddagiri | 51888ca | 2006-06-27 02:54:38 -0700 | [diff] [blame] | 7505 | goto error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7506 | #endif |
| 7507 | |
| 7508 | /* Calculate CPU power for physical packages and nodes */ |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7509 | #ifdef CONFIG_SCHED_SMT |
Rusty Russell | abcd083 | 2008-11-25 02:35:02 +1030 | [diff] [blame] | 7510 | for_each_cpu(i, cpu_map) { |
Andreas Herrmann | 294b0c9 | 2009-08-18 13:02:29 +0200 | [diff] [blame] | 7511 | sd = &per_cpu(cpu_domains, i).sd; |
Siddha, Suresh B | 89c4710 | 2006-10-03 01:14:09 -0700 | [diff] [blame] | 7512 | init_sched_groups_power(i, sd); |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7513 | } |
| 7514 | #endif |
| 7515 | #ifdef CONFIG_SCHED_MC |
Rusty Russell | abcd083 | 2008-11-25 02:35:02 +1030 | [diff] [blame] | 7516 | for_each_cpu(i, cpu_map) { |
Andreas Herrmann | 294b0c9 | 2009-08-18 13:02:29 +0200 | [diff] [blame] | 7517 | sd = &per_cpu(core_domains, i).sd; |
Siddha, Suresh B | 89c4710 | 2006-10-03 01:14:09 -0700 | [diff] [blame] | 7518 | init_sched_groups_power(i, sd); |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7519 | } |
| 7520 | #endif |
Heiko Carstens | 01a0854 | 2010-08-31 10:28:16 +0200 | [diff] [blame] | 7521 | #ifdef CONFIG_SCHED_BOOK |
| 7522 | for_each_cpu(i, cpu_map) { |
| 7523 | sd = &per_cpu(book_domains, i).sd; |
| 7524 | init_sched_groups_power(i, sd); |
| 7525 | } |
| 7526 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7527 | |
Rusty Russell | abcd083 | 2008-11-25 02:35:02 +1030 | [diff] [blame] | 7528 | for_each_cpu(i, cpu_map) { |
Andreas Herrmann | 294b0c9 | 2009-08-18 13:02:29 +0200 | [diff] [blame] | 7529 | sd = &per_cpu(phys_domains, i).sd; |
Siddha, Suresh B | 89c4710 | 2006-10-03 01:14:09 -0700 | [diff] [blame] | 7530 | init_sched_groups_power(i, sd); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7531 | } |
| 7532 | |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 7533 | #ifdef CONFIG_NUMA |
Mike Travis | 076ac2a | 2008-05-12 21:21:12 +0200 | [diff] [blame] | 7534 | for (i = 0; i < nr_node_ids; i++) |
Andreas Herrmann | 49a02c5 | 2009-08-18 12:51:52 +0200 | [diff] [blame] | 7535 | init_numa_sched_groups_power(d.sched_group_nodes[i]); |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 7536 | |
Andreas Herrmann | 49a02c5 | 2009-08-18 12:51:52 +0200 | [diff] [blame] | 7537 | if (d.sd_allnodes) { |
Siddha, Suresh B | 6711cab | 2006-12-10 02:20:07 -0800 | [diff] [blame] | 7538 | struct sched_group *sg; |
Siddha, Suresh B | f712c0c7 | 2006-07-30 03:02:59 -0700 | [diff] [blame] | 7539 | |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 7540 | cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg, |
Andreas Herrmann | 49a02c5 | 2009-08-18 12:51:52 +0200 | [diff] [blame] | 7541 | d.tmpmask); |
Siddha, Suresh B | f712c0c7 | 2006-07-30 03:02:59 -0700 | [diff] [blame] | 7542 | init_numa_sched_groups_power(sg); |
| 7543 | } |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 7544 | #endif |
| 7545 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7546 | /* Attach the domains */ |
Rusty Russell | abcd083 | 2008-11-25 02:35:02 +1030 | [diff] [blame] | 7547 | for_each_cpu(i, cpu_map) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7548 | #ifdef CONFIG_SCHED_SMT |
Rusty Russell | 6c99e9a | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 7549 | sd = &per_cpu(cpu_domains, i).sd; |
Siddha, Suresh B | 1e9f28f | 2006-03-27 01:15:22 -0800 | [diff] [blame] | 7550 | #elif defined(CONFIG_SCHED_MC) |
Rusty Russell | 6c99e9a | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 7551 | sd = &per_cpu(core_domains, i).sd; |
Heiko Carstens | 01a0854 | 2010-08-31 10:28:16 +0200 | [diff] [blame] | 7552 | #elif defined(CONFIG_SCHED_BOOK) |
| 7553 | sd = &per_cpu(book_domains, i).sd; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7554 | #else |
Rusty Russell | 6c99e9a | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 7555 | sd = &per_cpu(phys_domains, i).sd; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7556 | #endif |
Andreas Herrmann | 49a02c5 | 2009-08-18 12:51:52 +0200 | [diff] [blame] | 7557 | cpu_attach_domain(sd, d.rd, i); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7558 | } |
Srivatsa Vaddagiri | 51888ca | 2006-06-27 02:54:38 -0700 | [diff] [blame] | 7559 | |
Andreas Herrmann | 2109b99 | 2009-08-18 12:53:00 +0200 | [diff] [blame] | 7560 | d.sched_group_nodes = NULL; /* don't free this, we still need it */ |
| 7561 | __free_domain_allocs(&d, sa_tmpmask, cpu_map); |
| 7562 | return 0; |
Rusty Russell | 3404c8d | 2008-11-25 02:35:03 +1030 | [diff] [blame] | 7563 | |
Srivatsa Vaddagiri | 51888ca | 2006-06-27 02:54:38 -0700 | [diff] [blame] | 7564 | error: |
Andreas Herrmann | 2109b99 | 2009-08-18 12:53:00 +0200 | [diff] [blame] | 7565 | __free_domain_allocs(&d, alloc_state, cpu_map); |
| 7566 | return -ENOMEM; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7567 | } |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7568 | |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 7569 | static int build_sched_domains(const struct cpumask *cpu_map) |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 7570 | { |
| 7571 | return __build_sched_domains(cpu_map, NULL); |
| 7572 | } |
| 7573 | |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7574 | static cpumask_var_t *doms_cur; /* current sched domains */ |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7575 | static int ndoms_cur; /* number of sched domains in 'doms_cur' */ |
Ingo Molnar | 4285f594 | 2008-05-16 17:47:14 +0200 | [diff] [blame] | 7576 | static struct sched_domain_attr *dattr_cur; |
| 7577 | /* attributes of custom domains in 'doms_cur' */ |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7578 | |
| 7579 | /* |
| 7580 | * Special case: If a kmalloc of a doms_cur partition (array of |
Rusty Russell | 4212823 | 2008-11-25 02:35:12 +1030 | [diff] [blame] | 7581 | * cpumask) fails, then fall back to a single sched domain, |
| 7582 | * as determined by the single cpumask fallback_doms. |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7583 | */ |
Rusty Russell | 4212823 | 2008-11-25 02:35:12 +1030 | [diff] [blame] | 7584 | static cpumask_var_t fallback_doms; |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7585 | |
Heiko Carstens | ee79d1b | 2008-12-09 18:49:50 +0100 | [diff] [blame] | 7586 | /* |
| 7587 | * arch_update_cpu_topology lets virtualized architectures update the |
| 7588 | * cpu core maps. It is supposed to return 1 if the topology changed |
| 7589 | * or 0 if it stayed the same. |
| 7590 | */ |
| 7591 | int __attribute__((weak)) arch_update_cpu_topology(void) |
Heiko Carstens | 22e52b0 | 2008-03-12 18:31:59 +0100 | [diff] [blame] | 7592 | { |
Heiko Carstens | ee79d1b | 2008-12-09 18:49:50 +0100 | [diff] [blame] | 7593 | return 0; |
Heiko Carstens | 22e52b0 | 2008-03-12 18:31:59 +0100 | [diff] [blame] | 7594 | } |
| 7595 | |
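/*
 * Illustrative only: an architecture whose cpu topology can change at run
 * time (s390, for instance) overrides the weak function above with its own
 * definition, conceptually along these lines (hypothetical arch code, not
 * part of this file):
 *
 *	int arch_update_cpu_topology(void)
 *	{
 *		return topology_changed();	(1 if changed, 0 otherwise)
 *	}
 */
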
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7596 | cpumask_var_t *alloc_sched_domains(unsigned int ndoms) |
| 7597 | { |
| 7598 | int i; |
| 7599 | cpumask_var_t *doms; |
| 7600 | |
| 7601 | doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL); |
| 7602 | if (!doms) |
| 7603 | return NULL; |
| 7604 | for (i = 0; i < ndoms; i++) { |
| 7605 | if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) { |
| 7606 | free_sched_domains(doms, i); |
| 7607 | return NULL; |
| 7608 | } |
| 7609 | } |
| 7610 | return doms; |
| 7611 | } |
| 7612 | |
| 7613 | void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms) |
| 7614 | { |
| 7615 | unsigned int i; |
| 7616 | for (i = 0; i < ndoms; i++) |
| 7617 | free_cpumask_var(doms[i]); |
| 7618 | kfree(doms); |
| 7619 | } |
| 7620 | |
Dinakar Guniguntala | 1a20ff2 | 2005-06-25 14:57:33 -0700 | [diff] [blame] | 7621 | /* |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 7622 | * Set up scheduler domains and groups. Callers must hold the hotplug lock. |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7623 | * For now this just excludes isolated cpus, but could be used to |
| 7624 | * exclude other special cases in the future. |
Dinakar Guniguntala | 1a20ff2 | 2005-06-25 14:57:33 -0700 | [diff] [blame] | 7625 | */ |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 7626 | static int arch_init_sched_domains(const struct cpumask *cpu_map) |
Dinakar Guniguntala | 1a20ff2 | 2005-06-25 14:57:33 -0700 | [diff] [blame] | 7627 | { |
Milton Miller | 7378547 | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 7628 | int err; |
| 7629 | |
Heiko Carstens | 22e52b0 | 2008-03-12 18:31:59 +0100 | [diff] [blame] | 7630 | arch_update_cpu_topology(); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7631 | ndoms_cur = 1; |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7632 | doms_cur = alloc_sched_domains(ndoms_cur); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7633 | if (!doms_cur) |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7634 | doms_cur = &fallback_doms; |
| 7635 | cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map); |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 7636 | dattr_cur = NULL; |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7637 | err = build_sched_domains(doms_cur[0]); |
Milton Miller | 6382bc9 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 7638 | register_sched_domain_sysctl(); |
Milton Miller | 7378547 | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 7639 | |
| 7640 | return err; |
Dinakar Guniguntala | 1a20ff2 | 2005-06-25 14:57:33 -0700 | [diff] [blame] | 7641 | } |
| 7642 | |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 7643 | static void arch_destroy_sched_domains(const struct cpumask *cpu_map, |
| 7644 | struct cpumask *tmpmask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7645 | { |
Mike Travis | 7c16ec5 | 2008-04-04 18:11:11 -0700 | [diff] [blame] | 7646 | free_sched_groups(cpu_map, tmpmask); |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 7647 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7648 | |
Dinakar Guniguntala | 1a20ff2 | 2005-06-25 14:57:33 -0700 | [diff] [blame] | 7649 | /* |
| 7650 | * Detach sched domains from a group of cpus specified in cpu_map |
| 7651 | * These cpus will now be attached to the NULL domain |
| 7652 | */ |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 7653 | static void detach_destroy_domains(const struct cpumask *cpu_map) |
Dinakar Guniguntala | 1a20ff2 | 2005-06-25 14:57:33 -0700 | [diff] [blame] | 7654 | { |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 7655 | /* Safe to use static storage here because the hotplug lock is held. */ |
| 7656 | static DECLARE_BITMAP(tmpmask, CONFIG_NR_CPUS); |
Dinakar Guniguntala | 1a20ff2 | 2005-06-25 14:57:33 -0700 | [diff] [blame] | 7657 | int i; |
| 7658 | |
Rusty Russell | abcd083 | 2008-11-25 02:35:02 +1030 | [diff] [blame] | 7659 | for_each_cpu(i, cpu_map) |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 7660 | cpu_attach_domain(NULL, &def_root_domain, i); |
Dinakar Guniguntala | 1a20ff2 | 2005-06-25 14:57:33 -0700 | [diff] [blame] | 7661 | synchronize_sched(); |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 7662 | arch_destroy_sched_domains(cpu_map, to_cpumask(tmpmask)); |
Dinakar Guniguntala | 1a20ff2 | 2005-06-25 14:57:33 -0700 | [diff] [blame] | 7663 | } |
| 7664 | |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 7665 | /* handle null as "default" */ |
| 7666 | static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, |
| 7667 | struct sched_domain_attr *new, int idx_new) |
| 7668 | { |
| 7669 | struct sched_domain_attr tmp; |
| 7670 | |
| 7671 | /* fast path */ |
| 7672 | if (!new && !cur) |
| 7673 | return 1; |
| 7674 | |
| 7675 | tmp = SD_ATTR_INIT; |
| 7676 | return !memcmp(cur ? (cur + idx_cur) : &tmp, |
| 7677 | new ? (new + idx_new) : &tmp, |
| 7678 | sizeof(struct sched_domain_attr)); |
| 7679 | } |
| 7680 | |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7681 | /* |
| 7682 | * Partition sched domains as specified by the 'ndoms_new' |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 7683 | * cpumasks in the array doms_new[] of cpumasks. This compares |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7684 | * doms_new[] to the current sched domain partitioning, doms_cur[]. |
| 7685 | * It destroys each deleted domain and builds each new domain. |
| 7686 | * |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7687 | * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'. |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 7688 | * The masks don't intersect (i.e. don't overlap). We should set up one |
| 7689 | * sched domain for each mask. CPUs not in any of the cpumasks will |
| 7690 | * not be load balanced. If the same cpumask appears both in the |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7691 | * current 'doms_cur' domains and in the new 'doms_new', we can leave |
| 7692 | * it as it is. |
| 7693 | * |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7694 | * The passed in 'doms_new' should be allocated using |
| 7695 | * alloc_sched_domains. This routine takes ownership of it and will |
| 7696 | * free_sched_domains it when done with it. If the caller failed the |
| 7697 | * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1, |
| 7698 | * and partition_sched_domains() will fall back to the single partition |
| 7699 | * 'fallback_doms'; this also forces the domains to be rebuilt. |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7700 | * |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 7701 | * If doms_new == NULL it will be replaced with cpu_online_mask. |
Li Zefan | 700018e | 2008-11-18 14:02:03 +0800 | [diff] [blame] | 7702 | * ndoms_new == 0 is a special case for destroying existing domains, |
| 7703 | * and it will not create the default domain. |
Max Krasnyansky | dfb512e | 2008-08-29 13:11:41 -0700 | [diff] [blame] | 7704 | * |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7705 | * Call with hotplug lock held |
| 7706 | */ |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7707 | void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 7708 | struct sched_domain_attr *dattr_new) |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7709 | { |
Max Krasnyansky | dfb512e | 2008-08-29 13:11:41 -0700 | [diff] [blame] | 7710 | int i, j, n; |
Heiko Carstens | d65bd5e | 2008-12-09 18:49:51 +0100 | [diff] [blame] | 7711 | int new_topology; |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7712 | |
Heiko Carstens | 712555e | 2008-04-28 11:33:07 +0200 | [diff] [blame] | 7713 | mutex_lock(&sched_domains_mutex); |
Srivatsa Vaddagiri | a183561 | 2008-01-25 21:08:00 +0100 | [diff] [blame] | 7714 | |
Milton Miller | 7378547 | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 7715 | /* always unregister in case we don't destroy any domains */ |
| 7716 | unregister_sched_domain_sysctl(); |
| 7717 | |
Heiko Carstens | d65bd5e | 2008-12-09 18:49:51 +0100 | [diff] [blame] | 7718 | /* Let architecture update cpu core mappings. */ |
| 7719 | new_topology = arch_update_cpu_topology(); |
| 7720 | |
Max Krasnyansky | dfb512e | 2008-08-29 13:11:41 -0700 | [diff] [blame] | 7721 | n = doms_new ? ndoms_new : 0; |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7722 | |
| 7723 | /* Destroy deleted domains */ |
| 7724 | for (i = 0; i < ndoms_cur; i++) { |
Heiko Carstens | d65bd5e | 2008-12-09 18:49:51 +0100 | [diff] [blame] | 7725 | for (j = 0; j < n && !new_topology; j++) { |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7726 | if (cpumask_equal(doms_cur[i], doms_new[j]) |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 7727 | && dattrs_equal(dattr_cur, i, dattr_new, j)) |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7728 | goto match1; |
| 7729 | } |
| 7730 | /* no match - a current sched domain not in new doms_new[] */ |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7731 | detach_destroy_domains(doms_cur[i]); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7732 | match1: |
| 7733 | ; |
| 7734 | } |
| 7735 | |
Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 7736 | if (doms_new == NULL) { |
| 7737 | ndoms_cur = 0; |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7738 | doms_new = &fallback_doms; |
Peter Zijlstra | 6ad4c18 | 2009-11-25 13:31:39 +0100 | [diff] [blame] | 7739 | cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map); |
Li Zefan | faa2f98 | 2008-11-04 16:20:23 +0800 | [diff] [blame] | 7740 | WARN_ON_ONCE(dattr_new); |
Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 7741 | } |
| 7742 | |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7743 | /* Build new domains */ |
| 7744 | for (i = 0; i < ndoms_new; i++) { |
Heiko Carstens | d65bd5e | 2008-12-09 18:49:51 +0100 | [diff] [blame] | 7745 | for (j = 0; j < ndoms_cur && !new_topology; j++) { |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7746 | if (cpumask_equal(doms_new[i], doms_cur[j]) |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 7747 | && dattrs_equal(dattr_new, i, dattr_cur, j)) |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7748 | goto match2; |
| 7749 | } |
| 7750 | /* no match - add a new doms_new */ |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7751 | __build_sched_domains(doms_new[i], |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 7752 | dattr_new ? dattr_new + i : NULL); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7753 | match2: |
| 7754 | ; |
| 7755 | } |
| 7756 | |
| 7757 | /* Remember the new sched domains */ |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7758 | if (doms_cur != &fallback_doms) |
| 7759 | free_sched_domains(doms_cur, ndoms_cur); |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 7760 | kfree(dattr_cur); /* kfree(NULL) is safe */ |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7761 | doms_cur = doms_new; |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 7762 | dattr_cur = dattr_new; |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7763 | ndoms_cur = ndoms_new; |
Milton Miller | 7378547 | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 7764 | |
| 7765 | register_sched_domain_sysctl(); |
Srivatsa Vaddagiri | a183561 | 2008-01-25 21:08:00 +0100 | [diff] [blame] | 7766 | |
Heiko Carstens | 712555e | 2008-04-28 11:33:07 +0200 | [diff] [blame] | 7767 | mutex_unlock(&sched_domains_mutex); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7768 | } |
| 7769 | |
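/*
 * A minimal usage sketch (illustrative; the real caller is the cpuset code).
 * Assuming two disjoint cpumasks mask_a and mask_b are wanted:
 *
 *	cpumask_var_t *doms = alloc_sched_domains(2);
 *
 *	if (doms) {
 *		cpumask_copy(doms[0], mask_a);
 *		cpumask_copy(doms[1], mask_b);
 *	}
 *	get_online_cpus();
 *	partition_sched_domains(doms ? 2 : 1, doms, NULL);
 *	put_online_cpus();
 *
 * Ownership of 'doms' passes to partition_sched_domains(), which frees it
 * with free_sched_domains() on the next repartitioning.
 */
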
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7770 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) |
Li Zefan | c70f22d | 2009-01-05 19:07:50 +0800 | [diff] [blame] | 7771 | static void arch_reinit_sched_domains(void) |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7772 | { |
Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 7773 | get_online_cpus(); |
Max Krasnyansky | dfb512e | 2008-08-29 13:11:41 -0700 | [diff] [blame] | 7774 | |
| 7775 | /* Destroy domains first to force the rebuild */ |
| 7776 | partition_sched_domains(0, NULL, NULL); |
| 7777 | |
Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 7778 | rebuild_sched_domains(); |
Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 7779 | put_online_cpus(); |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7780 | } |
| 7781 | |
| 7782 | static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt) |
| 7783 | { |
Gautham R Shenoy | afb8a9b | 2008-12-18 23:26:09 +0530 | [diff] [blame] | 7784 | unsigned int level = 0; |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7785 | |
Gautham R Shenoy | afb8a9b | 2008-12-18 23:26:09 +0530 | [diff] [blame] | 7786 | if (sscanf(buf, "%u", &level) != 1) |
| 7787 | return -EINVAL; |
| 7788 | |
| 7789 | /* |
| 7790 | * level is always positive, so there is no need to check for |
| 7791 | * level < POWERSAVINGS_BALANCE_NONE (which is 0). |
| 7792 | * Open question: what happens on a 0 or 1 byte write; should |
| 7793 | * count be checked as well? |
| 7794 | */ |
| 7795 | |
| 7796 | if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS) |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7797 | return -EINVAL; |
| 7798 | |
| 7799 | if (smt) |
Gautham R Shenoy | afb8a9b | 2008-12-18 23:26:09 +0530 | [diff] [blame] | 7800 | sched_smt_power_savings = level; |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7801 | else |
Gautham R Shenoy | afb8a9b | 2008-12-18 23:26:09 +0530 | [diff] [blame] | 7802 | sched_mc_power_savings = level; |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7803 | |
Li Zefan | c70f22d | 2009-01-05 19:07:50 +0800 | [diff] [blame] | 7804 | arch_reinit_sched_domains(); |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7805 | |
Li Zefan | c70f22d | 2009-01-05 19:07:50 +0800 | [diff] [blame] | 7806 | return count; |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7807 | } |
| 7808 | |
Adrian Bunk | 6707de00 | 2007-08-12 18:08:19 +0200 | [diff] [blame] | 7809 | #ifdef CONFIG_SCHED_MC |
Andi Kleen | f718cd4 | 2008-07-29 22:33:52 -0700 | [diff] [blame] | 7810 | static ssize_t sched_mc_power_savings_show(struct sysdev_class *class, |
Andi Kleen | c9be0a3 | 2010-01-05 12:47:58 +0100 | [diff] [blame] | 7811 | struct sysdev_class_attribute *attr, |
Andi Kleen | f718cd4 | 2008-07-29 22:33:52 -0700 | [diff] [blame] | 7812 | char *page) |
Adrian Bunk | 6707de00 | 2007-08-12 18:08:19 +0200 | [diff] [blame] | 7813 | { |
| 7814 | return sprintf(page, "%u\n", sched_mc_power_savings); |
| 7815 | } |
Andi Kleen | f718cd4 | 2008-07-29 22:33:52 -0700 | [diff] [blame] | 7816 | static ssize_t sched_mc_power_savings_store(struct sysdev_class *class, |
Andi Kleen | c9be0a3 | 2010-01-05 12:47:58 +0100 | [diff] [blame] | 7817 | struct sysdev_class_attribute *attr, |
Adrian Bunk | 6707de00 | 2007-08-12 18:08:19 +0200 | [diff] [blame] | 7818 | const char *buf, size_t count) |
| 7819 | { |
| 7820 | return sched_power_savings_store(buf, count, 0); |
| 7821 | } |
Andi Kleen | f718cd4 | 2008-07-29 22:33:52 -0700 | [diff] [blame] | 7822 | static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644, |
| 7823 | sched_mc_power_savings_show, |
| 7824 | sched_mc_power_savings_store); |
Adrian Bunk | 6707de00 | 2007-08-12 18:08:19 +0200 | [diff] [blame] | 7825 | #endif |
| 7826 | |
| 7827 | #ifdef CONFIG_SCHED_SMT |
Andi Kleen | f718cd4 | 2008-07-29 22:33:52 -0700 | [diff] [blame] | 7828 | static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev, |
Andi Kleen | c9be0a3 | 2010-01-05 12:47:58 +0100 | [diff] [blame] | 7829 | struct sysdev_class_attribute *attr, |
Andi Kleen | f718cd4 | 2008-07-29 22:33:52 -0700 | [diff] [blame] | 7830 | char *page) |
Adrian Bunk | 6707de00 | 2007-08-12 18:08:19 +0200 | [diff] [blame] | 7831 | { |
| 7832 | return sprintf(page, "%u\n", sched_smt_power_savings); |
| 7833 | } |
Andi Kleen | f718cd4 | 2008-07-29 22:33:52 -0700 | [diff] [blame] | 7834 | static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev, |
Andi Kleen | c9be0a3 | 2010-01-05 12:47:58 +0100 | [diff] [blame] | 7835 | struct sysdev_class_attribute *attr, |
Adrian Bunk | 6707de00 | 2007-08-12 18:08:19 +0200 | [diff] [blame] | 7836 | const char *buf, size_t count) |
| 7837 | { |
| 7838 | return sched_power_savings_store(buf, count, 1); |
| 7839 | } |
Andi Kleen | f718cd4 | 2008-07-29 22:33:52 -0700 | [diff] [blame] | 7840 | static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644, |
| 7841 | sched_smt_power_savings_show, |
Adrian Bunk | 6707de00 | 2007-08-12 18:08:19 +0200 | [diff] [blame] | 7842 | sched_smt_power_savings_store); |
| 7843 | #endif |
| 7844 | |
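/*
 * The attributes defined above are created on the cpu sysdev class by
 * sched_create_sysfs_power_savings_entries() below, so they typically appear
 * as (paths assumed, depending on the sysdev layout):
 *
 *	/sys/devices/system/cpu/sched_mc_power_savings
 *	/sys/devices/system/cpu/sched_smt_power_savings
 *
 * Writing a level in the range 0..MAX_POWERSAVINGS_BALANCE_LEVELS-1 to either
 * file ends up in sched_power_savings_store() and triggers a full sched
 * domain rebuild via arch_reinit_sched_domains().
 */
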
Li Zefan | 39aac64 | 2009-01-05 19:18:02 +0800 | [diff] [blame] | 7845 | int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls) |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7846 | { |
| 7847 | int err = 0; |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 7848 | |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7849 | #ifdef CONFIG_SCHED_SMT |
| 7850 | if (smt_capable()) |
| 7851 | err = sysfs_create_file(&cls->kset.kobj, |
| 7852 | &attr_sched_smt_power_savings.attr); |
| 7853 | #endif |
| 7854 | #ifdef CONFIG_SCHED_MC |
| 7855 | if (!err && mc_capable()) |
| 7856 | err = sysfs_create_file(&cls->kset.kobj, |
| 7857 | &attr_sched_mc_power_savings.attr); |
| 7858 | #endif |
| 7859 | return err; |
| 7860 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 7861 | #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */ |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7862 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7863 | /* |
Tejun Heo | 3a101d0 | 2010-06-08 21:40:36 +0200 | [diff] [blame] | 7864 | * Update cpusets according to cpu_active mask. If cpusets are |
| 7865 | * disabled, cpuset_update_active_cpus() becomes a simple wrapper |
| 7866 | * around partition_sched_domains(). |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7867 | */ |
Tejun Heo | 0b2e918 | 2010-06-21 23:53:31 +0200 | [diff] [blame] | 7868 | static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action, |
| 7869 | void *hcpu) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7870 | { |
Tejun Heo | 3a101d0 | 2010-06-08 21:40:36 +0200 | [diff] [blame] | 7871 | switch (action & ~CPU_TASKS_FROZEN) { |
Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 7872 | case CPU_ONLINE: |
Peter Zijlstra | 6ad4c18 | 2009-11-25 13:31:39 +0100 | [diff] [blame] | 7873 | case CPU_DOWN_FAILED: |
Tejun Heo | 3a101d0 | 2010-06-08 21:40:36 +0200 | [diff] [blame] | 7874 | cpuset_update_active_cpus(); |
Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 7875 | return NOTIFY_OK; |
Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 7876 | default: |
| 7877 | return NOTIFY_DONE; |
| 7878 | } |
| 7879 | } |
Tejun Heo | 3a101d0 | 2010-06-08 21:40:36 +0200 | [diff] [blame] | 7880 | |
Tejun Heo | 0b2e918 | 2010-06-21 23:53:31 +0200 | [diff] [blame] | 7881 | static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action, |
| 7882 | void *hcpu) |
Tejun Heo | 3a101d0 | 2010-06-08 21:40:36 +0200 | [diff] [blame] | 7883 | { |
| 7884 | switch (action & ~CPU_TASKS_FROZEN) { |
| 7885 | case CPU_DOWN_PREPARE: |
| 7886 | cpuset_update_active_cpus(); |
| 7887 | return NOTIFY_OK; |
| 7888 | default: |
| 7889 | return NOTIFY_DONE; |
| 7890 | } |
| 7891 | } |
Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 7892 | |
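/*
 * Both cpuset notifiers above are registered from sched_init_smp() via
 * hotcpu_notifier() with the CPU_PRI_CPUSET_ACTIVE/INACTIVE priorities, so
 * cpusets (and through them the sched domains) are updated as cpus come and
 * go.
 */
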
| 7893 | static int update_runtime(struct notifier_block *nfb, |
| 7894 | unsigned long action, void *hcpu) |
| 7895 | { |
Peter Zijlstra | 7def2be | 2008-06-05 14:49:58 +0200 | [diff] [blame] | 7896 | int cpu = (int)(long)hcpu; |
| 7897 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7898 | switch (action) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7899 | case CPU_DOWN_PREPARE: |
Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 7900 | case CPU_DOWN_PREPARE_FROZEN: |
Peter Zijlstra | 7def2be | 2008-06-05 14:49:58 +0200 | [diff] [blame] | 7901 | disable_runtime(cpu_rq(cpu)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7902 | return NOTIFY_OK; |
| 7903 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7904 | case CPU_DOWN_FAILED: |
Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 7905 | case CPU_DOWN_FAILED_FROZEN: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7906 | case CPU_ONLINE: |
Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 7907 | case CPU_ONLINE_FROZEN: |
Peter Zijlstra | 7def2be | 2008-06-05 14:49:58 +0200 | [diff] [blame] | 7908 | enable_runtime(cpu_rq(cpu)); |
Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 7909 | return NOTIFY_OK; |
| 7910 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7911 | default: |
| 7912 | return NOTIFY_DONE; |
| 7913 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7914 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7915 | |
| 7916 | void __init sched_init_smp(void) |
| 7917 | { |
Rusty Russell | dcc30a3 | 2008-11-25 02:35:12 +1030 | [diff] [blame] | 7918 | cpumask_var_t non_isolated_cpus; |
| 7919 | |
| 7920 | alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL); |
Yong Zhang | cb5fd13 | 2009-09-14 20:20:16 +0800 | [diff] [blame] | 7921 | alloc_cpumask_var(&fallback_doms, GFP_KERNEL); |
Nick Piggin | 5c1e176 | 2006-10-03 01:14:04 -0700 | [diff] [blame] | 7922 | |
Mike Travis | 434d53b | 2008-04-04 18:11:04 -0700 | [diff] [blame] | 7923 | #if defined(CONFIG_NUMA) |
| 7924 | sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **), |
| 7925 | GFP_KERNEL); |
| 7926 | BUG_ON(sched_group_nodes_bycpu == NULL); |
| 7927 | #endif |
Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 7928 | get_online_cpus(); |
Heiko Carstens | 712555e | 2008-04-28 11:33:07 +0200 | [diff] [blame] | 7929 | mutex_lock(&sched_domains_mutex); |
Peter Zijlstra | 6ad4c18 | 2009-11-25 13:31:39 +0100 | [diff] [blame] | 7930 | arch_init_sched_domains(cpu_active_mask); |
Rusty Russell | dcc30a3 | 2008-11-25 02:35:12 +1030 | [diff] [blame] | 7931 | cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); |
| 7932 | if (cpumask_empty(non_isolated_cpus)) |
| 7933 | cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); |
Heiko Carstens | 712555e | 2008-04-28 11:33:07 +0200 | [diff] [blame] | 7934 | mutex_unlock(&sched_domains_mutex); |
Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 7935 | put_online_cpus(); |
Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 7936 | |
Tejun Heo | 3a101d0 | 2010-06-08 21:40:36 +0200 | [diff] [blame] | 7937 | hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE); |
| 7938 | hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE); |
Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 7939 | |
| 7940 | /* RT runtime code needs to handle some hotplug events */ |
| 7941 | hotcpu_notifier(update_runtime, 0); |
| 7942 | |
Peter Zijlstra | b328ca1 | 2008-04-29 10:02:46 +0200 | [diff] [blame] | 7943 | init_hrtick(); |
Nick Piggin | 5c1e176 | 2006-10-03 01:14:04 -0700 | [diff] [blame] | 7944 | |
| 7945 | /* Move init over to a non-isolated CPU */ |
Rusty Russell | dcc30a3 | 2008-11-25 02:35:12 +1030 | [diff] [blame] | 7946 | if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0) |
Nick Piggin | 5c1e176 | 2006-10-03 01:14:04 -0700 | [diff] [blame] | 7947 | BUG(); |
Ingo Molnar | 19978ca | 2007-11-09 22:39:38 +0100 | [diff] [blame] | 7948 | sched_init_granularity(); |
Rusty Russell | dcc30a3 | 2008-11-25 02:35:12 +1030 | [diff] [blame] | 7949 | free_cpumask_var(non_isolated_cpus); |
Rusty Russell | 4212823 | 2008-11-25 02:35:12 +1030 | [diff] [blame] | 7950 | |
Rusty Russell | 0e3900e | 2008-11-25 02:35:13 +1030 | [diff] [blame] | 7951 | init_sched_rt_class(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7952 | } |
| 7953 | #else |
| 7954 | void __init sched_init_smp(void) |
| 7955 | { |
Ingo Molnar | 19978ca | 2007-11-09 22:39:38 +0100 | [diff] [blame] | 7956 | sched_init_granularity(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7957 | } |
| 7958 | #endif /* CONFIG_SMP */ |
| 7959 | |
Arun R Bharadwaj | cd1bb94 | 2009-04-16 12:15:34 +0530 | [diff] [blame] | 7960 | const_debug unsigned int sysctl_timer_migration = 1; |
| 7961 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7962 | int in_sched_functions(unsigned long addr) |
| 7963 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7964 | return in_lock_functions(addr) || |
| 7965 | (addr >= (unsigned long)__sched_text_start |
| 7966 | && addr < (unsigned long)__sched_text_end); |
| 7967 | } |
| 7968 | |
Alexey Dobriyan | a995744 | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 7969 | static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq) |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 7970 | { |
| 7971 | cfs_rq->tasks_timeline = RB_ROOT; |
Peter Zijlstra | 4a55bd5 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 7972 | INIT_LIST_HEAD(&cfs_rq->tasks); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 7973 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 7974 | cfs_rq->rq = rq; |
| 7975 | #endif |
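/*
 * Start min_vruntime just below the u64 wrap point so that vruntime
 * wrap-around handling is exercised soon after boot (the apparent intent of
 * the unusual constant below).
 */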
Peter Zijlstra | 67e9fb2 | 2007-10-15 17:00:10 +0200 | [diff] [blame] | 7976 | cfs_rq->min_vruntime = (u64)(-(1LL << 20)); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 7977 | } |
| 7978 | |
Peter Zijlstra | fa85ae2 | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 7979 | static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq) |
| 7980 | { |
| 7981 | struct rt_prio_array *array; |
| 7982 | int i; |
| 7983 | |
| 7984 | array = &rt_rq->active; |
| 7985 | for (i = 0; i < MAX_RT_PRIO; i++) { |
| 7986 | INIT_LIST_HEAD(array->queue + i); |
| 7987 | __clear_bit(i, array->bitmap); |
| 7988 | } |
| 7989 | /* delimiter for bitsearch: */ |
| 7990 | __set_bit(MAX_RT_PRIO, array->bitmap); |
| 7991 | |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 7992 | #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED |
Gregory Haskins | e864c49 | 2008-12-29 09:39:49 -0500 | [diff] [blame] | 7993 | rt_rq->highest_prio.curr = MAX_RT_PRIO; |
Gregory Haskins | 398a153 | 2009-01-14 09:10:04 -0500 | [diff] [blame] | 7994 | #ifdef CONFIG_SMP |
Gregory Haskins | e864c49 | 2008-12-29 09:39:49 -0500 | [diff] [blame] | 7995 | rt_rq->highest_prio.next = MAX_RT_PRIO; |
Peter Zijlstra | 48d5e25 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 7996 | #endif |
Peter Zijlstra | fa85ae2 | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 7997 | #endif |
| 7998 | #ifdef CONFIG_SMP |
| 7999 | rt_rq->rt_nr_migratory = 0; |
Peter Zijlstra | fa85ae2 | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 8000 | rt_rq->overloaded = 0; |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 8001 | plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock); |
Peter Zijlstra | fa85ae2 | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 8002 | #endif |
| 8003 | |
| 8004 | rt_rq->rt_time = 0; |
| 8005 | rt_rq->rt_throttled = 0; |
Peter Zijlstra | ac086bc | 2008-04-19 19:44:58 +0200 | [diff] [blame] | 8006 | rt_rq->rt_runtime = 0; |
Thomas Gleixner | 0986b11 | 2009-11-17 15:32:06 +0100 | [diff] [blame] | 8007 | raw_spin_lock_init(&rt_rq->rt_runtime_lock); |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8008 | |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8009 | #ifdef CONFIG_RT_GROUP_SCHED |
Peter Zijlstra | 23b0fdf | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8010 | rt_rq->rt_nr_boosted = 0; |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8011 | rt_rq->rq = rq; |
| 8012 | #endif |
Peter Zijlstra | fa85ae2 | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 8013 | } |
| 8014 | |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8015 | #ifdef CONFIG_FAIR_GROUP_SCHED |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8016 | static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, |
| 8017 | struct sched_entity *se, int cpu, int add, |
| 8018 | struct sched_entity *parent) |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8019 | { |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8020 | struct rq *rq = cpu_rq(cpu); |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8021 | tg->cfs_rq[cpu] = cfs_rq; |
| 8022 | init_cfs_rq(cfs_rq, rq); |
| 8023 | cfs_rq->tg = tg; |
| 8024 | if (add) |
| 8025 | list_add(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list); |
| 8026 | |
| 8027 | tg->se[cpu] = se; |
Dhaval Giani | 354d60c | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8028 | /* se could be NULL for init_task_group */ |
| 8029 | if (!se) |
| 8030 | return; |
| 8031 | |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8032 | if (!parent) |
| 8033 | se->cfs_rq = &rq->cfs; |
| 8034 | else |
| 8035 | se->cfs_rq = parent->my_q; |
| 8036 | |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8037 | se->my_q = cfs_rq; |
| 8038 | se->load.weight = tg->shares; |
Peter Zijlstra | e05510d | 2008-05-05 23:56:17 +0200 | [diff] [blame] | 8039 | se->load.inv_weight = 0; |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8040 | se->parent = parent; |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8041 | } |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8042 | #endif |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8043 | |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8044 | #ifdef CONFIG_RT_GROUP_SCHED |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8045 | static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, |
| 8046 | struct sched_rt_entity *rt_se, int cpu, int add, |
| 8047 | struct sched_rt_entity *parent) |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8048 | { |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8049 | struct rq *rq = cpu_rq(cpu); |
| 8050 | |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8051 | tg->rt_rq[cpu] = rt_rq; |
| 8052 | init_rt_rq(rt_rq, rq); |
| 8053 | rt_rq->tg = tg; |
Peter Zijlstra | ac086bc | 2008-04-19 19:44:58 +0200 | [diff] [blame] | 8054 | rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime; |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8055 | if (add) |
| 8056 | list_add(&rt_rq->leaf_rt_rq_list, &rq->leaf_rt_rq_list); |
| 8057 | |
| 8058 | tg->rt_se[cpu] = rt_se; |
Dhaval Giani | 354d60c | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8059 | if (!rt_se) |
| 8060 | return; |
| 8061 | |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8062 | if (!parent) |
| 8063 | rt_se->rt_rq = &rq->rt; |
| 8064 | else |
| 8065 | rt_se->rt_rq = parent->my_q; |
| 8066 | |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8067 | rt_se->my_q = rt_rq; |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8068 | rt_se->parent = parent; |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8069 | INIT_LIST_HEAD(&rt_se->run_list); |
| 8070 | } |
| 8071 | #endif |
| 8072 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8073 | void __init sched_init(void) |
| 8074 | { |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 8075 | int i, j; |
Mike Travis | 434d53b | 2008-04-04 18:11:04 -0700 | [diff] [blame] | 8076 | unsigned long alloc_size = 0, ptr; |
| 8077 | |
| 8078 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 8079 | alloc_size += 2 * nr_cpu_ids * sizeof(void **); |
| 8080 | #endif |
| 8081 | #ifdef CONFIG_RT_GROUP_SCHED |
| 8082 | alloc_size += 2 * nr_cpu_ids * sizeof(void **); |
| 8083 | #endif |
Rusty Russell | df7c8e8 | 2009-03-19 15:22:20 +1030 | [diff] [blame] | 8084 | #ifdef CONFIG_CPUMASK_OFFSTACK |
Rusty Russell | 8c083f0 | 2009-03-19 15:22:20 +1030 | [diff] [blame] | 8085 | alloc_size += num_possible_cpus() * cpumask_size(); |
Rusty Russell | df7c8e8 | 2009-03-19 15:22:20 +1030 | [diff] [blame] | 8086 | #endif |
Mike Travis | 434d53b | 2008-04-04 18:11:04 -0700 | [diff] [blame] | 8087 | if (alloc_size) { |
Pekka Enberg | 36b7b6d | 2009-06-10 23:42:36 +0300 | [diff] [blame] | 8088 | ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT); |
Mike Travis | 434d53b | 2008-04-04 18:11:04 -0700 | [diff] [blame] | 8089 | |
| 8090 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 8091 | init_task_group.se = (struct sched_entity **)ptr; |
| 8092 | ptr += nr_cpu_ids * sizeof(void **); |
| 8093 | |
| 8094 | init_task_group.cfs_rq = (struct cfs_rq **)ptr; |
| 8095 | ptr += nr_cpu_ids * sizeof(void **); |
Peter Zijlstra | eff766a | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 8096 | |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 8097 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
Mike Travis | 434d53b | 2008-04-04 18:11:04 -0700 | [diff] [blame] | 8098 | #ifdef CONFIG_RT_GROUP_SCHED |
| 8099 | init_task_group.rt_se = (struct sched_rt_entity **)ptr; |
| 8100 | ptr += nr_cpu_ids * sizeof(void **); |
| 8101 | |
| 8102 | init_task_group.rt_rq = (struct rt_rq **)ptr; |
Peter Zijlstra | eff766a | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 8103 | ptr += nr_cpu_ids * sizeof(void **); |
| 8104 | |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 8105 | #endif /* CONFIG_RT_GROUP_SCHED */ |
Rusty Russell | df7c8e8 | 2009-03-19 15:22:20 +1030 | [diff] [blame] | 8106 | #ifdef CONFIG_CPUMASK_OFFSTACK |
| 8107 | for_each_possible_cpu(i) { |
| 8108 | per_cpu(load_balance_tmpmask, i) = (void *)ptr; |
| 8109 | ptr += cpumask_size(); |
| 8110 | } |
| 8111 | #endif /* CONFIG_CPUMASK_OFFSTACK */ |
Mike Travis | 434d53b | 2008-04-04 18:11:04 -0700 | [diff] [blame] | 8112 | } |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 8113 | |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 8114 | #ifdef CONFIG_SMP |
| 8115 | init_defrootdomain(); |
| 8116 | #endif |
| 8117 | |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8118 | init_rt_bandwidth(&def_rt_bandwidth, |
| 8119 | global_rt_period(), global_rt_runtime()); |
| 8120 | |
| 8121 | #ifdef CONFIG_RT_GROUP_SCHED |
| 8122 | init_rt_bandwidth(&init_task_group.rt_bandwidth, |
| 8123 | global_rt_period(), global_rt_runtime()); |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 8124 | #endif /* CONFIG_RT_GROUP_SCHED */ |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8125 | |
Dhaval Giani | 7c94143 | 2010-01-20 13:26:18 +0100 | [diff] [blame] | 8126 | #ifdef CONFIG_CGROUP_SCHED |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8127 | list_add(&init_task_group.list, &task_groups); |
Peter Zijlstra | f473aa5 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 8128 | INIT_LIST_HEAD(&init_task_group.children); |
| 8129 | |
Dhaval Giani | 7c94143 | 2010-01-20 13:26:18 +0100 | [diff] [blame] | 8130 | #endif /* CONFIG_CGROUP_SCHED */ |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8131 | |
Jiri Kosina | 4a6cc4b | 2009-10-29 00:26:00 +0900 | [diff] [blame] | 8132 | #if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP |
| 8133 | update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long), |
| 8134 | __alignof__(unsigned long)); |
| 8135 | #endif |
KAMEZAWA Hiroyuki | 0a94502 | 2006-03-28 01:56:37 -0800 | [diff] [blame] | 8136 | for_each_possible_cpu(i) { |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 8137 | struct rq *rq; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8138 | |
| 8139 | rq = cpu_rq(i); |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 8140 | raw_spin_lock_init(&rq->lock); |
Nick Piggin | 7897986 | 2005-06-25 14:57:13 -0700 | [diff] [blame] | 8141 | rq->nr_running = 0; |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 8142 | rq->calc_load_active = 0; |
| 8143 | rq->calc_load_update = jiffies + LOAD_FREQ; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 8144 | init_cfs_rq(&rq->cfs, rq); |
Peter Zijlstra | fa85ae2 | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 8145 | init_rt_rq(&rq->rt, rq); |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8146 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 8147 | init_task_group.shares = init_task_group_load; |
| 8148 | INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); |
Dhaval Giani | 354d60c | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8149 | #ifdef CONFIG_CGROUP_SCHED |
| 8150 | /* |
| 8151 | * How much cpu bandwidth does init_task_group get? |
| 8152 | * |
| 8153 | * In case of task-groups formed through the cgroup filesystem, it |
| 8154 | * gets 100% of the cpu resources in the system. This overall |
| 8155 | * system cpu resource is divided among the tasks of |
| 8156 | * init_task_group and its child task-groups in a fair manner, |
| 8157 | * based on each entity's (task or task-group's) weight |
| 8158 | * (se->load.weight). |
| 8159 | * |
| 8160 | * In other words, if init_task_group has 10 tasks (each of weight |
| 8161 | * 1024) and two child groups A0 and A1 (of weight 1024 each), |
| 8162 | * then A0's share of the cpu resource is: |
| 8163 | * |
Ingo Molnar | 0d905bc | 2009-05-04 19:13:30 +0200 | [diff] [blame] | 8164 | * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33% |
Dhaval Giani | 354d60c | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8165 | * |
| 8166 | * We achieve this by letting init_task_group's tasks sit |
| 8167 | * directly in rq->cfs (i.e. init_task_group->se[] = NULL). |
| 8168 | */ |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8169 | init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, 1, NULL); |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8170 | #endif |
Dhaval Giani | 354d60c | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8171 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
| 8172 | |
| 8173 | rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8174 | #ifdef CONFIG_RT_GROUP_SCHED |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8175 | INIT_LIST_HEAD(&rq->leaf_rt_rq_list); |
Dhaval Giani | 354d60c | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8176 | #ifdef CONFIG_CGROUP_SCHED |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8177 | init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, 1, NULL); |
Dhaval Giani | 354d60c | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8178 | #endif |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8179 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8180 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 8181 | for (j = 0; j < CPU_LOAD_IDX_MAX; j++) |
| 8182 | rq->cpu_load[j] = 0; |
Venkatesh Pallipadi | fdf3e95 | 2010-05-17 18:14:43 -0700 | [diff] [blame] | 8183 | |
| 8184 | rq->last_load_update_tick = jiffies; |
| 8185 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8186 | #ifdef CONFIG_SMP |
Nick Piggin | 41c7ce9 | 2005-06-25 14:57:24 -0700 | [diff] [blame] | 8187 | rq->sd = NULL; |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 8188 | rq->rd = NULL; |
Peter Zijlstra | e51fd5e | 2010-05-31 12:37:30 +0200 | [diff] [blame] | 8189 | rq->cpu_power = SCHED_LOAD_SCALE; |
Gregory Haskins | 3f029d3 | 2009-07-29 11:08:47 -0400 | [diff] [blame] | 8190 | rq->post_schedule = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8191 | rq->active_balance = 0; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 8192 | rq->next_balance = jiffies; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8193 | rq->push_cpu = 0; |
Christoph Lameter | 0a2966b | 2006-09-25 23:30:51 -0700 | [diff] [blame] | 8194 | rq->cpu = i; |
Gregory Haskins | 1f11eb6a | 2008-06-04 15:04:05 -0400 | [diff] [blame] | 8195 | rq->online = 0; |
Mike Galbraith | eae0c9d | 2009-11-10 03:50:02 +0100 | [diff] [blame] | 8196 | rq->idle_stamp = 0; |
| 8197 | rq->avg_idle = 2*sysctl_sched_migration_cost; |
Gregory Haskins | dc93852 | 2008-01-25 21:08:26 +0100 | [diff] [blame] | 8198 | rq_attach_root(rq, &def_root_domain); |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 8199 | #ifdef CONFIG_NO_HZ |
| 8200 | rq->nohz_balance_kick = 0; |
| 8201 | init_sched_softirq_csd(&per_cpu(remote_sched_softirq_cb, i)); |
| 8202 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8203 | #endif |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 8204 | init_rq_hrtick(rq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8205 | atomic_set(&rq->nr_iowait, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8206 | } |
| 8207 | |
Peter Williams | 2dd73a4 | 2006-06-27 02:54:34 -0700 | [diff] [blame] | 8208 | set_load_weight(&init_task); |
Heiko Carstens | b50f60c | 2006-07-30 03:03:52 -0700 | [diff] [blame] | 8209 | |
Avi Kivity | e107be3 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 8210 | #ifdef CONFIG_PREEMPT_NOTIFIERS |
| 8211 | INIT_HLIST_HEAD(&init_task.preempt_notifiers); |
| 8212 | #endif |
| 8213 | |
Christoph Lameter | c9819f4 | 2006-12-10 02:20:25 -0800 | [diff] [blame] | 8214 | #ifdef CONFIG_SMP |
Carlos R. Mafra | 962cf36 | 2008-05-15 11:15:37 -0300 | [diff] [blame] | 8215 | open_softirq(SCHED_SOFTIRQ, run_rebalance_domains); |
Christoph Lameter | c9819f4 | 2006-12-10 02:20:25 -0800 | [diff] [blame] | 8216 | #endif |
| 8217 | |
Heiko Carstens | b50f60c | 2006-07-30 03:03:52 -0700 | [diff] [blame] | 8218 | #ifdef CONFIG_RT_MUTEXES |
Thomas Gleixner | 1d61548 | 2009-11-17 14:54:03 +0100 | [diff] [blame] | 8219 | plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock); |
Heiko Carstens | b50f60c | 2006-07-30 03:03:52 -0700 | [diff] [blame] | 8220 | #endif |
| 8221 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8222 | /* |
| 8223 | * The boot idle thread does lazy MMU switching as well: |
| 8224 | */ |
| 8225 | atomic_inc(&init_mm.mm_count); |
| 8226 | enter_lazy_tlb(&init_mm, current); |
| 8227 | |
| 8228 | /* |
| 8229 | * Make us the idle thread. Technically, schedule() should not be |
| 8230 | * called from this thread; however, somewhere below it might be. |
| 8231 | * Because we are the idle thread, we just pick up running again |
| 8232 | * when this runqueue becomes "idle". |
| 8233 | */ |
| 8234 | init_idle(current, smp_processor_id()); |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 8235 | |
| 8236 | calc_load_update = jiffies + LOAD_FREQ; |
| 8237 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 8238 | /* |
| 8239 | * During early bootup we pretend to be a normal task: |
| 8240 | */ |
| 8241 | current->sched_class = &fair_sched_class; |
Ingo Molnar | 6892b75 | 2008-02-13 14:02:36 +0100 | [diff] [blame] | 8242 | |
Rusty Russell | 6a7b3dc | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 8243 | /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */ |
Rusty Russell | 49557e6 | 2009-11-02 20:37:20 +1030 | [diff] [blame] | 8244 | zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT); |
Rusty Russell | bf4d83f | 2008-11-25 09:57:51 +1030 | [diff] [blame] | 8245 | #ifdef CONFIG_SMP |
Rusty Russell | 7d1e6a9 | 2008-11-25 02:35:09 +1030 | [diff] [blame] | 8246 | #ifdef CONFIG_NO_HZ |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 8247 | zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT); |
| 8248 | alloc_cpumask_var(&nohz.grp_idle_mask, GFP_NOWAIT); |
| 8249 | atomic_set(&nohz.load_balancer, nr_cpu_ids); |
| 8250 | atomic_set(&nohz.first_pick_cpu, nr_cpu_ids); |
| 8251 | atomic_set(&nohz.second_pick_cpu, nr_cpu_ids); |
Rusty Russell | 7d1e6a9 | 2008-11-25 02:35:09 +1030 | [diff] [blame] | 8252 | #endif |
Rusty Russell | bdddd29 | 2009-12-02 14:09:16 +1030 | [diff] [blame] | 8253 | /* May be allocated at isolcpus cmdline parse time */ |
| 8254 | if (cpu_isolated_map == NULL) |
| 8255 | zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); |
Rusty Russell | bf4d83f | 2008-11-25 09:57:51 +1030 | [diff] [blame] | 8256 | #endif /* SMP */ |
Rusty Russell | 6a7b3dc | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 8257 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8258 | perf_event_init(); |
Ingo Molnar | 0d905bc | 2009-05-04 19:13:30 +0200 | [diff] [blame] | 8259 | |
Ingo Molnar | 6892b75 | 2008-02-13 14:02:36 +0100 | [diff] [blame] | 8260 | scheduler_running = 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8261 | } |
| 8262 | |
| 8263 | #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP |
Frederic Weisbecker | e4aafea | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 8264 | static inline int preempt_count_equals(int preempt_offset) |
| 8265 | { |
Frederic Weisbecker | 234da7b | 2009-12-16 20:21:05 +0100 | [diff] [blame] | 8266 | int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth(); |
Frederic Weisbecker | e4aafea | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 8267 | |
| 8268 | return (nested == PREEMPT_INATOMIC_BASE + preempt_offset); |
| 8269 | } |
| 8270 | |
Simon Kagstrom | d894837 | 2009-12-23 11:08:18 +0100 | [diff] [blame] | 8271 | void __might_sleep(const char *file, int line, int preempt_offset) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8272 | { |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 8273 | #ifdef in_atomic |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8274 | static unsigned long prev_jiffy; /* ratelimiting */ |
| 8275 | |
Frederic Weisbecker | e4aafea | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 8276 | if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) || |
| 8277 | system_state != SYSTEM_RUNNING || oops_in_progress) |
Ingo Molnar | aef745f | 2008-08-28 11:34:43 +0200 | [diff] [blame] | 8278 | return; |
| 8279 | if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) |
| 8280 | return; |
| 8281 | prev_jiffy = jiffies; |
| 8282 | |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 8283 | printk(KERN_ERR |
| 8284 | "BUG: sleeping function called from invalid context at %s:%d\n", |
| 8285 | file, line); |
| 8286 | printk(KERN_ERR |
| 8287 | "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", |
| 8288 | in_atomic(), irqs_disabled(), |
| 8289 | current->pid, current->comm); |
Ingo Molnar | aef745f | 2008-08-28 11:34:43 +0200 | [diff] [blame] | 8290 | |
| 8291 | debug_show_held_locks(current); |
| 8292 | if (irqs_disabled()) |
| 8293 | print_irqtrace_events(current); |
| 8294 | dump_stack(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8295 | #endif |
| 8296 | } |
| 8297 | EXPORT_SYMBOL(__might_sleep); |
| 8298 | #endif |
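/*
 * Illustrative sketch (not part of the source): __might_sleep() above is
 * reached through the might_sleep() annotation placed inside sleeping
 * primitives. A hypothetical call sequence like the one below would
 * trigger the "BUG: sleeping function called from invalid context"
 * report, because the spinlock raises the atomic context count while
 * mutex_lock() may sleep:
 *
 *	spin_lock(&some_lock);
 *	mutex_lock(&some_mutex);	<-- might_sleep() fires here
 *	mutex_unlock(&some_mutex);
 *	spin_unlock(&some_lock);
 */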
| 8299 | |
| 8300 | #ifdef CONFIG_MAGIC_SYSRQ |
Andi Kleen | 3a5e4dc | 2007-10-15 17:00:15 +0200 | [diff] [blame] | 8301 | static void normalize_task(struct rq *rq, struct task_struct *p) |
| 8302 | { |
| 8303 | int on_rq; |
Peter Zijlstra | 3e51f33 | 2008-05-03 18:29:28 +0200 | [diff] [blame] | 8304 | |
Andi Kleen | 3a5e4dc | 2007-10-15 17:00:15 +0200 | [diff] [blame] | 8305 | on_rq = p->se.on_rq; |
| 8306 | if (on_rq) |
| 8307 | deactivate_task(rq, p, 0); |
| 8308 | __setscheduler(rq, p, SCHED_NORMAL, 0); |
| 8309 | if (on_rq) { |
| 8310 | activate_task(rq, p, 0); |
| 8311 | resched_task(rq->curr); |
| 8312 | } |
| 8313 | } |
| 8314 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8315 | void normalize_rt_tasks(void) |
| 8316 | { |
Ingo Molnar | a0f98a1 | 2007-06-17 18:37:45 +0200 | [diff] [blame] | 8317 | struct task_struct *g, *p; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8318 | unsigned long flags; |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 8319 | struct rq *rq; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8320 | |
Peter Zijlstra | 4cf5d77 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8321 | read_lock_irqsave(&tasklist_lock, flags); |
Ingo Molnar | a0f98a1 | 2007-06-17 18:37:45 +0200 | [diff] [blame] | 8322 | do_each_thread(g, p) { |
Ingo Molnar | 178be79 | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 8323 | /* |
| 8324 | * Only normalize user tasks: |
| 8325 | */ |
| 8326 | if (!p->mm) |
| 8327 | continue; |
| 8328 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 8329 | p->se.exec_start = 0; |
Ingo Molnar | 6cfb0d5 | 2007-08-02 17:41:40 +0200 | [diff] [blame] | 8330 | #ifdef CONFIG_SCHEDSTATS |
Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 8331 | p->se.statistics.wait_start = 0; |
| 8332 | p->se.statistics.sleep_start = 0; |
| 8333 | p->se.statistics.block_start = 0; |
Ingo Molnar | 6cfb0d5 | 2007-08-02 17:41:40 +0200 | [diff] [blame] | 8334 | #endif |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 8335 | |
| 8336 | if (!rt_task(p)) { |
| 8337 | /* |
| 8338 | * Renice negative nice level userspace |
| 8339 | * tasks back to 0: |
| 8340 | */ |
| 8341 | if (TASK_NICE(p) < 0 && p->mm) |
| 8342 | set_user_nice(p, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8343 | continue; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 8344 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8345 | |
Thomas Gleixner | 1d61548 | 2009-11-17 14:54:03 +0100 | [diff] [blame] | 8346 | raw_spin_lock(&p->pi_lock); |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 8347 | rq = __task_rq_lock(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8348 | |
Ingo Molnar | 178be79 | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 8349 | normalize_task(rq, p); |
Andi Kleen | 3a5e4dc | 2007-10-15 17:00:15 +0200 | [diff] [blame] | 8350 | |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 8351 | __task_rq_unlock(rq); |
Thomas Gleixner | 1d61548 | 2009-11-17 14:54:03 +0100 | [diff] [blame] | 8352 | raw_spin_unlock(&p->pi_lock); |
Ingo Molnar | a0f98a1 | 2007-06-17 18:37:45 +0200 | [diff] [blame] | 8353 | } while_each_thread(g, p); |
| 8354 | |
Peter Zijlstra | 4cf5d77 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8355 | read_unlock_irqrestore(&tasklist_lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8356 | } |
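/*
 * Usage note (illustrative, not from the source): normalize_rt_tasks()
 * is wired to the magic SysRq 'n' key, so on a console it can be
 * reached with Alt-SysRq-n or, assuming sysrq is enabled, with:
 *
 *	echo n > /proc/sysrq-trigger
 *
 * which demotes all user-space RT tasks back to SCHED_NORMAL.
 */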
| 8357 | |
| 8358 | #endif /* CONFIG_MAGIC_SYSRQ */ |
Linus Torvalds | 1df5c10 | 2005-09-12 07:59:21 -0700 | [diff] [blame] | 8359 | |
Jason Wessel | 67fc4e0 | 2010-05-20 21:04:21 -0500 | [diff] [blame] | 8360 | #if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) |
Linus Torvalds | 1df5c10 | 2005-09-12 07:59:21 -0700 | [diff] [blame] | 8361 | /* |
Jason Wessel | 67fc4e0 | 2010-05-20 21:04:21 -0500 | [diff] [blame] | 8362 | * These functions are only useful for the IA64 MCA handling or kdb. |
Linus Torvalds | 1df5c10 | 2005-09-12 07:59:21 -0700 | [diff] [blame] | 8363 | * |
| 8364 | * They can only be called when the whole system has been |
| 8365 | * stopped - every CPU needs to be quiescent, and no scheduling |
| 8366 | * activity can take place. Using them for anything else would |
| 8367 | * be a serious bug, and as a result, they aren't even visible |
| 8368 | * under any other configuration. |
| 8369 | */ |
| 8370 | |
| 8371 | /** |
| 8372 | * curr_task - return the current task for a given cpu. |
| 8373 | * @cpu: the processor in question. |
| 8374 | * |
| 8375 | * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! |
| 8376 | */ |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 8377 | struct task_struct *curr_task(int cpu) |
Linus Torvalds | 1df5c10 | 2005-09-12 07:59:21 -0700 | [diff] [blame] | 8378 | { |
| 8379 | return cpu_curr(cpu); |
| 8380 | } |
| 8381 | |
Jason Wessel | 67fc4e0 | 2010-05-20 21:04:21 -0500 | [diff] [blame] | 8382 | #endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */ |
| 8383 | |
| 8384 | #ifdef CONFIG_IA64 |
Linus Torvalds | 1df5c10 | 2005-09-12 07:59:21 -0700 | [diff] [blame] | 8385 | /** |
| 8386 | * set_curr_task - set the current task for a given cpu. |
| 8387 | * @cpu: the processor in question. |
| 8388 | * @p: the task pointer to set. |
| 8389 | * |
| 8390 | * Description: This function must only be used when non-maskable interrupts |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 8391 | * are serviced on a separate stack. It allows the architecture to switch the |
| 8392 | * notion of the current task on a cpu in a non-blocking manner. This function |
Linus Torvalds | 1df5c10 | 2005-09-12 07:59:21 -0700 | [diff] [blame] | 8393 | * must be called with all CPUs synchronized and interrupts disabled; the |
| 8394 | * caller must save the original value of the current task (see |
| 8395 | * curr_task() above) and restore that value before reenabling interrupts and |
| 8396 | * re-starting the system. |
| 8397 | * |
| 8398 | * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! |
| 8399 | */ |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 8400 | void set_curr_task(int cpu, struct task_struct *p) |
Linus Torvalds | 1df5c10 | 2005-09-12 07:59:21 -0700 | [diff] [blame] | 8401 | { |
| 8402 | cpu_curr(cpu) = p; |
| 8403 | } |
| 8404 | |
| 8405 | #endif |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8406 | |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8407 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 8408 | static void free_fair_sched_group(struct task_group *tg) |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8409 | { |
| 8410 | int i; |
| 8411 | |
| 8412 | for_each_possible_cpu(i) { |
| 8413 | if (tg->cfs_rq) |
| 8414 | kfree(tg->cfs_rq[i]); |
| 8415 | if (tg->se) |
| 8416 | kfree(tg->se[i]); |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8417 | } |
| 8418 | |
| 8419 | kfree(tg->cfs_rq); |
| 8420 | kfree(tg->se); |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8421 | } |
| 8422 | |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8423 | static |
| 8424 | int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8425 | { |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8426 | struct cfs_rq *cfs_rq; |
Li Zefan | eab1722 | 2008-10-29 17:03:22 +0800 | [diff] [blame] | 8427 | struct sched_entity *se; |
Srivatsa Vaddagiri | 9b5b775 | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 8428 | struct rq *rq; |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8429 | int i; |
| 8430 | |
Mike Travis | 434d53b | 2008-04-04 18:11:04 -0700 | [diff] [blame] | 8431 | tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8432 | if (!tg->cfs_rq) |
| 8433 | goto err; |
Mike Travis | 434d53b | 2008-04-04 18:11:04 -0700 | [diff] [blame] | 8434 | tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8435 | if (!tg->se) |
| 8436 | goto err; |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8437 | |
| 8438 | tg->shares = NICE_0_LOAD; |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8439 | |
| 8440 | for_each_possible_cpu(i) { |
Srivatsa Vaddagiri | 9b5b775 | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 8441 | rq = cpu_rq(i); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8442 | |
Li Zefan | eab1722 | 2008-10-29 17:03:22 +0800 | [diff] [blame] | 8443 | cfs_rq = kzalloc_node(sizeof(struct cfs_rq), |
| 8444 | GFP_KERNEL, cpu_to_node(i)); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8445 | if (!cfs_rq) |
| 8446 | goto err; |
| 8447 | |
Li Zefan | eab1722 | 2008-10-29 17:03:22 +0800 | [diff] [blame] | 8448 | se = kzalloc_node(sizeof(struct sched_entity), |
| 8449 | GFP_KERNEL, cpu_to_node(i)); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8450 | if (!se) |
Phil Carmody | dfc12eb | 2009-12-10 14:29:37 +0200 | [diff] [blame] | 8451 | goto err_free_rq; |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8452 | |
Li Zefan | eab1722 | 2008-10-29 17:03:22 +0800 | [diff] [blame] | 8453 | init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]); |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8454 | } |
| 8455 | |
| 8456 | return 1; |
| 8457 | |
Peter Zijlstra | 4924627 | 2010-10-17 21:46:10 +0200 | [diff] [blame] | 8458 | err_free_rq: |
Phil Carmody | dfc12eb | 2009-12-10 14:29:37 +0200 | [diff] [blame] | 8459 | kfree(cfs_rq); |
Peter Zijlstra | 4924627 | 2010-10-17 21:46:10 +0200 | [diff] [blame] | 8460 | err: |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8461 | return 0; |
| 8462 | } |
| 8463 | |
| 8464 | static inline void register_fair_sched_group(struct task_group *tg, int cpu) |
| 8465 | { |
| 8466 | list_add_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list, |
| 8467 | &cpu_rq(cpu)->leaf_cfs_rq_list); |
| 8468 | } |
| 8469 | |
| 8470 | static inline void unregister_fair_sched_group(struct task_group *tg, int cpu) |
| 8471 | { |
| 8472 | list_del_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list); |
| 8473 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 8474 | #else /* !CONFIG_FAIR_GROUP_SCHED */ |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8475 | static inline void free_fair_sched_group(struct task_group *tg) |
| 8476 | { |
| 8477 | } |
| 8478 | |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8479 | static inline |
| 8480 | int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8481 | { |
| 8482 | return 1; |
| 8483 | } |
| 8484 | |
| 8485 | static inline void register_fair_sched_group(struct task_group *tg, int cpu) |
| 8486 | { |
| 8487 | } |
| 8488 | |
| 8489 | static inline void unregister_fair_sched_group(struct task_group *tg, int cpu) |
| 8490 | { |
| 8491 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 8492 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8493 | |
| 8494 | #ifdef CONFIG_RT_GROUP_SCHED |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8495 | static void free_rt_sched_group(struct task_group *tg) |
| 8496 | { |
| 8497 | int i; |
| 8498 | |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8499 | destroy_rt_bandwidth(&tg->rt_bandwidth); |
| 8500 | |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8501 | for_each_possible_cpu(i) { |
| 8502 | if (tg->rt_rq) |
| 8503 | kfree(tg->rt_rq[i]); |
| 8504 | if (tg->rt_se) |
| 8505 | kfree(tg->rt_se[i]); |
| 8506 | } |
| 8507 | |
| 8508 | kfree(tg->rt_rq); |
| 8509 | kfree(tg->rt_se); |
| 8510 | } |
| 8511 | |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8512 | static |
| 8513 | int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8514 | { |
| 8515 | struct rt_rq *rt_rq; |
Li Zefan | eab1722 | 2008-10-29 17:03:22 +0800 | [diff] [blame] | 8516 | struct sched_rt_entity *rt_se; |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8517 | struct rq *rq; |
| 8518 | int i; |
| 8519 | |
Mike Travis | 434d53b | 2008-04-04 18:11:04 -0700 | [diff] [blame] | 8520 | tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL); |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8521 | if (!tg->rt_rq) |
| 8522 | goto err; |
Mike Travis | 434d53b | 2008-04-04 18:11:04 -0700 | [diff] [blame] | 8523 | tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL); |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8524 | if (!tg->rt_se) |
| 8525 | goto err; |
| 8526 | |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8527 | init_rt_bandwidth(&tg->rt_bandwidth, |
| 8528 | ktime_to_ns(def_rt_bandwidth.rt_period), 0); |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8529 | |
| 8530 | for_each_possible_cpu(i) { |
| 8531 | rq = cpu_rq(i); |
| 8532 | |
Li Zefan | eab1722 | 2008-10-29 17:03:22 +0800 | [diff] [blame] | 8533 | rt_rq = kzalloc_node(sizeof(struct rt_rq), |
| 8534 | GFP_KERNEL, cpu_to_node(i)); |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8535 | if (!rt_rq) |
| 8536 | goto err; |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8537 | |
Li Zefan | eab1722 | 2008-10-29 17:03:22 +0800 | [diff] [blame] | 8538 | rt_se = kzalloc_node(sizeof(struct sched_rt_entity), |
| 8539 | GFP_KERNEL, cpu_to_node(i)); |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8540 | if (!rt_se) |
Phil Carmody | dfc12eb | 2009-12-10 14:29:37 +0200 | [diff] [blame] | 8541 | goto err_free_rq; |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8542 | |
Li Zefan | eab1722 | 2008-10-29 17:03:22 +0800 | [diff] [blame] | 8543 | init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8544 | } |
| 8545 | |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8546 | return 1; |
| 8547 | |
Peter Zijlstra | 4924627 | 2010-10-17 21:46:10 +0200 | [diff] [blame] | 8548 | err_free_rq: |
Phil Carmody | dfc12eb | 2009-12-10 14:29:37 +0200 | [diff] [blame] | 8549 | kfree(rt_rq); |
Peter Zijlstra | 4924627 | 2010-10-17 21:46:10 +0200 | [diff] [blame] | 8550 | err: |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8551 | return 0; |
| 8552 | } |
| 8553 | |
| 8554 | static inline void register_rt_sched_group(struct task_group *tg, int cpu) |
| 8555 | { |
| 8556 | list_add_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list, |
| 8557 | &cpu_rq(cpu)->leaf_rt_rq_list); |
| 8558 | } |
| 8559 | |
| 8560 | static inline void unregister_rt_sched_group(struct task_group *tg, int cpu) |
| 8561 | { |
| 8562 | list_del_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list); |
| 8563 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 8564 | #else /* !CONFIG_RT_GROUP_SCHED */ |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8565 | static inline void free_rt_sched_group(struct task_group *tg) |
| 8566 | { |
| 8567 | } |
| 8568 | |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8569 | static inline |
| 8570 | int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8571 | { |
| 8572 | return 1; |
| 8573 | } |
| 8574 | |
| 8575 | static inline void register_rt_sched_group(struct task_group *tg, int cpu) |
| 8576 | { |
| 8577 | } |
| 8578 | |
| 8579 | static inline void unregister_rt_sched_group(struct task_group *tg, int cpu) |
| 8580 | { |
| 8581 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 8582 | #endif /* CONFIG_RT_GROUP_SCHED */ |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8583 | |
Dhaval Giani | 7c94143 | 2010-01-20 13:26:18 +0100 | [diff] [blame] | 8584 | #ifdef CONFIG_CGROUP_SCHED |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8585 | static void free_sched_group(struct task_group *tg) |
| 8586 | { |
| 8587 | free_fair_sched_group(tg); |
| 8588 | free_rt_sched_group(tg); |
| 8589 | kfree(tg); |
| 8590 | } |
| 8591 | |
| 8592 | /* allocate runqueue etc for a new task group */ |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8593 | struct task_group *sched_create_group(struct task_group *parent) |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8594 | { |
| 8595 | struct task_group *tg; |
| 8596 | unsigned long flags; |
| 8597 | int i; |
| 8598 | |
| 8599 | tg = kzalloc(sizeof(*tg), GFP_KERNEL); |
| 8600 | if (!tg) |
| 8601 | return ERR_PTR(-ENOMEM); |
| 8602 | |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8603 | if (!alloc_fair_sched_group(tg, parent)) |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8604 | goto err; |
| 8605 | |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8606 | if (!alloc_rt_sched_group(tg, parent)) |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8607 | goto err; |
| 8608 | |
Peter Zijlstra | 8ed3699 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8609 | spin_lock_irqsave(&task_group_lock, flags); |
Srivatsa Vaddagiri | 9b5b775 | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 8610 | for_each_possible_cpu(i) { |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8611 | register_fair_sched_group(tg, i); |
| 8612 | register_rt_sched_group(tg, i); |
Srivatsa Vaddagiri | 9b5b775 | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 8613 | } |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8614 | list_add_rcu(&tg->list, &task_groups); |
Peter Zijlstra | f473aa5 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 8615 | |
| 8616 | WARN_ON(!parent); /* root should already exist */ |
| 8617 | |
| 8618 | tg->parent = parent; |
Peter Zijlstra | f473aa5 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 8619 | INIT_LIST_HEAD(&tg->children); |
Zhang, Yanmin | 09f2724 | 2008-08-14 15:56:40 +0800 | [diff] [blame] | 8620 | list_add_rcu(&tg->siblings, &parent->children); |
Peter Zijlstra | 8ed3699 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8621 | spin_unlock_irqrestore(&task_group_lock, flags); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8622 | |
Srivatsa Vaddagiri | 9b5b775 | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 8623 | return tg; |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8624 | |
| 8625 | err: |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8626 | free_sched_group(tg); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8627 | return ERR_PTR(-ENOMEM); |
| 8628 | } |
| 8629 | |
Srivatsa Vaddagiri | 9b5b775 | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 8630 | /* rcu callback to free various structures associated with a task group */ |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8631 | static void free_sched_group_rcu(struct rcu_head *rhp) |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8632 | { |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8633 | /* now it should be safe to free those cfs_rqs */ |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8634 | free_sched_group(container_of(rhp, struct task_group, rcu)); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8635 | } |
| 8636 | |
Srivatsa Vaddagiri | 9b5b775 | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 8637 | /* Destroy runqueue etc associated with a task group */ |
Ingo Molnar | 4cf86d7 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 8638 | void sched_destroy_group(struct task_group *tg) |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8639 | { |
Peter Zijlstra | 8ed3699 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8640 | unsigned long flags; |
Srivatsa Vaddagiri | 9b5b775 | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 8641 | int i; |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8642 | |
Peter Zijlstra | 8ed3699 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8643 | spin_lock_irqsave(&task_group_lock, flags); |
Srivatsa Vaddagiri | 9b5b775 | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 8644 | for_each_possible_cpu(i) { |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8645 | unregister_fair_sched_group(tg, i); |
| 8646 | unregister_rt_sched_group(tg, i); |
Srivatsa Vaddagiri | 9b5b775 | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 8647 | } |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8648 | list_del_rcu(&tg->list); |
Peter Zijlstra | f473aa5 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 8649 | list_del_rcu(&tg->siblings); |
Peter Zijlstra | 8ed3699 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8650 | spin_unlock_irqrestore(&task_group_lock, flags); |
Srivatsa Vaddagiri | 9b5b775 | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 8651 | |
Srivatsa Vaddagiri | 9b5b775 | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 8652 | /* wait for possible concurrent references to cfs_rqs to complete */ |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8653 | call_rcu(&tg->rcu, free_sched_group_rcu); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8654 | } |
| 8655 | |
Srivatsa Vaddagiri | 9b5b775 | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 8656 | /* change task's runqueue when it moves between groups. |
Ingo Molnar | 3a25201 | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 8657 | * The caller of this function should have put the task in its new group |
| 8658 | * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to |
| 8659 | * reflect its new group. |
Srivatsa Vaddagiri | 9b5b775 | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 8660 | */ |
| 8661 | void sched_move_task(struct task_struct *tsk) |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8662 | { |
| 8663 | int on_rq, running; |
| 8664 | unsigned long flags; |
| 8665 | struct rq *rq; |
| 8666 | |
| 8667 | rq = task_rq_lock(tsk, &flags); |
| 8668 | |
Dmitry Adamushko | 051a1d1 | 2007-12-18 15:21:13 +0100 | [diff] [blame] | 8669 | running = task_current(rq, tsk); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8670 | on_rq = tsk->se.on_rq; |
| 8671 | |
Hiroshi Shimamoto | 0e1f348 | 2008-03-10 11:01:20 -0700 | [diff] [blame] | 8672 | if (on_rq) |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8673 | dequeue_task(rq, tsk, 0); |
Hiroshi Shimamoto | 0e1f348 | 2008-03-10 11:01:20 -0700 | [diff] [blame] | 8674 | if (unlikely(running)) |
| 8675 | tsk->sched_class->put_prev_task(rq, tsk); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8676 | |
Peter Zijlstra | 810b381 | 2008-02-29 15:21:01 -0500 | [diff] [blame] | 8677 | #ifdef CONFIG_FAIR_GROUP_SCHED |
Peter Zijlstra | b2b5ce0 | 2010-10-15 15:24:15 +0200 | [diff] [blame] | 8678 | if (tsk->sched_class->task_move_group) |
| 8679 | tsk->sched_class->task_move_group(tsk, on_rq); |
| 8680 | else |
Peter Zijlstra | 810b381 | 2008-02-29 15:21:01 -0500 | [diff] [blame] | 8681 | #endif |
Peter Zijlstra | b2b5ce0 | 2010-10-15 15:24:15 +0200 | [diff] [blame] | 8682 | set_task_rq(tsk, task_cpu(tsk)); |
Peter Zijlstra | 810b381 | 2008-02-29 15:21:01 -0500 | [diff] [blame] | 8683 | |
Hiroshi Shimamoto | 0e1f348 | 2008-03-10 11:01:20 -0700 | [diff] [blame] | 8684 | if (unlikely(running)) |
| 8685 | tsk->sched_class->set_curr_task(rq); |
| 8686 | if (on_rq) |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 8687 | enqueue_task(rq, tsk, 0); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8688 | |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8689 | task_rq_unlock(rq, &flags); |
| 8690 | } |
Dhaval Giani | 7c94143 | 2010-01-20 13:26:18 +0100 | [diff] [blame] | 8691 | #endif /* CONFIG_CGROUP_SCHED */ |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8692 | |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8693 | #ifdef CONFIG_FAIR_GROUP_SCHED |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 8694 | static void __set_se_shares(struct sched_entity *se, unsigned long shares) |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8695 | { |
| 8696 | struct cfs_rq *cfs_rq = se->cfs_rq; |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8697 | int on_rq; |
| 8698 | |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8699 | on_rq = se->on_rq; |
Peter Zijlstra | 62fb185 | 2008-02-25 17:34:02 +0100 | [diff] [blame] | 8700 | if (on_rq) |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8701 | dequeue_entity(cfs_rq, se, 0); |
| 8702 | |
| 8703 | se->load.weight = shares; |
Peter Zijlstra | e05510d | 2008-05-05 23:56:17 +0200 | [diff] [blame] | 8704 | se->load.inv_weight = 0; |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8705 | |
Peter Zijlstra | 62fb185 | 2008-02-25 17:34:02 +0100 | [diff] [blame] | 8706 | if (on_rq) |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8707 | enqueue_entity(cfs_rq, se, 0); |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 8708 | } |
Peter Zijlstra | 62fb185 | 2008-02-25 17:34:02 +0100 | [diff] [blame] | 8709 | |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 8710 | static void set_se_shares(struct sched_entity *se, unsigned long shares) |
| 8711 | { |
| 8712 | struct cfs_rq *cfs_rq = se->cfs_rq; |
| 8713 | struct rq *rq = cfs_rq->rq; |
| 8714 | unsigned long flags; |
| 8715 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 8716 | raw_spin_lock_irqsave(&rq->lock, flags); |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 8717 | __set_se_shares(se, shares); |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 8718 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8719 | } |
| 8720 | |
Peter Zijlstra | 8ed3699 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8721 | static DEFINE_MUTEX(shares_mutex); |
| 8722 | |
Ingo Molnar | 4cf86d7 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 8723 | int sched_group_set_shares(struct task_group *tg, unsigned long shares) |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8724 | { |
| 8725 | int i; |
Peter Zijlstra | 8ed3699 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8726 | unsigned long flags; |
Ingo Molnar | c61935f | 2008-01-22 11:24:58 +0100 | [diff] [blame] | 8727 | |
Peter Zijlstra | 62fb185 | 2008-02-25 17:34:02 +0100 | [diff] [blame] | 8728 | /* |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8729 | * We can't change the weight of the root cgroup. |
| 8730 | */ |
| 8731 | if (!tg->se[0]) |
| 8732 | return -EINVAL; |
| 8733 | |
Peter Zijlstra | 18d95a2 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 8734 | if (shares < MIN_SHARES) |
| 8735 | shares = MIN_SHARES; |
Miao Xie | cb4ad1f | 2008-04-28 12:54:56 +0800 | [diff] [blame] | 8736 | else if (shares > MAX_SHARES) |
| 8737 | shares = MAX_SHARES; |
Peter Zijlstra | 62fb185 | 2008-02-25 17:34:02 +0100 | [diff] [blame] | 8738 | |
Peter Zijlstra | 8ed3699 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8739 | mutex_lock(&shares_mutex); |
Srivatsa Vaddagiri | 9b5b775 | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 8740 | if (tg->shares == shares) |
Dhaval Giani | 5cb350b | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 8741 | goto done; |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8742 | |
Peter Zijlstra | 8ed3699 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8743 | spin_lock_irqsave(&task_group_lock, flags); |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8744 | for_each_possible_cpu(i) |
| 8745 | unregister_fair_sched_group(tg, i); |
Peter Zijlstra | f473aa5 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 8746 | list_del_rcu(&tg->siblings); |
Peter Zijlstra | 8ed3699 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8747 | spin_unlock_irqrestore(&task_group_lock, flags); |
Srivatsa Vaddagiri | 6b2d770 | 2008-01-25 21:08:00 +0100 | [diff] [blame] | 8748 | |
| 8749 | /* wait for any ongoing reference to this group to finish */ |
| 8750 | synchronize_sched(); |
| 8751 | |
| 8752 | /* |
| 8753 | * Now we are free to modify the group's share on each cpu |
| 8754 | * w/o tripping rebalance_share or load_balance_fair. |
| 8755 | */ |
| 8756 | tg->shares = shares; |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 8757 | for_each_possible_cpu(i) { |
| 8758 | /* |
| 8759 | * force a rebalance |
| 8760 | */ |
| 8761 | cfs_rq_set_shares(tg->cfs_rq[i], 0); |
Miao Xie | cb4ad1f | 2008-04-28 12:54:56 +0800 | [diff] [blame] | 8762 | set_se_shares(tg->se[i], shares); |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 8763 | } |
Srivatsa Vaddagiri | 6b2d770 | 2008-01-25 21:08:00 +0100 | [diff] [blame] | 8764 | |
| 8765 | /* |
| 8766 | * Enable load balance activity on this group, by inserting it back on |
| 8767 | * each cpu's rq->leaf_cfs_rq_list. |
| 8768 | */ |
Peter Zijlstra | 8ed3699 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8769 | spin_lock_irqsave(&task_group_lock, flags); |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8770 | for_each_possible_cpu(i) |
| 8771 | register_fair_sched_group(tg, i); |
Peter Zijlstra | f473aa5 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 8772 | list_add_rcu(&tg->siblings, &tg->parent->children); |
Peter Zijlstra | 8ed3699 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8773 | spin_unlock_irqrestore(&task_group_lock, flags); |
Dhaval Giani | 5cb350b | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 8774 | done: |
Peter Zijlstra | 8ed3699 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8775 | mutex_unlock(&shares_mutex); |
Srivatsa Vaddagiri | 9b5b775 | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 8776 | return 0; |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8777 | } |
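/*
 * Usage sketch (hypothetical value): writing 2048 to a group's
 * cpu.shares file reaches this function via cpu_shares_write_u64()
 * further below, giving that group roughly twice the weight of a
 * default group (NICE_0_LOAD == 1024); the value is first clamped to
 * the [MIN_SHARES, MAX_SHARES] range.
 */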
| 8778 | |
Dhaval Giani | 5cb350b | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 8779 | unsigned long sched_group_shares(struct task_group *tg) |
| 8780 | { |
| 8781 | return tg->shares; |
| 8782 | } |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8783 | #endif |
Dhaval Giani | 5cb350b | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 8784 | |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8785 | #ifdef CONFIG_RT_GROUP_SCHED |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8786 | /* |
Peter Zijlstra | 9f0c1e5 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8787 | * Ensure that the real time constraints are schedulable. |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8788 | */ |
Peter Zijlstra | 9f0c1e5 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8789 | static DEFINE_MUTEX(rt_constraints_mutex); |
| 8790 | |
| 8791 | static unsigned long to_ratio(u64 period, u64 runtime) |
| 8792 | { |
| 8793 | if (runtime == RUNTIME_INF) |
Peter Zijlstra | 9a7e0b1 | 2008-08-19 12:33:06 +0200 | [diff] [blame] | 8794 | return 1ULL << 20; |
Peter Zijlstra | 9f0c1e5 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8795 | |
Peter Zijlstra | 9a7e0b1 | 2008-08-19 12:33:06 +0200 | [diff] [blame] | 8796 | return div64_u64(runtime << 20, period); |
Peter Zijlstra | 9f0c1e5 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8797 | } |
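/*
 * Worked example (illustrative): with the default 1s period and 950ms
 * runtime, to_ratio(1000000000, 950000000) returns
 * (950000000 << 20) / 1000000000 ~= 996147, i.e. roughly 0.95 in
 * 20-bit fixed point, while RUNTIME_INF maps to the full 1ULL << 20.
 */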
| 8798 | |
Dhaval Giani | 521f1a24 | 2008-02-28 15:21:56 +0530 | [diff] [blame] | 8799 | /* Must be called with tasklist_lock held */ |
| 8800 | static inline int tg_has_rt_tasks(struct task_group *tg) |
| 8801 | { |
| 8802 | struct task_struct *g, *p; |
Peter Zijlstra | 9a7e0b1 | 2008-08-19 12:33:06 +0200 | [diff] [blame] | 8803 | |
Dhaval Giani | 521f1a24 | 2008-02-28 15:21:56 +0530 | [diff] [blame] | 8804 | do_each_thread(g, p) { |
| 8805 | if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg) |
| 8806 | return 1; |
| 8807 | } while_each_thread(g, p); |
Peter Zijlstra | 9a7e0b1 | 2008-08-19 12:33:06 +0200 | [diff] [blame] | 8808 | |
Dhaval Giani | 521f1a24 | 2008-02-28 15:21:56 +0530 | [diff] [blame] | 8809 | return 0; |
| 8810 | } |
| 8811 | |
Peter Zijlstra | 9a7e0b1 | 2008-08-19 12:33:06 +0200 | [diff] [blame] | 8812 | struct rt_schedulable_data { |
| 8813 | struct task_group *tg; |
| 8814 | u64 rt_period; |
| 8815 | u64 rt_runtime; |
| 8816 | }; |
| 8817 | |
| 8818 | static int tg_schedulable(struct task_group *tg, void *data) |
| 8819 | { |
| 8820 | struct rt_schedulable_data *d = data; |
| 8821 | struct task_group *child; |
| 8822 | unsigned long total, sum = 0; |
| 8823 | u64 period, runtime; |
| 8824 | |
| 8825 | period = ktime_to_ns(tg->rt_bandwidth.rt_period); |
| 8826 | runtime = tg->rt_bandwidth.rt_runtime; |
| 8827 | |
| 8828 | if (tg == d->tg) { |
| 8829 | period = d->rt_period; |
| 8830 | runtime = d->rt_runtime; |
| 8831 | } |
| 8832 | |
Peter Zijlstra | 4653f80 | 2008-09-23 15:33:44 +0200 | [diff] [blame] | 8833 | /* |
| 8834 | * Cannot have more runtime than the period. |
| 8835 | */ |
| 8836 | if (runtime > period && runtime != RUNTIME_INF) |
| 8837 | return -EINVAL; |
| 8838 | |
| 8839 | /* |
| 8840 | * Ensure we don't starve existing RT tasks. |
| 8841 | */ |
Peter Zijlstra | 9a7e0b1 | 2008-08-19 12:33:06 +0200 | [diff] [blame] | 8842 | if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg)) |
| 8843 | return -EBUSY; |
| 8844 | |
| 8845 | total = to_ratio(period, runtime); |
| 8846 | |
Peter Zijlstra | 4653f80 | 2008-09-23 15:33:44 +0200 | [diff] [blame] | 8847 | /* |
| 8848 | * Nobody can have more than the global setting allows. |
| 8849 | */ |
| 8850 | if (total > to_ratio(global_rt_period(), global_rt_runtime())) |
| 8851 | return -EINVAL; |
| 8852 | |
| 8853 | /* |
| 8854 | * The sum of our children's runtime should not exceed our own. |
| 8855 | */ |
Peter Zijlstra | 9a7e0b1 | 2008-08-19 12:33:06 +0200 | [diff] [blame] | 8856 | list_for_each_entry_rcu(child, &tg->children, siblings) { |
| 8857 | period = ktime_to_ns(child->rt_bandwidth.rt_period); |
| 8858 | runtime = child->rt_bandwidth.rt_runtime; |
| 8859 | |
| 8860 | if (child == d->tg) { |
| 8861 | period = d->rt_period; |
| 8862 | runtime = d->rt_runtime; |
| 8863 | } |
| 8864 | |
| 8865 | sum += to_ratio(period, runtime); |
| 8866 | } |
| 8867 | |
| 8868 | if (sum > total) |
| 8869 | return -EINVAL; |
| 8870 | |
| 8871 | return 0; |
| 8872 | } |
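/*
 * Illustrative example (hypothetical numbers): a group with a 1s period
 * and 400ms runtime has a ratio of ~0.4. Two children with 200ms
 * runtime each sum to 0.4 <= 0.4 and pass, but giving a third child
 * 200ms as well pushes the children's sum to 0.6 > 0.4 and the walk
 * returns -EINVAL.
 */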
| 8873 | |
| 8874 | static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) |
| 8875 | { |
| 8876 | struct rt_schedulable_data data = { |
| 8877 | .tg = tg, |
| 8878 | .rt_period = period, |
| 8879 | .rt_runtime = runtime, |
| 8880 | }; |
| 8881 | |
| 8882 | return walk_tg_tree(tg_schedulable, tg_nop, &data); |
| 8883 | } |
| 8884 | |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8885 | static int tg_set_bandwidth(struct task_group *tg, |
| 8886 | u64 rt_period, u64 rt_runtime) |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8887 | { |
Peter Zijlstra | ac086bc | 2008-04-19 19:44:58 +0200 | [diff] [blame] | 8888 | int i, err = 0; |
Peter Zijlstra | 9f0c1e5 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8889 | |
Peter Zijlstra | 9f0c1e5 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8890 | mutex_lock(&rt_constraints_mutex); |
Dhaval Giani | 521f1a24 | 2008-02-28 15:21:56 +0530 | [diff] [blame] | 8891 | read_lock(&tasklist_lock); |
Peter Zijlstra | 9a7e0b1 | 2008-08-19 12:33:06 +0200 | [diff] [blame] | 8892 | err = __rt_schedulable(tg, rt_period, rt_runtime); |
| 8893 | if (err) |
Dhaval Giani | 521f1a24 | 2008-02-28 15:21:56 +0530 | [diff] [blame] | 8894 | goto unlock; |
Peter Zijlstra | ac086bc | 2008-04-19 19:44:58 +0200 | [diff] [blame] | 8895 | |
Thomas Gleixner | 0986b11 | 2009-11-17 15:32:06 +0100 | [diff] [blame] | 8896 | raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8897 | tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); |
| 8898 | tg->rt_bandwidth.rt_runtime = rt_runtime; |
Peter Zijlstra | ac086bc | 2008-04-19 19:44:58 +0200 | [diff] [blame] | 8899 | |
| 8900 | for_each_possible_cpu(i) { |
| 8901 | struct rt_rq *rt_rq = tg->rt_rq[i]; |
| 8902 | |
Thomas Gleixner | 0986b11 | 2009-11-17 15:32:06 +0100 | [diff] [blame] | 8903 | raw_spin_lock(&rt_rq->rt_runtime_lock); |
Peter Zijlstra | ac086bc | 2008-04-19 19:44:58 +0200 | [diff] [blame] | 8904 | rt_rq->rt_runtime = rt_runtime; |
Thomas Gleixner | 0986b11 | 2009-11-17 15:32:06 +0100 | [diff] [blame] | 8905 | raw_spin_unlock(&rt_rq->rt_runtime_lock); |
Peter Zijlstra | ac086bc | 2008-04-19 19:44:58 +0200 | [diff] [blame] | 8906 | } |
Thomas Gleixner | 0986b11 | 2009-11-17 15:32:06 +0100 | [diff] [blame] | 8907 | raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); |
Peter Zijlstra | 4924627 | 2010-10-17 21:46:10 +0200 | [diff] [blame] | 8908 | unlock: |
Dhaval Giani | 521f1a24 | 2008-02-28 15:21:56 +0530 | [diff] [blame] | 8909 | read_unlock(&tasklist_lock); |
Peter Zijlstra | 9f0c1e5 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8910 | mutex_unlock(&rt_constraints_mutex); |
| 8911 | |
| 8912 | return err; |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8913 | } |
| 8914 | |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8915 | int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us) |
| 8916 | { |
| 8917 | u64 rt_runtime, rt_period; |
| 8918 | |
| 8919 | rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period); |
| 8920 | rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC; |
| 8921 | if (rt_runtime_us < 0) |
| 8922 | rt_runtime = RUNTIME_INF; |
| 8923 | |
| 8924 | return tg_set_bandwidth(tg, rt_period, rt_runtime); |
| 8925 | } |
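/*
 * Usage sketch (hypothetical value): writing 500000 to a group's
 * cpu.rt_runtime_us file ends up here as rt_runtime_us == 500000,
 * which becomes 500000 * NSEC_PER_USEC = 500ms of RT runtime per
 * period; any negative value selects RUNTIME_INF, i.e. no RT
 * throttling for that group.
 */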
| 8926 | |
Peter Zijlstra | 9f0c1e5 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8927 | long sched_group_rt_runtime(struct task_group *tg) |
| 8928 | { |
| 8929 | u64 rt_runtime_us; |
| 8930 | |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8931 | if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF) |
Peter Zijlstra | 9f0c1e5 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8932 | return -1; |
| 8933 | |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8934 | rt_runtime_us = tg->rt_bandwidth.rt_runtime; |
Peter Zijlstra | 9f0c1e5 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8935 | do_div(rt_runtime_us, NSEC_PER_USEC); |
| 8936 | return rt_runtime_us; |
| 8937 | } |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8938 | |
| 8939 | int sched_group_set_rt_period(struct task_group *tg, long rt_period_us) |
| 8940 | { |
| 8941 | u64 rt_runtime, rt_period; |
| 8942 | |
| 8943 | rt_period = (u64)rt_period_us * NSEC_PER_USEC; |
| 8944 | rt_runtime = tg->rt_bandwidth.rt_runtime; |
| 8945 | |
Raistlin | 619b048 | 2008-06-26 18:54:09 +0200 | [diff] [blame] | 8946 | if (rt_period == 0) |
| 8947 | return -EINVAL; |
| 8948 | |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8949 | return tg_set_bandwidth(tg, rt_period, rt_runtime); |
| 8950 | } |
| 8951 | |
| 8952 | long sched_group_rt_period(struct task_group *tg) |
| 8953 | { |
| 8954 | u64 rt_period_us; |
| 8955 | |
| 8956 | rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period); |
| 8957 | do_div(rt_period_us, NSEC_PER_USEC); |
| 8958 | return rt_period_us; |
| 8959 | } |
| 8960 | |
| 8961 | static int sched_rt_global_constraints(void) |
| 8962 | { |
Peter Zijlstra | 4653f80 | 2008-09-23 15:33:44 +0200 | [diff] [blame] | 8963 | u64 runtime, period; |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8964 | int ret = 0; |
| 8965 | |
Hiroshi Shimamoto | ec5d498 | 2008-09-10 17:00:19 -0700 | [diff] [blame] | 8966 | if (sysctl_sched_rt_period <= 0) |
| 8967 | return -EINVAL; |
| 8968 | |
Peter Zijlstra | 4653f80 | 2008-09-23 15:33:44 +0200 | [diff] [blame] | 8969 | runtime = global_rt_runtime(); |
| 8970 | period = global_rt_period(); |
| 8971 | |
| 8972 | /* |
| 8973 | * Sanity check on the sysctl variables. |
| 8974 | */ |
| 8975 | if (runtime > period && runtime != RUNTIME_INF) |
| 8976 | return -EINVAL; |
Peter Zijlstra | 10b612f | 2008-06-19 14:22:27 +0200 | [diff] [blame] | 8977 | |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8978 | mutex_lock(&rt_constraints_mutex); |
Peter Zijlstra | 9a7e0b1 | 2008-08-19 12:33:06 +0200 | [diff] [blame] | 8979 | read_lock(&tasklist_lock); |
Peter Zijlstra | 4653f80 | 2008-09-23 15:33:44 +0200 | [diff] [blame] | 8980 | ret = __rt_schedulable(NULL, 0, 0); |
Peter Zijlstra | 9a7e0b1 | 2008-08-19 12:33:06 +0200 | [diff] [blame] | 8981 | read_unlock(&tasklist_lock); |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8982 | mutex_unlock(&rt_constraints_mutex); |
| 8983 | |
| 8984 | return ret; |
| 8985 | } |
Dhaval Giani | 54e9912 | 2009-02-27 15:13:54 +0530 | [diff] [blame] | 8986 | |
| 8987 | int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk) |
| 8988 | { |
| 8989 | /* Don't accept realtime tasks when there is no way for them to run */ |
| 8990 | if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0) |
| 8991 | return 0; |
| 8992 | |
| 8993 | return 1; |
| 8994 | } |
| 8995 | |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 8996 | #else /* !CONFIG_RT_GROUP_SCHED */ |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8997 | static int sched_rt_global_constraints(void) |
| 8998 | { |
Peter Zijlstra | ac086bc | 2008-04-19 19:44:58 +0200 | [diff] [blame] | 8999 | unsigned long flags; |
| 9000 | int i; |
| 9001 | |
Hiroshi Shimamoto | ec5d498 | 2008-09-10 17:00:19 -0700 | [diff] [blame] | 9002 | if (sysctl_sched_rt_period <= 0) |
| 9003 | return -EINVAL; |
| 9004 | |
Peter Zijlstra | 60aa605 | 2009-05-05 17:50:21 +0200 | [diff] [blame] | 9005 | /* |
| 9006 | * There are always some RT tasks in the root group |
| 9007 | * -- migration, kstopmachine etc. |
| 9008 | */ |
| 9009 | if (sysctl_sched_rt_runtime == 0) |
| 9010 | return -EBUSY; |
| 9011 | |
Thomas Gleixner | 0986b11 | 2009-11-17 15:32:06 +0100 | [diff] [blame] | 9012 | raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); |
Peter Zijlstra | ac086bc | 2008-04-19 19:44:58 +0200 | [diff] [blame] | 9013 | for_each_possible_cpu(i) { |
| 9014 | struct rt_rq *rt_rq = &cpu_rq(i)->rt; |
| 9015 | |
Thomas Gleixner | 0986b11 | 2009-11-17 15:32:06 +0100 | [diff] [blame] | 9016 | raw_spin_lock(&rt_rq->rt_runtime_lock); |
Peter Zijlstra | ac086bc | 2008-04-19 19:44:58 +0200 | [diff] [blame] | 9017 | rt_rq->rt_runtime = global_rt_runtime(); |
Thomas Gleixner | 0986b11 | 2009-11-17 15:32:06 +0100 | [diff] [blame] | 9018 | raw_spin_unlock(&rt_rq->rt_runtime_lock); |
Peter Zijlstra | ac086bc | 2008-04-19 19:44:58 +0200 | [diff] [blame] | 9019 | } |
Thomas Gleixner | 0986b11 | 2009-11-17 15:32:06 +0100 | [diff] [blame] | 9020 | raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); |
Peter Zijlstra | ac086bc | 2008-04-19 19:44:58 +0200 | [diff] [blame] | 9021 | |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 9022 | return 0; |
| 9023 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 9024 | #endif /* CONFIG_RT_GROUP_SCHED */ |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 9025 | |
| 9026 | int sched_rt_handler(struct ctl_table *table, int write, |
Alexey Dobriyan | 8d65af7 | 2009-09-23 15:57:19 -0700 | [diff] [blame] | 9027 | void __user *buffer, size_t *lenp, |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 9028 | loff_t *ppos) |
| 9029 | { |
| 9030 | int ret; |
| 9031 | int old_period, old_runtime; |
| 9032 | static DEFINE_MUTEX(mutex); |
| 9033 | |
| 9034 | mutex_lock(&mutex); |
| 9035 | old_period = sysctl_sched_rt_period; |
| 9036 | old_runtime = sysctl_sched_rt_runtime; |
| 9037 | |
Alexey Dobriyan | 8d65af7 | 2009-09-23 15:57:19 -0700 | [diff] [blame] | 9038 | ret = proc_dointvec(table, write, buffer, lenp, ppos); |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 9039 | |
| 9040 | if (!ret && write) { |
| 9041 | ret = sched_rt_global_constraints(); |
| 9042 | if (ret) { |
| 9043 | sysctl_sched_rt_period = old_period; |
| 9044 | sysctl_sched_rt_runtime = old_runtime; |
| 9045 | } else { |
| 9046 | def_rt_bandwidth.rt_runtime = global_rt_runtime(); |
| 9047 | def_rt_bandwidth.rt_period = |
| 9048 | ns_to_ktime(global_rt_period()); |
| 9049 | } |
| 9050 | } |
| 9051 | mutex_unlock(&mutex); |
| 9052 | |
| 9053 | return ret; |
| 9054 | } |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9055 | |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 9056 | #ifdef CONFIG_CGROUP_SCHED |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9057 | |
| 9058 | /* return corresponding task_group object of a cgroup */ |
Paul Menage | 2b01dfe | 2007-10-24 18:23:50 +0200 | [diff] [blame] | 9059 | static inline struct task_group *cgroup_tg(struct cgroup *cgrp) |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9060 | { |
Paul Menage | 2b01dfe | 2007-10-24 18:23:50 +0200 | [diff] [blame] | 9061 | return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id), |
| 9062 | struct task_group, css); |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9063 | } |
| 9064 | |
| 9065 | static struct cgroup_subsys_state * |
Paul Menage | 2b01dfe | 2007-10-24 18:23:50 +0200 | [diff] [blame] | 9066 | cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp) |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9067 | { |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 9068 | struct task_group *tg, *parent; |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9069 | |
Paul Menage | 2b01dfe | 2007-10-24 18:23:50 +0200 | [diff] [blame] | 9070 | if (!cgrp->parent) { |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9071 | /* This is early initialization for the top cgroup */ |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9072 | return &init_task_group.css; |
| 9073 | } |
| 9074 | |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 9075 | parent = cgroup_tg(cgrp->parent); |
| 9076 | tg = sched_create_group(parent); |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9077 | if (IS_ERR(tg)) |
| 9078 | return ERR_PTR(-ENOMEM); |
| 9079 | |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9080 | return &tg->css; |
| 9081 | } |
| 9082 | |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 9083 | static void |
| 9084 | cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp) |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9085 | { |
Paul Menage | 2b01dfe | 2007-10-24 18:23:50 +0200 | [diff] [blame] | 9086 | struct task_group *tg = cgroup_tg(cgrp); |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9087 | |
| 9088 | sched_destroy_group(tg); |
| 9089 | } |
| 9090 | |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 9091 | static int |
Ben Blum | be367d0 | 2009-09-23 15:56:31 -0700 | [diff] [blame] | 9092 | cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk) |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9093 | { |
Peter Zijlstra | b68aa23 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 9094 | #ifdef CONFIG_RT_GROUP_SCHED |
Dhaval Giani | 54e9912 | 2009-02-27 15:13:54 +0530 | [diff] [blame] | 9095 | if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk)) |
Peter Zijlstra | b68aa23 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 9096 | return -EINVAL; |
| 9097 | #else |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9098 | /* We don't support RT tasks being in separate groups */ |
| 9099 | if (tsk->sched_class != &fair_sched_class) |
| 9100 | return -EINVAL; |
Peter Zijlstra | b68aa23 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 9101 | #endif |
Ben Blum | be367d0 | 2009-09-23 15:56:31 -0700 | [diff] [blame] | 9102 | return 0; |
| 9103 | } |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9104 | |
Ben Blum | be367d0 | 2009-09-23 15:56:31 -0700 | [diff] [blame] | 9105 | static int |
| 9106 | cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp, |
| 9107 | struct task_struct *tsk, bool threadgroup) |
| 9108 | { |
| 9109 | int retval = cpu_cgroup_can_attach_task(cgrp, tsk); |
| 9110 | if (retval) |
| 9111 | return retval; |
| 9112 | if (threadgroup) { |
| 9113 | struct task_struct *c; |
| 9114 | rcu_read_lock(); |
| 9115 | list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) { |
| 9116 | retval = cpu_cgroup_can_attach_task(cgrp, c); |
| 9117 | if (retval) { |
| 9118 | rcu_read_unlock(); |
| 9119 | return retval; |
| 9120 | } |
| 9121 | } |
| 9122 | rcu_read_unlock(); |
| 9123 | } |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9124 | return 0; |
| 9125 | } |
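/*
 * When threadgroup is true, every thread in tsk's group has to pass the
 * per-task check above, so (for example, with CONFIG_RT_GROUP_SCHED) a
 * single RT thread whose destination group has no RT runtime is enough
 * to reject the whole attach.
 */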
| 9126 | |
| 9127 | static void |
Paul Menage | 2b01dfe | 2007-10-24 18:23:50 +0200 | [diff] [blame] | 9128 | cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp, |
Ben Blum | be367d0 | 2009-09-23 15:56:31 -0700 | [diff] [blame] | 9129 | struct cgroup *old_cont, struct task_struct *tsk, |
| 9130 | bool threadgroup) |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9131 | { |
| 9132 | sched_move_task(tsk); |
Ben Blum | be367d0 | 2009-09-23 15:56:31 -0700 | [diff] [blame] | 9133 | if (threadgroup) { |
| 9134 | struct task_struct *c; |
| 9135 | rcu_read_lock(); |
| 9136 | list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) { |
| 9137 | sched_move_task(c); |
| 9138 | } |
| 9139 | rcu_read_unlock(); |
| 9140 | } |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9141 | } |
| 9142 | |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 9143 | #ifdef CONFIG_FAIR_GROUP_SCHED |
Paul Menage | f4c753b | 2008-04-29 00:59:56 -0700 | [diff] [blame] | 9144 | static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype, |
Paul Menage | 2b01dfe | 2007-10-24 18:23:50 +0200 | [diff] [blame] | 9145 | u64 shareval) |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9146 | { |
Paul Menage | 2b01dfe | 2007-10-24 18:23:50 +0200 | [diff] [blame] | 9147 | return sched_group_set_shares(cgroup_tg(cgrp), shareval); |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9148 | } |
| 9149 | |
Paul Menage | f4c753b | 2008-04-29 00:59:56 -0700 | [diff] [blame] | 9150 | static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft) |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9151 | { |
Paul Menage | 2b01dfe | 2007-10-24 18:23:50 +0200 | [diff] [blame] | 9152 | struct task_group *tg = cgroup_tg(cgrp); |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9153 | |
| 9154 | return (u64) tg->shares; |
| 9155 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 9156 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9157 | |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 9158 | #ifdef CONFIG_RT_GROUP_SCHED |
Mirco Tischler | 0c70814 | 2008-05-14 16:05:46 -0700 | [diff] [blame] | 9159 | static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft, |
Paul Menage | 06ecb27 | 2008-04-29 01:00:06 -0700 | [diff] [blame] | 9160 | s64 val) |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 9161 | { |
Paul Menage | 06ecb27 | 2008-04-29 01:00:06 -0700 | [diff] [blame] | 9162 | return sched_group_set_rt_runtime(cgroup_tg(cgrp), val); |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 9163 | } |
| 9164 | |
Paul Menage | 06ecb27 | 2008-04-29 01:00:06 -0700 | [diff] [blame] | 9165 | static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft) |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 9166 | { |
Paul Menage | 06ecb27 | 2008-04-29 01:00:06 -0700 | [diff] [blame] | 9167 | return sched_group_rt_runtime(cgroup_tg(cgrp)); |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 9168 | } |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 9169 | |
| 9170 | static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype, |
| 9171 | u64 rt_period_us) |
| 9172 | { |
| 9173 | return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us); |
| 9174 | } |
| 9175 | |
| 9176 | static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft) |
| 9177 | { |
| 9178 | return sched_group_rt_period(cgroup_tg(cgrp)); |
| 9179 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 9180 | #endif /* CONFIG_RT_GROUP_SCHED */ |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 9181 | |
Paul Menage | fe5c7cc | 2007-10-29 21:18:11 +0100 | [diff] [blame] | 9182 | static struct cftype cpu_files[] = { |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 9183 | #ifdef CONFIG_FAIR_GROUP_SCHED |
Paul Menage | fe5c7cc | 2007-10-29 21:18:11 +0100 | [diff] [blame] | 9184 | { |
| 9185 | .name = "shares", |
Paul Menage | f4c753b | 2008-04-29 00:59:56 -0700 | [diff] [blame] | 9186 | .read_u64 = cpu_shares_read_u64, |
| 9187 | .write_u64 = cpu_shares_write_u64, |
Paul Menage | fe5c7cc | 2007-10-29 21:18:11 +0100 | [diff] [blame] | 9188 | }, |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 9189 | #endif |
| 9190 | #ifdef CONFIG_RT_GROUP_SCHED |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 9191 | { |
Peter Zijlstra | 9f0c1e5 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 9192 | .name = "rt_runtime_us", |
Paul Menage | 06ecb27 | 2008-04-29 01:00:06 -0700 | [diff] [blame] | 9193 | .read_s64 = cpu_rt_runtime_read, |
| 9194 | .write_s64 = cpu_rt_runtime_write, |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 9195 | }, |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 9196 | { |
| 9197 | .name = "rt_period_us", |
Paul Menage | f4c753b | 2008-04-29 00:59:56 -0700 | [diff] [blame] | 9198 | .read_u64 = cpu_rt_period_read_uint, |
| 9199 | .write_u64 = cpu_rt_period_write_uint, |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 9200 | }, |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 9201 | #endif |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9202 | }; |
| 9203 | |
| 9204 | static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont) |
| 9205 | { |
Paul Menage | fe5c7cc | 2007-10-29 21:18:11 +0100 | [diff] [blame] | 9206 | return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files)); |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9207 | } |
| 9208 | |
| 9209 | struct cgroup_subsys cpu_cgroup_subsys = { |
Ingo Molnar | 38605ca | 2007-10-29 21:18:11 +0100 | [diff] [blame] | 9210 | .name = "cpu", |
| 9211 | .create = cpu_cgroup_create, |
| 9212 | .destroy = cpu_cgroup_destroy, |
| 9213 | .can_attach = cpu_cgroup_can_attach, |
| 9214 | .attach = cpu_cgroup_attach, |
| 9215 | .populate = cpu_cgroup_populate, |
| 9216 | .subsys_id = cpu_cgroup_subsys_id, |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9217 | .early_init = 1, |
| 9218 | }; |
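/*
 * Illustrative view of the resulting control files, assuming the cpu
 * controller is mounted at /cgroup/cpu:
 *
 *   mkdir /cgroup/cpu/batch
 *   echo 512 > /cgroup/cpu/batch/cpu.shares             (half the default 1024 weight)
 *   echo 500000 > /cgroup/cpu/batch/cpu.rt_runtime_us   (CONFIG_RT_GROUP_SCHED only)
 *   echo $PID > /cgroup/cpu/batch/tasks                 (routed via cpu_cgroup_attach)
 */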
| 9219 | |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 9220 | #endif /* CONFIG_CGROUP_SCHED */ |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9221 | |
| 9222 | #ifdef CONFIG_CGROUP_CPUACCT |
| 9223 | |
| 9224 | /* |
| 9225 | * CPU accounting code for task groups. |
| 9226 | * |
| 9227 | * Based on the work by Paul Menage (menage@google.com) and Balbir Singh |
| 9228 | * (balbir@in.ibm.com). |
| 9229 | */ |
| 9230 | |
Bharata B Rao | 934352f | 2008-11-10 20:41:13 +0530 | [diff] [blame] | 9231 | /* track cpu usage of a group of tasks and its child groups */ |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9232 | struct cpuacct { |
| 9233 | struct cgroup_subsys_state css; |
| 9234 | /* cpuusage holds pointer to a u64-type object on every cpu */ |
Tejun Heo | 43cf38e | 2010-02-02 14:38:57 +0900 | [diff] [blame] | 9235 | u64 __percpu *cpuusage; |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 9236 | struct percpu_counter cpustat[CPUACCT_STAT_NSTATS]; |
Bharata B Rao | 934352f | 2008-11-10 20:41:13 +0530 | [diff] [blame] | 9237 | struct cpuacct *parent; |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9238 | }; |
| 9239 | |
| 9240 | struct cgroup_subsys cpuacct_subsys; |
| 9241 | |
| 9242 | /* return the cpu accounting group corresponding to this cgroup */ |
Dhaval Giani | 32cd756 | 2008-02-29 10:02:43 +0530 | [diff] [blame] | 9243 | static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp) |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9244 | { |
Dhaval Giani | 32cd756 | 2008-02-29 10:02:43 +0530 | [diff] [blame] | 9245 | return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id), |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9246 | struct cpuacct, css); |
| 9247 | } |
| 9248 | |
| 9249 | /* return cpu accounting group to which this task belongs */ |
| 9250 | static inline struct cpuacct *task_ca(struct task_struct *tsk) |
| 9251 | { |
| 9252 | return container_of(task_subsys_state(tsk, cpuacct_subsys_id), |
| 9253 | struct cpuacct, css); |
| 9254 | } |
| 9255 | |
| 9256 | /* create a new cpu accounting group */ |
| 9257 | static struct cgroup_subsys_state *cpuacct_create( |
Dhaval Giani | 32cd756 | 2008-02-29 10:02:43 +0530 | [diff] [blame] | 9258 | struct cgroup_subsys *ss, struct cgroup *cgrp) |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9259 | { |
| 9260 | struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL); |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 9261 | int i; |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9262 | |
| 9263 | if (!ca) |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 9264 | goto out; |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9265 | |
| 9266 | ca->cpuusage = alloc_percpu(u64); |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 9267 | if (!ca->cpuusage) |
| 9268 | goto out_free_ca; |
| 9269 | |
| 9270 | for (i = 0; i < CPUACCT_STAT_NSTATS; i++) |
| 9271 | if (percpu_counter_init(&ca->cpustat[i], 0)) |
| 9272 | goto out_free_counters; |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9273 | |
Bharata B Rao | 934352f | 2008-11-10 20:41:13 +0530 | [diff] [blame] | 9274 | if (cgrp->parent) |
| 9275 | ca->parent = cgroup_ca(cgrp->parent); |
| 9276 | |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9277 | return &ca->css; |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 9278 | |
| 9279 | out_free_counters: |
| 9280 | while (--i >= 0) |
| 9281 | percpu_counter_destroy(&ca->cpustat[i]); |
| 9282 | free_percpu(ca->cpuusage); |
| 9283 | out_free_ca: |
| 9284 | kfree(ca); |
| 9285 | out: |
| 9286 | return ERR_PTR(-ENOMEM); |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9287 | } |
| 9288 | |
| 9289 | /* destroy an existing cpu accounting group */ |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 9290 | static void |
Dhaval Giani | 32cd756 | 2008-02-29 10:02:43 +0530 | [diff] [blame] | 9291 | cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp) |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9292 | { |
Dhaval Giani | 32cd756 | 2008-02-29 10:02:43 +0530 | [diff] [blame] | 9293 | struct cpuacct *ca = cgroup_ca(cgrp); |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 9294 | int i; |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9295 | |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 9296 | for (i = 0; i < CPUACCT_STAT_NSTATS; i++) |
| 9297 | percpu_counter_destroy(&ca->cpustat[i]); |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9298 | free_percpu(ca->cpuusage); |
| 9299 | kfree(ca); |
| 9300 | } |
| 9301 | |
Ken Chen | 720f549 | 2008-12-15 22:02:01 -0800 | [diff] [blame] | 9302 | static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu) |
| 9303 | { |
Rusty Russell | b36128c | 2009-02-20 16:29:08 +0900 | [diff] [blame] | 9304 | u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); |
Ken Chen | 720f549 | 2008-12-15 22:02:01 -0800 | [diff] [blame] | 9305 | u64 data; |
| 9306 | |
| 9307 | #ifndef CONFIG_64BIT |
| 9308 | /* |
| 9309 | * Take rq->lock to make 64-bit read safe on 32-bit platforms. |
| 9310 | */ |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 9311 | raw_spin_lock_irq(&cpu_rq(cpu)->lock); |
Ken Chen | 720f549 | 2008-12-15 22:02:01 -0800 | [diff] [blame] | 9312 | data = *cpuusage; |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 9313 | raw_spin_unlock_irq(&cpu_rq(cpu)->lock); |
Ken Chen | 720f549 | 2008-12-15 22:02:01 -0800 | [diff] [blame] | 9314 | #else |
| 9315 | data = *cpuusage; |
| 9316 | #endif |
| 9317 | |
| 9318 | return data; |
| 9319 | } |
| 9320 | |
| 9321 | static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val) |
| 9322 | { |
Rusty Russell | b36128c | 2009-02-20 16:29:08 +0900 | [diff] [blame] | 9323 | u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); |
Ken Chen | 720f549 | 2008-12-15 22:02:01 -0800 | [diff] [blame] | 9324 | |
| 9325 | #ifndef CONFIG_64BIT |
| 9326 | /* |
| 9327 | * Take rq->lock to make 64-bit write safe on 32-bit platforms. |
| 9328 | */ |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 9329 | raw_spin_lock_irq(&cpu_rq(cpu)->lock); |
Ken Chen | 720f549 | 2008-12-15 22:02:01 -0800 | [diff] [blame] | 9330 | *cpuusage = val; |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 9331 | raw_spin_unlock_irq(&cpu_rq(cpu)->lock); |
Ken Chen | 720f549 | 2008-12-15 22:02:01 -0800 | [diff] [blame] | 9332 | #else |
| 9333 | *cpuusage = val; |
| 9334 | #endif |
| 9335 | } |
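/*
 * The rq->lock taken above pairs with cpuacct_charge(), which updates
 * *cpuusage while holding the same per-cpu rq->lock: on 32-bit
 * architectures a 64-bit load or store is split into two word-sized
 * accesses, so an unlocked reader could observe a torn, half-updated
 * value.
 */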
| 9336 | |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9337 | /* return total cpu usage (in nanoseconds) of a group */ |
Dhaval Giani | 32cd756 | 2008-02-29 10:02:43 +0530 | [diff] [blame] | 9338 | static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft) |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9339 | { |
Dhaval Giani | 32cd756 | 2008-02-29 10:02:43 +0530 | [diff] [blame] | 9340 | struct cpuacct *ca = cgroup_ca(cgrp); |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9341 | u64 totalcpuusage = 0; |
| 9342 | int i; |
| 9343 | |
Ken Chen | 720f549 | 2008-12-15 22:02:01 -0800 | [diff] [blame] | 9344 | for_each_present_cpu(i) |
| 9345 | totalcpuusage += cpuacct_cpuusage_read(ca, i); |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9346 | |
| 9347 | return totalcpuusage; |
| 9348 | } |
| 9349 | |
Dhaval Giani | 0297b80 | 2008-02-29 10:02:44 +0530 | [diff] [blame] | 9350 | static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype, |
| 9351 | u64 reset) |
| 9352 | { |
| 9353 | struct cpuacct *ca = cgroup_ca(cgrp); |
| 9354 | int err = 0; |
| 9355 | int i; |
| 9356 | |
| 9357 | if (reset) { |
| 9358 | err = -EINVAL; |
| 9359 | goto out; |
| 9360 | } |
| 9361 | |
Ken Chen | 720f549 | 2008-12-15 22:02:01 -0800 | [diff] [blame] | 9362 | for_each_present_cpu(i) |
| 9363 | cpuacct_cpuusage_write(ca, i, 0); |
Dhaval Giani | 0297b80 | 2008-02-29 10:02:44 +0530 | [diff] [blame] | 9364 | |
Dhaval Giani | 0297b80 | 2008-02-29 10:02:44 +0530 | [diff] [blame] | 9365 | out: |
| 9366 | return err; |
| 9367 | } |
| 9368 | |
Ken Chen | e9515c3 | 2008-12-15 22:04:15 -0800 | [diff] [blame] | 9369 | static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft, |
| 9370 | struct seq_file *m) |
| 9371 | { |
| 9372 | struct cpuacct *ca = cgroup_ca(cgroup); |
| 9373 | u64 percpu; |
| 9374 | int i; |
| 9375 | |
| 9376 | for_each_present_cpu(i) { |
| 9377 | percpu = cpuacct_cpuusage_read(ca, i); |
| 9378 | seq_printf(m, "%llu ", (unsigned long long) percpu); |
| 9379 | } |
| 9380 | seq_printf(m, "\n"); |
| 9381 | return 0; |
| 9382 | } |
| 9383 | |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 9384 | static const char *cpuacct_stat_desc[] = { |
| 9385 | [CPUACCT_STAT_USER] = "user", |
| 9386 | [CPUACCT_STAT_SYSTEM] = "system", |
| 9387 | }; |
| 9388 | |
| 9389 | static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft, |
| 9390 | struct cgroup_map_cb *cb) |
| 9391 | { |
| 9392 | struct cpuacct *ca = cgroup_ca(cgrp); |
| 9393 | int i; |
| 9394 | |
| 9395 | for (i = 0; i < CPUACCT_STAT_NSTATS; i++) { |
| 9396 | s64 val = percpu_counter_read(&ca->cpustat[i]); |
| 9397 | val = cputime64_to_clock_t(val); |
| 9398 | cb->fill(cb, cpuacct_stat_desc[i], val); |
| 9399 | } |
| 9400 | return 0; |
| 9401 | } |
| 9402 | |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9403 | static struct cftype files[] = { |
| 9404 | { |
| 9405 | .name = "usage", |
Paul Menage | f4c753b | 2008-04-29 00:59:56 -0700 | [diff] [blame] | 9406 | .read_u64 = cpuusage_read, |
| 9407 | .write_u64 = cpuusage_write, |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9408 | }, |
Ken Chen | e9515c3 | 2008-12-15 22:04:15 -0800 | [diff] [blame] | 9409 | { |
| 9410 | .name = "usage_percpu", |
| 9411 | .read_seq_string = cpuacct_percpu_seq_read, |
| 9412 | }, |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 9413 | { |
| 9414 | .name = "stat", |
| 9415 | .read_map = cpuacct_stats_show, |
| 9416 | }, |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9417 | }; |
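/*
 * Illustrative reads, and the write that resets the counters, assuming
 * the cpuacct controller is mounted at /cgroup/cpuacct:
 *
 *   cat /cgroup/cpuacct/grp/cpuacct.usage          total usage in ns (cpuusage_read)
 *   cat /cgroup/cpuacct/grp/cpuacct.usage_percpu   one ns value per present cpu
 *   cat /cgroup/cpuacct/grp/cpuacct.stat           "user N" / "system N" in USER_HZ ticks
 *   echo 0 > /cgroup/cpuacct/grp/cpuacct.usage     only 0 is accepted; clears all cpus
 */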
| 9418 | |
Dhaval Giani | 32cd756 | 2008-02-29 10:02:43 +0530 | [diff] [blame] | 9419 | static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9420 | { |
Dhaval Giani | 32cd756 | 2008-02-29 10:02:43 +0530 | [diff] [blame] | 9421 | return cgroup_add_files(cgrp, ss, files, ARRAY_SIZE(files)); |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9422 | } |
| 9423 | |
| 9424 | /* |
| 9425 | * charge this task's execution time to its accounting group. |
| 9426 | * |
| 9427 | * called with rq->lock held. |
| 9428 | */ |
| 9429 | static void cpuacct_charge(struct task_struct *tsk, u64 cputime) |
| 9430 | { |
| 9431 | struct cpuacct *ca; |
Bharata B Rao | 934352f | 2008-11-10 20:41:13 +0530 | [diff] [blame] | 9432 | int cpu; |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9433 | |
Li Zefan | c40c6f8 | 2009-02-26 15:40:15 +0800 | [diff] [blame] | 9434 | if (unlikely(!cpuacct_subsys.active)) |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9435 | return; |
| 9436 | |
Bharata B Rao | 934352f | 2008-11-10 20:41:13 +0530 | [diff] [blame] | 9437 | cpu = task_cpu(tsk); |
Bharata B Rao | a18b83b | 2009-03-23 10:02:53 +0530 | [diff] [blame] | 9438 | |
| 9439 | rcu_read_lock(); |
| 9440 | |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9441 | ca = task_ca(tsk); |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9442 | |
Bharata B Rao | 934352f | 2008-11-10 20:41:13 +0530 | [diff] [blame] | 9443 | for (; ca; ca = ca->parent) { |
Rusty Russell | b36128c | 2009-02-20 16:29:08 +0900 | [diff] [blame] | 9444 | u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9445 | *cpuusage += cputime; |
| 9446 | } |
Bharata B Rao | a18b83b | 2009-03-23 10:02:53 +0530 | [diff] [blame] | 9447 | |
| 9448 | rcu_read_unlock(); |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9449 | } |
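/*
 * Because the walk above follows ca->parent all the way to the root
 * group, time charged to a task in, say, /cgroup/cpuacct/a/b shows up
 * in b, in a and in the root usage, so a parent's cpuacct.usage always
 * includes that of its children.
 */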
| 9450 | |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 9451 | /* |
Anton Blanchard | fa535a7 | 2010-02-02 14:46:13 -0800 | [diff] [blame] | 9452 | * When CONFIG_VIRT_CPU_ACCOUNTING is enabled, one jiffy can be very large |
| 9453 | * in cputime_t units. As a result, cpuacct_update_stats calls |
| 9454 | * percpu_counter_add with values large enough to always overflow the |
| 9455 | * per-cpu batch limit, causing bad SMP scalability. |
| 9456 | * |
| 9457 | * To fix this we scale percpu_counter_batch by cputime_one_jiffy so we |
| 9458 | * batch the same amount of time with CONFIG_VIRT_CPU_ACCOUNTING disabled |
| 9459 | * and enabled. We cap it at INT_MAX which is the largest allowed batch value. |
| 9460 | */ |
| 9461 | #ifdef CONFIG_SMP |
| 9462 | #define CPUACCT_BATCH \ |
| 9463 | min_t(long, percpu_counter_batch * cputime_one_jiffy, INT_MAX) |
| 9464 | #else |
| 9465 | #define CPUACCT_BATCH 0 |
| 9466 | #endif |
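/*
 * Rough numbers, assuming the default percpu_counter_batch of 32: with
 * cputime_t kept in jiffies (cputime_one_jiffy == 1) the batch stays at
 * 32, i.e. unchanged behaviour; if cputime_one_jiffy were on the order
 * of 10^6 (fine-grained accounting at HZ=1000), the batch would scale
 * to roughly 3.2*10^7, still well below the INT_MAX cap.
 */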
| 9467 | |
| 9468 | /* |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 9469 | * Charge the system/user time to the task's accounting group. |
| 9470 | */ |
| 9471 | static void cpuacct_update_stats(struct task_struct *tsk, |
| 9472 | enum cpuacct_stat_index idx, cputime_t val) |
| 9473 | { |
| 9474 | struct cpuacct *ca; |
Anton Blanchard | fa535a7 | 2010-02-02 14:46:13 -0800 | [diff] [blame] | 9475 | int batch = CPUACCT_BATCH; |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 9476 | |
| 9477 | if (unlikely(!cpuacct_subsys.active)) |
| 9478 | return; |
| 9479 | |
| 9480 | rcu_read_lock(); |
| 9481 | ca = task_ca(tsk); |
| 9482 | |
| 9483 | do { |
Anton Blanchard | fa535a7 | 2010-02-02 14:46:13 -0800 | [diff] [blame] | 9484 | __percpu_counter_add(&ca->cpustat[idx], val, batch); |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 9485 | ca = ca->parent; |
| 9486 | } while (ca); |
| 9487 | rcu_read_unlock(); |
| 9488 | } |
| 9489 | |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9490 | struct cgroup_subsys cpuacct_subsys = { |
| 9491 | .name = "cpuacct", |
| 9492 | .create = cpuacct_create, |
| 9493 | .destroy = cpuacct_destroy, |
| 9494 | .populate = cpuacct_populate, |
| 9495 | .subsys_id = cpuacct_subsys_id, |
| 9496 | }; |
| 9497 | #endif /* CONFIG_CGROUP_CPUACCT */ |
Paul E. McKenney | 03b042b | 2009-06-25 09:08:16 -0700 | [diff] [blame] | 9498 | |
| 9499 | #ifndef CONFIG_SMP |
| 9500 | |
Paul E. McKenney | 03b042b | 2009-06-25 09:08:16 -0700 | [diff] [blame] | 9501 | void synchronize_sched_expedited(void) |
| 9502 | { |
Paul E. McKenney | fc390cd | 2010-05-06 11:42:52 -0700 | [diff] [blame] | 9503 | barrier(); |
Paul E. McKenney | 03b042b | 2009-06-25 09:08:16 -0700 | [diff] [blame] | 9504 | } |
| 9505 | EXPORT_SYMBOL_GPL(synchronize_sched_expedited); |
| 9506 | |
| 9507 | #else /* #ifndef CONFIG_SMP */ |
| 9508 | |
Paul E. McKenney | cc631fb | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 9509 | static atomic_t synchronize_sched_expedited_count = ATOMIC_INIT(0); |
Paul E. McKenney | 03b042b | 2009-06-25 09:08:16 -0700 | [diff] [blame] | 9510 | |
Paul E. McKenney | cc631fb | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 9511 | static int synchronize_sched_expedited_cpu_stop(void *data) |
Paul E. McKenney | 03b042b | 2009-06-25 09:08:16 -0700 | [diff] [blame] | 9512 | { |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 9513 | /* |
| 9514 | * There must be a full memory barrier on each affected CPU |
| 9515 | * between the time that try_stop_cpus() is called and the |
| 9516 | * time that it returns. |
| 9517 | * |
| 9518 | * In the current initial implementation of cpu_stop, the |
| 9519 | * above condition is already met when the control reaches |
| 9520 | * this point and the following smp_mb() is not strictly |
| 9521 | * necessary. Do smp_mb() anyway for documentation and |
| 9522 | * robustness against future implementation changes. |
| 9523 | */ |
Paul E. McKenney | cc631fb | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 9524 | smp_mb(); /* See above comment block. */ |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 9525 | return 0; |
Paul E. McKenney | 03b042b | 2009-06-25 09:08:16 -0700 | [diff] [blame] | 9526 | } |
Paul E. McKenney | 03b042b | 2009-06-25 09:08:16 -0700 | [diff] [blame] | 9527 | |
| 9528 | /* |
| 9529 | * Wait for an rcu-sched grace period to elapse, but use "big hammer" |
| 9530 | * approach to force grace period to end quickly. This consumes |
| 9531 | * significant time on all CPUs, and is thus not recommended for |
| 9532 | * any sort of common-case code. |
| 9533 | * |
| 9534 | * Note that it is illegal to call this function while holding any |
| 9535 | * lock that is acquired by a CPU-hotplug notifier. Failing to |
| 9536 | * observe this restriction will result in deadlock. |
| 9537 | */ |
| 9538 | void synchronize_sched_expedited(void) |
| 9539 | { |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 9540 | int snap, trycount = 0; |
Paul E. McKenney | 03b042b | 2009-06-25 09:08:16 -0700 | [diff] [blame] | 9541 | |
| 9542 | smp_mb(); /* ensure prior mod happens before capturing snap. */ |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 9543 | snap = atomic_read(&synchronize_sched_expedited_count) + 1; |
Paul E. McKenney | 03b042b | 2009-06-25 09:08:16 -0700 | [diff] [blame] | 9544 | get_online_cpus(); |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 9545 | while (try_stop_cpus(cpu_online_mask, |
| 9546 | synchronize_sched_expedited_cpu_stop, |
Tejun Heo | 94458d5 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 9547 | NULL) == -EAGAIN) { |
Paul E. McKenney | 03b042b | 2009-06-25 09:08:16 -0700 | [diff] [blame] | 9548 | put_online_cpus(); |
| 9549 | if (trycount++ < 10) |
| 9550 | udelay(trycount * num_online_cpus()); |
| 9551 | else { |
| 9552 | synchronize_sched(); |
| 9553 | return; |
| 9554 | } |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 9555 | if (atomic_read(&synchronize_sched_expedited_count) - snap > 0) { |
Paul E. McKenney | 03b042b | 2009-06-25 09:08:16 -0700 | [diff] [blame] | 9556 | smp_mb(); /* ensure test happens before caller kfree */ |
| 9557 | return; |
| 9558 | } |
| 9559 | get_online_cpus(); |
| 9560 | } |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 9561 | atomic_inc(&synchronize_sched_expedited_count); |
Paul E. McKenney | cc631fb | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 9562 | smp_mb__after_atomic_inc(); /* ensure post-GP actions seen after GP. */ |
Paul E. McKenney | 03b042b | 2009-06-25 09:08:16 -0700 | [diff] [blame] | 9563 | put_online_cpus(); |
Paul E. McKenney | 03b042b | 2009-06-25 09:08:16 -0700 | [diff] [blame] | 9564 | } |
| 9565 | EXPORT_SYMBOL_GPL(synchronize_sched_expedited); |
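/*
 * Illustrative caller, assuming "gp" is an RCU-sched protected pointer:
 *
 *   struct foo *old = gp;
 *   rcu_assign_pointer(gp, new);
 *   synchronize_sched_expedited();      waits for all preempt-disabled readers
 *   kfree(old);
 *
 * Plain synchronize_sched() is usually the better choice; the expedited
 * variant finishes sooner but briefly monopolizes every online CPU via
 * cpu_stop to get there.
 */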
| 9566 | |
| 9567 | #endif /* #else #ifndef CONFIG_SMP */ |