/*
 *  kernel/sched.c
 *
 *  Kernel scheduler and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *
 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
 *		make semaphores SMP safe
 *  1998-11-19	Implemented schedule_timeout() and related stuff
 *		by Andrea Arcangeli
 *  2002-01-04	New ultra-scalable O(1) scheduler by Ingo Molnar:
 *		hybrid priority-list and round-robin design with
 *		an array-switch method of distributing timeslices
 *		and per-CPU runqueues.  Cleanups and useful suggestions
 *		by Davide Libenzi, preemptible kernel bits by Robert Love.
 *  2003-09-03	Interactivity tuning by Con Kolivas.
 *  2004-04-02	Scheduler domains code by Nick Piggin
 *  2007-04-15  Work begun on replacing all interactivity tuning with a
 *              fair scheduling design by Con Kolivas.
 *  2007-05-05  Load balancing (smp-nice) and other improvements
 *              by Peter Williams
 *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
 *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
 *  2007-11-29  RT balancing improvements by Steven Rostedt, Gregory Haskins,
 *              Thomas Gleixner, Mike Kravetz
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
#include <linux/perf_event.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/freezer.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/pid_namespace.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>
#include <linux/sysctl.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/debugfs.h>
#include <linux/ctype.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/init_task.h>

#include <asm/tlb.h>
#include <asm/irq_regs.h>
#include <asm/mutex.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

#include "sched_cpupri.h"
#include "workqueue_sched.h"
#include "sched_autogroup.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>

/*
 * Convert user-nice values [ -20 ... 0 ... 19 ]
 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
 * and back.
 */
#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio)	((prio) - MAX_RT_PRIO - 20)
#define TASK_NICE(p)		PRIO_TO_NICE((p)->static_prio)

/*
 * 'User priority' is the nice value converted to something we
 * can work with better when scaling various scheduler parameters;
 * it's a [ 0 ... 39 ] range.
 */
#define USER_PRIO(p)		((p)-MAX_RT_PRIO)
#define TASK_USER_PRIO(p)	USER_PRIO((p)->static_prio)
#define MAX_USER_PRIO		(USER_PRIO(MAX_PRIO))
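
/*
 * Worked example of the mapping above, assuming the usual values from
 * <linux/sched.h> (MAX_RT_PRIO == 100, MAX_PRIO == 140):
 *
 *	NICE_TO_PRIO(-20) == 100	(highest user static priority)
 *	NICE_TO_PRIO(  0) == 120	(the default)
 *	NICE_TO_PRIO( 19) == 139	(lowest user static priority)
 *
 * and in the 'user priority' space, USER_PRIO(120) == 20 while
 * MAX_USER_PRIO == USER_PRIO(140) == 40.
 */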

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
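
/*
 * For example (illustrative only): with HZ == 1000 there are
 * NSEC_PER_SEC / HZ == 1,000,000 ns per jiffy, so
 * NS_TO_JIFFIES(2500000) == 2 -- the integer division truncates.
 */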

#define NICE_0_LOAD		SCHED_LOAD_SCALE
#define NICE_0_SHIFT		SCHED_LOAD_SHIFT

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * default timeslice is 100 msecs (used only for SCHED_RR tasks).
 * Timeslices get refilled after they expire.
 */
#define DEF_TIMESLICE		(100 * HZ / 1000)
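
/*
 * DEF_TIMESLICE is expressed in jiffies: 100 * HZ / 1000 is 100ms worth
 * of ticks, e.g. 25 jiffies with HZ == 250, or 100 jiffies with HZ == 1000.
 */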

/*
 * A single value that denotes runtime == period, i.e. unlimited time.
 */
#define RUNTIME_INF	((u64)~0ULL)

static inline int rt_policy(int policy)
{
	if (policy == SCHED_FIFO || policy == SCHED_RR)
		return 1;
	return 0;
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};
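
/*
 * A sketch of how the RT class (sched_rt.c) picks from this structure:
 * the bitmap has a bit set for every non-empty queue[], so finding the
 * highest-priority runnable entity is a find-first-bit plus a list head,
 * roughly:
 *
 *	idx = sched_find_first_bit(array->bitmap);
 *	next = list_entry(array->queue[idx].next, ...);
 */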

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
};

static struct rt_bandwidth def_rt_bandwidth;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
	struct rt_bandwidth *rt_b =
		container_of(timer, struct rt_bandwidth, rt_period_timer);
	ktime_t now;
	int overrun;
	int idle = 0;

	for (;;) {
		now = hrtimer_cb_get_time(timer);
		overrun = hrtimer_forward(timer, now, rt_b->rt_period);

		if (!overrun)
			break;

		idle = do_sched_rt_period_timer(rt_b, overrun);
	}

	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

static
void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
	rt_b->rt_period = ns_to_ktime(period);
	rt_b->rt_runtime = runtime;

	raw_spin_lock_init(&rt_b->rt_runtime_lock);

	hrtimer_init(&rt_b->rt_period_timer,
			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rt_b->rt_period_timer.function = sched_rt_period_timer;
}
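
/*
 * For reference: sched_init() later in this file seeds the default pool
 * from the global knobs, roughly:
 *
 *	init_rt_bandwidth(&def_rt_bandwidth,
 *			global_rt_period(), global_rt_runtime());
 */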

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

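/*
 * (Re)arm a bandwidth period timer: hrtimer_forward() advances the
 * expiry in whole periods until it lies in the future, so a timer that
 * sat idle across several periods does not fire once per missed period.
 * The timer is started pinned to the current CPU.
 */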
static void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
{
	unsigned long delta;
	ktime_t soft, hard, now;

	for (;;) {
		if (hrtimer_active(period_timer))
			break;

		now = hrtimer_cb_get_time(period_timer);
		hrtimer_forward(period_timer, now, period);

		soft = hrtimer_get_softexpires(period_timer);
		hard = hrtimer_get_expires(period_timer);
		delta = ktime_to_ns(ktime_sub(hard, soft));
		__hrtimer_start_range_ns(period_timer, soft, delta,
					 HRTIMER_MODE_ABS_PINNED, 0);
	}
}

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return;

	if (hrtimer_active(&rt_b->rt_period_timer))
		return;

	raw_spin_lock(&rt_b->rt_runtime_lock);
	start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	hrtimer_cancel(&rt_b->rt_period_timer);
}
#endif

/*
 * sched_domains_mutex serializes calls to init_sched_domains,
 * detach_destroy_domains and partition_sched_domains.
 */
static DEFINE_MUTEX(sched_domains_mutex);

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;

static LIST_HEAD(task_groups);

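/*
 * CFS bandwidth control state, one instance per task group: 'quota' is
 * the group's CPU budget per 'period' and 'runtime' is the unassigned
 * remainder of that budget within the current period (all in ns); a
 * quota of RUNTIME_INF means the group is unconstrained.
 */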
struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t lock;
	ktime_t period;
	u64 quota, runtime;
	s64 hierarchal_quota;
	u64 runtime_expires;

	int idle, timer_active;
	struct hrtimer period_timer, slack_timer;
	struct list_head throttled_cfs_rq;

	/* statistics */
	int nr_periods, nr_throttled;
	u64 throttled_time;
#endif
};

/* task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each cpu */
	struct sched_entity **se;
	/* runqueue "owned" by this group on each cpu */
	struct cfs_rq **cfs_rq;
	unsigned long shares;

	atomic_t load_weight;
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity **rt_se;
	struct rt_rq **rt_rq;

	struct rt_bandwidth rt_bandwidth;
#endif

	struct rcu_head rcu;
	struct list_head list;

	struct task_group *parent;
	struct list_head siblings;
	struct list_head children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif

	struct cfs_bandwidth cfs_bandwidth;
};

/* task_group_lock serializes the addition/removal of task groups */
static DEFINE_SPINLOCK(task_group_lock);

#ifdef CONFIG_FAIR_GROUP_SCHED

# define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so neither an entity's weight nor a task group's
 * shares value should be too large.
 * (The default weight is 1024 - so there's no practical
 *  limitation from this.)
 */
#define MIN_SHARES	(1UL <<  1)
#define MAX_SHARES	(1UL << 18)
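
/*
 * Concretely, MIN_SHARES == 2 and MAX_SHARES == 262144; user-supplied
 * shares values get clamped into this range, which brackets the 1024
 * default (ROOT_TASK_GROUP_LOAD == NICE_0_LOAD) noted above.
 */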

static int root_task_group_load = ROOT_TASK_GROUP_LOAD;
#endif

/*
 * Default task group.
 * Every task in the system belongs to this group at bootup.
 */
struct task_group root_task_group;

#endif	/* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight load;
	unsigned long nr_running, h_nr_running;

	u64 exec_clock;
	u64 min_vruntime;
#ifndef CONFIG_64BIT
	u64 min_vruntime_copy;
#endif
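
	/*
	 * On 32-bit, reads of the u64 min_vruntime above are not atomic;
	 * remote readers (see task_waking_fair() in sched_fair.c) retry
	 * until min_vruntime_copy matches min_vruntime, paired with the
	 * write barrier on the update side.
	 */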

	struct rb_root tasks_timeline;
	struct rb_node *rb_leftmost;

	struct list_head tasks;
	struct list_head *balance_iterator;

	/*
	 * 'curr' points to the currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e. when none are currently running).
	 */
	struct sched_entity *curr, *next, *last, *skip;

#ifdef	CONFIG_SCHED_DEBUG
	unsigned int nr_spread_over;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf cfs_rqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together the list of leaf cfs_rq's in a cpu.
	 * This list is used during load balance.
	 */
	int on_list;
	struct list_head leaf_cfs_rq_list;
	struct task_group *tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_SMP
	/*
	 * the part of load.weight contributed by tasks
	 */
	unsigned long task_weight;

	/*
	 *   h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long h_load;

	/*
	 * Maintaining per-cpu shares distribution for group scheduling
	 *
	 * load_stamp is the last time we updated the load average
	 * load_last is the last time we updated the load average and saw load
	 * load_unacc_exec_time is currently unaccounted execution time
	 */
	u64 load_avg;
	u64 load_period;
	u64 load_stamp, load_last, load_unacc_exec_time;

	unsigned long load_contribution;
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	int runtime_enabled;
	u64 runtime_expires;
	s64 runtime_remaining;

	u64 throttled_timestamp;
	int throttled, throttle_count;
	struct list_head throttled_list;
#endif
#endif
};
| 415 | |
Paul Turner | ab84d31 | 2011-07-21 09:43:28 -0700 | [diff] [blame] | 416 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 417 | #ifdef CONFIG_CFS_BANDWIDTH |
| 418 | static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) |
| 419 | { |
| 420 | return &tg->cfs_bandwidth; |
| 421 | } |
| 422 | |
| 423 | static inline u64 default_cfs_period(void); |
Paul Turner | 58088ad | 2011-07-21 09:43:31 -0700 | [diff] [blame] | 424 | static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun); |
Paul Turner | d8b4986 | 2011-07-21 09:43:41 -0700 | [diff] [blame] | 425 | static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b); |
| 426 | |
| 427 | static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer) |
| 428 | { |
| 429 | struct cfs_bandwidth *cfs_b = |
| 430 | container_of(timer, struct cfs_bandwidth, slack_timer); |
| 431 | do_sched_cfs_slack_timer(cfs_b); |
| 432 | |
| 433 | return HRTIMER_NORESTART; |
| 434 | } |
Paul Turner | 58088ad | 2011-07-21 09:43:31 -0700 | [diff] [blame] | 435 | |
| 436 | static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) |
| 437 | { |
| 438 | struct cfs_bandwidth *cfs_b = |
| 439 | container_of(timer, struct cfs_bandwidth, period_timer); |
| 440 | ktime_t now; |
| 441 | int overrun; |
| 442 | int idle = 0; |
| 443 | |
| 444 | for (;;) { |
| 445 | now = hrtimer_cb_get_time(timer); |
| 446 | overrun = hrtimer_forward(timer, now, cfs_b->period); |
| 447 | |
| 448 | if (!overrun) |
| 449 | break; |
| 450 | |
| 451 | idle = do_sched_cfs_period_timer(cfs_b, overrun); |
| 452 | } |
| 453 | |
| 454 | return idle ? HRTIMER_NORESTART : HRTIMER_RESTART; |
| 455 | } |
Paul Turner | ab84d31 | 2011-07-21 09:43:28 -0700 | [diff] [blame] | 456 | |
| 457 | static void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) |
| 458 | { |
| 459 | raw_spin_lock_init(&cfs_b->lock); |
Paul Turner | ec12cb7 | 2011-07-21 09:43:30 -0700 | [diff] [blame] | 460 | cfs_b->runtime = 0; |
Paul Turner | ab84d31 | 2011-07-21 09:43:28 -0700 | [diff] [blame] | 461 | cfs_b->quota = RUNTIME_INF; |
| 462 | cfs_b->period = ns_to_ktime(default_cfs_period()); |
Paul Turner | 58088ad | 2011-07-21 09:43:31 -0700 | [diff] [blame] | 463 | |
Paul Turner | 85dac90 | 2011-07-21 09:43:33 -0700 | [diff] [blame] | 464 | INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq); |
Paul Turner | 58088ad | 2011-07-21 09:43:31 -0700 | [diff] [blame] | 465 | hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
| 466 | cfs_b->period_timer.function = sched_cfs_period_timer; |
Paul Turner | d8b4986 | 2011-07-21 09:43:41 -0700 | [diff] [blame] | 467 | hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
| 468 | cfs_b->slack_timer.function = sched_cfs_slack_timer; |
Paul Turner | ab84d31 | 2011-07-21 09:43:28 -0700 | [diff] [blame] | 469 | } |
| 470 | |
| 471 | static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) |
| 472 | { |
| 473 | cfs_rq->runtime_enabled = 0; |
Paul Turner | 85dac90 | 2011-07-21 09:43:33 -0700 | [diff] [blame] | 474 | INIT_LIST_HEAD(&cfs_rq->throttled_list); |
Paul Turner | ab84d31 | 2011-07-21 09:43:28 -0700 | [diff] [blame] | 475 | } |
| 476 | |
/* requires cfs_b->lock, may release to reprogram timer */
static void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
{
	/*
	 * The timer may be active because we're trying to set a new bandwidth
	 * period or because we're racing with the tear-down path
	 * (timer_active==0 becomes visible before the hrtimer call-back
	 * terminates). In either case we ensure that it's re-programmed.
	 */
	while (unlikely(hrtimer_active(&cfs_b->period_timer))) {
		raw_spin_unlock(&cfs_b->lock);
		/* ensure cfs_b->lock is available while we wait */
		hrtimer_cancel(&cfs_b->period_timer);

		raw_spin_lock(&cfs_b->lock);
		/* if someone else restarted the timer then we're done */
		if (cfs_b->timer_active)
			return;
	}

	cfs_b->timer_active = 1;
	start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
}

static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
{
	hrtimer_cancel(&cfs_b->period_timer);
	hrtimer_cancel(&cfs_b->slack_timer);
}
#else
static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
static void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}

static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
{
	return NULL;
}
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array active;
	unsigned long rt_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long rt_nr_migratory;
	unsigned long rt_nr_total;
	int overloaded;
	struct plist_head pushable_tasks;
#endif
	int rt_throttled;
	u64 rt_time;
	u64 rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long rt_nr_boosted;

	struct rq *rq;
	struct list_head leaf_rt_rq_list;
	struct task_group *tg;
#endif
};

#ifdef CONFIG_SMP

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 */
struct root_domain {
	atomic_t refcount;
	atomic_t rto_count;
	struct rcu_head rcu;
	cpumask_var_t span;
	cpumask_var_t online;

	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t rto_mask;
	struct cpupri cpupri;
};

/*
 * By default the system creates a single root-domain with all cpus as
 * members (mimicking the global state we have today).
 */
static struct root_domain def_root_domain;

#endif /* CONFIG_SMP */

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: code that wants to lock multiple runqueues (such as
 * the load balancing or the thread migration code) must acquire the
 * locks in ascending &runqueue order.
 */
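/*
 * A sketch of that ordering rule, as implemented by double_rq_lock()
 * later in this file (modulo the rq1 == rq2 case):
 *
 *	if (rq1 < rq2) {
 *		raw_spin_lock(&rq1->lock);
 *		raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
 *	} else {
 *		raw_spin_lock(&rq2->lock);
 *		raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
 *	}
 */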
struct rq {
	/* runqueue lock: */
	raw_spinlock_t lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned long nr_running;
	#define CPU_LOAD_IDX_MAX 5
	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
	unsigned long last_load_update_tick;
#ifdef CONFIG_NO_HZ
	u64 nohz_stamp;
	unsigned char nohz_balance_kick;
#endif
	int skip_clock_update;

	/* capture load from *all* tasks on this cpu: */
	struct load_weight load;
	unsigned long nr_load_updates;
	u64 nr_switches;

	struct cfs_rq cfs;
	struct rt_rq rt;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this cpu: */
	struct list_head leaf_cfs_rq_list;
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	struct list_head leaf_rt_rq_list;
#endif

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long nr_uninterruptible;

	struct task_struct *curr, *idle, *stop;
	unsigned long next_balance;
	struct mm_struct *prev_mm;

	u64 clock;
	u64 clock_task;

	atomic_t nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain *rd;
	struct sched_domain *sd;

	unsigned long cpu_power;

	unsigned char idle_balance;
	/* For active balancing */
	int post_schedule;
	int active_balance;
	int push_cpu;
	struct cpu_stop_work active_balance_work;
	/* cpu of this runqueue: */
	int cpu;
	int online;

	u64 rt_avg;
	u64 age_stamp;
	u64 idle_stamp;
	u64 avg_idle;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64 prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64 prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64 prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long calc_load_update;
	long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int hrtick_csd_pending;
	struct call_single_data hrtick_csd;
#endif
	struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;
	unsigned long long rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_switch;
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;
#endif

#ifdef CONFIG_SMP
	struct llist_head wake_list;
#endif
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);


static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
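
/*
 * A sketch of typical usage (illustrative): walk the domains of a CPU
 * from the innermost level outward, inside an RCU/preempt-disabled
 * section:
 *
 *	rcu_read_lock();
 *	for_each_domain(cpu, sd) {
 *		if (sd->flags & SD_LOAD_BALANCE)
 *			...;
 *	}
 *	rcu_read_unlock();
 */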

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		(&__get_cpu_var(runqueues))
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		(&__raw_get_cpu_var(runqueues))

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We use task_subsys_state_check() and extend the RCU verification with
 * pi->lock and rq->lock because cpu_cgroup_attach() holds those locks for each
 * task it moves into the cgroup. Therefore by holding either of those locks,
 * we pin the task to the current cgroup.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
	struct task_group *tg;
	struct cgroup_subsys_state *css;

	css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
			lockdep_is_held(&p->pi_lock) ||
			lockdep_is_held(&task_rq(p)->lock));
	tg = container_of(css, struct task_group, css);

	return autogroup_task_group(p, tg);
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
	p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
	p->se.parent = task_group(p)->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq  = task_group(p)->rt_rq[cpu];
	p->rt.parent = task_group(p)->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

static void update_rq_clock_task(struct rq *rq, s64 delta);

static void update_rq_clock(struct rq *rq)
{
	s64 delta;

	if (rq->skip_clock_update > 0)
		return;

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	rq->clock += delta;
	update_rq_clock_task(rq, delta);
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# define const_debug __read_mostly
#else
# define const_debug static const
#endif

/**
 * runqueue_is_locked - Returns true if the current cpu runqueue is locked
 * @cpu: the processor in question.
 *
 * This interface allows printk to be called with the runqueue lock
 * held and know whether or not it is OK to wake up the klogd.
 */
int runqueue_is_locked(int cpu)
{
	return raw_spin_is_locked(&cpu_rq(cpu)->lock);
}

/*
 * Debugging: various feature bits
 */

#define SCHED_FEAT(name, enabled)	\
	__SCHED_FEAT_##name ,

enum {
#include "sched_features.h"
};

#undef SCHED_FEAT

#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |

const_debug unsigned int sysctl_sched_features =
#include "sched_features.h"
	0;

#undef SCHED_FEAT

#ifdef CONFIG_SCHED_DEBUG
#define SCHED_FEAT(name, enabled)	\
	#name ,

static __read_mostly char *sched_feat_names[] = {
#include "sched_features.h"
	NULL
};

#undef SCHED_FEAT
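
/*
 * The SCHED_FEAT() X-macro above expands sched_features.h three times.
 * For instance (illustrative), an entry such as SCHED_FEAT(OWNER_SPIN, 1)
 * yields:
 *
 *	__SCHED_FEAT_OWNER_SPIN			an enum bit index,
 *	(1UL << __SCHED_FEAT_OWNER_SPIN) * 1 |	a set bit in
 *						sysctl_sched_features,
 *	"OWNER_SPIN"				a string in sched_feat_names[].
 */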

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; sched_feat_names[i]; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int neg = 0;
	int i;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	for (i = 0; sched_feat_names[i]; i++) {
		if (strcmp(cmp, sched_feat_names[i]) == 0) {
			if (neg)
				sysctl_sched_features &= ~(1UL << i);
			else
				sysctl_sched_features |= (1UL << i);
			break;
		}
	}

	if (!sched_feat_names[i])
		return -EINVAL;

	*ppos += cnt;

	return cnt;
}
| 913 | |
Li Zefan | 34f3a81 | 2008-10-30 15:23:32 +0800 | [diff] [blame] | 914 | static int sched_feat_open(struct inode *inode, struct file *filp) |
| 915 | { |
| 916 | return single_open(filp, sched_feat_show, NULL); |
| 917 | } |
| 918 | |
Alexey Dobriyan | 828c095 | 2009-10-01 15:43:56 -0700 | [diff] [blame] | 919 | static const struct file_operations sched_feat_fops = { |
Li Zefan | 34f3a81 | 2008-10-30 15:23:32 +0800 | [diff] [blame] | 920 | .open = sched_feat_open, |
| 921 | .write = sched_feat_write, |
| 922 | .read = seq_read, |
| 923 | .llseek = seq_lseek, |
| 924 | .release = single_release, |
Peter Zijlstra | f00b45c | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 925 | }; |
| 926 | |
| 927 | static __init int sched_init_debug(void) |
| 928 | { |
Peter Zijlstra | f00b45c | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 929 | debugfs_create_file("sched_features", 0644, NULL, NULL, |
| 930 | &sched_feat_fops); |
| 931 | |
| 932 | return 0; |
| 933 | } |
| 934 | late_initcall(sched_init_debug); |
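/*
 * Intended usage from userspace (a sketch; assumes debugfs is mounted
 * at its conventional /sys/kernel/debug location):
 *
 *	# cat /sys/kernel/debug/sched_features
 *	# echo NO_HRTICK > /sys/kernel/debug/sched_features
 *
 * Writing a feature name sets the corresponding bit in
 * sysctl_sched_features; prefixing the name with "NO_" clears it.
 */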
| 935 | |
| 936 | #endif |
| 937 | |
| 938 | #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) |
Ingo Molnar | bf5c91b | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 939 | |
| 940 | /* |
Peter Zijlstra | b82d9fd | 2007-11-09 22:39:39 +0100 | [diff] [blame] | 941 | * Number of tasks to iterate in a single balance run. |
| 942 | * Limited because this is done with IRQs disabled. |
| 943 | */ |
| 944 | const_debug unsigned int sysctl_sched_nr_migrate = 32; |
| 945 | |
| 946 | /* |
Peter Zijlstra | e9e9250 | 2009-09-01 10:34:37 +0200 | [diff] [blame] | 947 | * period over which we average the RT time consumption, measured |
| 948 | * in ms. |
| 949 | * |
| 950 | * default: 1s |
| 951 | */ |
| 952 | const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC; |
| 953 | |
| 954 | /* |
Peter Zijlstra | 9f0c1e5 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 955 | * period over which we measure -rt task cpu usage in us. |
Peter Zijlstra | fa85ae2 | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 956 | * default: 1s |
| 957 | */ |
Peter Zijlstra | 9f0c1e5 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 958 | unsigned int sysctl_sched_rt_period = 1000000; |
Peter Zijlstra | fa85ae2 | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 959 | |
Ingo Molnar | 6892b75 | 2008-02-13 14:02:36 +0100 | [diff] [blame] | 960 | static __read_mostly int scheduler_running; |
| 961 | |
Peter Zijlstra | fa85ae2 | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 962 | /* |
Peter Zijlstra | 9f0c1e5 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 963 | * part of the period that we allow rt tasks to run, in us. |
| 964 | * default: 0.95s |
Peter Zijlstra | fa85ae2 | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 965 | */ |
Peter Zijlstra | 9f0c1e5 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 966 | int sysctl_sched_rt_runtime = 950000; |
| 967 | |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 968 | static inline u64 global_rt_period(void) |
| 969 | { |
| 970 | return (u64)sysctl_sched_rt_period * NSEC_PER_USEC; |
| 971 | } |
| 972 | |
| 973 | static inline u64 global_rt_runtime(void) |
| 974 | { |
roel kluin | e26873b | 2008-07-22 16:51:15 -0400 | [diff] [blame] | 975 | if (sysctl_sched_rt_runtime < 0) |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 976 | return RUNTIME_INF; |
| 977 | |
| 978 | return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC; |
| 979 | } |
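/*
 * With the defaults above, global_rt_period() returns
 * 1000000 us * 1000 = 1e9 ns (1s) and global_rt_runtime() returns
 * 950000 us * 1000 = 9.5e8 ns (0.95s), i.e. realtime tasks may consume
 * at most 95% of each period. A negative sysctl_sched_rt_runtime
 * disables the limit entirely via RUNTIME_INF.
 */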
Peter Zijlstra | fa85ae2 | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 980 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 981 | #ifndef prepare_arch_switch |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 982 | # define prepare_arch_switch(next) do { } while (0) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 983 | #endif |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 984 | #ifndef finish_arch_switch |
| 985 | # define finish_arch_switch(prev) do { } while (0) |
| 986 | #endif |
| 987 | |
Dmitry Adamushko | 051a1d1 | 2007-12-18 15:21:13 +0100 | [diff] [blame] | 988 | static inline int task_current(struct rq *rq, struct task_struct *p) |
| 989 | { |
| 990 | return rq->curr == p; |
| 991 | } |
| 992 | |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 993 | static inline int task_running(struct rq *rq, struct task_struct *p) |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 994 | { |
Peter Zijlstra | 3ca7a44 | 2011-04-05 17:23:40 +0200 | [diff] [blame] | 995 | #ifdef CONFIG_SMP |
| 996 | return p->on_cpu; |
| 997 | #else |
Dmitry Adamushko | 051a1d1 | 2007-12-18 15:21:13 +0100 | [diff] [blame] | 998 | return task_current(rq, p); |
Peter Zijlstra | 3ca7a44 | 2011-04-05 17:23:40 +0200 | [diff] [blame] | 999 | #endif |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 1000 | } |
| 1001 | |
Peter Zijlstra | 3ca7a44 | 2011-04-05 17:23:40 +0200 | [diff] [blame] | 1002 | #ifndef __ARCH_WANT_UNLOCKED_CTXSW |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 1003 | static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 1004 | { |
Peter Zijlstra | 3ca7a44 | 2011-04-05 17:23:40 +0200 | [diff] [blame] | 1005 | #ifdef CONFIG_SMP |
| 1006 | /* |
| 1007 | * We can optimise this out completely for !SMP, because the |
| 1008 | * SMP rebalancing from interrupt is the only thing that cares |
| 1009 | * here. |
| 1010 | */ |
| 1011 | next->on_cpu = 1; |
| 1012 | #endif |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 1013 | } |
| 1014 | |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 1015 | static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 1016 | { |
Peter Zijlstra | 3ca7a44 | 2011-04-05 17:23:40 +0200 | [diff] [blame] | 1017 | #ifdef CONFIG_SMP |
| 1018 | /* |
| 1019 | * After ->on_cpu is cleared, the task can be moved to a different CPU. |
| 1020 | * We must ensure this doesn't happen until the switch is completely |
| 1021 | * finished. |
| 1022 | */ |
| 1023 | smp_wmb(); |
| 1024 | prev->on_cpu = 0; |
| 1025 | #endif |
Ingo Molnar | da04c03 | 2005-09-13 11:17:59 +0200 | [diff] [blame] | 1026 | #ifdef CONFIG_DEBUG_SPINLOCK |
| 1027 | /* this is a valid case when another task releases the spinlock */ |
| 1028 | rq->lock.owner = current; |
| 1029 | #endif |
Ingo Molnar | 8a25d5d | 2006-07-03 00:24:54 -0700 | [diff] [blame] | 1030 | /* |
| 1031 | * If we are tracking spinlock dependencies then we have to |
| 1032 | * fix up the runqueue lock - which gets 'carried over' from |
| 1033 | * prev into current: |
| 1034 | */ |
| 1035 | spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); |
| 1036 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1037 | raw_spin_unlock_irq(&rq->lock); |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 1038 | } |
| 1039 | |
| 1040 | #else /* __ARCH_WANT_UNLOCKED_CTXSW */ |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 1041 | static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 1042 | { |
| 1043 | #ifdef CONFIG_SMP |
| 1044 | /* |
| 1045 | * We can optimise this out completely for !SMP, because the |
| 1046 | * SMP rebalancing from interrupt is the only thing that cares |
| 1047 | * here. |
| 1048 | */ |
Peter Zijlstra | 3ca7a44 | 2011-04-05 17:23:40 +0200 | [diff] [blame] | 1049 | next->on_cpu = 1; |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 1050 | #endif |
| 1051 | #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1052 | raw_spin_unlock_irq(&rq->lock); |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 1053 | #else |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1054 | raw_spin_unlock(&rq->lock); |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 1055 | #endif |
| 1056 | } |
| 1057 | |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 1058 | static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 1059 | { |
| 1060 | #ifdef CONFIG_SMP |
| 1061 | /* |
Peter Zijlstra | 3ca7a44 | 2011-04-05 17:23:40 +0200 | [diff] [blame] | 1062 | * After ->on_cpu is cleared, the task can be moved to a different CPU. |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 1063 | * We must ensure this doesn't happen until the switch is completely |
| 1064 | * finished. |
| 1065 | */ |
| 1066 | smp_wmb(); |
Peter Zijlstra | 3ca7a44 | 2011-04-05 17:23:40 +0200 | [diff] [blame] | 1067 | prev->on_cpu = 0; |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 1068 | #endif |
| 1069 | #ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW |
| 1070 | local_irq_enable(); |
| 1071 | #endif |
| 1072 | } |
| 1073 | #endif /* __ARCH_WANT_UNLOCKED_CTXSW */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1074 | |
| 1075 | /* |
Peter Zijlstra | 0122ec5 | 2011-04-05 17:23:51 +0200 | [diff] [blame] | 1076 | * __task_rq_lock - lock the rq @p resides on. |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 1077 | */ |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 1078 | static inline struct rq *__task_rq_lock(struct task_struct *p) |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 1079 | __acquires(rq->lock) |
| 1080 | { |
Peter Zijlstra | 0970d29 | 2010-02-15 14:45:54 +0100 | [diff] [blame] | 1081 | struct rq *rq; |
| 1082 | |
Peter Zijlstra | 0122ec5 | 2011-04-05 17:23:51 +0200 | [diff] [blame] | 1083 | lockdep_assert_held(&p->pi_lock); |
| 1084 | |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 1085 | for (;;) { |
Peter Zijlstra | 0970d29 | 2010-02-15 14:45:54 +0100 | [diff] [blame] | 1086 | rq = task_rq(p); |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1087 | raw_spin_lock(&rq->lock); |
Peter Zijlstra | 65cc8e4 | 2010-03-25 21:05:16 +0100 | [diff] [blame] | 1088 | if (likely(rq == task_rq(p))) |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 1089 | return rq; |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1090 | raw_spin_unlock(&rq->lock); |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 1091 | } |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 1092 | } |
| 1093 | |
| 1094 | /* |
Peter Zijlstra | 0122ec5 | 2011-04-05 17:23:51 +0200 | [diff] [blame] | 1095 | * task_rq_lock - lock p->pi_lock and lock the rq @p resides on. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1096 | */ |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 1097 | static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags) |
Peter Zijlstra | 0122ec5 | 2011-04-05 17:23:51 +0200 | [diff] [blame] | 1098 | __acquires(p->pi_lock) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1099 | __acquires(rq->lock) |
| 1100 | { |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 1101 | struct rq *rq; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1102 | |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 1103 | for (;;) { |
Peter Zijlstra | 0122ec5 | 2011-04-05 17:23:51 +0200 | [diff] [blame] | 1104 | raw_spin_lock_irqsave(&p->pi_lock, *flags); |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 1105 | rq = task_rq(p); |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1106 | raw_spin_lock(&rq->lock); |
Peter Zijlstra | 65cc8e4 | 2010-03-25 21:05:16 +0100 | [diff] [blame] | 1107 | if (likely(rq == task_rq(p))) |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 1108 | return rq; |
Peter Zijlstra | 0122ec5 | 2011-04-05 17:23:51 +0200 | [diff] [blame] | 1109 | raw_spin_unlock(&rq->lock); |
| 1110 | raw_spin_unlock_irqrestore(&p->pi_lock, *flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1111 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1112 | } |
| 1113 | |
Alexey Dobriyan | a995744 | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 1114 | static void __task_rq_unlock(struct rq *rq) |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 1115 | __releases(rq->lock) |
| 1116 | { |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1117 | raw_spin_unlock(&rq->lock); |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 1118 | } |
| 1119 | |
Peter Zijlstra | 0122ec5 | 2011-04-05 17:23:51 +0200 | [diff] [blame] | 1120 | static inline void |
| 1121 | task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1122 | __releases(rq->lock) |
Peter Zijlstra | 0122ec5 | 2011-04-05 17:23:51 +0200 | [diff] [blame] | 1123 | __releases(p->pi_lock) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1124 | { |
Peter Zijlstra | 0122ec5 | 2011-04-05 17:23:51 +0200 | [diff] [blame] | 1125 | raw_spin_unlock(&rq->lock); |
| 1126 | raw_spin_unlock_irqrestore(&p->pi_lock, *flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1127 | } |
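/*
 * Typical pairing of the helpers above (a sketch):
 *
 *	unsigned long flags;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &flags);
 *	... p cannot migrate here, rq is stable ...
 *	task_rq_unlock(rq, p, &flags);
 *
 * The retry loop in task_rq_lock() is needed because task_rq(p) can
 * change between reading it and acquiring rq->lock; only once the lock
 * is held and rq == task_rq(p) still holds is the result stable.
 */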
| 1128 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1129 | /* |
Robert P. J. Day | cc2a73b | 2006-12-10 02:20:00 -0800 | [diff] [blame] | 1130 | * this_rq_lock - lock this runqueue and disable interrupts. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1131 | */ |
Alexey Dobriyan | a995744 | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 1132 | static struct rq *this_rq_lock(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1133 | __acquires(rq->lock) |
| 1134 | { |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 1135 | struct rq *rq; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1136 | |
| 1137 | local_irq_disable(); |
| 1138 | rq = this_rq(); |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1139 | raw_spin_lock(&rq->lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1140 | |
| 1141 | return rq; |
| 1142 | } |
| 1143 | |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1144 | #ifdef CONFIG_SCHED_HRTICK |
| 1145 | /* |
| 1146 | * Use HR-timers to deliver accurate preemption points. |
| 1147 | * |
| 1148 | * It's all a bit involved since we cannot program an hrtimer while holding the |
| 1149 | * rq->lock. So what we do is store a state in rq->hrtick_* and ask for a |
| 1150 | * reschedule event. |
| 1151 | * |
| 1152 | * When we get rescheduled we reprogram the hrtick_timer outside of the |
| 1153 | * rq->lock. |
| 1154 | */ |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1155 | |
| 1156 | /* |
| 1157 | * Use hrtick when: |
| 1158 | * - enabled by features |
| 1159 | * - hrtimer is actually high res |
| 1160 | */ |
| 1161 | static inline int hrtick_enabled(struct rq *rq) |
| 1162 | { |
| 1163 | if (!sched_feat(HRTICK)) |
| 1164 | return 0; |
Ingo Molnar | ba42059 | 2008-07-20 11:02:06 +0200 | [diff] [blame] | 1165 | if (!cpu_active(cpu_of(rq))) |
Peter Zijlstra | b328ca1 | 2008-04-29 10:02:46 +0200 | [diff] [blame] | 1166 | return 0; |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1167 | return hrtimer_is_hres_active(&rq->hrtick_timer); |
| 1168 | } |
| 1169 | |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1170 | static void hrtick_clear(struct rq *rq) |
| 1171 | { |
| 1172 | if (hrtimer_active(&rq->hrtick_timer)) |
| 1173 | hrtimer_cancel(&rq->hrtick_timer); |
| 1174 | } |
| 1175 | |
| 1176 | /* |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1177 | * High-resolution timer tick. |
| 1178 | * Runs from hardirq context with interrupts disabled. |
| 1179 | */ |
| 1180 | static enum hrtimer_restart hrtick(struct hrtimer *timer) |
| 1181 | { |
| 1182 | struct rq *rq = container_of(timer, struct rq, hrtick_timer); |
| 1183 | |
| 1184 | WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); |
| 1185 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1186 | raw_spin_lock(&rq->lock); |
Peter Zijlstra | 3e51f33 | 2008-05-03 18:29:28 +0200 | [diff] [blame] | 1187 | update_rq_clock(rq); |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1188 | rq->curr->sched_class->task_tick(rq, rq->curr, 1); |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1189 | raw_spin_unlock(&rq->lock); |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1190 | |
| 1191 | return HRTIMER_NORESTART; |
| 1192 | } |
| 1193 | |
Rabin Vincent | 95e904c | 2008-05-11 05:55:33 +0530 | [diff] [blame] | 1194 | #ifdef CONFIG_SMP |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1195 | /* |
| 1196 | * called from hardirq (IPI) context |
| 1197 | */ |
| 1198 | static void __hrtick_start(void *arg) |
Peter Zijlstra | b328ca1 | 2008-04-29 10:02:46 +0200 | [diff] [blame] | 1199 | { |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1200 | struct rq *rq = arg; |
Peter Zijlstra | b328ca1 | 2008-04-29 10:02:46 +0200 | [diff] [blame] | 1201 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1202 | raw_spin_lock(&rq->lock); |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1203 | hrtimer_restart(&rq->hrtick_timer); |
| 1204 | rq->hrtick_csd_pending = 0; |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1205 | raw_spin_unlock(&rq->lock); |
Peter Zijlstra | b328ca1 | 2008-04-29 10:02:46 +0200 | [diff] [blame] | 1206 | } |
| 1207 | |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1208 | /* |
| 1209 | * Called to set the hrtick timer state. |
| 1210 | * |
| 1211 | * called with rq->lock held and irqs disabled |
| 1212 | */ |
| 1213 | static void hrtick_start(struct rq *rq, u64 delay) |
Peter Zijlstra | b328ca1 | 2008-04-29 10:02:46 +0200 | [diff] [blame] | 1214 | { |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1215 | struct hrtimer *timer = &rq->hrtick_timer; |
| 1216 | ktime_t time = ktime_add_ns(timer->base->get_time(), delay); |
Peter Zijlstra | b328ca1 | 2008-04-29 10:02:46 +0200 | [diff] [blame] | 1217 | |
Arjan van de Ven | cc584b2 | 2008-09-01 15:02:30 -0700 | [diff] [blame] | 1218 | hrtimer_set_expires(timer, time); |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1219 | |
| 1220 | if (rq == this_rq()) { |
| 1221 | hrtimer_restart(timer); |
| 1222 | } else if (!rq->hrtick_csd_pending) { |
Peter Zijlstra | 6e27563 | 2009-02-25 13:59:48 +0100 | [diff] [blame] | 1223 | __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0); |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1224 | rq->hrtick_csd_pending = 1; |
| 1225 | } |
Peter Zijlstra | b328ca1 | 2008-04-29 10:02:46 +0200 | [diff] [blame] | 1226 | } |
| 1227 | |
| 1228 | static int |
| 1229 | hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu) |
| 1230 | { |
| 1231 | int cpu = (int)(long)hcpu; |
| 1232 | |
| 1233 | switch (action) { |
| 1234 | case CPU_UP_CANCELED: |
| 1235 | case CPU_UP_CANCELED_FROZEN: |
| 1236 | case CPU_DOWN_PREPARE: |
| 1237 | case CPU_DOWN_PREPARE_FROZEN: |
| 1238 | case CPU_DEAD: |
| 1239 | case CPU_DEAD_FROZEN: |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1240 | hrtick_clear(cpu_rq(cpu)); |
Peter Zijlstra | b328ca1 | 2008-04-29 10:02:46 +0200 | [diff] [blame] | 1241 | return NOTIFY_OK; |
| 1242 | } |
| 1243 | |
| 1244 | return NOTIFY_DONE; |
| 1245 | } |
| 1246 | |
Rakib Mullick | fa74820 | 2008-09-22 14:55:45 -0700 | [diff] [blame] | 1247 | static __init void init_hrtick(void) |
Peter Zijlstra | b328ca1 | 2008-04-29 10:02:46 +0200 | [diff] [blame] | 1248 | { |
| 1249 | hotcpu_notifier(hotplug_hrtick, 0); |
| 1250 | } |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1251 | #else |
| 1252 | /* |
| 1253 | * Called to set the hrtick timer state. |
| 1254 | * |
| 1255 | * called with rq->lock held and irqs disabled |
| 1256 | */ |
| 1257 | static void hrtick_start(struct rq *rq, u64 delay) |
| 1258 | { |
Peter Zijlstra | 7f1e2ca | 2009-03-13 12:21:27 +0100 | [diff] [blame] | 1259 | __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0, |
Arun R Bharadwaj | 5c33386 | 2009-04-16 12:14:37 +0530 | [diff] [blame] | 1260 | HRTIMER_MODE_REL_PINNED, 0); |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1261 | } |
| 1262 | |
Andrew Morton | 006c75f | 2008-09-22 14:55:46 -0700 | [diff] [blame] | 1263 | static inline void init_hrtick(void) |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1264 | { |
| 1265 | } |
Rabin Vincent | 95e904c | 2008-05-11 05:55:33 +0530 | [diff] [blame] | 1266 | #endif /* CONFIG_SMP */ |
Peter Zijlstra | b328ca1 | 2008-04-29 10:02:46 +0200 | [diff] [blame] | 1267 | |
| 1268 | static void init_rq_hrtick(struct rq *rq) |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1269 | { |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1270 | #ifdef CONFIG_SMP |
| 1271 | rq->hrtick_csd_pending = 0; |
| 1272 | |
| 1273 | rq->hrtick_csd.flags = 0; |
| 1274 | rq->hrtick_csd.func = __hrtick_start; |
| 1275 | rq->hrtick_csd.info = rq; |
| 1276 | #endif |
| 1277 | |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1278 | hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
| 1279 | rq->hrtick_timer.function = hrtick; |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1280 | } |
Andrew Morton | 006c75f | 2008-09-22 14:55:46 -0700 | [diff] [blame] | 1281 | #else /* CONFIG_SCHED_HRTICK */ |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1282 | static inline void hrtick_clear(struct rq *rq) |
| 1283 | { |
| 1284 | } |
| 1285 | |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1286 | static inline void init_rq_hrtick(struct rq *rq) |
| 1287 | { |
| 1288 | } |
| 1289 | |
Peter Zijlstra | b328ca1 | 2008-04-29 10:02:46 +0200 | [diff] [blame] | 1290 | static inline void init_hrtick(void) |
| 1291 | { |
| 1292 | } |
Andrew Morton | 006c75f | 2008-09-22 14:55:46 -0700 | [diff] [blame] | 1293 | #endif /* CONFIG_SCHED_HRTICK */ |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1294 | |
Ingo Molnar | 1b9f19c | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1295 | /* |
Ingo Molnar | c24d20d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1296 | * resched_task - mark a task 'to be rescheduled now'. |
| 1297 | * |
| 1298 | * On UP this means the setting of the need_resched flag; on SMP it |
| 1299 | * might also involve a cross-CPU call to trigger the scheduler on |
| 1300 | * the target CPU. |
| 1301 | */ |
| 1302 | #ifdef CONFIG_SMP |
| 1303 | |
| 1304 | #ifndef tsk_is_polling |
| 1305 | #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) |
| 1306 | #endif |
| 1307 | |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1308 | static void resched_task(struct task_struct *p) |
Ingo Molnar | c24d20d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1309 | { |
| 1310 | int cpu; |
| 1311 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1312 | assert_raw_spin_locked(&task_rq(p)->lock); |
Ingo Molnar | c24d20d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1313 | |
Lai Jiangshan | 5ed0cec | 2009-03-06 19:40:20 +0800 | [diff] [blame] | 1314 | if (test_tsk_need_resched(p)) |
Ingo Molnar | c24d20d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1315 | return; |
| 1316 | |
Lai Jiangshan | 5ed0cec | 2009-03-06 19:40:20 +0800 | [diff] [blame] | 1317 | set_tsk_need_resched(p); |
Ingo Molnar | c24d20d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1318 | |
| 1319 | cpu = task_cpu(p); |
| 1320 | if (cpu == smp_processor_id()) |
| 1321 | return; |
| 1322 | |
| 1323 | /* NEED_RESCHED must be visible before we test polling */ |
| 1324 | smp_mb(); |
| 1325 | if (!tsk_is_polling(p)) |
| 1326 | smp_send_reschedule(cpu); |
| 1327 | } |
| 1328 | |
| 1329 | static void resched_cpu(int cpu) |
| 1330 | { |
| 1331 | struct rq *rq = cpu_rq(cpu); |
| 1332 | unsigned long flags; |
| 1333 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1334 | if (!raw_spin_trylock_irqsave(&rq->lock, flags)) |
Ingo Molnar | c24d20d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1335 | return; |
| 1336 | resched_task(cpu_curr(cpu)); |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1337 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
Ingo Molnar | c24d20d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1338 | } |
Thomas Gleixner | 06d8308 | 2008-03-22 09:20:24 +0100 | [diff] [blame] | 1339 | |
| 1340 | #ifdef CONFIG_NO_HZ |
| 1341 | /* |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 1342 | * In the semi-idle case, use the nearest busy cpu for migrating timers |
| 1343 | * from an idle cpu. This is good for power-savings. |
| 1344 | * |
| 1345 | * We don't do similar optimization for completely idle system, as |
| 1346 | * selecting an idle cpu will add more delays to the timers than intended |
| 1347 | * (as that cpu's timer base may not be up to date wrt jiffies etc). |
| 1348 | */ |
| 1349 | int get_nohz_timer_target(void) |
| 1350 | { |
| 1351 | int cpu = smp_processor_id(); |
| 1352 | int i; |
| 1353 | struct sched_domain *sd; |
| 1354 | |
Peter Zijlstra | 057f3fa | 2011-04-18 11:24:34 +0200 | [diff] [blame] | 1355 | rcu_read_lock(); |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 1356 | for_each_domain(cpu, sd) { |
Peter Zijlstra | 057f3fa | 2011-04-18 11:24:34 +0200 | [diff] [blame] | 1357 | for_each_cpu(i, sched_domain_span(sd)) { |
| 1358 | if (!idle_cpu(i)) { |
| 1359 | cpu = i; |
| 1360 | goto unlock; |
| 1361 | } |
| 1362 | } |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 1363 | } |
Peter Zijlstra | 057f3fa | 2011-04-18 11:24:34 +0200 | [diff] [blame] | 1364 | unlock: |
| 1365 | rcu_read_unlock(); |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 1366 | return cpu; |
| 1367 | } |
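/*
 * Because for_each_domain() visits the domains from the smallest span
 * outwards, the search above prefers the closest busy cpu: e.g. on an
 * SMT system, an idle cpu whose hardware sibling is busy will return
 * that sibling before ever considering cpus in other cores or
 * packages.
 */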
| 1368 | /* |
Thomas Gleixner | 06d8308 | 2008-03-22 09:20:24 +0100 | [diff] [blame] | 1369 | * When add_timer_on() enqueues a timer into the timer wheel of an |
| 1370 | * idle CPU then this timer might expire before the next timer event |
| 1371 | * which is scheduled to wake up that CPU. In case of a completely |
| 1372 | * idle system the next event might even be infinite time into the |
| 1373 | * future. wake_up_idle_cpu() ensures that the CPU is woken up and |
| 1374 | * leaves the inner idle loop so the newly added timer is taken into |
| 1375 | * account when the CPU goes back to idle and evaluates the timer |
| 1376 | * wheel for the next timer event. |
| 1377 | */ |
| 1378 | void wake_up_idle_cpu(int cpu) |
| 1379 | { |
| 1380 | struct rq *rq = cpu_rq(cpu); |
| 1381 | |
| 1382 | if (cpu == smp_processor_id()) |
| 1383 | return; |
| 1384 | |
| 1385 | /* |
| 1386 | * This is safe, as this function is called with the timer |
| 1387 | * wheel base lock of (cpu) held. When the CPU is on the way |
| 1388 | * to idle and has not yet set rq->curr to idle then it will |
| 1389 | * be serialized on the timer wheel base lock and take the new |
| 1390 | * timer into account automatically. |
| 1391 | */ |
| 1392 | if (rq->curr != rq->idle) |
| 1393 | return; |
| 1394 | |
| 1395 | /* |
| 1396 | * We can set TIF_RESCHED on the idle task of the other CPU |
| 1397 | * locklessly. The worst case is that the other CPU runs the |
| 1398 | * idle task through an additional NOOP schedule(). |
| 1399 | */ |
Lai Jiangshan | 5ed0cec | 2009-03-06 19:40:20 +0800 | [diff] [blame] | 1400 | set_tsk_need_resched(rq->idle); |
Thomas Gleixner | 06d8308 | 2008-03-22 09:20:24 +0100 | [diff] [blame] | 1401 | |
| 1402 | /* NEED_RESCHED must be visible before we test polling */ |
| 1403 | smp_mb(); |
| 1404 | if (!tsk_is_polling(rq->idle)) |
| 1405 | smp_send_reschedule(cpu); |
| 1406 | } |
Mike Galbraith | 39c0cbe | 2010-03-11 17:17:13 +0100 | [diff] [blame] | 1407 | |
Suresh Siddha | ca38062 | 2011-10-03 15:09:00 -0700 | [diff] [blame] | 1408 | static inline bool got_nohz_idle_kick(void) |
| 1409 | { |
| 1410 | return idle_cpu(smp_processor_id()) && this_rq()->nohz_balance_kick; |
| 1411 | } |
| 1412 | |
| 1413 | #else /* CONFIG_NO_HZ */ |
| 1414 | |
| 1415 | static inline bool got_nohz_idle_kick(void) |
| 1416 | { |
| 1417 | return false; |
| 1418 | } |
| 1419 | |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 1420 | #endif /* CONFIG_NO_HZ */ |
Thomas Gleixner | 06d8308 | 2008-03-22 09:20:24 +0100 | [diff] [blame] | 1421 | |
Peter Zijlstra | e9e9250 | 2009-09-01 10:34:37 +0200 | [diff] [blame] | 1422 | static u64 sched_avg_period(void) |
| 1423 | { |
| 1424 | return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2; |
| 1425 | } |
| 1426 | |
| 1427 | static void sched_avg_update(struct rq *rq) |
| 1428 | { |
| 1429 | s64 period = sched_avg_period(); |
| 1430 | |
| 1431 | while ((s64)(rq->clock - rq->age_stamp) > period) { |
Will Deacon | 0d98bb2 | 2010-05-24 12:11:43 -0700 | [diff] [blame] | 1432 | /* |
| 1433 | * Inline assembly required to prevent the compiler |
| 1434 | * optimising this loop into a divmod call. |
| 1435 | * See __iter_div_u64_rem() for another example of this. |
| 1436 | */ |
| 1437 | asm("" : "+rm" (rq->age_stamp)); |
Peter Zijlstra | e9e9250 | 2009-09-01 10:34:37 +0200 | [diff] [blame] | 1438 | rq->age_stamp += period; |
| 1439 | rq->rt_avg /= 2; |
| 1440 | } |
| 1441 | } |
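/*
 * The loop above halves rq->rt_avg once per elapsed period, so rt_avg
 * behaves as a geometrically decaying average of RT runtime with a
 * half-life of sched_avg_period(), i.e. 500 ms with the default
 * sysctl_sched_time_avg of 1000 ms. For example, RT time accounted 1s
 * ago (two half-lives) has decayed to a quarter of its original
 * contribution.
 */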
| 1442 | |
| 1443 | static void sched_rt_avg_update(struct rq *rq, u64 rt_delta) |
| 1444 | { |
| 1445 | rq->rt_avg += rt_delta; |
| 1446 | sched_avg_update(rq); |
| 1447 | } |
| 1448 | |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 1449 | #else /* !CONFIG_SMP */ |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1450 | static void resched_task(struct task_struct *p) |
Ingo Molnar | c24d20d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1451 | { |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1452 | assert_raw_spin_locked(&task_rq(p)->lock); |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1453 | set_tsk_need_resched(p); |
Ingo Molnar | c24d20d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1454 | } |
Peter Zijlstra | e9e9250 | 2009-09-01 10:34:37 +0200 | [diff] [blame] | 1455 | |
| 1456 | static void sched_rt_avg_update(struct rq *rq, u64 rt_delta) |
| 1457 | { |
| 1458 | } |
Suresh Siddha | da2b71e | 2010-08-23 13:42:51 -0700 | [diff] [blame] | 1459 | |
| 1460 | static void sched_avg_update(struct rq *rq) |
| 1461 | { |
| 1462 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 1463 | #endif /* CONFIG_SMP */ |
Ingo Molnar | c24d20d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1464 | |
Ingo Molnar | 45bf76d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1465 | #if BITS_PER_LONG == 32 |
| 1466 | # define WMULT_CONST (~0UL) |
| 1467 | #else |
| 1468 | # define WMULT_CONST (1UL << 32) |
| 1469 | #endif |
| 1470 | |
| 1471 | #define WMULT_SHIFT 32 |
| 1472 | |
Ingo Molnar | 194081e | 2007-08-09 11:16:51 +0200 | [diff] [blame] | 1473 | /* |
| 1474 | * Shift right and round: |
| 1475 | */ |
Ingo Molnar | cf2ab46 | 2007-09-05 14:32:49 +0200 | [diff] [blame] | 1476 | #define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y)) |
Ingo Molnar | 194081e | 2007-08-09 11:16:51 +0200 | [diff] [blame] | 1477 | |
Peter Zijlstra | a7be37a | 2008-06-27 13:41:11 +0200 | [diff] [blame] | 1478 | /* |
| 1479 | * delta *= weight / lw |
| 1480 | */ |
Ingo Molnar | cb1c4fc | 2007-08-02 17:41:40 +0200 | [diff] [blame] | 1481 | static unsigned long |
Ingo Molnar | 45bf76d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1482 | calc_delta_mine(unsigned long delta_exec, unsigned long weight, |
| 1483 | struct load_weight *lw) |
| 1484 | { |
| 1485 | u64 tmp; |
| 1486 | |
Nikhil Rao | c8b2811 | 2011-05-18 14:37:48 -0700 | [diff] [blame] | 1487 | /* |
| 1488 | * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched |
| 1489 | * entities since MIN_SHARES = 2. Treat weight as 1 if less than |
| 1490 | * 2^SCHED_LOAD_RESOLUTION. |
| 1491 | */ |
| 1492 | if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION))) |
| 1493 | tmp = (u64)delta_exec * scale_load_down(weight); |
| 1494 | else |
| 1495 | tmp = (u64)delta_exec; |
Stephan Baerwolf | db670da | 2011-05-11 18:03:29 +0200 | [diff] [blame] | 1496 | |
Lai Jiangshan | 7a232e0 | 2008-06-12 16:43:07 +0800 | [diff] [blame] | 1497 | if (!lw->inv_weight) { |
Nikhil Rao | c8b2811 | 2011-05-18 14:37:48 -0700 | [diff] [blame] | 1498 | unsigned long w = scale_load_down(lw->weight); |
| 1499 | |
| 1500 | if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST)) |
Lai Jiangshan | 7a232e0 | 2008-06-12 16:43:07 +0800 | [diff] [blame] | 1501 | lw->inv_weight = 1; |
Nikhil Rao | c8b2811 | 2011-05-18 14:37:48 -0700 | [diff] [blame] | 1502 | else if (unlikely(!w)) |
| 1503 | lw->inv_weight = WMULT_CONST; |
Lai Jiangshan | 7a232e0 | 2008-06-12 16:43:07 +0800 | [diff] [blame] | 1504 | else |
Nikhil Rao | c8b2811 | 2011-05-18 14:37:48 -0700 | [diff] [blame] | 1505 | lw->inv_weight = WMULT_CONST / w; |
Lai Jiangshan | 7a232e0 | 2008-06-12 16:43:07 +0800 | [diff] [blame] | 1506 | } |
Ingo Molnar | 45bf76d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1507 | |
Ingo Molnar | 45bf76d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1508 | /* |
| 1509 | * Check whether we'd overflow the 64-bit multiplication: |
| 1510 | */ |
Ingo Molnar | 194081e | 2007-08-09 11:16:51 +0200 | [diff] [blame] | 1511 | if (unlikely(tmp > WMULT_CONST)) |
Ingo Molnar | cf2ab46 | 2007-09-05 14:32:49 +0200 | [diff] [blame] | 1512 | tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight, |
Ingo Molnar | 194081e | 2007-08-09 11:16:51 +0200 | [diff] [blame] | 1513 | WMULT_SHIFT/2); |
| 1514 | else |
Ingo Molnar | cf2ab46 | 2007-09-05 14:32:49 +0200 | [diff] [blame] | 1515 | tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT); |
Ingo Molnar | 45bf76d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1516 | |
Ingo Molnar | ecf691d | 2007-08-02 17:41:40 +0200 | [diff] [blame] | 1517 | return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX); |
Ingo Molnar | 45bf76d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1518 | } |
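/*
 * Worked example for calc_delta_mine() (a sketch, ignoring the
 * SCHED_LOAD_RESOLUTION scaling): with delta_exec = 2000000,
 * weight = 1024 (nice 0) and lw->weight = 2048 (two nice-0 entities),
 * inv_weight becomes 2^32 / 2048 = 2097152 and the result is
 *
 *	SRR((2000000 * 1024) * 2097152, 32) == 1000000
 *
 * i.e. delta * 1024/2048: the entity is credited half of the wall
 * time, matching its share of the queue's total weight.
 */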
| 1519 | |
Ingo Molnar | 1091985 | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 1520 | static inline void update_load_add(struct load_weight *lw, unsigned long inc) |
Ingo Molnar | 45bf76d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1521 | { |
| 1522 | lw->weight += inc; |
Ingo Molnar | e89996a | 2008-03-14 23:48:28 +0100 | [diff] [blame] | 1523 | lw->inv_weight = 0; |
Ingo Molnar | 45bf76d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1524 | } |
| 1525 | |
Ingo Molnar | 1091985 | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 1526 | static inline void update_load_sub(struct load_weight *lw, unsigned long dec) |
Ingo Molnar | 45bf76d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1527 | { |
| 1528 | lw->weight -= dec; |
Ingo Molnar | e89996a | 2008-03-14 23:48:28 +0100 | [diff] [blame] | 1529 | lw->inv_weight = 0; |
Ingo Molnar | 45bf76d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1530 | } |
| 1531 | |
Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 1532 | static inline void update_load_set(struct load_weight *lw, unsigned long w) |
| 1533 | { |
| 1534 | lw->weight = w; |
| 1535 | lw->inv_weight = 0; |
| 1536 | } |
| 1537 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1538 | /* |
Peter Williams | 2dd73a4 | 2006-06-27 02:54:34 -0700 | [diff] [blame] | 1539 | * To aid in avoiding the subversion of "niceness" due to uneven distribution |
| 1540 | * of tasks with abnormal "nice" values across CPUs, the contribution that |
| 1541 | * each task makes to its run queue's load is weighted according to its |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 1542 | * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a |
Peter Williams | 2dd73a4 | 2006-06-27 02:54:34 -0700 | [diff] [blame] | 1543 | * scaled version of the new time slice allocation that they receive on time |
| 1544 | * slice expiry etc. |
| 1545 | */ |
| 1546 | |
Peter Zijlstra | cce7ade | 2009-01-15 14:53:37 +0100 | [diff] [blame] | 1547 | #define WEIGHT_IDLEPRIO 3 |
| 1548 | #define WMULT_IDLEPRIO 1431655765 |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1549 | |
| 1550 | /* |
| 1551 | * Nice levels are multiplicative, with a gentle 10% change for every |
| 1552 | * nice level changed. I.e. when a CPU-bound task goes from nice 0 to |
| 1553 | * nice 1, it will get ~10% less CPU time than another CPU-bound task |
| 1554 | * that remained on nice 0. |
| 1555 | * |
| 1556 | * The "10% effect" is relative and cumulative: from _any_ nice level, |
| 1557 | * if you go up 1 level, it's -10% CPU usage; if you go down 1 level |
Ingo Molnar | f9153ee | 2007-07-16 09:46:30 +0200 | [diff] [blame] | 1558 | * it's +10% CPU usage. (To achieve that we use a multiplier of 1.25. |
| 1559 | * If a task goes up by ~10% and another task goes down by ~10% then |
| 1560 | * the relative distance between them is ~25%.) |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1561 | */ |
| 1562 | static const int prio_to_weight[40] = { |
Ingo Molnar | 254753d | 2007-08-09 11:16:51 +0200 | [diff] [blame] | 1563 | /* -20 */ 88761, 71755, 56483, 46273, 36291, |
| 1564 | /* -15 */ 29154, 23254, 18705, 14949, 11916, |
| 1565 | /* -10 */ 9548, 7620, 6100, 4904, 3906, |
| 1566 | /* -5 */ 3121, 2501, 1991, 1586, 1277, |
| 1567 | /* 0 */ 1024, 820, 655, 526, 423, |
| 1568 | /* 5 */ 335, 272, 215, 172, 137, |
| 1569 | /* 10 */ 110, 87, 70, 56, 45, |
| 1570 | /* 15 */ 36, 29, 23, 18, 15, |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1571 | }; |
| 1572 | |
Ingo Molnar | 5714d2d | 2007-07-16 09:46:31 +0200 | [diff] [blame] | 1573 | /* |
| 1574 | * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated. |
| 1575 | * |
| 1576 | * In cases where the weight does not change often, we can use the |
| 1577 | * precalculated inverse to speed up arithmetics by turning divisions |
| 1578 | * into multiplications: |
| 1579 | */ |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1580 | static const u32 prio_to_wmult[40] = { |
Ingo Molnar | 254753d | 2007-08-09 11:16:51 +0200 | [diff] [blame] | 1581 | /* -20 */ 48388, 59856, 76040, 92818, 118348, |
| 1582 | /* -15 */ 147320, 184698, 229616, 287308, 360437, |
| 1583 | /* -10 */ 449829, 563644, 704093, 875809, 1099582, |
| 1584 | /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326, |
| 1585 | /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587, |
| 1586 | /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126, |
| 1587 | /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717, |
| 1588 | /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153, |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1589 | }; |
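/*
 * Sanity check on the two tables: prio_to_wmult[i] is 2^32 divided by
 * prio_to_weight[i], e.g. for nice 0: 4194304 == 2^32 / 1024. The
 * ~1.25 step between adjacent nice levels is also visible directly:
 * nice -1 is 1277 ~= 1024 * 1.25 and nice +1 is 820 ~= 1024 / 1.25,
 * which is what produces the ~10% relative CPU-time change per level.
 */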
Peter Williams | 2dd73a4 | 2006-06-27 02:54:34 -0700 | [diff] [blame] | 1590 | |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 1591 | /* Time spent by the tasks of the cpu accounting group executing in ... */ |
| 1592 | enum cpuacct_stat_index { |
| 1593 | CPUACCT_STAT_USER, /* ... user mode */ |
| 1594 | CPUACCT_STAT_SYSTEM, /* ... kernel mode */ |
| 1595 | |
| 1596 | CPUACCT_STAT_NSTATS, |
| 1597 | }; |
| 1598 | |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 1599 | #ifdef CONFIG_CGROUP_CPUACCT |
| 1600 | static void cpuacct_charge(struct task_struct *tsk, u64 cputime); |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 1601 | static void cpuacct_update_stats(struct task_struct *tsk, |
| 1602 | enum cpuacct_stat_index idx, cputime_t val); |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 1603 | #else |
| 1604 | static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {} |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 1605 | static inline void cpuacct_update_stats(struct task_struct *tsk, |
| 1606 | enum cpuacct_stat_index idx, cputime_t val) {} |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 1607 | #endif |
| 1608 | |
Peter Zijlstra | 18d95a2 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 1609 | static inline void inc_cpu_load(struct rq *rq, unsigned long load) |
| 1610 | { |
| 1611 | update_load_add(&rq->load, load); |
| 1612 | } |
| 1613 | |
| 1614 | static inline void dec_cpu_load(struct rq *rq, unsigned long load) |
| 1615 | { |
| 1616 | update_load_sub(&rq->load, load); |
| 1617 | } |
| 1618 | |
Paul Turner | a790de9 | 2011-07-21 09:43:29 -0700 | [diff] [blame] | 1619 | #if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \ |
| 1620 | (defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH))) |
Peter Zijlstra | eb75580 | 2008-08-19 12:33:05 +0200 | [diff] [blame] | 1621 | typedef int (*tg_visitor)(struct task_group *, void *); |
| 1622 | |
| 1623 | /* |
Paul Turner | 8277434 | 2011-07-21 09:43:35 -0700 | [diff] [blame] | 1624 | * Iterate the task_group tree rooted at *from, calling @down when first entering a |
| 1625 | * node and @up when leaving it for the final time. |
| 1626 | * |
| 1627 | * Caller must hold rcu_lock or sufficient equivalent. |
Peter Zijlstra | eb75580 | 2008-08-19 12:33:05 +0200 | [diff] [blame] | 1628 | */ |
Paul Turner | 8277434 | 2011-07-21 09:43:35 -0700 | [diff] [blame] | 1629 | static int walk_tg_tree_from(struct task_group *from, |
| 1630 | tg_visitor down, tg_visitor up, void *data) |
Peter Zijlstra | eb75580 | 2008-08-19 12:33:05 +0200 | [diff] [blame] | 1631 | { |
| 1632 | struct task_group *parent, *child; |
| 1633 | int ret; |
| 1634 | |
Paul Turner | 8277434 | 2011-07-21 09:43:35 -0700 | [diff] [blame] | 1635 | parent = from; |
| 1636 | |
Peter Zijlstra | eb75580 | 2008-08-19 12:33:05 +0200 | [diff] [blame] | 1637 | down: |
| 1638 | ret = (*down)(parent, data); |
| 1639 | if (ret) |
Paul Turner | 8277434 | 2011-07-21 09:43:35 -0700 | [diff] [blame] | 1640 | goto out; |
Peter Zijlstra | eb75580 | 2008-08-19 12:33:05 +0200 | [diff] [blame] | 1641 | list_for_each_entry_rcu(child, &parent->children, siblings) { |
| 1642 | parent = child; |
| 1643 | goto down; |
| 1644 | |
| 1645 | up: |
| 1646 | continue; |
| 1647 | } |
| 1648 | ret = (*up)(parent, data); |
Paul Turner | 8277434 | 2011-07-21 09:43:35 -0700 | [diff] [blame] | 1649 | if (ret || parent == from) |
| 1650 | goto out; |
Peter Zijlstra | eb75580 | 2008-08-19 12:33:05 +0200 | [diff] [blame] | 1651 | |
| 1652 | child = parent; |
| 1653 | parent = parent->parent; |
| 1654 | if (parent) |
| 1655 | goto up; |
Paul Turner | 8277434 | 2011-07-21 09:43:35 -0700 | [diff] [blame] | 1656 | out: |
Peter Zijlstra | eb75580 | 2008-08-19 12:33:05 +0200 | [diff] [blame] | 1657 | return ret; |
| 1658 | } |
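/*
 * The goto-based walk above is an iterative, stackless rendering of
 * the following recursion (a sketch for illustration only;
 * walk_tg_tree_from_rec() is not a real helper in this file):
 *
 *	static int walk_tg_tree_from_rec(struct task_group *tg,
 *			tg_visitor down, tg_visitor up, void *data)
 *	{
 *		struct task_group *child;
 *		int ret;
 *
 *		ret = (*down)(tg, data);
 *		if (ret)
 *			return ret;
 *		list_for_each_entry_rcu(child, &tg->children, siblings) {
 *			ret = walk_tg_tree_from_rec(child, down, up, data);
 *			if (ret)
 *				return ret;
 *		}
 *		return (*up)(tg, data);
 *	}
 *
 * Avoiding recursion keeps the walk safe for arbitrarily deep group
 * hierarchies on small kernel stacks.
 */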
| 1659 | |
Paul Turner | 8277434 | 2011-07-21 09:43:35 -0700 | [diff] [blame] | 1660 | /* |
| 1661 | * Iterate the full tree, calling @down when first entering a node and @up when |
| 1662 | * leaving it for the final time. |
| 1663 | * |
| 1664 | * Caller must hold rcu_lock or sufficient equivalent. |
| 1665 | */ |
| 1666 | |
| 1667 | static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data) |
| 1668 | { |
| 1669 | return walk_tg_tree_from(&root_task_group, down, up, data); |
| 1670 | } |
| 1671 | |
Peter Zijlstra | eb75580 | 2008-08-19 12:33:05 +0200 | [diff] [blame] | 1672 | static int tg_nop(struct task_group *tg, void *data) |
| 1673 | { |
| 1674 | return 0; |
| 1675 | } |
| 1676 | #endif |
| 1677 | |
Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1678 | #ifdef CONFIG_SMP |
Peter Zijlstra | f5f08f3 | 2009-09-10 13:35:28 +0200 | [diff] [blame] | 1679 | /* Used instead of source_load when we know the type == 0 */ |
| 1680 | static unsigned long weighted_cpuload(const int cpu) |
| 1681 | { |
| 1682 | return cpu_rq(cpu)->load.weight; |
| 1683 | } |
| 1684 | |
| 1685 | /* |
| 1686 | * Return a low guess at the load of a migration-source cpu weighted |
| 1687 | * according to the scheduling class and "nice" value. |
| 1688 | * |
| 1689 | * We want to under-estimate the load of migration sources, to |
| 1690 | * balance conservatively. |
| 1691 | */ |
| 1692 | static unsigned long source_load(int cpu, int type) |
| 1693 | { |
| 1694 | struct rq *rq = cpu_rq(cpu); |
| 1695 | unsigned long total = weighted_cpuload(cpu); |
| 1696 | |
| 1697 | if (type == 0 || !sched_feat(LB_BIAS)) |
| 1698 | return total; |
| 1699 | |
| 1700 | return min(rq->cpu_load[type-1], total); |
| 1701 | } |
| 1702 | |
| 1703 | /* |
| 1704 | * Return a high guess at the load of a migration-target cpu weighted |
| 1705 | * according to the scheduling class and "nice" value. |
| 1706 | */ |
| 1707 | static unsigned long target_load(int cpu, int type) |
| 1708 | { |
| 1709 | struct rq *rq = cpu_rq(cpu); |
| 1710 | unsigned long total = weighted_cpuload(cpu); |
| 1711 | |
| 1712 | if (type == 0 || !sched_feat(LB_BIAS)) |
| 1713 | return total; |
| 1714 | |
| 1715 | return max(rq->cpu_load[type-1], total); |
| 1716 | } |
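/*
 * Example of the source/target asymmetry (a sketch): if a cpu has
 * weighted_cpuload() == 1000 but rq->cpu_load[type-1] == 900, then
 * source_load() reports 900 (the optimistic, lower view) while
 * target_load() reports 1000 (the pessimistic, higher view). The
 * combined effect is a bias against migration unless the imbalance is
 * clearly worth it.
 */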
| 1717 | |
Peter Zijlstra | ae154be | 2009-09-10 14:40:57 +0200 | [diff] [blame] | 1718 | static unsigned long power_of(int cpu) |
| 1719 | { |
Peter Zijlstra | e51fd5e | 2010-05-31 12:37:30 +0200 | [diff] [blame] | 1720 | return cpu_rq(cpu)->cpu_power; |
Peter Zijlstra | ae154be | 2009-09-10 14:40:57 +0200 | [diff] [blame] | 1721 | } |
| 1722 | |
Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1723 | static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd); |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1724 | |
Peter Zijlstra | a8a51d5 | 2008-06-27 13:41:26 +0200 | [diff] [blame] | 1725 | static unsigned long cpu_avg_load_per_task(int cpu) |
| 1726 | { |
| 1727 | struct rq *rq = cpu_rq(cpu); |
Ingo Molnar | af6d596 | 2008-11-29 20:45:15 +0100 | [diff] [blame] | 1728 | unsigned long nr_running = ACCESS_ONCE(rq->nr_running); |
Peter Zijlstra | a8a51d5 | 2008-06-27 13:41:26 +0200 | [diff] [blame] | 1729 | |
Steven Rostedt | 4cd4262 | 2008-11-26 21:04:24 -0500 | [diff] [blame] | 1730 | if (nr_running) |
Jan H. Schönherr | e2b245f | 2011-08-01 11:03:28 +0200 | [diff] [blame] | 1731 | return rq->load.weight / nr_running; |
Peter Zijlstra | a8a51d5 | 2008-06-27 13:41:26 +0200 | [diff] [blame] | 1732 | |
Jan H. Schönherr | e2b245f | 2011-08-01 11:03:28 +0200 | [diff] [blame] | 1733 | return 0; |
Peter Zijlstra | a8a51d5 | 2008-06-27 13:41:26 +0200 | [diff] [blame] | 1734 | } |
| 1735 | |
Gregory Haskins | 8f45e2b | 2008-12-29 09:39:51 -0500 | [diff] [blame] | 1736 | #ifdef CONFIG_PREEMPT |
| 1737 | |
Peter Zijlstra | b78bb86 | 2009-09-15 14:23:18 +0200 | [diff] [blame] | 1738 | static void double_rq_lock(struct rq *rq1, struct rq *rq2); |
| 1739 | |
Alexey Dobriyan | 70574a9 | 2008-11-28 22:08:00 +0300 | [diff] [blame] | 1740 | /* |
Gregory Haskins | 8f45e2b | 2008-12-29 09:39:51 -0500 | [diff] [blame] | 1741 | * fair double_lock_balance: Safely acquires both rq->locks in a fair |
| 1742 | * way at the expense of forcing extra atomic operations in all |
| 1743 | * invocations. This ensures that the double_lock is acquired using the |
| 1744 | * same underlying policy as the spinlock_t on this architecture, which |
| 1745 | * reduces latency compared to the unfair variant below. However, it |
| 1746 | * also adds more overhead and therefore may reduce throughput. |
Alexey Dobriyan | 70574a9 | 2008-11-28 22:08:00 +0300 | [diff] [blame] | 1747 | */ |
Gregory Haskins | 8f45e2b | 2008-12-29 09:39:51 -0500 | [diff] [blame] | 1748 | static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) |
| 1749 | __releases(this_rq->lock) |
| 1750 | __acquires(busiest->lock) |
| 1751 | __acquires(this_rq->lock) |
| 1752 | { |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1753 | raw_spin_unlock(&this_rq->lock); |
Gregory Haskins | 8f45e2b | 2008-12-29 09:39:51 -0500 | [diff] [blame] | 1754 | double_rq_lock(this_rq, busiest); |
| 1755 | |
| 1756 | return 1; |
| 1757 | } |
| 1758 | |
| 1759 | #else |
| 1760 | /* |
| 1761 | * Unfair double_lock_balance: Optimizes throughput at the expense of |
| 1762 | * latency by eliminating extra atomic operations when the locks are |
| 1763 | * already in proper order on entry. This favors lower cpu-ids and will |
| 1764 | * grant the double lock to lower cpus over higher ids under contention, |
| 1765 | * regardless of entry order into the function. |
| 1766 | */ |
| 1767 | static int _double_lock_balance(struct rq *this_rq, struct rq *busiest) |
Alexey Dobriyan | 70574a9 | 2008-11-28 22:08:00 +0300 | [diff] [blame] | 1768 | __releases(this_rq->lock) |
| 1769 | __acquires(busiest->lock) |
| 1770 | __acquires(this_rq->lock) |
| 1771 | { |
| 1772 | int ret = 0; |
| 1773 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1774 | if (unlikely(!raw_spin_trylock(&busiest->lock))) { |
Alexey Dobriyan | 70574a9 | 2008-11-28 22:08:00 +0300 | [diff] [blame] | 1775 | if (busiest < this_rq) { |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1776 | raw_spin_unlock(&this_rq->lock); |
| 1777 | raw_spin_lock(&busiest->lock); |
| 1778 | raw_spin_lock_nested(&this_rq->lock, |
| 1779 | SINGLE_DEPTH_NESTING); |
Alexey Dobriyan | 70574a9 | 2008-11-28 22:08:00 +0300 | [diff] [blame] | 1780 | ret = 1; |
| 1781 | } else |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1782 | raw_spin_lock_nested(&busiest->lock, |
| 1783 | SINGLE_DEPTH_NESTING); |
Alexey Dobriyan | 70574a9 | 2008-11-28 22:08:00 +0300 | [diff] [blame] | 1784 | } |
| 1785 | return ret; |
| 1786 | } |
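/*
 * The address ordering above is what prevents an ABBA deadlock: a cpu
 * that already holds the higher-addressed rq->lock and needs the lower
 * one backs off and re-acquires both in ascending address order, so
 * every path that takes two rq locks takes them in the same global
 * order and no wait cycle can form.
 */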
| 1787 | |
Gregory Haskins | 8f45e2b | 2008-12-29 09:39:51 -0500 | [diff] [blame] | 1788 | #endif /* CONFIG_PREEMPT */ |
| 1789 | |
| 1790 | /* |
| 1791 | * double_lock_balance - lock the busiest runqueue, this_rq is locked already. |
| 1792 | */ |
| 1793 | static int double_lock_balance(struct rq *this_rq, struct rq *busiest) |
| 1794 | { |
| 1795 | if (unlikely(!irqs_disabled())) { |
| 1796 | /* printk() doesn't work well under rq->lock */ |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1797 | raw_spin_unlock(&this_rq->lock); |
Gregory Haskins | 8f45e2b | 2008-12-29 09:39:51 -0500 | [diff] [blame] | 1798 | BUG_ON(1); |
| 1799 | } |
| 1800 | |
| 1801 | return _double_lock_balance(this_rq, busiest); |
| 1802 | } |
| 1803 | |
Alexey Dobriyan | 70574a9 | 2008-11-28 22:08:00 +0300 | [diff] [blame] | 1804 | static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) |
| 1805 | __releases(busiest->lock) |
| 1806 | { |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1807 | raw_spin_unlock(&busiest->lock); |
Alexey Dobriyan | 70574a9 | 2008-11-28 22:08:00 +0300 | [diff] [blame] | 1808 | lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); |
| 1809 | } |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 1810 | |
| 1811 | /* |
| 1812 | * double_rq_lock - safely lock two runqueues |
| 1813 | * |
| 1814 | * Note this does not disable interrupts like task_rq_lock, |
| 1815 | * you need to do so manually before calling. |
| 1816 | */ |
| 1817 | static void double_rq_lock(struct rq *rq1, struct rq *rq2) |
| 1818 | __acquires(rq1->lock) |
| 1819 | __acquires(rq2->lock) |
| 1820 | { |
| 1821 | BUG_ON(!irqs_disabled()); |
| 1822 | if (rq1 == rq2) { |
| 1823 | raw_spin_lock(&rq1->lock); |
| 1824 | __acquire(rq2->lock); /* Fake it out ;) */ |
| 1825 | } else { |
| 1826 | if (rq1 < rq2) { |
| 1827 | raw_spin_lock(&rq1->lock); |
| 1828 | raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); |
| 1829 | } else { |
| 1830 | raw_spin_lock(&rq2->lock); |
| 1831 | raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); |
| 1832 | } |
| 1833 | } |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 1834 | } |
| 1835 | |
| 1836 | /* |
| 1837 | * double_rq_unlock - safely unlock two runqueues |
| 1838 | * |
| 1839 | * Note this does not restore interrupts like task_rq_unlock, |
| 1840 | * you need to do so manually after calling. |
| 1841 | */ |
| 1842 | static void double_rq_unlock(struct rq *rq1, struct rq *rq2) |
| 1843 | __releases(rq1->lock) |
| 1844 | __releases(rq2->lock) |
| 1845 | { |
| 1846 | raw_spin_unlock(&rq1->lock); |
| 1847 | if (rq1 != rq2) |
| 1848 | raw_spin_unlock(&rq2->lock); |
| 1849 | else |
| 1850 | __release(rq2->lock); |
| 1851 | } |
| 1852 | |
Mike Galbraith | d95f412 | 2011-02-01 09:50:51 -0500 | [diff] [blame] | 1853 | #else /* CONFIG_SMP */ |
| 1854 | |
| 1855 | /* |
| 1856 | * double_rq_lock - safely lock two runqueues |
| 1857 | * |
| 1858 | * Note this does not disable interrupts like task_rq_lock, |
| 1859 | * you need to do so manually before calling. |
| 1860 | */ |
| 1861 | static void double_rq_lock(struct rq *rq1, struct rq *rq2) |
| 1862 | __acquires(rq1->lock) |
| 1863 | __acquires(rq2->lock) |
| 1864 | { |
| 1865 | BUG_ON(!irqs_disabled()); |
| 1866 | BUG_ON(rq1 != rq2); |
| 1867 | raw_spin_lock(&rq1->lock); |
| 1868 | __acquire(rq2->lock); /* Fake it out ;) */ |
| 1869 | } |
| 1870 | |
| 1871 | /* |
| 1872 | * double_rq_unlock - safely unlock two runqueues |
| 1873 | * |
| 1874 | * Note this does not restore interrupts like task_rq_unlock, |
| 1875 | * you need to do so manually after calling. |
| 1876 | */ |
| 1877 | static void double_rq_unlock(struct rq *rq1, struct rq *rq2) |
| 1878 | __releases(rq1->lock) |
| 1879 | __releases(rq2->lock) |
| 1880 | { |
| 1881 | BUG_ON(rq1 != rq2); |
| 1882 | raw_spin_unlock(&rq1->lock); |
| 1883 | __release(rq2->lock); |
| 1884 | } |
| 1885 | |
Peter Zijlstra | 18d95a2 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 1886 | #endif |
Peter Zijlstra | 18d95a2 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 1887 | |
Peter Zijlstra | 74f5187 | 2010-04-22 21:50:19 +0200 | [diff] [blame] | 1888 | static void calc_load_account_idle(struct rq *this_rq); |
Christian Ehrhardt | 0bcdcf2 | 2009-11-30 12:16:46 +0100 | [diff] [blame] | 1889 | static void update_sysctl(void); |
Christian Ehrhardt | acb4a84 | 2009-11-30 12:16:48 +0100 | [diff] [blame] | 1890 | static int get_update_sysctl_factor(void); |
Venkatesh Pallipadi | fdf3e95 | 2010-05-17 18:14:43 -0700 | [diff] [blame] | 1891 | static void update_cpu_load(struct rq *this_rq); |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 1892 | |
Peter Zijlstra | cd29fe6 | 2009-11-27 17:32:46 +0100 | [diff] [blame] | 1893 | static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) |
| 1894 | { |
| 1895 | set_task_rq(p, cpu); |
| 1896 | #ifdef CONFIG_SMP |
| 1897 | /* |
| 1898 | * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be |
Joe Perches | bfb9035 | 2011-08-17 06:58:04 -0700 | [diff] [blame] | 1899 | * successfully executed on another CPU. We must ensure that updates of |
Peter Zijlstra | cd29fe6 | 2009-11-27 17:32:46 +0100 | [diff] [blame] | 1900 | * per-task data have been completed by this moment. |
| 1901 | */ |
| 1902 | smp_wmb(); |
| 1903 | task_thread_info(p)->cpu = cpu; |
| 1904 | #endif |
| 1905 | } |
Peter Zijlstra | 18d95a2 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 1906 | |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 1907 | static const struct sched_class rt_sched_class; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1908 | |
Peter Zijlstra | 34f971f | 2010-09-22 13:53:15 +0200 | [diff] [blame] | 1909 | #define sched_class_highest (&stop_sched_class) |
Gregory Haskins | 1f11eb6 | 2008-06-04 15:04:05 -0400 | [diff] [blame] | 1910 | #define for_each_class(class) \ |
| 1911 | for (class = sched_class_highest; class; class = class->next) |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1912 | |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 1913 | #include "sched_stats.h" |
| 1914 | |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1915 | static void inc_nr_running(struct rq *rq) |
Ingo Molnar | 6363ca5 | 2008-05-29 11:28:57 +0200 | [diff] [blame] | 1916 | { |
| 1917 | rq->nr_running++; |
Ingo Molnar | 6363ca5 | 2008-05-29 11:28:57 +0200 | [diff] [blame] | 1918 | } |
| 1919 | |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1920 | static void dec_nr_running(struct rq *rq) |
Ingo Molnar | 9c21724 | 2007-08-02 17:41:40 +0200 | [diff] [blame] | 1921 | { |
| 1922 | rq->nr_running--; |
Ingo Molnar | 9c21724 | 2007-08-02 17:41:40 +0200 | [diff] [blame] | 1923 | } |
| 1924 | |
Ingo Molnar | 45bf76d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1925 | static void set_load_weight(struct task_struct *p) |
| 1926 | { |
Nikhil Rao | f05998d | 2011-05-18 10:09:38 -0700 | [diff] [blame] | 1927 | int prio = p->static_prio - MAX_RT_PRIO; |
| 1928 | struct load_weight *load = &p->se.load; |
| 1929 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1930 | /* |
| 1931 | * SCHED_IDLE tasks get minimal weight: |
| 1932 | */ |
| 1933 | if (p->policy == SCHED_IDLE) { |
Nikhil Rao | c8b2811 | 2011-05-18 14:37:48 -0700 | [diff] [blame] | 1934 | load->weight = scale_load(WEIGHT_IDLEPRIO); |
Nikhil Rao | f05998d | 2011-05-18 10:09:38 -0700 | [diff] [blame] | 1935 | load->inv_weight = WMULT_IDLEPRIO; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1936 | return; |
| 1937 | } |
| 1938 | |
Nikhil Rao | c8b2811 | 2011-05-18 14:37:48 -0700 | [diff] [blame] | 1939 | load->weight = scale_load(prio_to_weight[prio]); |
Nikhil Rao | f05998d | 2011-05-18 10:09:38 -0700 | [diff] [blame] | 1940 | load->inv_weight = prio_to_wmult[prio]; |
Ingo Molnar | 45bf76d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1941 | } |
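| | /* |
| | * Worked example (values from the prio_to_weight[] table; MAX_RT_PRIO |
| | * is 100 in this tree): a nice-0 task has static_prio 120, so prio = |
| | * 120 - 100 = 20 and load->weight = scale_load(1024). Each nice level |
| | * scales the weight by roughly 1.25x, e.g. nice -1 maps to 1277 and |
| | * nice +1 to 820, which works out to roughly a 10% CPU-share shift per |
| | * level between two competing tasks. |
| | */ |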
| 1942 | |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1943 | static void enqueue_task(struct rq *rq, struct task_struct *p, int flags) |
Gregory Haskins | 2087a1a | 2008-06-27 14:30:00 -0600 | [diff] [blame] | 1944 | { |
Mike Galbraith | a64692a | 2010-03-11 17:16:20 +0100 | [diff] [blame] | 1945 | update_rq_clock(rq); |
Ingo Molnar | 71f8bd4 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1946 | sched_info_queued(p); |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1947 | p->sched_class->enqueue_task(rq, p, flags); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1948 | } |
| 1949 | |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1950 | static void dequeue_task(struct rq *rq, struct task_struct *p, int flags) |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1951 | { |
Mike Galbraith | a64692a | 2010-03-11 17:16:20 +0100 | [diff] [blame] | 1952 | update_rq_clock(rq); |
Ankita Garg | 46ac22b | 2008-07-01 14:30:06 +0530 | [diff] [blame] | 1953 | sched_info_dequeued(p); |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1954 | p->sched_class->dequeue_task(rq, p, flags); |
Ingo Molnar | 71f8bd4 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1955 | } |
| 1956 | |
| 1957 | /* |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 1958 | * activate_task - move a task to the runqueue. |
| 1959 | */ |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1960 | static void activate_task(struct rq *rq, struct task_struct *p, int flags) |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 1961 | { |
| 1962 | if (task_contributes_to_load(p)) |
| 1963 | rq->nr_uninterruptible--; |
| 1964 | |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1965 | enqueue_task(rq, p, flags); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 1966 | } |
| 1967 | |
| 1968 | /* |
| 1969 | * deactivate_task - remove a task from the runqueue. |
| 1970 | */ |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1971 | static void deactivate_task(struct rq *rq, struct task_struct *p, int flags) |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 1972 | { |
| 1973 | if (task_contributes_to_load(p)) |
| 1974 | rq->nr_uninterruptible++; |
| 1975 | |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1976 | dequeue_task(rq, p, flags); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 1977 | } |
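| | /* |
| | * Bookkeeping note: task_contributes_to_load() filters on the task |
| | * state, so (modulo freezer special cases) only uninterruptible |
| | * sleepers move rq->nr_uninterruptible, the counter that feeds the |
| | * loadavg sampling done elsewhere in this file. |
| | */ |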
| 1978 | |
Venkatesh Pallipadi | b52bfee | 2010-10-04 17:03:19 -0700 | [diff] [blame] | 1979 | #ifdef CONFIG_IRQ_TIME_ACCOUNTING |
| 1980 | |
Venkatesh Pallipadi | 305e683 | 2010-10-04 17:03:21 -0700 | [diff] [blame] | 1981 | /* |
| 1982 | * There are no locks covering percpu hardirq/softirq time. |
| 1983 | * They are only modified in account_system_vtime, on the corresponding CPU |
| 1984 | * with interrupts disabled, so writes are safe. |
| 1985 | * They are read and saved off onto struct rq in update_rq_clock(). |
| 1986 | * This may result in another CPU reading this CPU's irq time and racing |
| 1987 | * with irq/account_system_vtime on this CPU. We would either get the old |
Peter Zijlstra | 8e92c20 | 2010-12-09 14:15:34 +0100 | [diff] [blame] | 1988 | * or the new value, with a side effect of accounting a slice of irq time to |
| 1989 | * the wrong task when an irq is in progress while we read rq->clock. That is |
| 1990 | * a worthy compromise in place of having locks on each irq in account_system_time. |
Venkatesh Pallipadi | 305e683 | 2010-10-04 17:03:21 -0700 | [diff] [blame] | 1991 | */ |
Venkatesh Pallipadi | b52bfee | 2010-10-04 17:03:19 -0700 | [diff] [blame] | 1992 | static DEFINE_PER_CPU(u64, cpu_hardirq_time); |
| 1993 | static DEFINE_PER_CPU(u64, cpu_softirq_time); |
| 1994 | |
| 1995 | static DEFINE_PER_CPU(u64, irq_start_time); |
| 1996 | static int sched_clock_irqtime; |
| 1997 | |
| 1998 | void enable_sched_clock_irqtime(void) |
| 1999 | { |
| 2000 | sched_clock_irqtime = 1; |
| 2001 | } |
| 2002 | |
| 2003 | void disable_sched_clock_irqtime(void) |
| 2004 | { |
| 2005 | sched_clock_irqtime = 0; |
| 2006 | } |
| 2007 | |
Peter Zijlstra | 8e92c20 | 2010-12-09 14:15:34 +0100 | [diff] [blame] | 2008 | #ifndef CONFIG_64BIT |
| 2009 | static DEFINE_PER_CPU(seqcount_t, irq_time_seq); |
| 2010 | |
| 2011 | static inline void irq_time_write_begin(void) |
| 2012 | { |
| 2013 | __this_cpu_inc(irq_time_seq.sequence); |
| 2014 | smp_wmb(); |
| 2015 | } |
| 2016 | |
| 2017 | static inline void irq_time_write_end(void) |
| 2018 | { |
| 2019 | smp_wmb(); |
| 2020 | __this_cpu_inc(irq_time_seq.sequence); |
| 2021 | } |
| 2022 | |
| 2023 | static inline u64 irq_time_read(int cpu) |
| 2024 | { |
| 2025 | u64 irq_time; |
| 2026 | unsigned seq; |
| 2027 | |
| 2028 | do { |
| 2029 | seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu)); |
| 2030 | irq_time = per_cpu(cpu_softirq_time, cpu) + |
| 2031 | per_cpu(cpu_hardirq_time, cpu); |
| 2032 | } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq)); |
| 2033 | |
| 2034 | return irq_time; |
| 2035 | } |
| 2036 | #else /* CONFIG_64BIT */ |
| 2037 | static inline void irq_time_write_begin(void) |
| 2038 | { |
| 2039 | } |
| 2040 | |
| 2041 | static inline void irq_time_write_end(void) |
| 2042 | { |
| 2043 | } |
| 2044 | |
| 2045 | static inline u64 irq_time_read(int cpu) |
Venkatesh Pallipadi | 305e683 | 2010-10-04 17:03:21 -0700 | [diff] [blame] | 2046 | { |
Venkatesh Pallipadi | 305e683 | 2010-10-04 17:03:21 -0700 | [diff] [blame] | 2047 | return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu); |
| 2048 | } |
Peter Zijlstra | 8e92c20 | 2010-12-09 14:15:34 +0100 | [diff] [blame] | 2049 | #endif /* CONFIG_64BIT */ |
Venkatesh Pallipadi | 305e683 | 2010-10-04 17:03:21 -0700 | [diff] [blame] | 2050 | |
Peter Zijlstra | fe44d62 | 2010-12-09 14:15:34 +0100 | [diff] [blame] | 2051 | /* |
| 2052 | * Called before incrementing preempt_count on {soft,}irq_enter |
| 2053 | * and before decrementing preempt_count on {soft,}irq_exit. |
| 2054 | */ |
Venkatesh Pallipadi | b52bfee | 2010-10-04 17:03:19 -0700 | [diff] [blame] | 2055 | void account_system_vtime(struct task_struct *curr) |
| 2056 | { |
| 2057 | unsigned long flags; |
Peter Zijlstra | fe44d62 | 2010-12-09 14:15:34 +0100 | [diff] [blame] | 2058 | s64 delta; |
Venkatesh Pallipadi | b52bfee | 2010-10-04 17:03:19 -0700 | [diff] [blame] | 2059 | int cpu; |
Venkatesh Pallipadi | b52bfee | 2010-10-04 17:03:19 -0700 | [diff] [blame] | 2060 | |
| 2061 | if (!sched_clock_irqtime) |
| 2062 | return; |
| 2063 | |
| 2064 | local_irq_save(flags); |
| 2065 | |
Venkatesh Pallipadi | b52bfee | 2010-10-04 17:03:19 -0700 | [diff] [blame] | 2066 | cpu = smp_processor_id(); |
Peter Zijlstra | fe44d62 | 2010-12-09 14:15:34 +0100 | [diff] [blame] | 2067 | delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time); |
| 2068 | __this_cpu_add(irq_start_time, delta); |
| 2069 | |
Peter Zijlstra | 8e92c20 | 2010-12-09 14:15:34 +0100 | [diff] [blame] | 2070 | irq_time_write_begin(); |
Venkatesh Pallipadi | b52bfee | 2010-10-04 17:03:19 -0700 | [diff] [blame] | 2071 | /* |
| 2072 | * We do not account for softirq time from ksoftirqd here. |
| 2073 | * We want to continue accounting softirq time to the ksoftirqd thread |
| 2074 | * in that case, so as not to confuse the scheduler with a special task |
| 2075 | * that does not consume any time but still wants to run. |
| 2076 | */ |
| 2077 | if (hardirq_count()) |
Peter Zijlstra | fe44d62 | 2010-12-09 14:15:34 +0100 | [diff] [blame] | 2078 | __this_cpu_add(cpu_hardirq_time, delta); |
Venkatesh Pallipadi | 4dd53d8 | 2010-12-21 17:09:00 -0800 | [diff] [blame] | 2079 | else if (in_serving_softirq() && curr != this_cpu_ksoftirqd()) |
Peter Zijlstra | fe44d62 | 2010-12-09 14:15:34 +0100 | [diff] [blame] | 2080 | __this_cpu_add(cpu_softirq_time, delta); |
Venkatesh Pallipadi | b52bfee | 2010-10-04 17:03:19 -0700 | [diff] [blame] | 2081 | |
Peter Zijlstra | 8e92c20 | 2010-12-09 14:15:34 +0100 | [diff] [blame] | 2082 | irq_time_write_end(); |
Venkatesh Pallipadi | b52bfee | 2010-10-04 17:03:19 -0700 | [diff] [blame] | 2083 | local_irq_restore(flags); |
| 2084 | } |
Ingo Molnar | b7dadc3 | 2010-10-18 20:00:37 +0200 | [diff] [blame] | 2085 | EXPORT_SYMBOL_GPL(account_system_vtime); |
Venkatesh Pallipadi | b52bfee | 2010-10-04 17:03:19 -0700 | [diff] [blame] | 2086 | |
Glauber Costa | e6e6685 | 2011-07-11 15:28:17 -0400 | [diff] [blame] | 2087 | #endif /* CONFIG_IRQ_TIME_ACCOUNTING */ |
| 2088 | |
| 2089 | #ifdef CONFIG_PARAVIRT |
| 2090 | static inline u64 steal_ticks(u64 steal) |
| 2091 | { |
| 2092 | if (unlikely(steal > NSEC_PER_SEC)) |
| 2093 | return div_u64(steal, TICK_NSEC); |
| 2094 | |
| 2095 | return __iter_div_u64_rem(steal, TICK_NSEC, &steal); |
| 2096 | } |
| 2097 | #endif |
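| | /* |
| | * Worked example (assuming HZ=1000, i.e. TICK_NSEC == 1,000,000): a |
| | * steal delta of 2,500,000 ns makes steal_ticks() return 2. The |
| | * 500,000 ns remainder is not lost; update_rq_clock_task() below only |
| | * advances prev_steal_time_rq by whole ticks, so the fraction carries |
| | * over into the next reading. |
| | */ |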
| 2098 | |
Peter Zijlstra | fe44d62 | 2010-12-09 14:15:34 +0100 | [diff] [blame] | 2099 | static void update_rq_clock_task(struct rq *rq, s64 delta) |
Venkatesh Pallipadi | aa48380 | 2010-10-04 17:03:22 -0700 | [diff] [blame] | 2100 | { |
Glauber Costa | 095c0aa | 2011-07-11 15:28:18 -0400 | [diff] [blame] | 2101 | /* |
| 2102 | * In theory, the compiler should just see 0 here, and optimize out the call |
| 2103 | * to sched_rt_avg_update. But I don't trust it... |
| 2104 | */ |
| 2105 | #if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING) |
| 2106 | s64 steal = 0, irq_delta = 0; |
| 2107 | #endif |
| 2108 | #ifdef CONFIG_IRQ_TIME_ACCOUNTING |
Peter Zijlstra | 8e92c20 | 2010-12-09 14:15:34 +0100 | [diff] [blame] | 2109 | irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; |
Peter Zijlstra | fe44d62 | 2010-12-09 14:15:34 +0100 | [diff] [blame] | 2110 | |
| 2111 | /* |
| 2112 | * Since irq_time is only updated on {soft,}irq_exit, we might run into |
| 2113 | * this case when a previous update_rq_clock() happened inside a |
| 2114 | * {soft,}irq region. |
| 2115 | * |
| 2116 | * When this happens, we stop ->clock_task and only update the |
| 2117 | * prev_irq_time stamp to account for the part that fit, so that the next |
| 2118 | * update will consume the rest. This ensures ->clock_task is |
| 2119 | * monotonic. |
| 2120 | * |
| 2121 | * It does, however, cause some slight mis-attribution of {soft,}irq |
| 2122 | * time; a more accurate solution would be to update the irq_time using |
| 2123 | * the current rq->clock timestamp, except that would require using |
| 2124 | * atomic ops. |
| 2125 | */ |
| 2126 | if (irq_delta > delta) |
| 2127 | irq_delta = delta; |
| 2128 | |
| 2129 | rq->prev_irq_time += irq_delta; |
| 2130 | delta -= irq_delta; |
Glauber Costa | 095c0aa | 2011-07-11 15:28:18 -0400 | [diff] [blame] | 2131 | #endif |
| 2132 | #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING |
| 2133 | if (static_branch((¶virt_steal_rq_enabled))) { |
| 2134 | u64 st; |
| 2135 | |
| 2136 | steal = paravirt_steal_clock(cpu_of(rq)); |
| 2137 | steal -= rq->prev_steal_time_rq; |
| 2138 | |
| 2139 | if (unlikely(steal > delta)) |
| 2140 | steal = delta; |
| 2141 | |
| 2142 | st = steal_ticks(steal); |
| 2143 | steal = st * TICK_NSEC; |
| 2144 | |
| 2145 | rq->prev_steal_time_rq += steal; |
| 2146 | |
| 2147 | delta -= steal; |
| 2148 | } |
| 2149 | #endif |
| 2150 | |
Peter Zijlstra | fe44d62 | 2010-12-09 14:15:34 +0100 | [diff] [blame] | 2151 | rq->clock_task += delta; |
| 2152 | |
Glauber Costa | 095c0aa | 2011-07-11 15:28:18 -0400 | [diff] [blame] | 2153 | #if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING) |
| 2154 | if ((irq_delta + steal) && sched_feat(NONTASK_POWER)) |
| 2155 | sched_rt_avg_update(rq, irq_delta + steal); |
| 2156 | #endif |
Venkatesh Pallipadi | aa48380 | 2010-10-04 17:03:22 -0700 | [diff] [blame] | 2157 | } |
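| | /* |
| | * Clamping example (illustrative numbers): if delta is 100us but the |
| | * irq time grew by 120us because the previous update_rq_clock() ran |
| | * inside an irq region, irq_delta is clamped to 100us: clock_task does |
| | * not advance this round, prev_irq_time absorbs the 100us, and the |
| | * remaining 20us is consumed by the next update, which is what keeps |
| | * clock_task monotonic as described above. |
| | */ |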
| 2158 | |
Glauber Costa | 095c0aa | 2011-07-11 15:28:18 -0400 | [diff] [blame] | 2159 | #ifdef CONFIG_IRQ_TIME_ACCOUNTING |
Venkatesh Pallipadi | abb74ce | 2010-12-21 17:09:03 -0800 | [diff] [blame] | 2160 | static int irqtime_account_hi_update(void) |
| 2161 | { |
| 2162 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
| 2163 | unsigned long flags; |
| 2164 | u64 latest_ns; |
| 2165 | int ret = 0; |
| 2166 | |
| 2167 | local_irq_save(flags); |
| 2168 | latest_ns = this_cpu_read(cpu_hardirq_time); |
| 2169 | if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->irq)) |
| 2170 | ret = 1; |
| 2171 | local_irq_restore(flags); |
| 2172 | return ret; |
| 2173 | } |
| 2174 | |
| 2175 | static int irqtime_account_si_update(void) |
| 2176 | { |
| 2177 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
| 2178 | unsigned long flags; |
| 2179 | u64 latest_ns; |
| 2180 | int ret = 0; |
| 2181 | |
| 2182 | local_irq_save(flags); |
| 2183 | latest_ns = this_cpu_read(cpu_softirq_time); |
| 2184 | if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->softirq)) |
| 2185 | ret = 1; |
| 2186 | local_irq_restore(flags); |
| 2187 | return ret; |
| 2188 | } |
| 2189 | |
Peter Zijlstra | fe44d62 | 2010-12-09 14:15:34 +0100 | [diff] [blame] | 2190 | #else /* CONFIG_IRQ_TIME_ACCOUNTING */ |
Venkatesh Pallipadi | 305e683 | 2010-10-04 17:03:21 -0700 | [diff] [blame] | 2191 | |
Venkatesh Pallipadi | abb74ce | 2010-12-21 17:09:03 -0800 | [diff] [blame] | 2192 | #define sched_clock_irqtime (0) |
| 2193 | |
Glauber Costa | 095c0aa | 2011-07-11 15:28:18 -0400 | [diff] [blame] | 2194 | #endif |
Venkatesh Pallipadi | b52bfee | 2010-10-04 17:03:19 -0700 | [diff] [blame] | 2195 | |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2196 | #include "sched_idletask.c" |
| 2197 | #include "sched_fair.c" |
| 2198 | #include "sched_rt.c" |
Mike Galbraith | 5091faa | 2010-11-30 14:18:03 +0100 | [diff] [blame] | 2199 | #include "sched_autogroup.c" |
Peter Zijlstra | 34f971f | 2010-09-22 13:53:15 +0200 | [diff] [blame] | 2200 | #include "sched_stoptask.c" |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2201 | #ifdef CONFIG_SCHED_DEBUG |
| 2202 | # include "sched_debug.c" |
| 2203 | #endif |
| 2204 | |
Peter Zijlstra | 34f971f | 2010-09-22 13:53:15 +0200 | [diff] [blame] | 2205 | void sched_set_stop_task(int cpu, struct task_struct *stop) |
| 2206 | { |
| 2207 | struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; |
| 2208 | struct task_struct *old_stop = cpu_rq(cpu)->stop; |
| 2209 | |
| 2210 | if (stop) { |
| 2211 | /* |
| 2212 | * Make it appear like a SCHED_FIFO task; it's something |
| 2213 | * userspace knows about and won't get confused by. |
| 2214 | * |
| 2215 | * Also, it will make PI more or less work without too |
| 2216 | * much confusion -- but then, stop work should not |
| 2217 | * rely on PI working anyway. |
| 2218 | */ |
| 2219 | sched_setscheduler_nocheck(stop, SCHED_FIFO, ¶m); |
| 2220 | |
| 2221 | stop->sched_class = &stop_sched_class; |
| 2222 | } |
| 2223 | |
| 2224 | cpu_rq(cpu)->stop = stop; |
| 2225 | |
| 2226 | if (old_stop) { |
| 2227 | /* |
| 2228 | * Reset it back to a normal scheduling class so that |
| 2229 | * it can die in pieces. |
| 2230 | */ |
| 2231 | old_stop->sched_class = &rt_sched_class; |
| 2232 | } |
| 2233 | } |
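| | /* |
| | * Usage sketch (hedged; the exact call sites live in the cpu_stop |
| | * code): the stop machinery hands its per-cpu kthread in via |
| | * sched_set_stop_task(cpu, p) on bringup and passes NULL on teardown, |
| | * which is what drops the old thread back to rt_sched_class above so |
| | * it can exit normally. |
| | */ |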
| 2234 | |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 2235 | /* |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2236 | * __normal_prio - return the priority that is based on the static prio |
Ingo Molnar | 71f8bd4 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2237 | */ |
Ingo Molnar | 1453118 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2238 | static inline int __normal_prio(struct task_struct *p) |
| 2239 | { |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2240 | return p->static_prio; |
Ingo Molnar | 1453118 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2241 | } |
| 2242 | |
| 2243 | /* |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 2244 | * Calculate the expected normal priority: i.e. priority |
| 2245 | * without taking RT-inheritance into account. Might be |
| 2246 | * boosted by interactivity modifiers. Changes upon fork, |
| 2247 | * setprio syscalls, and whenever the interactivity |
| 2248 | * estimator recalculates. |
| 2249 | */ |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 2250 | static inline int normal_prio(struct task_struct *p) |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 2251 | { |
| 2252 | int prio; |
| 2253 | |
Ingo Molnar | e05606d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2254 | if (task_has_rt_policy(p)) |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 2255 | prio = MAX_RT_PRIO-1 - p->rt_priority; |
| 2256 | else |
| 2257 | prio = __normal_prio(p); |
| 2258 | return prio; |
| 2259 | } |
| 2260 | |
| 2261 | /* |
| 2262 | * Calculate the current priority, i.e. the priority |
| 2263 | * taken into account by the scheduler. This value might |
| 2264 | * be boosted by RT tasks, or might be boosted by |
| 2265 | * interactivity modifiers. Will be RT if the task got |
| 2266 | * RT-boosted. If not then it returns p->normal_prio. |
| 2267 | */ |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 2268 | static int effective_prio(struct task_struct *p) |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 2269 | { |
| 2270 | p->normal_prio = normal_prio(p); |
| 2271 | /* |
| 2272 | * If we are RT tasks or we were boosted to RT priority, |
| 2273 | * keep the priority unchanged. Otherwise, update priority |
| 2274 | * to the normal priority: |
| 2275 | */ |
| 2276 | if (!rt_prio(p->prio)) |
| 2277 | return p->normal_prio; |
| 2278 | return p->prio; |
| 2279 | } |
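| | /* |
| | * Priority arithmetic, by example (MAX_RT_PRIO is 100 in this tree): a |
| | * SCHED_FIFO task with rt_priority 50 gets normal_prio = 100 - 1 - 50 |
| | * = 49, while a nice-0 SCHED_NORMAL task keeps its static_prio of 120. |
| | * Lower numbers mean higher priority, so every RT task (prio < 100) |
| | * outranks every fair task. |
| | */ |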
| 2280 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2281 | /** |
| 2282 | * task_curr - is this task currently executing on a CPU? |
| 2283 | * @p: the task in question. |
| 2284 | */ |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 2285 | inline int task_curr(const struct task_struct *p) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2286 | { |
| 2287 | return cpu_curr(task_cpu(p)) == p; |
| 2288 | } |
| 2289 | |
Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 2290 | static inline void check_class_changed(struct rq *rq, struct task_struct *p, |
| 2291 | const struct sched_class *prev_class, |
Peter Zijlstra | da7a735 | 2011-01-17 17:03:27 +0100 | [diff] [blame] | 2292 | int oldprio) |
Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 2293 | { |
| 2294 | if (prev_class != p->sched_class) { |
| 2295 | if (prev_class->switched_from) |
Peter Zijlstra | da7a735 | 2011-01-17 17:03:27 +0100 | [diff] [blame] | 2296 | prev_class->switched_from(rq, p); |
| 2297 | p->sched_class->switched_to(rq, p); |
| 2298 | } else if (oldprio != p->prio) |
| 2299 | p->sched_class->prio_changed(rq, p, oldprio); |
Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 2300 | } |
| 2301 | |
Peter Zijlstra | 1e5a740 | 2010-10-31 12:37:04 +0100 | [diff] [blame] | 2302 | static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) |
| 2303 | { |
| 2304 | const struct sched_class *class; |
| 2305 | |
| 2306 | if (p->sched_class == rq->curr->sched_class) { |
| 2307 | rq->curr->sched_class->check_preempt_curr(rq, p, flags); |
| 2308 | } else { |
| 2309 | for_each_class(class) { |
| 2310 | if (class == rq->curr->sched_class) |
| 2311 | break; |
| 2312 | if (class == p->sched_class) { |
| 2313 | resched_task(rq->curr); |
| 2314 | break; |
| 2315 | } |
| 2316 | } |
| 2317 | } |
| 2318 | |
| 2319 | /* |
| 2320 | * A queue event has occurred, and we're going to schedule. In |
| 2321 | * this case, we can save a useless back-to-back clock update. |
| 2322 | */ |
Peter Zijlstra | fd2f441 | 2011-04-05 17:23:44 +0200 | [diff] [blame] | 2323 | if (rq->curr->on_rq && test_tsk_need_resched(rq->curr)) |
Peter Zijlstra | 1e5a740 | 2010-10-31 12:37:04 +0100 | [diff] [blame] | 2324 | rq->skip_clock_update = 1; |
| 2325 | } |
| 2326 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2327 | #ifdef CONFIG_SMP |
Ingo Molnar | cc36773 | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 2328 | /* |
| 2329 | * Is this task likely cache-hot: |
| 2330 | */ |
Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 2331 | static int |
Ingo Molnar | cc36773 | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 2332 | task_hot(struct task_struct *p, u64 now, struct sched_domain *sd) |
| 2333 | { |
| 2334 | s64 delta; |
| 2335 | |
Peter Zijlstra | e6c8fba | 2009-12-16 18:04:33 +0100 | [diff] [blame] | 2336 | if (p->sched_class != &fair_sched_class) |
| 2337 | return 0; |
| 2338 | |
Nikhil Rao | ef8002f | 2010-10-13 12:09:35 -0700 | [diff] [blame] | 2339 | if (unlikely(p->policy == SCHED_IDLE)) |
| 2340 | return 0; |
| 2341 | |
Ingo Molnar | f540a60 | 2008-03-15 17:10:34 +0100 | [diff] [blame] | 2342 | /* |
| 2343 | * Buddy candidates are cache hot: |
| 2344 | */ |
Mike Galbraith | f685cea | 2009-10-23 23:09:22 +0200 | [diff] [blame] | 2345 | if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running && |
Peter Zijlstra | 4793241 | 2008-11-04 21:25:09 +0100 | [diff] [blame] | 2346 | (&p->se == cfs_rq_of(&p->se)->next || |
| 2347 | &p->se == cfs_rq_of(&p->se)->last)) |
Ingo Molnar | f540a60 | 2008-03-15 17:10:34 +0100 | [diff] [blame] | 2348 | return 1; |
| 2349 | |
Ingo Molnar | 6bc1665 | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 2350 | if (sysctl_sched_migration_cost == -1) |
| 2351 | return 1; |
| 2352 | if (sysctl_sched_migration_cost == 0) |
| 2353 | return 0; |
| 2354 | |
Ingo Molnar | cc36773 | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 2355 | delta = now - p->se.exec_start; |
| 2356 | |
| 2357 | return delta < (s64)sysctl_sched_migration_cost; |
| 2358 | } |
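| | /* |
| | * The cut-off above is tunable: sysctl_sched_migration_cost defaults |
| | * to 500000 ns (0.5 ms); -1 makes the check always report hot and 0 |
| | * always cold for the tasks that reach it, as the special cases above |
| | * show. |
| | */ |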
| 2359 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2360 | void set_task_cpu(struct task_struct *p, unsigned int new_cpu) |
Ingo Molnar | c65cc87 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2361 | { |
Peter Zijlstra | e291200 | 2009-12-16 18:04:36 +0100 | [diff] [blame] | 2362 | #ifdef CONFIG_SCHED_DEBUG |
| 2363 | /* |
| 2364 | * We should never call set_task_cpu() on a blocked task, |
| 2365 | * ttwu() will sort out the placement. |
| 2366 | */ |
Peter Zijlstra | 077614e | 2009-12-17 13:16:31 +0100 | [diff] [blame] | 2367 | WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING && |
| 2368 | !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)); |
Peter Zijlstra | 0122ec5 | 2011-04-05 17:23:51 +0200 | [diff] [blame] | 2369 | |
| 2370 | #ifdef CONFIG_LOCKDEP |
Peter Zijlstra | 6c6c54e | 2011-06-03 17:37:07 +0200 | [diff] [blame] | 2371 | /* |
| 2372 | * The caller should hold either p->pi_lock or rq->lock when changing |
| 2373 | * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks. |
| 2374 | * |
| 2375 | * sched_move_task() holds both and thus holding either pins the cgroup, |
| 2376 | * see set_task_rq(). |
| 2377 | * |
| 2378 | * Furthermore, all task_rq users should acquire both locks, see |
| 2379 | * task_rq_lock(). |
| 2380 | */ |
Peter Zijlstra | 0122ec5 | 2011-04-05 17:23:51 +0200 | [diff] [blame] | 2381 | WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || |
| 2382 | lockdep_is_held(&task_rq(p)->lock))); |
| 2383 | #endif |
Peter Zijlstra | e291200 | 2009-12-16 18:04:36 +0100 | [diff] [blame] | 2384 | #endif |
| 2385 | |
Mathieu Desnoyers | de1d728 | 2009-05-05 16:49:59 +0800 | [diff] [blame] | 2386 | trace_sched_migrate_task(p, new_cpu); |
Peter Zijlstra | cbc34ed | 2008-12-10 08:08:22 +0100 | [diff] [blame] | 2387 | |
Peter Zijlstra | 0c69774 | 2009-12-22 15:43:19 +0100 | [diff] [blame] | 2388 | if (task_cpu(p) != new_cpu) { |
| 2389 | p->se.nr_migrations++; |
Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 2390 | perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0); |
Peter Zijlstra | 0c69774 | 2009-12-22 15:43:19 +0100 | [diff] [blame] | 2391 | } |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2392 | |
| 2393 | __set_task_cpu(p, new_cpu); |
Ingo Molnar | c65cc87 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2394 | } |
| 2395 | |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 2396 | struct migration_arg { |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 2397 | struct task_struct *task; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2398 | int dest_cpu; |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 2399 | }; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2400 | |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 2401 | static int migration_cpu_stop(void *data); |
| 2402 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2403 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2404 | * wait_task_inactive - wait for a thread to unschedule. |
| 2405 | * |
Roland McGrath | 85ba2d8 | 2008-07-25 19:45:58 -0700 | [diff] [blame] | 2406 | * If @match_state is nonzero, it's the @p->state value just checked and |
| 2407 | * not expected to change. If it changes, i.e. @p might have woken up, |
| 2408 | * then return zero. When we succeed in waiting for @p to be off its CPU, |
| 2409 | * we return a positive number (its total switch count). If a second call |
| 2410 | * a short while later returns the same number, the caller can be sure that |
| 2411 | * @p has remained unscheduled the whole time. |
| 2412 | * |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2413 | * The caller must ensure that the task *will* unschedule sometime soon, |
| 2414 | * else this function might spin for a *long* time. This function can't |
| 2415 | * be called with interrupts off, or it may introduce deadlock with |
| 2416 | * smp_call_function() if an IPI is sent by the same process we are |
| 2417 | * waiting to become inactive. |
| 2418 | */ |
Roland McGrath | 85ba2d8 | 2008-07-25 19:45:58 -0700 | [diff] [blame] | 2419 | unsigned long wait_task_inactive(struct task_struct *p, long match_state) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2420 | { |
| 2421 | unsigned long flags; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2422 | int running, on_rq; |
Roland McGrath | 85ba2d8 | 2008-07-25 19:45:58 -0700 | [diff] [blame] | 2423 | unsigned long ncsw; |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 2424 | struct rq *rq; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2425 | |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 2426 | for (;;) { |
| 2427 | /* |
| 2428 | * We do the initial early heuristics without holding |
| 2429 | * any task-queue locks at all. We'll only try to get |
| 2430 | * the runqueue lock when things look like they will |
| 2431 | * work out! |
| 2432 | */ |
| 2433 | rq = task_rq(p); |
Linus Torvalds | fa490cf | 2007-06-18 09:34:40 -0700 | [diff] [blame] | 2434 | |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 2435 | /* |
| 2436 | * If the task is actively running on another CPU |
| 2437 | * still, just relax and busy-wait without holding |
| 2438 | * any locks. |
| 2439 | * |
| 2440 | * NOTE! Since we don't hold any locks, it's not |
| 2441 | * even guaranteed that "rq" stays the right runqueue! |
| 2442 | * But we don't care, since "task_running()" will |
| 2443 | * return false if the runqueue has changed and p |
| 2444 | * is actually now running somewhere else! |
| 2445 | */ |
Roland McGrath | 85ba2d8 | 2008-07-25 19:45:58 -0700 | [diff] [blame] | 2446 | while (task_running(rq, p)) { |
| 2447 | if (match_state && unlikely(p->state != match_state)) |
| 2448 | return 0; |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 2449 | cpu_relax(); |
Roland McGrath | 85ba2d8 | 2008-07-25 19:45:58 -0700 | [diff] [blame] | 2450 | } |
Linus Torvalds | fa490cf | 2007-06-18 09:34:40 -0700 | [diff] [blame] | 2451 | |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 2452 | /* |
| 2453 | * Ok, time to look more closely! We need the rq |
| 2454 | * lock now, to be *sure*. If we're wrong, we'll |
| 2455 | * just go back and repeat. |
| 2456 | */ |
| 2457 | rq = task_rq_lock(p, &flags); |
Peter Zijlstra | 27a9da6 | 2010-05-04 20:36:56 +0200 | [diff] [blame] | 2458 | trace_sched_wait_task(p); |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 2459 | running = task_running(rq, p); |
Peter Zijlstra | fd2f441 | 2011-04-05 17:23:44 +0200 | [diff] [blame] | 2460 | on_rq = p->on_rq; |
Roland McGrath | 85ba2d8 | 2008-07-25 19:45:58 -0700 | [diff] [blame] | 2461 | ncsw = 0; |
Oleg Nesterov | f31e11d | 2008-08-20 16:54:44 -0700 | [diff] [blame] | 2462 | if (!match_state || p->state == match_state) |
Oleg Nesterov | 93dcf55 | 2008-08-20 16:54:44 -0700 | [diff] [blame] | 2463 | ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ |
Peter Zijlstra | 0122ec5 | 2011-04-05 17:23:51 +0200 | [diff] [blame] | 2464 | task_rq_unlock(rq, p, &flags); |
Linus Torvalds | fa490cf | 2007-06-18 09:34:40 -0700 | [diff] [blame] | 2465 | |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 2466 | /* |
Roland McGrath | 85ba2d8 | 2008-07-25 19:45:58 -0700 | [diff] [blame] | 2467 | * If it changed from the expected state, bail out now. |
| 2468 | */ |
| 2469 | if (unlikely(!ncsw)) |
| 2470 | break; |
| 2471 | |
| 2472 | /* |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 2473 | * Was it really running after all now that we |
| 2474 | * checked with the proper locks actually held? |
| 2475 | * |
| 2476 | * Oops. Go back and try again.. |
| 2477 | */ |
| 2478 | if (unlikely(running)) { |
| 2479 | cpu_relax(); |
| 2480 | continue; |
| 2481 | } |
| 2482 | |
| 2483 | /* |
| 2484 | * It's not enough that it's not actively running, |
| 2485 | * it must be off the runqueue _entirely_, and not |
| 2486 | * preempted! |
| 2487 | * |
Luis Henriques | 80dd99b | 2009-03-16 19:58:09 +0000 | [diff] [blame] | 2488 | * So if it was still runnable (but just not actively |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 2489 | * running right now), it's preempted, and we should |
| 2490 | * yield - it could be a while. |
| 2491 | */ |
| 2492 | if (unlikely(on_rq)) { |
Thomas Gleixner | 8eb90c3 | 2011-02-23 23:52:21 +0000 | [diff] [blame] | 2493 | ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ); |
| 2494 | |
| 2495 | set_current_state(TASK_UNINTERRUPTIBLE); |
| 2496 | schedule_hrtimeout(&to, HRTIMER_MODE_REL); |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 2497 | continue; |
| 2498 | } |
| 2499 | |
| 2500 | /* |
| 2501 | * Ahh, all good. It wasn't running, and it wasn't |
| 2502 | * runnable, which means that it will never become |
| 2503 | * running in the future either. We're all done! |
| 2504 | */ |
| 2505 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2506 | } |
Roland McGrath | 85ba2d8 | 2008-07-25 19:45:58 -0700 | [diff] [blame] | 2507 | |
| 2508 | return ncsw; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2509 | } |
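| | /* |
| | * Caller pattern sketch (illustrative, not kernel source): the ncsw |
| | * cookie lets a caller prove that a task stayed off its CPU across two |
| | * checks: |
| | * |
| | *	ncsw = wait_task_inactive(p, TASK_TRACED); |
| | *	... inspect p while it is known to be off-CPU ... |
| | *	if (wait_task_inactive(p, TASK_TRACED) != ncsw) |
| | *		... p ran in between; retry ... |
| | */ |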
| 2510 | |
| 2511 | /*** |
| 2512 | * kick_process - kick a running thread to enter/exit the kernel |
| 2513 | * @p: the to-be-kicked thread |
| 2514 | * |
| 2515 | * Cause a process which is running on another CPU to enter |
| 2516 | * kernel-mode, without any delay. (to get signals handled.) |
| 2517 | * |
Lucas De Marchi | 25985ed | 2011-03-30 22:57:33 -0300 | [diff] [blame] | 2518 | * NOTE: this function doesn't have to take the runqueue lock, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2519 | * because all it wants to ensure is that the remote task enters |
| 2520 | * the kernel. If the IPI races and the task has been migrated |
| 2521 | * to another CPU then no harm is done and the purpose has been |
| 2522 | * achieved as well. |
| 2523 | */ |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 2524 | void kick_process(struct task_struct *p) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2525 | { |
| 2526 | int cpu; |
| 2527 | |
| 2528 | preempt_disable(); |
| 2529 | cpu = task_cpu(p); |
| 2530 | if ((cpu != smp_processor_id()) && task_curr(p)) |
| 2531 | smp_send_reschedule(cpu); |
| 2532 | preempt_enable(); |
| 2533 | } |
Rusty Russell | b43e352 | 2009-06-12 22:27:00 -0600 | [diff] [blame] | 2534 | EXPORT_SYMBOL_GPL(kick_process); |
Nick Piggin | 476d139 | 2005-06-25 14:57:29 -0700 | [diff] [blame] | 2535 | #endif /* CONFIG_SMP */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2536 | |
Peter Zijlstra | 970b13b | 2009-11-25 13:31:39 +0100 | [diff] [blame] | 2537 | #ifdef CONFIG_SMP |
Oleg Nesterov | 30da688 | 2010-03-15 10:10:19 +0100 | [diff] [blame] | 2538 | /* |
Peter Zijlstra | 013fdb8 | 2011-04-05 17:23:45 +0200 | [diff] [blame] | 2539 | * ->cpus_allowed is protected by both rq->lock and p->pi_lock |
Oleg Nesterov | 30da688 | 2010-03-15 10:10:19 +0100 | [diff] [blame] | 2540 | */ |
Peter Zijlstra | 5da9a0f | 2009-12-16 18:04:38 +0100 | [diff] [blame] | 2541 | static int select_fallback_rq(int cpu, struct task_struct *p) |
| 2542 | { |
| 2543 | int dest_cpu; |
| 2544 | const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu)); |
| 2545 | |
| 2546 | /* Look for allowed, online CPU in same node. */ |
| 2547 | for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask) |
Peter Zijlstra | fa17b50 | 2011-06-16 12:23:22 +0200 | [diff] [blame] | 2548 | if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p))) |
Peter Zijlstra | 5da9a0f | 2009-12-16 18:04:38 +0100 | [diff] [blame] | 2549 | return dest_cpu; |
| 2550 | |
| 2551 | /* Any allowed, online CPU? */ |
Peter Zijlstra | fa17b50 | 2011-06-16 12:23:22 +0200 | [diff] [blame] | 2552 | dest_cpu = cpumask_any_and(tsk_cpus_allowed(p), cpu_active_mask); |
Peter Zijlstra | 5da9a0f | 2009-12-16 18:04:38 +0100 | [diff] [blame] | 2553 | if (dest_cpu < nr_cpu_ids) |
| 2554 | return dest_cpu; |
| 2555 | |
| 2556 | /* No more Mr. Nice Guy. */ |
Peter Zijlstra | 48c5cca | 2010-11-13 19:32:29 +0100 | [diff] [blame] | 2557 | dest_cpu = cpuset_cpus_allowed_fallback(p); |
| 2558 | /* |
| 2559 | * Don't tell them about moving exiting tasks or |
| 2560 | * kernel threads (both mm NULL), since they never |
| 2561 | * leave the kernel. |
| 2562 | */ |
| 2563 | if (p->mm && printk_ratelimit()) { |
| 2564 | printk(KERN_INFO "process %d (%s) no longer affine to cpu%d\n", |
| 2565 | task_pid_nr(p), p->comm, cpu); |
Peter Zijlstra | 5da9a0f | 2009-12-16 18:04:38 +0100 | [diff] [blame] | 2566 | } |
| 2567 | |
| 2568 | return dest_cpu; |
| 2569 | } |
| 2570 | |
Peter Zijlstra | e291200 | 2009-12-16 18:04:36 +0100 | [diff] [blame] | 2571 | /* |
Peter Zijlstra | 013fdb8 | 2011-04-05 17:23:45 +0200 | [diff] [blame] | 2572 | * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable. |
Peter Zijlstra | e291200 | 2009-12-16 18:04:36 +0100 | [diff] [blame] | 2573 | */ |
Peter Zijlstra | 970b13b | 2009-11-25 13:31:39 +0100 | [diff] [blame] | 2574 | static inline |
Peter Zijlstra | 7608dec | 2011-04-05 17:23:46 +0200 | [diff] [blame] | 2575 | int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags) |
Peter Zijlstra | 970b13b | 2009-11-25 13:31:39 +0100 | [diff] [blame] | 2576 | { |
Peter Zijlstra | 7608dec | 2011-04-05 17:23:46 +0200 | [diff] [blame] | 2577 | int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags); |
Peter Zijlstra | e291200 | 2009-12-16 18:04:36 +0100 | [diff] [blame] | 2578 | |
| 2579 | /* |
| 2580 | * In order not to call set_task_cpu() on a blocking task we need |
| 2581 | * to rely on ttwu() to place the task on a valid ->cpus_allowed |
| 2582 | * cpu. |
| 2583 | * |
| 2584 | * Since this is common to all placement strategies, this lives here. |
| 2585 | * |
| 2586 | * [ this allows ->select_task_rq() to simply return task_cpu(p) and |
| 2587 | * not worry about this generic constraint ] |
| 2588 | */ |
Peter Zijlstra | fa17b50 | 2011-06-16 12:23:22 +0200 | [diff] [blame] | 2589 | if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) || |
Peter Zijlstra | 70f1120 | 2009-12-20 17:36:27 +0100 | [diff] [blame] | 2590 | !cpu_online(cpu))) |
Peter Zijlstra | 5da9a0f | 2009-12-16 18:04:38 +0100 | [diff] [blame] | 2591 | cpu = select_fallback_rq(task_cpu(p), p); |
Peter Zijlstra | e291200 | 2009-12-16 18:04:36 +0100 | [diff] [blame] | 2592 | |
| 2593 | return cpu; |
Peter Zijlstra | 970b13b | 2009-11-25 13:31:39 +0100 | [diff] [blame] | 2594 | } |
Mike Galbraith | 09a40af | 2010-04-15 07:29:59 +0200 | [diff] [blame] | 2595 | |
| 2596 | static void update_avg(u64 *avg, u64 sample) |
| 2597 | { |
| 2598 | s64 diff = sample - *avg; |
| 2599 | *avg += diff >> 3; |
| 2600 | } |
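| | /* |
| | * update_avg() is an exponentially weighted moving average with 1/8 |
| | * weight: avg += (sample - avg) / 8. E.g. with *avg == 800 and a |
| | * sample of 1600, diff is 800, diff >> 3 is 100, so *avg becomes 900; |
| | * each further equal sample closes another 1/8 of the remaining gap, |
| | * smoothing out one-off spikes. |
| | */ |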
Peter Zijlstra | 970b13b | 2009-11-25 13:31:39 +0100 | [diff] [blame] | 2601 | #endif |
| 2602 | |
Peter Zijlstra | d7c01d2 | 2011-04-05 17:23:43 +0200 | [diff] [blame] | 2603 | static void |
Peter Zijlstra | b84cb5d | 2011-04-05 17:23:55 +0200 | [diff] [blame] | 2604 | ttwu_stat(struct task_struct *p, int cpu, int wake_flags) |
Tejun Heo | 9ed3811 | 2009-12-03 15:08:03 +0900 | [diff] [blame] | 2605 | { |
Peter Zijlstra | d7c01d2 | 2011-04-05 17:23:43 +0200 | [diff] [blame] | 2606 | #ifdef CONFIG_SCHEDSTATS |
Peter Zijlstra | b84cb5d | 2011-04-05 17:23:55 +0200 | [diff] [blame] | 2607 | struct rq *rq = this_rq(); |
Tejun Heo | 9ed3811 | 2009-12-03 15:08:03 +0900 | [diff] [blame] | 2608 | |
Peter Zijlstra | d7c01d2 | 2011-04-05 17:23:43 +0200 | [diff] [blame] | 2609 | #ifdef CONFIG_SMP |
| 2610 | int this_cpu = smp_processor_id(); |
Tejun Heo | 9ed3811 | 2009-12-03 15:08:03 +0900 | [diff] [blame] | 2611 | |
Peter Zijlstra | d7c01d2 | 2011-04-05 17:23:43 +0200 | [diff] [blame] | 2612 | if (cpu == this_cpu) { |
| 2613 | schedstat_inc(rq, ttwu_local); |
| 2614 | schedstat_inc(p, se.statistics.nr_wakeups_local); |
| 2615 | } else { |
| 2616 | struct sched_domain *sd; |
| 2617 | |
| 2618 | schedstat_inc(p, se.statistics.nr_wakeups_remote); |
Peter Zijlstra | 057f3fa | 2011-04-18 11:24:34 +0200 | [diff] [blame] | 2619 | rcu_read_lock(); |
Peter Zijlstra | d7c01d2 | 2011-04-05 17:23:43 +0200 | [diff] [blame] | 2620 | for_each_domain(this_cpu, sd) { |
| 2621 | if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { |
| 2622 | schedstat_inc(sd, ttwu_wake_remote); |
| 2623 | break; |
| 2624 | } |
| 2625 | } |
Peter Zijlstra | 057f3fa | 2011-04-18 11:24:34 +0200 | [diff] [blame] | 2626 | rcu_read_unlock(); |
Peter Zijlstra | d7c01d2 | 2011-04-05 17:23:43 +0200 | [diff] [blame] | 2627 | } |
Peter Zijlstra | f339b9d | 2011-05-31 10:49:20 +0200 | [diff] [blame] | 2628 | |
| 2629 | if (wake_flags & WF_MIGRATED) |
| 2630 | schedstat_inc(p, se.statistics.nr_wakeups_migrate); |
| 2631 | |
Peter Zijlstra | d7c01d2 | 2011-04-05 17:23:43 +0200 | [diff] [blame] | 2632 | #endif /* CONFIG_SMP */ |
| 2633 | |
| 2634 | schedstat_inc(rq, ttwu_count); |
| 2635 | schedstat_inc(p, se.statistics.nr_wakeups); |
| 2636 | |
| 2637 | if (wake_flags & WF_SYNC) |
| 2638 | schedstat_inc(p, se.statistics.nr_wakeups_sync); |
| 2639 | |
Peter Zijlstra | d7c01d2 | 2011-04-05 17:23:43 +0200 | [diff] [blame] | 2640 | #endif /* CONFIG_SCHEDSTATS */ |
Tejun Heo | 9ed3811 | 2009-12-03 15:08:03 +0900 | [diff] [blame] | 2641 | } |
| 2642 | |
Peter Zijlstra | d7c01d2 | 2011-04-05 17:23:43 +0200 | [diff] [blame] | 2643 | static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags) |
Tejun Heo | 9ed3811 | 2009-12-03 15:08:03 +0900 | [diff] [blame] | 2644 | { |
Tejun Heo | 9ed3811 | 2009-12-03 15:08:03 +0900 | [diff] [blame] | 2645 | activate_task(rq, p, en_flags); |
Peter Zijlstra | fd2f441 | 2011-04-05 17:23:44 +0200 | [diff] [blame] | 2646 | p->on_rq = 1; |
Peter Zijlstra | c2f7115 | 2011-04-13 13:28:56 +0200 | [diff] [blame] | 2647 | |
| 2648 | /* if a worker is waking up, notify workqueue */ |
| 2649 | if (p->flags & PF_WQ_WORKER) |
| 2650 | wq_worker_waking_up(p, cpu_of(rq)); |
Tejun Heo | 9ed3811 | 2009-12-03 15:08:03 +0900 | [diff] [blame] | 2651 | } |
| 2652 | |
Peter Zijlstra | 23f41ee | 2011-04-05 17:23:56 +0200 | [diff] [blame] | 2653 | /* |
| 2654 | * Mark the task runnable and perform wakeup-preemption. |
| 2655 | */ |
Peter Zijlstra | 8936338 | 2011-04-05 17:23:42 +0200 | [diff] [blame] | 2656 | static void |
Peter Zijlstra | 23f41ee | 2011-04-05 17:23:56 +0200 | [diff] [blame] | 2657 | ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) |
Tejun Heo | 9ed3811 | 2009-12-03 15:08:03 +0900 | [diff] [blame] | 2658 | { |
Peter Zijlstra | 8936338 | 2011-04-05 17:23:42 +0200 | [diff] [blame] | 2659 | trace_sched_wakeup(p, true); |
Tejun Heo | 9ed3811 | 2009-12-03 15:08:03 +0900 | [diff] [blame] | 2660 | check_preempt_curr(rq, p, wake_flags); |
| 2661 | |
| 2662 | p->state = TASK_RUNNING; |
| 2663 | #ifdef CONFIG_SMP |
| 2664 | if (p->sched_class->task_woken) |
| 2665 | p->sched_class->task_woken(rq, p); |
| 2666 | |
Steven Rostedt | e69c634 | 2010-12-06 17:10:31 -0500 | [diff] [blame] | 2667 | if (rq->idle_stamp) { |
Tejun Heo | 9ed3811 | 2009-12-03 15:08:03 +0900 | [diff] [blame] | 2668 | u64 delta = rq->clock - rq->idle_stamp; |
| 2669 | u64 max = 2*sysctl_sched_migration_cost; |
| 2670 | |
| 2671 | if (delta > max) |
| 2672 | rq->avg_idle = max; |
| 2673 | else |
| 2674 | update_avg(&rq->avg_idle, delta); |
| 2675 | rq->idle_stamp = 0; |
| 2676 | } |
| 2677 | #endif |
| 2678 | } |
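| | /* |
| | * Note on the clamp above: with the default 0.5 ms migration cost, |
| | * avg_idle is capped at 1 ms per sample, so one long idle spell cannot |
| | * inflate the average enough to make every subsequent idle entry look |
| | * worth a full newidle balance pass. |
| | */ |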
| 2679 | |
Peter Zijlstra | c05fbaf | 2011-04-05 17:23:57 +0200 | [diff] [blame] | 2680 | static void |
| 2681 | ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags) |
| 2682 | { |
| 2683 | #ifdef CONFIG_SMP |
| 2684 | if (p->sched_contributes_to_load) |
| 2685 | rq->nr_uninterruptible--; |
| 2686 | #endif |
| 2687 | |
| 2688 | ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING); |
| 2689 | ttwu_do_wakeup(rq, p, wake_flags); |
| 2690 | } |
| 2691 | |
| 2692 | /* |
| 2693 | * Called when the task @p isn't fully descheduled from its runqueue; |
| 2694 | * in that case we must do a remote wakeup. It's a 'light' wakeup though, |
| 2695 | * since all we need to do is flip p->state to TASK_RUNNING; |
| 2696 | * the task is still ->on_rq. |
| 2697 | */ |
| 2698 | static int ttwu_remote(struct task_struct *p, int wake_flags) |
| 2699 | { |
| 2700 | struct rq *rq; |
| 2701 | int ret = 0; |
| 2702 | |
| 2703 | rq = __task_rq_lock(p); |
| 2704 | if (p->on_rq) { |
| 2705 | ttwu_do_wakeup(rq, p, wake_flags); |
| 2706 | ret = 1; |
| 2707 | } |
| 2708 | __task_rq_unlock(rq); |
| 2709 | |
| 2710 | return ret; |
| 2711 | } |
| 2712 | |
Peter Zijlstra | 317f394 | 2011-04-05 17:23:58 +0200 | [diff] [blame] | 2713 | #ifdef CONFIG_SMP |
Peter Zijlstra | fa14ff4 | 2011-09-12 13:06:17 +0200 | [diff] [blame] | 2714 | static void sched_ttwu_pending(void) |
Peter Zijlstra | 317f394 | 2011-04-05 17:23:58 +0200 | [diff] [blame] | 2715 | { |
| 2716 | struct rq *rq = this_rq(); |
Peter Zijlstra | fa14ff4 | 2011-09-12 13:06:17 +0200 | [diff] [blame] | 2717 | struct llist_node *llist = llist_del_all(&rq->wake_list); |
| 2718 | struct task_struct *p; |
Peter Zijlstra | 317f394 | 2011-04-05 17:23:58 +0200 | [diff] [blame] | 2719 | |
| 2720 | raw_spin_lock(&rq->lock); |
| 2721 | |
Peter Zijlstra | fa14ff4 | 2011-09-12 13:06:17 +0200 | [diff] [blame] | 2722 | while (llist) { |
| 2723 | p = llist_entry(llist, struct task_struct, wake_entry); |
| 2724 | llist = llist_next(llist); |
Peter Zijlstra | 317f394 | 2011-04-05 17:23:58 +0200 | [diff] [blame] | 2725 | ttwu_do_activate(rq, p, 0); |
| 2726 | } |
| 2727 | |
| 2728 | raw_spin_unlock(&rq->lock); |
| 2729 | } |
| 2730 | |
| 2731 | void scheduler_ipi(void) |
| 2732 | { |
Suresh Siddha | ca38062 | 2011-10-03 15:09:00 -0700 | [diff] [blame] | 2733 | if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick()) |
Peter Zijlstra | c5d753a | 2011-07-19 15:07:25 -0700 | [diff] [blame] | 2734 | return; |
| 2735 | |
| 2736 | /* |
| 2737 | * Not all reschedule IPI handlers call irq_enter/irq_exit, since |
| 2738 | * traditionally all their work was done from the interrupt return |
| 2739 | * path. Now that we actually do some work, we need to make sure |
| 2740 | * we do call them. |
| 2741 | * |
| 2742 | * Some archs already do call them, luckily irq_enter/exit nest |
| 2743 | * properly. |
| 2744 | * |
| 2745 | * Arguably we should visit all archs and update all handlers; |
| 2746 | * however, a fair share of IPIs are still resched-only, so this would |
| 2747 | * somewhat pessimize the simple resched case. |
| 2748 | */ |
| 2749 | irq_enter(); |
Peter Zijlstra | fa14ff4 | 2011-09-12 13:06:17 +0200 | [diff] [blame] | 2750 | sched_ttwu_pending(); |
Suresh Siddha | ca38062 | 2011-10-03 15:09:00 -0700 | [diff] [blame] | 2751 | |
| 2752 | /* |
| 2753 | * Check if someone kicked us for doing the nohz idle load balance. |
| 2754 | */ |
Suresh Siddha | 6eb57e0 | 2011-10-03 15:09:01 -0700 | [diff] [blame] | 2755 | if (unlikely(got_nohz_idle_kick() && !need_resched())) { |
| 2756 | this_rq()->idle_balance = 1; |
Suresh Siddha | ca38062 | 2011-10-03 15:09:00 -0700 | [diff] [blame] | 2757 | raise_softirq_irqoff(SCHED_SOFTIRQ); |
Suresh Siddha | 6eb57e0 | 2011-10-03 15:09:01 -0700 | [diff] [blame] | 2758 | } |
Peter Zijlstra | c5d753a | 2011-07-19 15:07:25 -0700 | [diff] [blame] | 2759 | irq_exit(); |
Peter Zijlstra | 317f394 | 2011-04-05 17:23:58 +0200 | [diff] [blame] | 2760 | } |
| 2761 | |
| 2762 | static void ttwu_queue_remote(struct task_struct *p, int cpu) |
| 2763 | { |
Peter Zijlstra | fa14ff4 | 2011-09-12 13:06:17 +0200 | [diff] [blame] | 2764 | if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) |
Peter Zijlstra | 317f394 | 2011-04-05 17:23:58 +0200 | [diff] [blame] | 2765 | smp_send_reschedule(cpu); |
| 2766 | } |
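| | /* |
| | * llist_add() returns true only when the list was previously empty, so |
| | * just the first waker targeting an idle wake_list pays for the |
| | * reschedule IPI; later wakers piggy-back on the already-pending |
| | * sched_ttwu_pending() run. |
| | */ |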
Peter Zijlstra | d6aa8f8 | 2011-05-26 14:21:33 +0200 | [diff] [blame] | 2767 | |
| 2768 | #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW |
| 2769 | static int ttwu_activate_remote(struct task_struct *p, int wake_flags) |
| 2770 | { |
| 2771 | struct rq *rq; |
| 2772 | int ret = 0; |
| 2773 | |
| 2774 | rq = __task_rq_lock(p); |
| 2775 | if (p->on_cpu) { |
| 2776 | ttwu_activate(rq, p, ENQUEUE_WAKEUP); |
| 2777 | ttwu_do_wakeup(rq, p, wake_flags); |
| 2778 | ret = 1; |
| 2779 | } |
| 2780 | __task_rq_unlock(rq); |
| 2781 | |
| 2782 | return ret; |
| 2783 | |
| 2784 | } |
| 2785 | #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ |
| 2786 | #endif /* CONFIG_SMP */ |
Peter Zijlstra | 317f394 | 2011-04-05 17:23:58 +0200 | [diff] [blame] | 2787 | |
Peter Zijlstra | c05fbaf | 2011-04-05 17:23:57 +0200 | [diff] [blame] | 2788 | static void ttwu_queue(struct task_struct *p, int cpu) |
| 2789 | { |
| 2790 | struct rq *rq = cpu_rq(cpu); |
| 2791 | |
Daniel Hellstrom | 17d9f31 | 2011-05-20 04:01:10 +0000 | [diff] [blame] | 2792 | #if defined(CONFIG_SMP) |
Peter Zijlstra | 317f394 | 2011-04-05 17:23:58 +0200 | [diff] [blame] | 2793 | if (sched_feat(TTWU_QUEUE) && cpu != smp_processor_id()) { |
Peter Zijlstra | f01114c | 2011-05-31 12:26:55 +0200 | [diff] [blame] | 2794 | sched_clock_cpu(cpu); /* sync clocks x-cpu */ |
Peter Zijlstra | 317f394 | 2011-04-05 17:23:58 +0200 | [diff] [blame] | 2795 | ttwu_queue_remote(p, cpu); |
| 2796 | return; |
| 2797 | } |
| 2798 | #endif |
| 2799 | |
Peter Zijlstra | c05fbaf | 2011-04-05 17:23:57 +0200 | [diff] [blame] | 2800 | raw_spin_lock(&rq->lock); |
| 2801 | ttwu_do_activate(rq, p, 0); |
| 2802 | raw_spin_unlock(&rq->lock); |
Tejun Heo | 9ed3811 | 2009-12-03 15:08:03 +0900 | [diff] [blame] | 2803 | } |
| 2804 | |
| 2805 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2806 | * try_to_wake_up - wake up a thread |
Tejun Heo | 9ed3811 | 2009-12-03 15:08:03 +0900 | [diff] [blame] | 2807 | * @p: the thread to be awakened |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2808 | * @state: the mask of task states that can be woken |
Tejun Heo | 9ed3811 | 2009-12-03 15:08:03 +0900 | [diff] [blame] | 2809 | * @wake_flags: wake modifier flags (WF_*) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2810 | * |
| 2811 | * Put it on the run-queue if it's not already there. The "current" |
| 2812 | * thread is always on the run-queue (except when the actual |
| 2813 | * re-schedule is in progress), and as such you're allowed to do |
| 2814 | * the simpler "current->state = TASK_RUNNING" to mark yourself |
| 2815 | * runnable without the overhead of this. |
| 2816 | * |
Tejun Heo | 9ed3811 | 2009-12-03 15:08:03 +0900 | [diff] [blame] | 2817 | * Returns %true if @p was woken up, %false if it was already running |
| 2818 | * or @state didn't match @p's state. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2819 | */ |
Peter Zijlstra | e4a52bc | 2011-04-05 17:23:54 +0200 | [diff] [blame] | 2820 | static int |
| 2821 | try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2822 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2823 | unsigned long flags; |
Peter Zijlstra | c05fbaf | 2011-04-05 17:23:57 +0200 | [diff] [blame] | 2824 | int cpu, success = 0; |
Peter Zijlstra | 2398f2c | 2008-06-27 13:41:35 +0200 | [diff] [blame] | 2825 | |
Linus Torvalds | 04e2f17 | 2008-02-23 18:05:03 -0800 | [diff] [blame] | 2826 | smp_wmb(); |
Peter Zijlstra | 013fdb8 | 2011-04-05 17:23:45 +0200 | [diff] [blame] | 2827 | raw_spin_lock_irqsave(&p->pi_lock, flags); |
Peter Zijlstra | e9c8431 | 2009-09-15 14:43:03 +0200 | [diff] [blame] | 2828 | if (!(p->state & state)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2829 | goto out; |
| 2830 | |
Peter Zijlstra | c05fbaf | 2011-04-05 17:23:57 +0200 | [diff] [blame] | 2831 | success = 1; /* we're going to change ->state */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2832 | cpu = task_cpu(p); |
Peter Zijlstra | d7c01d2 | 2011-04-05 17:23:43 +0200 | [diff] [blame] | 2833 | |
Peter Zijlstra | c05fbaf | 2011-04-05 17:23:57 +0200 | [diff] [blame] | 2834 | if (p->on_rq && ttwu_remote(p, wake_flags)) |
| 2835 | goto stat; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2836 | |
| 2837 | #ifdef CONFIG_SMP |
Peter Zijlstra | e9c8431 | 2009-09-15 14:43:03 +0200 | [diff] [blame] | 2838 | /* |
Peter Zijlstra | c05fbaf | 2011-04-05 17:23:57 +0200 | [diff] [blame] | 2839 | * If the owning (remote) cpu is still in the middle of schedule() with |
| 2840 | * this task as prev, wait until it's done referencing the task. |
Peter Zijlstra | e9c8431 | 2009-09-15 14:43:03 +0200 | [diff] [blame] | 2841 | */ |
Peter Zijlstra | e4a52bc | 2011-04-05 17:23:54 +0200 | [diff] [blame] | 2842 | while (p->on_cpu) { |
| 2843 | #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW |
| 2844 | /* |
Peter Zijlstra | d6aa8f8 | 2011-05-26 14:21:33 +0200 | [diff] [blame] | 2845 | * In case the architecture enables interrupts in |
| 2846 | * context_switch(), we cannot busy wait, since that |
| 2847 | * would lead to deadlocks when an interrupt hits and |
| 2848 | * tries to wake up @prev. So bail and do a complete |
| 2849 | * remote wakeup. |
Peter Zijlstra | e4a52bc | 2011-04-05 17:23:54 +0200 | [diff] [blame] | 2850 | */ |
Peter Zijlstra | d6aa8f8 | 2011-05-26 14:21:33 +0200 | [diff] [blame] | 2851 | if (ttwu_activate_remote(p, wake_flags)) |
Peter Zijlstra | c05fbaf | 2011-04-05 17:23:57 +0200 | [diff] [blame] | 2852 | goto stat; |
Peter Zijlstra | d6aa8f8 | 2011-05-26 14:21:33 +0200 | [diff] [blame] | 2853 | #else |
Peter Zijlstra | e4a52bc | 2011-04-05 17:23:54 +0200 | [diff] [blame] | 2854 | cpu_relax(); |
Peter Zijlstra | d6aa8f8 | 2011-05-26 14:21:33 +0200 | [diff] [blame] | 2855 | #endif |
Peter Zijlstra | cc87f76 | 2010-03-26 12:22:14 +0100 | [diff] [blame] | 2856 | } |
Peter Zijlstra | e4a52bc | 2011-04-05 17:23:54 +0200 | [diff] [blame] | 2857 | /* |
| 2858 | * Pairs with the smp_wmb() in finish_lock_switch(). |
| 2859 | */ |
| 2860 | smp_rmb(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2861 | |
Peter Zijlstra | a8e4f2e | 2011-04-05 17:23:49 +0200 | [diff] [blame] | 2862 | p->sched_contributes_to_load = !!task_contributes_to_load(p); |
Peter Zijlstra | e9c8431 | 2009-09-15 14:43:03 +0200 | [diff] [blame] | 2863 | p->state = TASK_WAKING; |
Peter Zijlstra | efbbd05 | 2009-12-16 18:04:40 +0100 | [diff] [blame] | 2864 | |
Peter Zijlstra | e4a52bc | 2011-04-05 17:23:54 +0200 | [diff] [blame] | 2865 | if (p->sched_class->task_waking) |
Peter Zijlstra | 74f8e4b | 2011-04-05 17:23:47 +0200 | [diff] [blame] | 2866 | p->sched_class->task_waking(p); |
Peter Zijlstra | ab19cb2 | 2009-11-27 15:44:43 +0100 | [diff] [blame] | 2867 | |
Peter Zijlstra | 7608dec | 2011-04-05 17:23:46 +0200 | [diff] [blame] | 2868 | cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags); |
Peter Zijlstra | f339b9d | 2011-05-31 10:49:20 +0200 | [diff] [blame] | 2869 | if (task_cpu(p) != cpu) { |
| 2870 | wake_flags |= WF_MIGRATED; |
Mike Galbraith | f5dc375 | 2009-10-09 08:35:03 +0200 | [diff] [blame] | 2871 | set_task_cpu(p, cpu); |
Peter Zijlstra | f339b9d | 2011-05-31 10:49:20 +0200 | [diff] [blame] | 2872 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2873 | #endif /* CONFIG_SMP */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2874 | |
Peter Zijlstra | c05fbaf | 2011-04-05 17:23:57 +0200 | [diff] [blame] | 2875 | ttwu_queue(p, cpu); |
| 2876 | stat: |
Peter Zijlstra | b84cb5d | 2011-04-05 17:23:55 +0200 | [diff] [blame] | 2877 | ttwu_stat(p, cpu, wake_flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2878 | out: |
Peter Zijlstra | 013fdb8 | 2011-04-05 17:23:45 +0200 | [diff] [blame] | 2879 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2880 | |
| 2881 | return success; |
| 2882 | } |
| 2883 | |
David Howells | 50fa610 | 2009-04-28 15:01:38 +0100 | [diff] [blame] | 2884 | /** |
Tejun Heo | 21aa9af | 2010-06-08 21:40:37 +0200 | [diff] [blame] | 2885 | * try_to_wake_up_local - try to wake up a local task with rq lock held |
| 2886 | * @p: the thread to be awakened |
| 2887 | * |
Peter Zijlstra | 2acca55 | 2011-04-05 17:23:50 +0200 | [diff] [blame] | 2888 | * Put @p on the run-queue if it's not already there. The caller must |
Tejun Heo | 21aa9af | 2010-06-08 21:40:37 +0200 | [diff] [blame] | 2889 | * ensure that this_rq() is locked, @p is bound to this_rq() and not |
Peter Zijlstra | 2acca55 | 2011-04-05 17:23:50 +0200 | [diff] [blame] | 2890 | * the current task. |
Tejun Heo | 21aa9af | 2010-06-08 21:40:37 +0200 | [diff] [blame] | 2891 | */ |
| 2892 | static void try_to_wake_up_local(struct task_struct *p) |
| 2893 | { |
| 2894 | struct rq *rq = task_rq(p); |
Tejun Heo | 21aa9af | 2010-06-08 21:40:37 +0200 | [diff] [blame] | 2895 | |
| 2896 | BUG_ON(rq != this_rq()); |
| 2897 | BUG_ON(p == current); |
| 2898 | lockdep_assert_held(&rq->lock); |
| 2899 | |
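	/*
	 * Lock ordering is p->pi_lock first, then rq->lock. If the
	 * trylock fails, drop rq->lock and retake both in that order;
	 * the state and on_rq tests below re-validate @p afterwards.
	 */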
Peter Zijlstra | 2acca55 | 2011-04-05 17:23:50 +0200 | [diff] [blame] | 2900 | if (!raw_spin_trylock(&p->pi_lock)) { |
| 2901 | raw_spin_unlock(&rq->lock); |
| 2902 | raw_spin_lock(&p->pi_lock); |
| 2903 | raw_spin_lock(&rq->lock); |
Tejun Heo | 21aa9af | 2010-06-08 21:40:37 +0200 | [diff] [blame] | 2904 | } |
Peter Zijlstra | 2acca55 | 2011-04-05 17:23:50 +0200 | [diff] [blame] | 2905 | |
Tejun Heo | 21aa9af | 2010-06-08 21:40:37 +0200 | [diff] [blame] | 2906 | if (!(p->state & TASK_NORMAL)) |
Peter Zijlstra | 2acca55 | 2011-04-05 17:23:50 +0200 | [diff] [blame] | 2907 | goto out; |
Tejun Heo | 21aa9af | 2010-06-08 21:40:37 +0200 | [diff] [blame] | 2908 | |
Peter Zijlstra | fd2f441 | 2011-04-05 17:23:44 +0200 | [diff] [blame] | 2909 | if (!p->on_rq) |
Peter Zijlstra | d7c01d2 | 2011-04-05 17:23:43 +0200 | [diff] [blame] | 2910 | ttwu_activate(rq, p, ENQUEUE_WAKEUP); |
| 2911 | |
Peter Zijlstra | 23f41ee | 2011-04-05 17:23:56 +0200 | [diff] [blame] | 2912 | ttwu_do_wakeup(rq, p, 0); |
Peter Zijlstra | b84cb5d | 2011-04-05 17:23:55 +0200 | [diff] [blame] | 2913 | ttwu_stat(p, smp_processor_id(), 0); |
Peter Zijlstra | 2acca55 | 2011-04-05 17:23:50 +0200 | [diff] [blame] | 2914 | out: |
| 2915 | raw_spin_unlock(&p->pi_lock); |
Tejun Heo | 21aa9af | 2010-06-08 21:40:37 +0200 | [diff] [blame] | 2916 | } |
| 2917 | |
| 2918 | /** |
David Howells | 50fa610 | 2009-04-28 15:01:38 +0100 | [diff] [blame] | 2919 | * wake_up_process - Wake up a specific process |
| 2920 | * @p: The process to be woken up. |
| 2921 | * |
| 2922 | * Attempt to wake up the nominated process and move it to the set of runnable |
| 2923 | * processes. Returns 1 if the process was woken up, 0 if it was already |
| 2924 | * running. |
| 2925 | * |
| 2926 | * It may be assumed that this function implies a write memory barrier before |
| 2927 | * changing the task state if and only if any tasks are woken up. |
| 2928 | */ |
Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 2929 | int wake_up_process(struct task_struct *p) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2930 | { |
Matthew Wilcox | d9514f6 | 2007-12-06 11:07:07 -0500 | [diff] [blame] | 2931 | return try_to_wake_up(p, TASK_ALL, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2932 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2933 | EXPORT_SYMBOL(wake_up_process); |
| 2934 | |
Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 2935 | int wake_up_state(struct task_struct *p, unsigned int state) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2936 | { |
| 2937 | return try_to_wake_up(p, state, 0); |
| 2938 | } |
| 2939 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2940 | /* |
| 2941 | * Perform scheduler related setup for a newly forked process p. |
| 2942 | * p is forked by current. |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2943 | * |
| 2944 | * __sched_fork() is basic setup used by init_idle() too: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2945 | */ |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2946 | static void __sched_fork(struct task_struct *p) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2947 | { |
Peter Zijlstra | fd2f441 | 2011-04-05 17:23:44 +0200 | [diff] [blame] | 2948 | p->on_rq = 0; |
| 2949 | |
| 2950 | p->se.on_rq = 0; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2951 | p->se.exec_start = 0; |
| 2952 | p->se.sum_exec_runtime = 0; |
Ingo Molnar | f6cf891 | 2007-08-28 12:53:24 +0200 | [diff] [blame] | 2953 | p->se.prev_sum_exec_runtime = 0; |
Ingo Molnar | 6c594c2 | 2008-12-14 12:34:15 +0100 | [diff] [blame] | 2954 | p->se.nr_migrations = 0; |
Peter Zijlstra | da7a735 | 2011-01-17 17:03:27 +0100 | [diff] [blame] | 2955 | p->se.vruntime = 0; |
Peter Zijlstra | fd2f441 | 2011-04-05 17:23:44 +0200 | [diff] [blame] | 2956 | INIT_LIST_HEAD(&p->se.group_node); |
Ingo Molnar | 6cfb0d5 | 2007-08-02 17:41:40 +0200 | [diff] [blame] | 2957 | |
| 2958 | #ifdef CONFIG_SCHEDSTATS |
Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 2959 | memset(&p->se.statistics, 0, sizeof(p->se.statistics)); |
Ingo Molnar | 6cfb0d5 | 2007-08-02 17:41:40 +0200 | [diff] [blame] | 2960 | #endif |
Nick Piggin | 476d139 | 2005-06-25 14:57:29 -0700 | [diff] [blame] | 2961 | |
Peter Zijlstra | fa71706 | 2008-01-25 21:08:27 +0100 | [diff] [blame] | 2962 | INIT_LIST_HEAD(&p->rt.run_list); |
Nick Piggin | 476d139 | 2005-06-25 14:57:29 -0700 | [diff] [blame] | 2963 | |
Avi Kivity | e107be3 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 2964 | #ifdef CONFIG_PREEMPT_NOTIFIERS |
| 2965 | INIT_HLIST_HEAD(&p->preempt_notifiers); |
| 2966 | #endif |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2967 | } |
| 2968 | |
| 2969 | /* |
| 2970 | * fork()/clone()-time setup: |
| 2971 | */ |
Samir Bellabes | 3e51e3e | 2011-05-11 18:18:05 +0200 | [diff] [blame] | 2972 | void sched_fork(struct task_struct *p) |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2973 | { |
Peter Zijlstra | 0122ec5 | 2011-04-05 17:23:51 +0200 | [diff] [blame] | 2974 | unsigned long flags; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2975 | int cpu = get_cpu(); |
| 2976 | |
| 2977 | __sched_fork(p); |
Peter Zijlstra | 06b83b5 | 2009-12-16 18:04:35 +0100 | [diff] [blame] | 2978 | /* |
Peter Zijlstra | 0017d73 | 2010-03-24 18:34:10 +0100 | [diff] [blame] | 2979 | * We mark the process as running here, before it has ever run. This
Peter Zijlstra | 06b83b5 | 2009-12-16 18:04:35 +0100 | [diff] [blame] | 2980 | * guarantees that nobody will actually run it, and that a signal or other
| 2981 | * external event cannot wake it up and insert it on the runqueue either.
| 2982 | */ |
Peter Zijlstra | 0017d73 | 2010-03-24 18:34:10 +0100 | [diff] [blame] | 2983 | p->state = TASK_RUNNING; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2984 | |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 2985 | /* |
Mike Galbraith | c350a04 | 2011-07-27 17:14:55 +0200 | [diff] [blame] | 2986 | * Make sure we do not leak PI boosting priority to the child. |
| 2987 | */ |
| 2988 | p->prio = current->normal_prio; |
| 2989 | |
| 2990 | /* |
Mike Galbraith | b9dc29e | 2009-06-17 10:46:01 +0200 | [diff] [blame] | 2991 | * Revert to default priority/policy on fork if requested. |
| 2992 | */ |
| 2993 | if (unlikely(p->sched_reset_on_fork)) { |
Mike Galbraith | c350a04 | 2011-07-27 17:14:55 +0200 | [diff] [blame] | 2994 | if (task_has_rt_policy(p)) { |
Mike Galbraith | b9dc29e | 2009-06-17 10:46:01 +0200 | [diff] [blame] | 2995 | p->policy = SCHED_NORMAL; |
Mike Galbraith | 6c697bd | 2009-06-17 10:48:02 +0200 | [diff] [blame] | 2996 | p->static_prio = NICE_TO_PRIO(0); |
Mike Galbraith | c350a04 | 2011-07-27 17:14:55 +0200 | [diff] [blame] | 2997 | p->rt_priority = 0; |
| 2998 | } else if (PRIO_TO_NICE(p->static_prio) < 0) |
| 2999 | p->static_prio = NICE_TO_PRIO(0); |
| 3000 | |
| 3001 | p->prio = p->normal_prio = __normal_prio(p); |
| 3002 | set_load_weight(p); |
Mike Galbraith | 6c697bd | 2009-06-17 10:48:02 +0200 | [diff] [blame] | 3003 | |
Mike Galbraith | b9dc29e | 2009-06-17 10:46:01 +0200 | [diff] [blame] | 3004 | /* |
| 3005 | * We don't need the reset flag anymore after the fork. It has |
| 3006 | * fulfilled its duty: |
| 3007 | */ |
| 3008 | p->sched_reset_on_fork = 0; |
| 3009 | } |
Lennart Poettering | ca94c44 | 2009-06-15 17:17:47 +0200 | [diff] [blame] | 3010 | |
Hiroshi Shimamoto | 2ddbf95 | 2007-10-15 17:00:11 +0200 | [diff] [blame] | 3011 | if (!rt_prio(p->prio)) |
| 3012 | p->sched_class = &fair_sched_class; |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 3013 | |
Peter Zijlstra | cd29fe6 | 2009-11-27 17:32:46 +0100 | [diff] [blame] | 3014 | if (p->sched_class->task_fork) |
| 3015 | p->sched_class->task_fork(p); |
| 3016 | |
Peter Zijlstra | 8695159 | 2010-06-22 11:44:53 +0200 | [diff] [blame] | 3017 | /* |
| 3018 | * The child is not yet in the pid-hash so no cgroup attach races, |
| 3019 | * and the cgroup is pinned to this child because cgroup_fork()
| 3020 | * is run before sched_fork().
| 3021 | * |
| 3022 | * Silence PROVE_RCU. |
| 3023 | */ |
Peter Zijlstra | 0122ec5 | 2011-04-05 17:23:51 +0200 | [diff] [blame] | 3024 | raw_spin_lock_irqsave(&p->pi_lock, flags); |
Peter Zijlstra | 5f3edc1 | 2009-09-10 13:42:00 +0200 | [diff] [blame] | 3025 | set_task_cpu(p, cpu); |
Peter Zijlstra | 0122ec5 | 2011-04-05 17:23:51 +0200 | [diff] [blame] | 3026 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
Peter Zijlstra | 5f3edc1 | 2009-09-10 13:42:00 +0200 | [diff] [blame] | 3027 | |
Chandra Seetharaman | 52f17b6 | 2006-07-14 00:24:38 -0700 | [diff] [blame] | 3028 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3029 | if (likely(sched_info_on())) |
Chandra Seetharaman | 52f17b6 | 2006-07-14 00:24:38 -0700 | [diff] [blame] | 3030 | memset(&p->sched_info, 0, sizeof(p->sched_info)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3031 | #endif |
Peter Zijlstra | 3ca7a44 | 2011-04-05 17:23:40 +0200 | [diff] [blame] | 3032 | #if defined(CONFIG_SMP) |
| 3033 | p->on_cpu = 0; |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 3034 | #endif |
Frederic Weisbecker | bdd4e85 | 2011-06-08 01:13:27 +0200 | [diff] [blame] | 3035 | #ifdef CONFIG_PREEMPT_COUNT |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 3036 | /* Want to start with kernel preemption disabled. */ |
Al Viro | a1261f5 | 2005-11-13 16:06:55 -0800 | [diff] [blame] | 3037 | task_thread_info(p)->preempt_count = 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3038 | #endif |
Dario Faggioli | 806c09a | 2010-11-30 19:51:33 +0100 | [diff] [blame] | 3039 | #ifdef CONFIG_SMP |
Gregory Haskins | 917b627 | 2008-12-29 09:39:53 -0500 | [diff] [blame] | 3040 | plist_node_init(&p->pushable_tasks, MAX_PRIO); |
Dario Faggioli | 806c09a | 2010-11-30 19:51:33 +0100 | [diff] [blame] | 3041 | #endif |
Gregory Haskins | 917b627 | 2008-12-29 09:39:53 -0500 | [diff] [blame] | 3042 | |
Nick Piggin | 476d139 | 2005-06-25 14:57:29 -0700 | [diff] [blame] | 3043 | put_cpu(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3044 | } |
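/*
 * Call-flow sketch (illustrative): copy_process() invokes sched_fork()
 * while the child is still invisible to the rest of the system; once
 * setup completes, do_fork() calls wake_up_new_task() below to place
 * the child on a runqueue for the first time.
 */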
| 3045 | |
| 3046 | /* |
| 3047 | * wake_up_new_task - wake up a newly created task for the first time. |
| 3048 | * |
| 3049 | * This function will do some initial scheduler statistics housekeeping |
| 3050 | * that must be done for every newly created context, then puts the task |
| 3051 | * on the runqueue and wakes it. |
| 3052 | */ |
Samir Bellabes | 3e51e3e | 2011-05-11 18:18:05 +0200 | [diff] [blame] | 3053 | void wake_up_new_task(struct task_struct *p) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3054 | { |
| 3055 | unsigned long flags; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3056 | struct rq *rq; |
Peter Zijlstra | fabf318 | 2010-01-21 21:04:57 +0100 | [diff] [blame] | 3057 | |
Peter Zijlstra | ab2515c | 2011-04-05 17:23:52 +0200 | [diff] [blame] | 3058 | raw_spin_lock_irqsave(&p->pi_lock, flags); |
Peter Zijlstra | fabf318 | 2010-01-21 21:04:57 +0100 | [diff] [blame] | 3059 | #ifdef CONFIG_SMP |
| 3060 | /* |
| 3061 | * Fork balancing, do it here and not earlier because: |
| 3062 | * - cpus_allowed can change in the fork path |
| 3063 | * - any previously selected cpu might disappear through hotplug |
Peter Zijlstra | fabf318 | 2010-01-21 21:04:57 +0100 | [diff] [blame] | 3064 | */ |
Peter Zijlstra | ab2515c | 2011-04-05 17:23:52 +0200 | [diff] [blame] | 3065 | set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0)); |
Peter Zijlstra | fabf318 | 2010-01-21 21:04:57 +0100 | [diff] [blame] | 3066 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3067 | |
Peter Zijlstra | ab2515c | 2011-04-05 17:23:52 +0200 | [diff] [blame] | 3068 | rq = __task_rq_lock(p); |
Peter Zijlstra | cd29fe6 | 2009-11-27 17:32:46 +0100 | [diff] [blame] | 3069 | activate_task(rq, p, 0); |
Peter Zijlstra | fd2f441 | 2011-04-05 17:23:44 +0200 | [diff] [blame] | 3070 | p->on_rq = 1; |
Peter Zijlstra | 8936338 | 2011-04-05 17:23:42 +0200 | [diff] [blame] | 3071 | trace_sched_wakeup_new(p, true); |
Peter Zijlstra | a7558e0 | 2009-09-14 20:02:34 +0200 | [diff] [blame] | 3072 | check_preempt_curr(rq, p, WF_FORK); |
Steven Rostedt | 9a897c5 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 3073 | #ifdef CONFIG_SMP |
Peter Zijlstra | efbbd05 | 2009-12-16 18:04:40 +0100 | [diff] [blame] | 3074 | if (p->sched_class->task_woken) |
| 3075 | p->sched_class->task_woken(rq, p); |
Steven Rostedt | 9a897c5 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 3076 | #endif |
Peter Zijlstra | 0122ec5 | 2011-04-05 17:23:51 +0200 | [diff] [blame] | 3077 | task_rq_unlock(rq, p, &flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3078 | } |
| 3079 | |
Avi Kivity | e107be3 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 3080 | #ifdef CONFIG_PREEMPT_NOTIFIERS |
| 3081 | |
| 3082 | /** |
Luis Henriques | 80dd99b | 2009-03-16 19:58:09 +0000 | [diff] [blame] | 3083 | * preempt_notifier_register - tell me when current is being preempted & rescheduled |
Randy Dunlap | 421cee2 | 2007-07-31 00:37:50 -0700 | [diff] [blame] | 3084 | * @notifier: notifier struct to register |
Avi Kivity | e107be3 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 3085 | */ |
| 3086 | void preempt_notifier_register(struct preempt_notifier *notifier) |
| 3087 | { |
| 3088 | hlist_add_head(¬ifier->link, ¤t->preempt_notifiers); |
| 3089 | } |
| 3090 | EXPORT_SYMBOL_GPL(preempt_notifier_register); |
| 3091 | |
| 3092 | /** |
| 3093 | * preempt_notifier_unregister - no longer interested in preemption notifications |
Randy Dunlap | 421cee2 | 2007-07-31 00:37:50 -0700 | [diff] [blame] | 3094 | * @notifier: notifier struct to unregister |
Avi Kivity | e107be3 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 3095 | * |
| 3096 | * This is safe to call from within a preemption notifier. |
| 3097 | */ |
| 3098 | void preempt_notifier_unregister(struct preempt_notifier *notifier) |
| 3099 | { |
| 3100 | hlist_del(¬ifier->link); |
| 3101 | } |
| 3102 | EXPORT_SYMBOL_GPL(preempt_notifier_unregister); |
| 3103 | |
| 3104 | static void fire_sched_in_preempt_notifiers(struct task_struct *curr) |
| 3105 | { |
| 3106 | struct preempt_notifier *notifier; |
| 3107 | struct hlist_node *node; |
| 3108 | |
| 3109 | hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link) |
| 3110 | notifier->ops->sched_in(notifier, raw_smp_processor_id()); |
| 3111 | } |
| 3112 | |
| 3113 | static void |
| 3114 | fire_sched_out_preempt_notifiers(struct task_struct *curr, |
| 3115 | struct task_struct *next) |
| 3116 | { |
| 3117 | struct preempt_notifier *notifier; |
| 3118 | struct hlist_node *node; |
| 3119 | |
| 3120 | hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link) |
| 3121 | notifier->ops->sched_out(notifier, next); |
| 3122 | } |
| 3123 | |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 3124 | #else /* !CONFIG_PREEMPT_NOTIFIERS */ |
Avi Kivity | e107be3 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 3125 | |
| 3126 | static void fire_sched_in_preempt_notifiers(struct task_struct *curr) |
| 3127 | { |
| 3128 | } |
| 3129 | |
| 3130 | static void |
| 3131 | fire_sched_out_preempt_notifiers(struct task_struct *curr, |
| 3132 | struct task_struct *next) |
| 3133 | { |
| 3134 | } |
| 3135 | |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 3136 | #endif /* CONFIG_PREEMPT_NOTIFIERS */ |
Avi Kivity | e107be3 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 3137 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3138 | /** |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 3139 | * prepare_task_switch - prepare to switch tasks |
| 3140 | * @rq: the runqueue preparing to switch |
Randy Dunlap | 421cee2 | 2007-07-31 00:37:50 -0700 | [diff] [blame] | 3141 | * @prev: the current task that is being switched out |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 3142 | * @next: the task we are going to switch to. |
| 3143 | * |
| 3144 | * This is called with the rq lock held and interrupts off. It must |
| 3145 | * be paired with a subsequent finish_task_switch after the context |
| 3146 | * switch. |
| 3147 | * |
| 3148 | * prepare_task_switch sets up locking and calls architecture specific |
| 3149 | * hooks. |
| 3150 | */ |
Avi Kivity | e107be3 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 3151 | static inline void |
| 3152 | prepare_task_switch(struct rq *rq, struct task_struct *prev, |
| 3153 | struct task_struct *next) |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 3154 | { |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 3155 | sched_info_switch(prev, next); |
| 3156 | perf_event_task_sched_out(prev, next); |
Avi Kivity | e107be3 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 3157 | fire_sched_out_preempt_notifiers(prev, next); |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 3158 | prepare_lock_switch(rq, next); |
| 3159 | prepare_arch_switch(next); |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 3160 | trace_sched_switch(prev, next); |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 3161 | } |
| 3162 | |
| 3163 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3164 | * finish_task_switch - clean up after a task-switch |
Jeff Garzik | 344baba | 2005-09-07 01:15:17 -0400 | [diff] [blame] | 3165 | * @rq: runqueue associated with task-switch |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3166 | * @prev: the thread we just switched away from. |
| 3167 | * |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 3168 | * finish_task_switch must be called after the context switch, paired |
| 3169 | * with a prepare_task_switch call before the context switch. |
| 3170 | * finish_task_switch will reconcile locking set up by prepare_task_switch, |
| 3171 | * and do any other architecture-specific cleanup actions. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3172 | * |
| 3173 | * Note that we may have delayed dropping an mm in context_switch(). If |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 3174 | * so, we finish that here outside of the runqueue lock. (Doing it |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3175 | * with the lock held can cause deadlocks; see schedule() for |
| 3176 | * details.) |
| 3177 | */ |
Alexey Dobriyan | a995744 | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 3178 | static void finish_task_switch(struct rq *rq, struct task_struct *prev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3179 | __releases(rq->lock) |
| 3180 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3181 | struct mm_struct *mm = rq->prev_mm; |
Oleg Nesterov | 55a101f | 2006-09-29 02:01:10 -0700 | [diff] [blame] | 3182 | long prev_state; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3183 | |
| 3184 | rq->prev_mm = NULL; |
| 3185 | |
| 3186 | /* |
| 3187 | * A task struct has one reference for its use as "current".
Oleg Nesterov | c394cc9 | 2006-09-29 02:01:11 -0700 | [diff] [blame] | 3188 | * If a task dies, then it sets TASK_DEAD in tsk->state and calls |
Oleg Nesterov | 55a101f | 2006-09-29 02:01:10 -0700 | [diff] [blame] | 3189 | * schedule one last time. The schedule call will never return, and |
| 3190 | * the scheduled task must drop that reference. |
Oleg Nesterov | c394cc9 | 2006-09-29 02:01:11 -0700 | [diff] [blame] | 3191 | * The test for TASK_DEAD must occur while the runqueue locks are |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3192 | * still held, otherwise prev could be scheduled on another cpu, die |
| 3193 | * there before we look at prev->state, and then the reference would |
| 3194 | * be dropped twice. |
| 3195 | * Manfred Spraul <manfred@colorfullife.com> |
| 3196 | */ |
Oleg Nesterov | 55a101f | 2006-09-29 02:01:10 -0700 | [diff] [blame] | 3197 | prev_state = prev->state; |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 3198 | finish_arch_switch(prev); |
Jamie Iles | 8381f65 | 2010-01-08 15:27:33 +0000 | [diff] [blame] | 3199 | #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW |
| 3200 | local_irq_disable(); |
| 3201 | #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ |
Stephane Eranian | a8d757e | 2011-08-25 15:58:03 +0200 | [diff] [blame] | 3202 | perf_event_task_sched_in(prev, current); |
Jamie Iles | 8381f65 | 2010-01-08 15:27:33 +0000 | [diff] [blame] | 3203 | #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW |
| 3204 | local_irq_enable(); |
| 3205 | #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 3206 | finish_lock_switch(rq, prev); |
Steven Rostedt | e8fa136 | 2008-01-25 21:08:05 +0100 | [diff] [blame] | 3207 | |
Avi Kivity | e107be3 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 3208 | fire_sched_in_preempt_notifiers(current); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3209 | if (mm) |
| 3210 | mmdrop(mm); |
Oleg Nesterov | c394cc9 | 2006-09-29 02:01:11 -0700 | [diff] [blame] | 3211 | if (unlikely(prev_state == TASK_DEAD)) { |
bibo mao | c6fd91f | 2006-03-26 01:38:20 -0800 | [diff] [blame] | 3212 | /* |
| 3213 | * Remove function-return probe instances associated with this |
| 3214 | * task and put them back on the free list. |
Ingo Molnar | 9761eea | 2007-07-09 18:52:00 +0200 | [diff] [blame] | 3215 | */ |
bibo mao | c6fd91f | 2006-03-26 01:38:20 -0800 | [diff] [blame] | 3216 | kprobe_flush_task(prev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3217 | put_task_struct(prev); |
bibo mao | c6fd91f | 2006-03-26 01:38:20 -0800 | [diff] [blame] | 3218 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3219 | } |
| 3220 | |
Gregory Haskins | 3f029d3 | 2009-07-29 11:08:47 -0400 | [diff] [blame] | 3221 | #ifdef CONFIG_SMP |
| 3222 | |
| 3223 | /* assumes rq->lock is held */ |
| 3224 | static inline void pre_schedule(struct rq *rq, struct task_struct *prev) |
| 3225 | { |
| 3226 | if (prev->sched_class->pre_schedule) |
| 3227 | prev->sched_class->pre_schedule(rq, prev); |
| 3228 | } |
| 3229 | |
| 3230 | /* rq->lock is NOT held, but preemption is disabled */ |
| 3231 | static inline void post_schedule(struct rq *rq) |
| 3232 | { |
| 3233 | if (rq->post_schedule) { |
| 3234 | unsigned long flags; |
| 3235 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 3236 | raw_spin_lock_irqsave(&rq->lock, flags); |
Gregory Haskins | 3f029d3 | 2009-07-29 11:08:47 -0400 | [diff] [blame] | 3237 | if (rq->curr->sched_class->post_schedule) |
| 3238 | rq->curr->sched_class->post_schedule(rq); |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 3239 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
Gregory Haskins | 3f029d3 | 2009-07-29 11:08:47 -0400 | [diff] [blame] | 3240 | |
| 3241 | rq->post_schedule = 0; |
| 3242 | } |
| 3243 | } |
| 3244 | |
| 3245 | #else |
| 3246 | |
| 3247 | static inline void pre_schedule(struct rq *rq, struct task_struct *p) |
| 3248 | { |
| 3249 | } |
| 3250 | |
| 3251 | static inline void post_schedule(struct rq *rq) |
| 3252 | { |
| 3253 | } |
| 3254 | |
| 3255 | #endif |
| 3256 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3257 | /** |
| 3258 | * schedule_tail - first thing a freshly forked thread must call. |
| 3259 | * @prev: the thread we just switched away from. |
| 3260 | */ |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 3261 | asmlinkage void schedule_tail(struct task_struct *prev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3262 | __releases(rq->lock) |
| 3263 | { |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 3264 | struct rq *rq = this_rq(); |
| 3265 | |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 3266 | finish_task_switch(rq, prev); |
Steven Rostedt | da19ab5 | 2009-07-29 00:21:22 -0400 | [diff] [blame] | 3267 | |
Gregory Haskins | 3f029d3 | 2009-07-29 11:08:47 -0400 | [diff] [blame] | 3268 | /* |
| 3269 | * FIXME: do we need to worry about rq being invalidated by the |
| 3270 | * task_switch? |
| 3271 | */ |
| 3272 | post_schedule(rq); |
Steven Rostedt | da19ab5 | 2009-07-29 00:21:22 -0400 | [diff] [blame] | 3273 | |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 3274 | #ifdef __ARCH_WANT_UNLOCKED_CTXSW |
| 3275 | /* In this case, finish_task_switch does not reenable preemption */ |
| 3276 | preempt_enable(); |
| 3277 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3278 | if (current->set_child_tid) |
Pavel Emelyanov | b488893 | 2007-10-18 23:40:14 -0700 | [diff] [blame] | 3279 | put_user(task_pid_vnr(current), current->set_child_tid); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3280 | } |
| 3281 | |
| 3282 | /* |
| 3283 | * context_switch - switch to the new MM and the new |
| 3284 | * thread's register state. |
| 3285 | */ |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3286 | static inline void |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 3287 | context_switch(struct rq *rq, struct task_struct *prev, |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 3288 | struct task_struct *next) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3289 | { |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3290 | struct mm_struct *mm, *oldmm; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3291 | |
Avi Kivity | e107be3 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 3292 | prepare_task_switch(rq, prev, next); |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 3293 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3294 | mm = next->mm; |
| 3295 | oldmm = prev->active_mm; |
Zachary Amsden | 9226d12 | 2007-02-13 13:26:21 +0100 | [diff] [blame] | 3296 | /* |
| 3297 | * For paravirt, this is coupled with an exit in switch_to to |
| 3298 | * combine the page table reload and the switch backend into |
| 3299 | * one hypercall. |
| 3300 | */ |
Jeremy Fitzhardinge | 224101e | 2009-02-18 11:18:57 -0800 | [diff] [blame] | 3301 | arch_start_context_switch(prev); |
Zachary Amsden | 9226d12 | 2007-02-13 13:26:21 +0100 | [diff] [blame] | 3302 | |
Heiko Carstens | 31915ab | 2010-09-16 14:42:25 +0200 | [diff] [blame] | 3303 | if (!mm) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3304 | next->active_mm = oldmm; |
| 3305 | atomic_inc(&oldmm->mm_count); |
| 3306 | enter_lazy_tlb(oldmm, next); |
| 3307 | } else |
| 3308 | switch_mm(oldmm, mm, next); |
| 3309 | |
Heiko Carstens | 31915ab | 2010-09-16 14:42:25 +0200 | [diff] [blame] | 3310 | if (!prev->mm) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3311 | prev->active_mm = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3312 | rq->prev_mm = oldmm; |
| 3313 | } |
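	/*
	 * Illustrative note: a kernel thread (next->mm == NULL) borrows
	 * the outgoing task's active_mm above and runs lazy-TLB; the
	 * mm_count reference taken for that borrow is dropped again via
	 * mmdrop() in finish_task_switch(), once the kernel thread is
	 * itself switched out (the prev->mm == NULL path above).
	 */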
Ingo Molnar | 3a5f5e4 | 2006-07-14 00:24:27 -0700 | [diff] [blame] | 3314 | /* |
| 3315 | * The runqueue lock will be released by the next
| 3316 | * task (which is an invalid locking op, but in the case
| 3317 | * of the scheduler it's an obvious special case), so we
| 3318 | * do an early lockdep release here:
| 3319 | */ |
| 3320 | #ifndef __ARCH_WANT_UNLOCKED_CTXSW |
Ingo Molnar | 8a25d5d | 2006-07-03 00:24:54 -0700 | [diff] [blame] | 3321 | spin_release(&rq->lock.dep_map, 1, _THIS_IP_); |
Ingo Molnar | 3a5f5e4 | 2006-07-14 00:24:27 -0700 | [diff] [blame] | 3322 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3323 | |
| 3324 | /* Here we just switch the register state and the stack. */ |
| 3325 | switch_to(prev, next, prev); |
| 3326 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3327 | barrier(); |
| 3328 | /* |
| 3329 | * this_rq must be evaluated again because prev may have moved |
| 3330 | * CPUs since it called schedule(), thus the 'rq' on its stack |
| 3331 | * frame will be invalid. |
| 3332 | */ |
| 3333 | finish_task_switch(this_rq(), prev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3334 | } |
| 3335 | |
| 3336 | /* |
| 3337 | * nr_running, nr_uninterruptible and nr_context_switches: |
| 3338 | * |
| 3339 | * externally visible scheduler statistics: current number of runnable |
| 3340 | * threads, current number of uninterruptible-sleeping threads, total |
| 3341 | * number of context switches performed since bootup. |
| 3342 | */ |
| 3343 | unsigned long nr_running(void) |
| 3344 | { |
| 3345 | unsigned long i, sum = 0; |
| 3346 | |
| 3347 | for_each_online_cpu(i) |
| 3348 | sum += cpu_rq(i)->nr_running; |
| 3349 | |
| 3350 | return sum; |
| 3351 | } |
| 3352 | |
| 3353 | unsigned long nr_uninterruptible(void) |
| 3354 | { |
| 3355 | unsigned long i, sum = 0; |
| 3356 | |
KAMEZAWA Hiroyuki | 0a94502 | 2006-03-28 01:56:37 -0800 | [diff] [blame] | 3357 | for_each_possible_cpu(i) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3358 | sum += cpu_rq(i)->nr_uninterruptible; |
| 3359 | |
| 3360 | /* |
| 3361 | * Since we read the counters locklessly, the sum might be slightly
| 3362 | * inaccurate. Do not allow it to go below zero though: |
| 3363 | */ |
| 3364 | if (unlikely((long)sum < 0)) |
| 3365 | sum = 0; |
| 3366 | |
| 3367 | return sum; |
| 3368 | } |
| 3369 | |
| 3370 | unsigned long long nr_context_switches(void) |
| 3371 | { |
Steven Rostedt | cc94abf | 2006-06-27 02:54:31 -0700 | [diff] [blame] | 3372 | int i; |
| 3373 | unsigned long long sum = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3374 | |
KAMEZAWA Hiroyuki | 0a94502 | 2006-03-28 01:56:37 -0800 | [diff] [blame] | 3375 | for_each_possible_cpu(i) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3376 | sum += cpu_rq(i)->nr_switches; |
| 3377 | |
| 3378 | return sum; |
| 3379 | } |
| 3380 | |
| 3381 | unsigned long nr_iowait(void) |
| 3382 | { |
| 3383 | unsigned long i, sum = 0; |
| 3384 | |
KAMEZAWA Hiroyuki | 0a94502 | 2006-03-28 01:56:37 -0800 | [diff] [blame] | 3385 | for_each_possible_cpu(i) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3386 | sum += atomic_read(&cpu_rq(i)->nr_iowait); |
| 3387 | |
| 3388 | return sum; |
| 3389 | } |
| 3390 | |
Peter Zijlstra | 8c215bd | 2010-07-01 09:07:17 +0200 | [diff] [blame] | 3391 | unsigned long nr_iowait_cpu(int cpu) |
Arjan van de Ven | 69d2587 | 2009-09-21 17:04:08 -0700 | [diff] [blame] | 3392 | { |
Peter Zijlstra | 8c215bd | 2010-07-01 09:07:17 +0200 | [diff] [blame] | 3393 | struct rq *this = cpu_rq(cpu); |
Arjan van de Ven | 69d2587 | 2009-09-21 17:04:08 -0700 | [diff] [blame] | 3394 | return atomic_read(&this->nr_iowait); |
| 3395 | } |
| 3396 | |
| 3397 | unsigned long this_cpu_load(void) |
| 3398 | { |
| 3399 | struct rq *this = this_rq(); |
| 3400 | return this->cpu_load[0]; |
| 3401 | } |
| 3402 | |
| 3403 | |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 3404 | /* Variables and functions for calc_load */ |
| 3405 | static atomic_long_t calc_load_tasks; |
| 3406 | static unsigned long calc_load_update; |
| 3407 | unsigned long avenrun[3]; |
| 3408 | EXPORT_SYMBOL(avenrun); |
| 3409 | |
Peter Zijlstra | 74f5187 | 2010-04-22 21:50:19 +0200 | [diff] [blame] | 3410 | static long calc_load_fold_active(struct rq *this_rq) |
| 3411 | { |
| 3412 | long nr_active, delta = 0; |
| 3413 | |
| 3414 | nr_active = this_rq->nr_running; |
| 3415 | nr_active += (long) this_rq->nr_uninterruptible; |
| 3416 | |
| 3417 | if (nr_active != this_rq->calc_load_active) { |
| 3418 | delta = nr_active - this_rq->calc_load_active; |
| 3419 | this_rq->calc_load_active = nr_active; |
| 3420 | } |
| 3421 | |
| 3422 | return delta; |
| 3423 | } |
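/*
 * Example (illustrative): with 3 runnable plus 1 uninterruptible task
 * and a previously recorded calc_load_active of 2, nr_active == 4, so
 * the fold returns a delta of +2 and records 4 for the next comparison.
 */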
| 3424 | |
Peter Zijlstra | 0f004f5 | 2010-11-30 19:48:45 +0100 | [diff] [blame] | 3425 | static unsigned long |
| 3426 | calc_load(unsigned long load, unsigned long exp, unsigned long active) |
| 3427 | { |
| 3428 | load *= exp; |
| 3429 | load += active * (FIXED_1 - exp); |
| 3430 | load += 1UL << (FSHIFT - 1); |
| 3431 | return load >> FSHIFT; |
| 3432 | } |
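/*
 * Worked example (illustrative, assuming the usual FSHIFT == 11, so
 * FIXED_1 == 2048, and EXP_1 == 1884): a previous 1-minute average of
 * 0.50 (load == 1024) and one active task (active == 2048) give
 *
 *	(1024 * 1884 + 2048 * (2048 - 1884) + 1024) >> 11 == 1106,
 *
 * i.e. the displayed load average moves to ~0.54.
 */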
| 3433 | |
Peter Zijlstra | 74f5187 | 2010-04-22 21:50:19 +0200 | [diff] [blame] | 3434 | #ifdef CONFIG_NO_HZ |
| 3435 | /* |
| 3436 | * For NO_HZ we delay the active fold to the next LOAD_FREQ update. |
| 3437 | * |
| 3438 | * When making the ILB scale, we should try to pull this in as well. |
| 3439 | */ |
| 3440 | static atomic_long_t calc_load_tasks_idle; |
| 3441 | |
| 3442 | static void calc_load_account_idle(struct rq *this_rq) |
| 3443 | { |
| 3444 | long delta; |
| 3445 | |
| 3446 | delta = calc_load_fold_active(this_rq); |
| 3447 | if (delta) |
| 3448 | atomic_long_add(delta, &calc_load_tasks_idle); |
| 3449 | } |
| 3450 | |
| 3451 | static long calc_load_fold_idle(void) |
| 3452 | { |
| 3453 | long delta = 0; |
| 3454 | |
| 3455 | /* |
| 3456 | * This is racy, but we don't care: a delta missed here is folded in later.
| 3457 | */ |
| 3458 | if (atomic_long_read(&calc_load_tasks_idle)) |
| 3459 | delta = atomic_long_xchg(&calc_load_tasks_idle, 0); |
| 3460 | |
| 3461 | return delta; |
| 3462 | } |
Peter Zijlstra | 0f004f5 | 2010-11-30 19:48:45 +0100 | [diff] [blame] | 3463 | |
| 3464 | /** |
| 3465 | * fixed_power_int - compute: x^n, in O(log n) time |
| 3466 | * |
| 3467 | * @x: base of the power |
| 3468 | * @frac_bits: fractional bits of @x |
| 3469 | * @n: power to raise @x to. |
| 3470 | * |
| 3471 | * By exploiting the relation between the definition of the natural power |
| 3472 | * function: x^n := x*x*...*x (x multiplied by itself for n times), and |
| 3473 | * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i, |
| 3474 | * (where: n_i \elem {0, 1}, the binary vector representing n), |
| 3475 | * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is |
| 3476 | * of course trivially computable in O(log_2 n), the length of our binary |
| 3477 | * vector. |
| 3478 | */ |
| 3479 | static unsigned long |
| 3480 | fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n) |
| 3481 | { |
| 3482 | unsigned long result = 1UL << frac_bits; |
| 3483 | |
| 3484 | if (n) for (;;) { |
| 3485 | if (n & 1) { |
| 3486 | result *= x; |
| 3487 | result += 1UL << (frac_bits - 1); |
| 3488 | result >>= frac_bits; |
| 3489 | } |
| 3490 | n >>= 1; |
| 3491 | if (!n) |
| 3492 | break; |
| 3493 | x *= x; |
| 3494 | x += 1UL << (frac_bits - 1); |
| 3495 | x >>= frac_bits; |
| 3496 | } |
| 3497 | |
| 3498 | return result; |
| 3499 | } |
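/*
 * Example (illustrative): fixed_power_int(1884, 11, 2) squares EXP_1 in
 * 11-bit fixed point and returns 1733, matching the exact value
 * 1884^2 / 2048 ~= 1733.1. For large n this needs only O(log n)
 * multiplies, a couple per set bit of n, instead of n - 1.
 */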
| 3500 | |
| 3501 | /* |
| 3502 | * a1 = a0 * e + a * (1 - e) |
| 3503 | * |
| 3504 | * a2 = a1 * e + a * (1 - e) |
| 3505 | * = (a0 * e + a * (1 - e)) * e + a * (1 - e) |
| 3506 | * = a0 * e^2 + a * (1 - e) * (1 + e) |
| 3507 | * |
| 3508 | * a3 = a2 * e + a * (1 - e) |
| 3509 | * = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e) |
| 3510 | * = a0 * e^3 + a * (1 - e) * (1 + e + e^2) |
| 3511 | * |
| 3512 | * ... |
| 3513 | * |
| 3514 | * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1] |
| 3515 | * = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e) |
| 3516 | * = a0 * e^n + a * (1 - e^n) |
| 3517 | * |
| 3518 | * [1] application of the geometric series: |
| 3519 | * |
| 3520 | * n 1 - x^(n+1) |
| 3521 | * S_n := \Sum x^i = ------------- |
| 3522 | * i=0 1 - x |
| 3523 | */ |
| 3524 | static unsigned long |
| 3525 | calc_load_n(unsigned long load, unsigned long exp, |
| 3526 | unsigned long active, unsigned int n) |
| 3527 | { |
| 3528 | |
| 3529 | return calc_load(load, fixed_power_int(exp, FSHIFT, n), active); |
| 3530 | } |
| 3531 | |
| 3532 | /* |
| 3533 | * NO_HZ can leave us missing all per-cpu ticks calling |
| 3534 | * calc_load_account_active(), but since an idle CPU folds its delta into |
| 3535 | * calc_load_tasks_idle via calc_load_account_idle(), all we need to do is fold
| 3536 | * in the pending idle delta if our idle period crossed a load cycle boundary. |
| 3537 | * |
| 3538 | * Once we've updated the global active value, we need to apply the exponential |
| 3539 | * weights adjusted to the number of cycles missed. |
| 3540 | */ |
| 3541 | static void calc_global_nohz(unsigned long ticks) |
| 3542 | { |
| 3543 | long delta, active, n; |
| 3544 | |
| 3545 | if (time_before(jiffies, calc_load_update)) |
| 3546 | return; |
| 3547 | |
| 3548 | /* |
| 3549 | * If we crossed a calc_load_update boundary, make sure to fold |
| 3550 | * any pending idle changes; the respective CPUs might have
| 3551 | * missed the tick-driven calc_load_account_active() update
| 3552 | * due to NO_HZ. |
| 3553 | */ |
| 3554 | delta = calc_load_fold_idle(); |
| 3555 | if (delta) |
| 3556 | atomic_long_add(delta, &calc_load_tasks); |
| 3557 | |
| 3558 | /* |
| 3559 | * If we were idle for multiple load cycles, apply them. |
| 3560 | */ |
| 3561 | if (ticks >= LOAD_FREQ) { |
| 3562 | n = ticks / LOAD_FREQ; |
| 3563 | |
| 3564 | active = atomic_long_read(&calc_load_tasks); |
| 3565 | active = active > 0 ? active * FIXED_1 : 0; |
| 3566 | |
| 3567 | avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n); |
| 3568 | avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n); |
| 3569 | avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n); |
| 3570 | |
| 3571 | calc_load_update += n * LOAD_FREQ; |
| 3572 | } |
| 3573 | |
| 3574 | /* |
| 3575 | * It's possible the remainder of the above division also crosses
| 3576 | * a LOAD_FREQ period; the regular check in calc_global_load(),
| 3577 | * which comes after this, will take care of that.
| 3578 | * |
| 3579 | * Consider us being 11 ticks before a cycle completion, and us |
| 3580 | * sleeping for 4*LOAD_FREQ + 22 ticks, then the above code will |
| 3581 | * age us 4 cycles, and the test in calc_global_load() will |
| 3582 | * pick up the final one. |
| 3583 | */ |
| 3584 | } |
Peter Zijlstra | 74f5187 | 2010-04-22 21:50:19 +0200 | [diff] [blame] | 3585 | #else |
| 3586 | static void calc_load_account_idle(struct rq *this_rq) |
| 3587 | { |
| 3588 | } |
| 3589 | |
| 3590 | static inline long calc_load_fold_idle(void) |
| 3591 | { |
| 3592 | return 0; |
| 3593 | } |
Peter Zijlstra | 0f004f5 | 2010-11-30 19:48:45 +0100 | [diff] [blame] | 3594 | |
| 3595 | static void calc_global_nohz(unsigned long ticks) |
| 3596 | { |
| 3597 | } |
Peter Zijlstra | 74f5187 | 2010-04-22 21:50:19 +0200 | [diff] [blame] | 3598 | #endif |
| 3599 | |
Thomas Gleixner | 2d02494 | 2009-05-02 20:08:52 +0200 | [diff] [blame] | 3600 | /** |
| 3601 | * get_avenrun - get the load average array |
| 3602 | * @loads: pointer to dest load array |
| 3603 | * @offset: offset to add |
| 3604 | * @shift: shift count to shift the result left |
| 3605 | * |
| 3606 | * These values are estimates at best, so no need for locking. |
| 3607 | */ |
| 3608 | void get_avenrun(unsigned long *loads, unsigned long offset, int shift) |
| 3609 | { |
| 3610 | loads[0] = (avenrun[0] + offset) << shift; |
| 3611 | loads[1] = (avenrun[1] + offset) << shift; |
| 3612 | loads[2] = (avenrun[2] + offset) << shift; |
| 3613 | } |
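/*
 * Usage sketch (illustrative, mirroring fs/proc/loadavg.c, where the
 * LOAD_INT()/LOAD_FRAC() helpers live): readers round by passing
 * offset == FIXED_1/200 and shift == 0, then split the fixed-point
 * values for display:
 *
 *	unsigned long avnrun[3];
 *
 *	get_avenrun(avnrun, FIXED_1/200, 0);
 *	seq_printf(m, "%lu.%02lu", LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]));
 */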
| 3614 | |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 3615 | /* |
| 3616 | * calc_global_load - update the avenrun load estimates 10 ticks after the
| 3617 | * CPUs have updated calc_load_tasks. |
| 3618 | */ |
Peter Zijlstra | 0f004f5 | 2010-11-30 19:48:45 +0100 | [diff] [blame] | 3619 | void calc_global_load(unsigned long ticks) |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 3620 | { |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 3621 | long active; |
| 3622 | |
Peter Zijlstra | 0f004f5 | 2010-11-30 19:48:45 +0100 | [diff] [blame] | 3623 | calc_global_nohz(ticks); |
| 3624 | |
| 3625 | if (time_before(jiffies, calc_load_update + 10)) |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 3626 | return; |
| 3627 | |
| 3628 | active = atomic_long_read(&calc_load_tasks); |
| 3629 | active = active > 0 ? active * FIXED_1 : 0; |
| 3630 | |
| 3631 | avenrun[0] = calc_load(avenrun[0], EXP_1, active); |
| 3632 | avenrun[1] = calc_load(avenrun[1], EXP_5, active); |
| 3633 | avenrun[2] = calc_load(avenrun[2], EXP_15, active); |
| 3634 | |
| 3635 | calc_load_update += LOAD_FREQ; |
| 3636 | } |
| 3637 | |
| 3638 | /* |
Peter Zijlstra | 74f5187 | 2010-04-22 21:50:19 +0200 | [diff] [blame] | 3639 | * Called from update_cpu_load() to periodically update this CPU's |
| 3640 | * active count. |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 3641 | */ |
| 3642 | static void calc_load_account_active(struct rq *this_rq) |
| 3643 | { |
Peter Zijlstra | 74f5187 | 2010-04-22 21:50:19 +0200 | [diff] [blame] | 3644 | long delta; |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 3645 | |
Peter Zijlstra | 74f5187 | 2010-04-22 21:50:19 +0200 | [diff] [blame] | 3646 | if (time_before(jiffies, this_rq->calc_load_update)) |
| 3647 | return; |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 3648 | |
Peter Zijlstra | 74f5187 | 2010-04-22 21:50:19 +0200 | [diff] [blame] | 3649 | delta = calc_load_fold_active(this_rq); |
| 3650 | delta += calc_load_fold_idle(); |
| 3651 | if (delta) |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 3652 | atomic_long_add(delta, &calc_load_tasks); |
Peter Zijlstra | 74f5187 | 2010-04-22 21:50:19 +0200 | [diff] [blame] | 3653 | |
| 3654 | this_rq->calc_load_update += LOAD_FREQ; |
Jack Steiner | db1b1fe | 2006-03-31 02:31:21 -0800 | [diff] [blame] | 3655 | } |
| 3656 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3657 | /* |
Venkatesh Pallipadi | fdf3e95 | 2010-05-17 18:14:43 -0700 | [diff] [blame] | 3658 | * The exact cpuload at various idx values, calculated at every tick would be |
| 3659 | * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load |
| 3660 | * |
| 3661 | * If a cpu misses updates for n-1 ticks (as it was idle) and update gets called |
| 3662 | * on nth tick when cpu may be busy, then we have: |
| 3663 | * load = ((2^idx - 1) / 2^idx)^(n-1) * load |
| 3664 | * load = ((2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load
| 3665 | * |
| 3666 | * decay_load_missed() below does efficient calculation of |
| 3667 | * load = ((2^idx - 1) / 2^idx)^(n-1) * load |
| 3668 | * avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load |
| 3669 | * |
| 3670 | * The calculation is approximated on a 128 point scale. |
| 3671 | * degrade_zero_ticks is the number of ticks after which load at any |
| 3672 | * particular idx is approximated to be zero. |
| 3673 | * degrade_factor is a precomputed table, a row for each load idx. |
| 3674 | * Each column corresponds to degradation factor for a power of two ticks, |
| 3675 | * based on 128 point scale. |
| 3676 | * Example: |
| 3677 | * row 2, col 3 (=12) says that the degradation at load idx 2 after |
| 3678 | * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8). |
| 3679 | * |
| 3680 | * With this power of 2 load factors, we can degrade the load n times |
| 3681 | * by looking at 1 bits in n and doing as many mult/shift instead of |
| 3682 | * n mult/shifts needed by the exact degradation. |
| 3683 | */ |
| 3684 | #define DEGRADE_SHIFT 7 |
| 3685 | static const unsigned char |
| 3686 | degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128}; |
| 3687 | static const unsigned char |
| 3688 | degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = { |
| 3689 | {0, 0, 0, 0, 0, 0, 0, 0}, |
| 3690 | {64, 32, 8, 0, 0, 0, 0, 0}, |
| 3691 | {96, 72, 40, 12, 1, 0, 0}, |
| 3692 | {112, 98, 75, 43, 15, 1, 0}, |
| 3693 | {120, 112, 98, 76, 45, 16, 2} }; |
| 3694 | |
| 3695 | /* |
| 3696 | * Update cpu_load for any missed ticks due to tickless idle. The backlog
| 3697 | * case arises while the CPU is idle, so we just decay the old load without
| 3698 | * adding any new load.
| 3699 | */ |
| 3700 | static unsigned long |
| 3701 | decay_load_missed(unsigned long load, unsigned long missed_updates, int idx) |
| 3702 | { |
| 3703 | int j = 0; |
| 3704 | |
| 3705 | if (!missed_updates) |
| 3706 | return load; |
| 3707 | |
| 3708 | if (missed_updates >= degrade_zero_ticks[idx]) |
| 3709 | return 0; |
| 3710 | |
| 3711 | if (idx == 1) |
| 3712 | return load >> missed_updates; |
| 3713 | |
| 3714 | while (missed_updates) { |
| 3715 | if (missed_updates % 2) |
| 3716 | load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT; |
| 3717 | |
| 3718 | missed_updates >>= 1; |
| 3719 | j++; |
| 3720 | } |
| 3721 | return load; |
| 3722 | } |
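/*
 * Example (illustrative): decay_load_missed(128, 8, 2) finds a single
 * set bit in 8 (bit 3), so one multiply with degrade_factor[2][3] == 12
 * suffices: 128 * 12 >> 7 == 12, versus the exact (3/4)^8 * 128 ~= 12.8.
 */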
| 3723 | |
| 3724 | /* |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3725 | * Update rq->cpu_load[] statistics. This function is usually called every |
Venkatesh Pallipadi | fdf3e95 | 2010-05-17 18:14:43 -0700 | [diff] [blame] | 3726 | * scheduler tick (TICK_NSEC). With tickless idle this will not be called |
| 3727 | * every tick. We fix it up based on jiffies. |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 3728 | */ |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3729 | static void update_cpu_load(struct rq *this_rq) |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 3730 | { |
Dmitry Adamushko | 495eca4 | 2007-10-15 17:00:06 +0200 | [diff] [blame] | 3731 | unsigned long this_load = this_rq->load.weight; |
Venkatesh Pallipadi | fdf3e95 | 2010-05-17 18:14:43 -0700 | [diff] [blame] | 3732 | unsigned long curr_jiffies = jiffies; |
| 3733 | unsigned long pending_updates; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3734 | int i, scale; |
| 3735 | |
| 3736 | this_rq->nr_load_updates++; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3737 | |
Venkatesh Pallipadi | fdf3e95 | 2010-05-17 18:14:43 -0700 | [diff] [blame] | 3738 | /* Avoid repeated calls on same jiffy, when moving in and out of idle */ |
| 3739 | if (curr_jiffies == this_rq->last_load_update_tick) |
| 3740 | return; |
| 3741 | |
| 3742 | pending_updates = curr_jiffies - this_rq->last_load_update_tick; |
| 3743 | this_rq->last_load_update_tick = curr_jiffies; |
| 3744 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3745 | /* Update our load: */ |
Venkatesh Pallipadi | fdf3e95 | 2010-05-17 18:14:43 -0700 | [diff] [blame] | 3746 | this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */ |
| 3747 | for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) { |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3748 | unsigned long old_load, new_load; |
| 3749 | |
| 3750 | /* scale is effectively 1 << i now, and >> i divides by scale */ |
| 3751 | |
| 3752 | old_load = this_rq->cpu_load[i]; |
Venkatesh Pallipadi | fdf3e95 | 2010-05-17 18:14:43 -0700 | [diff] [blame] | 3753 | old_load = decay_load_missed(old_load, pending_updates - 1, i); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3754 | new_load = this_load; |
Ingo Molnar | a25707f | 2007-10-15 17:00:03 +0200 | [diff] [blame] | 3755 | /* |
| 3756 | * Round up the averaging division if load is increasing. This |
| 3757 | * prevents us from getting stuck on 9 if the load is 10, for |
| 3758 | * example. |
| 3759 | */ |
| 3760 | if (new_load > old_load) |
Venkatesh Pallipadi | fdf3e95 | 2010-05-17 18:14:43 -0700 | [diff] [blame] | 3761 | new_load += scale - 1; |
| 3762 | |
| 3763 | this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3764 | } |
Suresh Siddha | da2b71e | 2010-08-23 13:42:51 -0700 | [diff] [blame] | 3765 | |
| 3766 | sched_avg_update(this_rq); |
Venkatesh Pallipadi | fdf3e95 | 2010-05-17 18:14:43 -0700 | [diff] [blame] | 3767 | } |
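/*
 * Example (illustrative) of the round-up above: for idx 1 (scale == 2),
 * old cpu_load[1] == 9 and a current load of 10, we compute
 * (9 * 1 + (10 + 1)) >> 1 == 10 rather than staying stuck at
 * (9 + 10) >> 1 == 9.
 */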
| 3768 | |
| 3769 | static void update_cpu_load_active(struct rq *this_rq) |
| 3770 | { |
| 3771 | update_cpu_load(this_rq); |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 3772 | |
Peter Zijlstra | 74f5187 | 2010-04-22 21:50:19 +0200 | [diff] [blame] | 3773 | calc_load_account_active(this_rq); |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 3774 | } |
| 3775 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3776 | #ifdef CONFIG_SMP |
| 3777 | |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 3778 | /* |
Peter Zijlstra | 3802290 | 2009-12-16 18:04:37 +0100 | [diff] [blame] | 3779 | * sched_exec - execve() is a valuable balancing opportunity, because at |
| 3780 | * this point the task has the smallest effective memory and cache footprint. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3781 | */ |
Peter Zijlstra | 3802290 | 2009-12-16 18:04:37 +0100 | [diff] [blame] | 3782 | void sched_exec(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3783 | { |
Peter Zijlstra | 3802290 | 2009-12-16 18:04:37 +0100 | [diff] [blame] | 3784 | struct task_struct *p = current; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3785 | unsigned long flags; |
Peter Zijlstra | 0017d73 | 2010-03-24 18:34:10 +0100 | [diff] [blame] | 3786 | int dest_cpu; |
Peter Zijlstra | 3802290 | 2009-12-16 18:04:37 +0100 | [diff] [blame] | 3787 | |
Peter Zijlstra | 8f42ced | 2011-04-05 17:23:53 +0200 | [diff] [blame] | 3788 | raw_spin_lock_irqsave(&p->pi_lock, flags); |
Peter Zijlstra | 7608dec | 2011-04-05 17:23:46 +0200 | [diff] [blame] | 3789 | dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0); |
Peter Zijlstra | 0017d73 | 2010-03-24 18:34:10 +0100 | [diff] [blame] | 3790 | if (dest_cpu == smp_processor_id()) |
| 3791 | goto unlock; |
Peter Zijlstra | 3802290 | 2009-12-16 18:04:37 +0100 | [diff] [blame] | 3792 | |
Peter Zijlstra | 8f42ced | 2011-04-05 17:23:53 +0200 | [diff] [blame] | 3793 | if (likely(cpu_active(dest_cpu))) { |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 3794 | struct migration_arg arg = { p, dest_cpu }; |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 3795 | |
Peter Zijlstra | 8f42ced | 2011-04-05 17:23:53 +0200 | [diff] [blame] | 3796 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
| 3797 | stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3798 | return; |
| 3799 | } |
Peter Zijlstra | 0017d73 | 2010-03-24 18:34:10 +0100 | [diff] [blame] | 3800 | unlock: |
Peter Zijlstra | 8f42ced | 2011-04-05 17:23:53 +0200 | [diff] [blame] | 3801 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3802 | } |
| 3803 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3804 | #endif |
| 3805 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3806 | DEFINE_PER_CPU(struct kernel_stat, kstat); |
| 3807 | |
| 3808 | EXPORT_PER_CPU_SYMBOL(kstat); |
| 3809 | |
| 3810 | /* |
Hidetoshi Seto | c5f8d99 | 2009-03-31 16:56:03 +0900 | [diff] [blame] | 3811 | * Return any ns on the sched_clock that have not yet been accounted to
Frank Mayhar | f06febc | 2008-09-12 09:54:39 -0700 | [diff] [blame] | 3812 | * @p, in case that task is currently running.
Hidetoshi Seto | c5f8d99 | 2009-03-31 16:56:03 +0900 | [diff] [blame] | 3813 | * |
| 3814 | * Called with task_rq_lock() held on @rq. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3815 | */ |
Hidetoshi Seto | c5f8d99 | 2009-03-31 16:56:03 +0900 | [diff] [blame] | 3816 | static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq) |
| 3817 | { |
| 3818 | u64 ns = 0; |
| 3819 | |
| 3820 | if (task_current(rq, p)) { |
| 3821 | update_rq_clock(rq); |
Venkatesh Pallipadi | 305e683 | 2010-10-04 17:03:21 -0700 | [diff] [blame] | 3822 | ns = rq->clock_task - p->se.exec_start; |
Hidetoshi Seto | c5f8d99 | 2009-03-31 16:56:03 +0900 | [diff] [blame] | 3823 | if ((s64)ns < 0) |
| 3824 | ns = 0; |
| 3825 | } |
| 3826 | |
| 3827 | return ns; |
| 3828 | } |
| 3829 | |
Frank Mayhar | bb34d92 | 2008-09-12 09:54:39 -0700 | [diff] [blame] | 3830 | unsigned long long task_delta_exec(struct task_struct *p) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3831 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3832 | unsigned long flags; |
Ingo Molnar | 41b86e9 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 3833 | struct rq *rq; |
Frank Mayhar | bb34d92 | 2008-09-12 09:54:39 -0700 | [diff] [blame] | 3834 | u64 ns = 0; |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 3835 | |
Ingo Molnar | 41b86e9 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 3836 | rq = task_rq_lock(p, &flags); |
Hidetoshi Seto | c5f8d99 | 2009-03-31 16:56:03 +0900 | [diff] [blame] | 3837 | ns = do_task_delta_exec(p, rq); |
Peter Zijlstra | 0122ec5 | 2011-04-05 17:23:51 +0200 | [diff] [blame] | 3838 | task_rq_unlock(rq, p, &flags); |
Ingo Molnar | 1508487 | 2008-09-30 08:28:17 +0200 | [diff] [blame] | 3839 | |
Hidetoshi Seto | c5f8d99 | 2009-03-31 16:56:03 +0900 | [diff] [blame] | 3840 | return ns; |
| 3841 | } |
Frank Mayhar | f06febc | 2008-09-12 09:54:39 -0700 | [diff] [blame] | 3842 | |
Hidetoshi Seto | c5f8d99 | 2009-03-31 16:56:03 +0900 | [diff] [blame] | 3843 | /* |
| 3844 | * Return accounted runtime for the task. |
| 3845 | * In case the task is currently running, return the runtime plus current's |
| 3846 | * pending runtime that has not been accounted yet.
| 3847 | */ |
| 3848 | unsigned long long task_sched_runtime(struct task_struct *p) |
| 3849 | { |
| 3850 | unsigned long flags; |
| 3851 | struct rq *rq; |
| 3852 | u64 ns = 0; |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 3853 | |
Hidetoshi Seto | c5f8d99 | 2009-03-31 16:56:03 +0900 | [diff] [blame] | 3854 | rq = task_rq_lock(p, &flags); |
| 3855 | ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq); |
Peter Zijlstra | 0122ec5 | 2011-04-05 17:23:51 +0200 | [diff] [blame] | 3856 | task_rq_unlock(rq, p, &flags); |
Hidetoshi Seto | c5f8d99 | 2009-03-31 16:56:03 +0900 | [diff] [blame] | 3857 | |
| 3858 | return ns; |
| 3859 | } |
| 3860 | |
| 3861 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3862 | * Account user cpu time to a process. |
| 3863 | * @p: the process that the cpu time gets accounted to |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3864 | * @cputime: the cpu time spent in user space since the last update |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3865 | * @cputime_scaled: cputime scaled by cpu frequency |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3866 | */ |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3867 | void account_user_time(struct task_struct *p, cputime_t cputime, |
| 3868 | cputime_t cputime_scaled) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3869 | { |
| 3870 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
| 3871 | cputime64_t tmp; |
| 3872 | |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3873 | /* Add user time to process. */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3874 | p->utime = cputime_add(p->utime, cputime); |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3875 | p->utimescaled = cputime_add(p->utimescaled, cputime_scaled); |
Frank Mayhar | f06febc | 2008-09-12 09:54:39 -0700 | [diff] [blame] | 3876 | account_group_user_time(p, cputime); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3877 | |
| 3878 | /* Add user time to cpustat. */ |
| 3879 | tmp = cputime_to_cputime64(cputime); |
| 3880 | if (TASK_NICE(p) > 0) |
| 3881 | cpustat->nice = cputime64_add(cpustat->nice, tmp); |
| 3882 | else |
| 3883 | cpustat->user = cputime64_add(cpustat->user, tmp); |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 3884 | |
| 3885 | cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime); |
Jonathan Lim | 49b5cf3 | 2008-07-25 01:48:40 -0700 | [diff] [blame] | 3886 | /* Account for user time used */ |
| 3887 | acct_update_integrals(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3888 | } |
| 3889 | |
| 3890 | /* |
Laurent Vivier | 94886b8 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 3891 | * Account guest cpu time to a process. |
| 3892 | * @p: the process that the cpu time gets accounted to |
 | 3893 | * @cputime: the cpu time spent in the virtual machine since the last update |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3894 | * @cputime_scaled: cputime scaled by cpu frequency |
Laurent Vivier | 94886b8 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 3895 | */ |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3896 | static void account_guest_time(struct task_struct *p, cputime_t cputime, |
| 3897 | cputime_t cputime_scaled) |
Laurent Vivier | 94886b8 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 3898 | { |
| 3899 | cputime64_t tmp; |
| 3900 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
| 3901 | |
| 3902 | tmp = cputime_to_cputime64(cputime); |
| 3903 | |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3904 | /* Add guest time to process. */ |
Laurent Vivier | 94886b8 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 3905 | p->utime = cputime_add(p->utime, cputime); |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3906 | p->utimescaled = cputime_add(p->utimescaled, cputime_scaled); |
Frank Mayhar | f06febc | 2008-09-12 09:54:39 -0700 | [diff] [blame] | 3907 | account_group_user_time(p, cputime); |
Laurent Vivier | 94886b8 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 3908 | p->gtime = cputime_add(p->gtime, cputime); |
| 3909 | |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3910 | /* Add guest time to cpustat. */ |
Ryota Ozaki | ce0e7b2 | 2009-10-24 01:20:10 +0900 | [diff] [blame] | 3911 | if (TASK_NICE(p) > 0) { |
| 3912 | cpustat->nice = cputime64_add(cpustat->nice, tmp); |
| 3913 | cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp); |
| 3914 | } else { |
| 3915 | cpustat->user = cputime64_add(cpustat->user, tmp); |
| 3916 | cpustat->guest = cputime64_add(cpustat->guest, tmp); |
| 3917 | } |
Laurent Vivier | 94886b8 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 3918 | } |
| 3919 | |
| 3920 | /* |
Venkatesh Pallipadi | 70a89a6 | 2010-12-21 17:09:02 -0800 | [diff] [blame] | 3921 | * Account system cpu time to a process and the desired cpustat field |
| 3922 | * @p: the process that the cpu time gets accounted to |
| 3923 | * @cputime: the cpu time spent in kernel space since the last update |
| 3924 | * @cputime_scaled: cputime scaled by cpu frequency |
| 3925 | * @target_cputime64: pointer to cpustat field that has to be updated |
| 3926 | */ |
| 3927 | static inline |
| 3928 | void __account_system_time(struct task_struct *p, cputime_t cputime, |
| 3929 | cputime_t cputime_scaled, cputime64_t *target_cputime64) |
| 3930 | { |
| 3931 | cputime64_t tmp = cputime_to_cputime64(cputime); |
| 3932 | |
| 3933 | /* Add system time to process. */ |
| 3934 | p->stime = cputime_add(p->stime, cputime); |
| 3935 | p->stimescaled = cputime_add(p->stimescaled, cputime_scaled); |
| 3936 | account_group_system_time(p, cputime); |
| 3937 | |
| 3938 | /* Add system time to cpustat. */ |
| 3939 | *target_cputime64 = cputime64_add(*target_cputime64, tmp); |
| 3940 | cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime); |
| 3941 | |
| 3942 | /* Account for system time used */ |
| 3943 | acct_update_integrals(p); |
| 3944 | } |
| 3945 | |
| 3946 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3947 | * Account system cpu time to a process. |
| 3948 | * @p: the process that the cpu time gets accounted to |
| 3949 | * @hardirq_offset: the offset to subtract from hardirq_count() |
| 3950 | * @cputime: the cpu time spent in kernel space since the last update |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3951 | * @cputime_scaled: cputime scaled by cpu frequency |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3952 | */ |
| 3953 | void account_system_time(struct task_struct *p, int hardirq_offset, |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3954 | cputime_t cputime, cputime_t cputime_scaled) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3955 | { |
| 3956 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
Venkatesh Pallipadi | 70a89a6 | 2010-12-21 17:09:02 -0800 | [diff] [blame] | 3957 | cputime64_t *target_cputime64; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3958 | |
Harvey Harrison | 983ed7a | 2008-04-24 18:17:55 -0700 | [diff] [blame] | 3959 | if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) { |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3960 | account_guest_time(p, cputime, cputime_scaled); |
Harvey Harrison | 983ed7a | 2008-04-24 18:17:55 -0700 | [diff] [blame] | 3961 | return; |
| 3962 | } |
Laurent Vivier | 94886b8 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 3963 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3964 | if (hardirq_count() - hardirq_offset) |
Venkatesh Pallipadi | 70a89a6 | 2010-12-21 17:09:02 -0800 | [diff] [blame] | 3965 | target_cputime64 = &cpustat->irq; |
Venkatesh Pallipadi | 75e1056 | 2010-10-04 17:03:16 -0700 | [diff] [blame] | 3966 | else if (in_serving_softirq()) |
Venkatesh Pallipadi | 70a89a6 | 2010-12-21 17:09:02 -0800 | [diff] [blame] | 3967 | target_cputime64 = &cpustat->softirq; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3968 | else |
Venkatesh Pallipadi | 70a89a6 | 2010-12-21 17:09:02 -0800 | [diff] [blame] | 3969 | target_cputime64 = &cpustat->system; |
Martin Schwidefsky | 79741dd | 2008-12-31 15:11:38 +0100 | [diff] [blame] | 3970 | |
Venkatesh Pallipadi | 70a89a6 | 2010-12-21 17:09:02 -0800 | [diff] [blame] | 3971 | __account_system_time(p, cputime, cputime_scaled, target_cputime64); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3972 | } |
| 3973 | |
| 3974 | /* |
| 3975 | * Account for involuntary wait time. |
Venkatesh Pallipadi | 544b4a1 | 2011-02-25 15:13:16 -0800 | [diff] [blame] | 3976 | * @cputime: the cpu time spent in involuntary wait |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3977 | */ |
Martin Schwidefsky | 79741dd | 2008-12-31 15:11:38 +0100 | [diff] [blame] | 3978 | void account_steal_time(cputime_t cputime) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3979 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3980 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
Martin Schwidefsky | 79741dd | 2008-12-31 15:11:38 +0100 | [diff] [blame] | 3981 | cputime64_t cputime64 = cputime_to_cputime64(cputime); |
| 3982 | |
| 3983 | cpustat->steal = cputime64_add(cpustat->steal, cputime64); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3984 | } |
| 3985 | |
Christoph Lameter | 7835b98 | 2006-12-10 02:20:22 -0800 | [diff] [blame] | 3986 | /* |
Martin Schwidefsky | 79741dd | 2008-12-31 15:11:38 +0100 | [diff] [blame] | 3987 | * Account for idle time. |
| 3988 | * @cputime: the cpu time spent in idle wait |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3989 | */ |
Martin Schwidefsky | 79741dd | 2008-12-31 15:11:38 +0100 | [diff] [blame] | 3990 | void account_idle_time(cputime_t cputime) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3991 | { |
| 3992 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
Martin Schwidefsky | 79741dd | 2008-12-31 15:11:38 +0100 | [diff] [blame] | 3993 | cputime64_t cputime64 = cputime_to_cputime64(cputime); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3994 | struct rq *rq = this_rq(); |
| 3995 | |
Martin Schwidefsky | 79741dd | 2008-12-31 15:11:38 +0100 | [diff] [blame] | 3996 | if (atomic_read(&rq->nr_iowait) > 0) |
| 3997 | cpustat->iowait = cputime64_add(cpustat->iowait, cputime64); |
| 3998 | else |
| 3999 | cpustat->idle = cputime64_add(cpustat->idle, cputime64); |
Christoph Lameter | 7835b98 | 2006-12-10 02:20:22 -0800 | [diff] [blame] | 4000 | } |
| 4001 | |
Glauber Costa | e6e6685 | 2011-07-11 15:28:17 -0400 | [diff] [blame] | 4002 | static __always_inline bool steal_account_process_tick(void) |
| 4003 | { |
| 4004 | #ifdef CONFIG_PARAVIRT |
| 4005 | if (static_branch(¶virt_steal_enabled)) { |
| 4006 | u64 steal, st = 0; |
| 4007 | |
| 4008 | steal = paravirt_steal_clock(smp_processor_id()); |
| 4009 | steal -= this_rq()->prev_steal_time; |
| 4010 | |
| 4011 | st = steal_ticks(steal); |
| 4012 | this_rq()->prev_steal_time += st * TICK_NSEC; |
| 4013 | |
| 4014 | account_steal_time(st); |
| 4015 | return st; |
| 4016 | } |
| 4017 | #endif |
| 4018 | return false; |
| 4019 | } |
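/*
 * Illustrative sketch, not part of sched.c: the rounding behaviour of
 * the conversion above, with made-up numbers. steal_ticks() turns a
 * nanosecond delta into whole ticks; only the whole ticks are added to
 * prev_steal_time, so any sub-tick remainder is carried forward and
 * accounted on a later tick rather than lost.
 */
static void steal_rounding_example(void)
{
	u64 steal = 2 * TICK_NSEC + 1234;	/* pretend hypervisor delta */
	u64 st = steal_ticks(steal);		/* == 2 whole ticks */

	/* 2 * TICK_NSEC is consumed now; the 1234 ns remain pending. */
	account_steal_time(st);
}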
| 4020 | |
Martin Schwidefsky | 79741dd | 2008-12-31 15:11:38 +0100 | [diff] [blame] | 4021 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING |
| 4022 | |
Venkatesh Pallipadi | abb74ce | 2010-12-21 17:09:03 -0800 | [diff] [blame] | 4023 | #ifdef CONFIG_IRQ_TIME_ACCOUNTING |
| 4024 | /* |
| 4025 | * Account a tick to a process and cpustat |
| 4026 | * @p: the process that the cpu time gets accounted to |
 | 4027 | * @user_tick: whether the tick is from userspace |
| 4028 | * @rq: the pointer to rq |
| 4029 | * |
 | 4030 | * Tick demultiplexing follows the order |
 | 4031 | * - pending steal time update |
 | 4032 | * - pending hardirq update |
 | 4033 | * - pending softirq update |
 | 4034 | * - ksoftirqd, accounted as softirq system time |
 | 4035 | * - user_time |
 | 4036 | * - idle_time |
 | 4037 | * - guest_time, else system_time |
| 4038 | * |
 | 4039 | * The check for hardirq is done for both system and user time, as no |
 | 4040 | * timer goes off while we are on hardirq, so we may never get an |
 | 4041 | * opportunity to update it solely in system time. |
 | 4042 | * p->stime and friends are only updated on system time, not on hardirq |
 | 4043 | * or softirq time, as those no longer count toward task exec_runtime. |
| 4044 | */ |
| 4045 | static void irqtime_account_process_tick(struct task_struct *p, int user_tick, |
| 4046 | struct rq *rq) |
| 4047 | { |
| 4048 | cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy); |
| 4049 | cputime64_t tmp = cputime_to_cputime64(cputime_one_jiffy); |
| 4050 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
| 4051 | |
Glauber Costa | e6e6685 | 2011-07-11 15:28:17 -0400 | [diff] [blame] | 4052 | if (steal_account_process_tick()) |
| 4053 | return; |
| 4054 | |
Venkatesh Pallipadi | abb74ce | 2010-12-21 17:09:03 -0800 | [diff] [blame] | 4055 | if (irqtime_account_hi_update()) { |
| 4056 | cpustat->irq = cputime64_add(cpustat->irq, tmp); |
| 4057 | } else if (irqtime_account_si_update()) { |
| 4058 | cpustat->softirq = cputime64_add(cpustat->softirq, tmp); |
Venkatesh Pallipadi | 414bee9 | 2010-12-21 17:09:04 -0800 | [diff] [blame] | 4059 | } else if (this_cpu_ksoftirqd() == p) { |
| 4060 | /* |
 | 4061 | * ksoftirqd time does not get accounted in cpu_softirq_time. |
| 4062 | * So, we have to handle it separately here. |
| 4063 | * Also, p->stime needs to be updated for ksoftirqd. |
| 4064 | */ |
| 4065 | __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled, |
| 4066 | &cpustat->softirq); |
Venkatesh Pallipadi | abb74ce | 2010-12-21 17:09:03 -0800 | [diff] [blame] | 4067 | } else if (user_tick) { |
| 4068 | account_user_time(p, cputime_one_jiffy, one_jiffy_scaled); |
| 4069 | } else if (p == rq->idle) { |
| 4070 | account_idle_time(cputime_one_jiffy); |
| 4071 | } else if (p->flags & PF_VCPU) { /* System time or guest time */ |
| 4072 | account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled); |
| 4073 | } else { |
| 4074 | __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled, |
| 4075 | &cpustat->system); |
| 4076 | } |
| 4077 | } |
| 4078 | |
| 4079 | static void irqtime_account_idle_ticks(int ticks) |
| 4080 | { |
| 4081 | int i; |
| 4082 | struct rq *rq = this_rq(); |
| 4083 | |
| 4084 | for (i = 0; i < ticks; i++) |
| 4085 | irqtime_account_process_tick(current, 0, rq); |
| 4086 | } |
Venkatesh Pallipadi | 544b4a1 | 2011-02-25 15:13:16 -0800 | [diff] [blame] | 4087 | #else /* CONFIG_IRQ_TIME_ACCOUNTING */ |
Venkatesh Pallipadi | abb74ce | 2010-12-21 17:09:03 -0800 | [diff] [blame] | 4088 | static void irqtime_account_idle_ticks(int ticks) {} |
| 4089 | static void irqtime_account_process_tick(struct task_struct *p, int user_tick, |
| 4090 | struct rq *rq) {} |
Venkatesh Pallipadi | 544b4a1 | 2011-02-25 15:13:16 -0800 | [diff] [blame] | 4091 | #endif /* CONFIG_IRQ_TIME_ACCOUNTING */ |
Martin Schwidefsky | 79741dd | 2008-12-31 15:11:38 +0100 | [diff] [blame] | 4092 | |
| 4093 | /* |
| 4094 | * Account a single tick of cpu time. |
| 4095 | * @p: the process that the cpu time gets accounted to |
| 4096 | * @user_tick: indicates if the tick is a user or a system tick |
| 4097 | */ |
| 4098 | void account_process_tick(struct task_struct *p, int user_tick) |
| 4099 | { |
Stanislaw Gruszka | a42548a | 2009-07-29 12:15:29 +0200 | [diff] [blame] | 4100 | cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy); |
Martin Schwidefsky | 79741dd | 2008-12-31 15:11:38 +0100 | [diff] [blame] | 4101 | struct rq *rq = this_rq(); |
| 4102 | |
Venkatesh Pallipadi | abb74ce | 2010-12-21 17:09:03 -0800 | [diff] [blame] | 4103 | if (sched_clock_irqtime) { |
| 4104 | irqtime_account_process_tick(p, user_tick, rq); |
| 4105 | return; |
| 4106 | } |
| 4107 | |
Glauber Costa | e6e6685 | 2011-07-11 15:28:17 -0400 | [diff] [blame] | 4108 | if (steal_account_process_tick()) |
| 4109 | return; |
| 4110 | |
Martin Schwidefsky | 79741dd | 2008-12-31 15:11:38 +0100 | [diff] [blame] | 4111 | if (user_tick) |
Stanislaw Gruszka | a42548a | 2009-07-29 12:15:29 +0200 | [diff] [blame] | 4112 | account_user_time(p, cputime_one_jiffy, one_jiffy_scaled); |
Eric Dumazet | f5f293a | 2009-04-29 14:44:49 +0200 | [diff] [blame] | 4113 | else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET)) |
Stanislaw Gruszka | a42548a | 2009-07-29 12:15:29 +0200 | [diff] [blame] | 4114 | account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy, |
Martin Schwidefsky | 79741dd | 2008-12-31 15:11:38 +0100 | [diff] [blame] | 4115 | one_jiffy_scaled); |
| 4116 | else |
Stanislaw Gruszka | a42548a | 2009-07-29 12:15:29 +0200 | [diff] [blame] | 4117 | account_idle_time(cputime_one_jiffy); |
Martin Schwidefsky | 79741dd | 2008-12-31 15:11:38 +0100 | [diff] [blame] | 4118 | } |
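/*
 * Illustrative sketch, not part of sched.c: roughly where the call
 * above comes from. In the real tree, update_process_times() in
 * kernel/timer.c, run from the timer interrupt, does the equivalent of
 * this (simplified; it also runs timers and posix cpu timers).
 */
static void update_process_times_sketch(int user_tick)
{
	struct task_struct *p = current;

	account_process_tick(p, user_tick);	/* one jiffy of cpu time */
	scheduler_tick();			/* task_tick, load update */
}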
| 4119 | |
| 4120 | /* |
| 4121 | * Account multiple ticks of steal time. |
 | 4123 | * @ticks: number of stolen ticks |
| 4124 | */ |
| 4125 | void account_steal_ticks(unsigned long ticks) |
| 4126 | { |
| 4127 | account_steal_time(jiffies_to_cputime(ticks)); |
| 4128 | } |
| 4129 | |
| 4130 | /* |
| 4131 | * Account multiple ticks of idle time. |
 | 4132 | * @ticks: number of idle ticks |
| 4133 | */ |
| 4134 | void account_idle_ticks(unsigned long ticks) |
| 4135 | { |
Venkatesh Pallipadi | abb74ce | 2010-12-21 17:09:03 -0800 | [diff] [blame] | 4137 | if (sched_clock_irqtime) { |
| 4138 | irqtime_account_idle_ticks(ticks); |
| 4139 | return; |
| 4140 | } |
| 4141 | |
Martin Schwidefsky | 79741dd | 2008-12-31 15:11:38 +0100 | [diff] [blame] | 4142 | account_idle_time(jiffies_to_cputime(ticks)); |
| 4143 | } |
| 4144 | |
| 4145 | #endif |
| 4146 | |
Christoph Lameter | 7835b98 | 2006-12-10 02:20:22 -0800 | [diff] [blame] | 4147 | /* |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 4148 | * Use precise platform statistics if available: |
| 4149 | */ |
| 4150 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
Hidetoshi Seto | d180c5b | 2009-11-26 14:48:30 +0900 | [diff] [blame] | 4151 | void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 4152 | { |
Hidetoshi Seto | d99ca3b | 2009-12-02 17:26:47 +0900 | [diff] [blame] | 4153 | *ut = p->utime; |
| 4154 | *st = p->stime; |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 4155 | } |
| 4156 | |
Hidetoshi Seto | 0cf55e1 | 2009-12-02 17:28:07 +0900 | [diff] [blame] | 4157 | void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 4158 | { |
Hidetoshi Seto | 0cf55e1 | 2009-12-02 17:28:07 +0900 | [diff] [blame] | 4159 | struct task_cputime cputime; |
| 4160 | |
| 4161 | thread_group_cputime(p, &cputime); |
| 4162 | |
| 4163 | *ut = cputime.utime; |
| 4164 | *st = cputime.stime; |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 4165 | } |
| 4166 | #else |
Hidetoshi Seto | 761b1d2 | 2009-11-12 13:33:45 +0900 | [diff] [blame] | 4167 | |
| 4168 | #ifndef nsecs_to_cputime |
Hidetoshi Seto | b7b20df9 | 2009-11-26 14:49:27 +0900 | [diff] [blame] | 4169 | # define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs) |
Hidetoshi Seto | 761b1d2 | 2009-11-12 13:33:45 +0900 | [diff] [blame] | 4170 | #endif |
| 4171 | |
Hidetoshi Seto | d180c5b | 2009-11-26 14:48:30 +0900 | [diff] [blame] | 4172 | void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 4173 | { |
Hidetoshi Seto | d99ca3b | 2009-12-02 17:26:47 +0900 | [diff] [blame] | 4174 | cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime); |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 4175 | |
| 4176 | /* |
| 4177 | * Use CFS's precise accounting: |
| 4178 | */ |
Hidetoshi Seto | d180c5b | 2009-11-26 14:48:30 +0900 | [diff] [blame] | 4179 | rtime = nsecs_to_cputime(p->se.sum_exec_runtime); |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 4180 | |
| 4181 | if (total) { |
Stanislaw Gruszka | e75e863 | 2010-09-14 16:35:14 +0200 | [diff] [blame] | 4182 | u64 temp = rtime; |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 4183 | |
Stanislaw Gruszka | e75e863 | 2010-09-14 16:35:14 +0200 | [diff] [blame] | 4184 | temp *= utime; |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 4185 | do_div(temp, total); |
Hidetoshi Seto | d180c5b | 2009-11-26 14:48:30 +0900 | [diff] [blame] | 4186 | utime = (cputime_t)temp; |
| 4187 | } else |
| 4188 | utime = rtime; |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 4189 | |
| 4190 | /* |
Hidetoshi Seto | d180c5b | 2009-11-26 14:48:30 +0900 | [diff] [blame] | 4191 | * Compare with previous values, to keep monotonicity: |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 4192 | */ |
Hidetoshi Seto | 761b1d2 | 2009-11-12 13:33:45 +0900 | [diff] [blame] | 4193 | p->prev_utime = max(p->prev_utime, utime); |
Hidetoshi Seto | d99ca3b | 2009-12-02 17:26:47 +0900 | [diff] [blame] | 4194 | p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime)); |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 4195 | |
Hidetoshi Seto | d99ca3b | 2009-12-02 17:26:47 +0900 | [diff] [blame] | 4196 | *ut = p->prev_utime; |
| 4197 | *st = p->prev_stime; |
Hidetoshi Seto | d180c5b | 2009-11-26 14:48:30 +0900 | [diff] [blame] | 4198 | } |
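/*
 * Illustrative sketch, not part of sched.c: the scaling above with
 * made-up numbers. Say tick sampling recorded utime = 30 and
 * stime = 10 (total = 40), while CFS measured rtime = 60 of actual
 * execution. Then the scaled utime is 60 * 30 / 40 = 45 and
 * prev_stime becomes at least 60 - 45 = 15: the precise total runtime
 * is redistributed in the sampled user/system ratio, and the max()
 * against the prev_* values keeps both monotonic across calls.
 */
static void task_times_scaling_example(void)
{
	u64 temp = 60;		/* rtime: precise runtime from CFS */

	temp *= 30;		/* sampled utime */
	do_div(temp, 40);	/* sampled utime + stime; temp == 45 */
}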
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 4199 | |
Hidetoshi Seto | 0cf55e1 | 2009-12-02 17:28:07 +0900 | [diff] [blame] | 4200 | /* |
| 4201 | * Must be called with siglock held. |
| 4202 | */ |
| 4203 | void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) |
| 4204 | { |
| 4205 | struct signal_struct *sig = p->signal; |
| 4206 | struct task_cputime cputime; |
| 4207 | cputime_t rtime, utime, total; |
| 4208 | |
| 4209 | thread_group_cputime(p, &cputime); |
| 4210 | |
| 4211 | total = cputime_add(cputime.utime, cputime.stime); |
| 4212 | rtime = nsecs_to_cputime(cputime.sum_exec_runtime); |
| 4213 | |
| 4214 | if (total) { |
Stanislaw Gruszka | e75e863 | 2010-09-14 16:35:14 +0200 | [diff] [blame] | 4215 | u64 temp = rtime; |
Hidetoshi Seto | 0cf55e1 | 2009-12-02 17:28:07 +0900 | [diff] [blame] | 4216 | |
Stanislaw Gruszka | e75e863 | 2010-09-14 16:35:14 +0200 | [diff] [blame] | 4217 | temp *= cputime.utime; |
Hidetoshi Seto | 0cf55e1 | 2009-12-02 17:28:07 +0900 | [diff] [blame] | 4218 | do_div(temp, total); |
| 4219 | utime = (cputime_t)temp; |
| 4220 | } else |
| 4221 | utime = rtime; |
| 4222 | |
| 4223 | sig->prev_utime = max(sig->prev_utime, utime); |
| 4224 | sig->prev_stime = max(sig->prev_stime, |
| 4225 | cputime_sub(rtime, sig->prev_utime)); |
| 4226 | |
| 4227 | *ut = sig->prev_utime; |
| 4228 | *st = sig->prev_stime; |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 4229 | } |
| 4230 | #endif |
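/*
 * Illustrative sketch, not part of sched.c: the caller-side locking
 * contract noted above for thread_group_times(), simplified from the
 * sys_times() path in kernel/sys.c.
 */
static void thread_group_times_usage(struct task_struct *p,
				     cputime_t *ut, cputime_t *st)
{
	spin_lock_irq(&p->sighand->siglock);
	thread_group_times(p, ut, st);
	spin_unlock_irq(&p->sighand->siglock);
}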
| 4231 | |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 4232 | /* |
Christoph Lameter | 7835b98 | 2006-12-10 02:20:22 -0800 | [diff] [blame] | 4233 | * This function gets called by the timer code, with HZ frequency. |
| 4234 | * We call it with interrupts disabled. |
Christoph Lameter | 7835b98 | 2006-12-10 02:20:22 -0800 | [diff] [blame] | 4235 | */ |
| 4236 | void scheduler_tick(void) |
| 4237 | { |
Christoph Lameter | 7835b98 | 2006-12-10 02:20:22 -0800 | [diff] [blame] | 4238 | int cpu = smp_processor_id(); |
| 4239 | struct rq *rq = cpu_rq(cpu); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4240 | struct task_struct *curr = rq->curr; |
Peter Zijlstra | 3e51f33 | 2008-05-03 18:29:28 +0200 | [diff] [blame] | 4241 | |
| 4242 | sched_clock_tick(); |
Christoph Lameter | 7835b98 | 2006-12-10 02:20:22 -0800 | [diff] [blame] | 4243 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 4244 | raw_spin_lock(&rq->lock); |
Peter Zijlstra | 3e51f33 | 2008-05-03 18:29:28 +0200 | [diff] [blame] | 4245 | update_rq_clock(rq); |
Venkatesh Pallipadi | fdf3e95 | 2010-05-17 18:14:43 -0700 | [diff] [blame] | 4246 | update_cpu_load_active(rq); |
Peter Zijlstra | fa85ae2 | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 4247 | curr->sched_class->task_tick(rq, curr, 0); |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 4248 | raw_spin_unlock(&rq->lock); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4249 | |
Peter Zijlstra | e9d2b06 | 2010-09-17 11:28:50 +0200 | [diff] [blame] | 4250 | perf_event_task_tick(); |
Peter Zijlstra | e220d2d | 2009-05-23 18:28:55 +0200 | [diff] [blame] | 4251 | |
Christoph Lameter | e418e1c | 2006-12-10 02:20:23 -0800 | [diff] [blame] | 4252 | #ifdef CONFIG_SMP |
Suresh Siddha | 6eb57e0 | 2011-10-03 15:09:01 -0700 | [diff] [blame] | 4253 | rq->idle_balance = idle_cpu(cpu); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4254 | trigger_load_balance(rq, cpu); |
Christoph Lameter | e418e1c | 2006-12-10 02:20:23 -0800 | [diff] [blame] | 4255 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4256 | } |
| 4257 | |
Lai Jiangshan | 132380a | 2009-04-02 14:18:25 +0800 | [diff] [blame] | 4258 | notrace unsigned long get_parent_ip(unsigned long addr) |
Steven Rostedt | 6cd8a4b | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4259 | { |
| 4260 | if (in_lock_functions(addr)) { |
| 4261 | addr = CALLER_ADDR2; |
| 4262 | if (in_lock_functions(addr)) |
| 4263 | addr = CALLER_ADDR3; |
| 4264 | } |
| 4265 | return addr; |
| 4266 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4267 | |
Steven Rostedt | 7e49fcc | 2009-01-22 19:01:40 -0500 | [diff] [blame] | 4268 | #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ |
| 4269 | defined(CONFIG_PREEMPT_TRACER)) |
| 4270 | |
Srinivasa Ds | 4362758 | 2008-02-23 15:24:04 -0800 | [diff] [blame] | 4271 | void __kprobes add_preempt_count(int val) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4272 | { |
Steven Rostedt | 6cd8a4b | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4273 | #ifdef CONFIG_DEBUG_PREEMPT |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4274 | /* |
| 4275 | * Underflow? |
| 4276 | */ |
Ingo Molnar | 9a11b49a | 2006-07-03 00:24:33 -0700 | [diff] [blame] | 4277 | if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) |
| 4278 | return; |
Steven Rostedt | 6cd8a4b | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4279 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4280 | preempt_count() += val; |
Steven Rostedt | 6cd8a4b | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4281 | #ifdef CONFIG_DEBUG_PREEMPT |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4282 | /* |
| 4283 | * Spinlock count overflowing soon? |
| 4284 | */ |
Miguel Ojeda Sandonis | 33859f7 | 2006-12-10 02:20:38 -0800 | [diff] [blame] | 4285 | DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= |
| 4286 | PREEMPT_MASK - 10); |
Steven Rostedt | 6cd8a4b | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4287 | #endif |
| 4288 | if (preempt_count() == val) |
| 4289 | trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4290 | } |
| 4291 | EXPORT_SYMBOL(add_preempt_count); |
| 4292 | |
Srinivasa Ds | 4362758 | 2008-02-23 15:24:04 -0800 | [diff] [blame] | 4293 | void __kprobes sub_preempt_count(int val) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4294 | { |
Steven Rostedt | 6cd8a4b | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4295 | #ifdef CONFIG_DEBUG_PREEMPT |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4296 | /* |
| 4297 | * Underflow? |
| 4298 | */ |
Ingo Molnar | 01e3eb8 | 2009-01-12 13:00:50 +0100 | [diff] [blame] | 4299 | if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) |
Ingo Molnar | 9a11b49a | 2006-07-03 00:24:33 -0700 | [diff] [blame] | 4300 | return; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4301 | /* |
| 4302 | * Is the spinlock portion underflowing? |
| 4303 | */ |
Ingo Molnar | 9a11b49a | 2006-07-03 00:24:33 -0700 | [diff] [blame] | 4304 | if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) && |
| 4305 | !(preempt_count() & PREEMPT_MASK))) |
| 4306 | return; |
Steven Rostedt | 6cd8a4b | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4307 | #endif |
Ingo Molnar | 9a11b49a | 2006-07-03 00:24:33 -0700 | [diff] [blame] | 4308 | |
Steven Rostedt | 6cd8a4b | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4309 | if (preempt_count() == val) |
| 4310 | trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4311 | preempt_count() -= val; |
| 4312 | } |
| 4313 | EXPORT_SYMBOL(sub_preempt_count); |
| 4314 | |
| 4315 | #endif |
| 4316 | |
| 4317 | /* |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4318 | * Print scheduling while atomic bug: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4319 | */ |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4320 | static noinline void __schedule_bug(struct task_struct *prev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4321 | { |
Satyam Sharma | 838225b | 2007-10-24 18:23:50 +0200 | [diff] [blame] | 4322 | struct pt_regs *regs = get_irq_regs(); |
| 4323 | |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 4324 | printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", |
| 4325 | prev->comm, prev->pid, preempt_count()); |
Satyam Sharma | 838225b | 2007-10-24 18:23:50 +0200 | [diff] [blame] | 4326 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4327 | debug_show_held_locks(prev); |
Arjan van de Ven | e21f5b1 | 2008-05-23 09:05:58 -0700 | [diff] [blame] | 4328 | print_modules(); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4329 | if (irqs_disabled()) |
| 4330 | print_irqtrace_events(prev); |
Satyam Sharma | 838225b | 2007-10-24 18:23:50 +0200 | [diff] [blame] | 4331 | |
| 4332 | if (regs) |
| 4333 | show_regs(regs); |
| 4334 | else |
| 4335 | dump_stack(); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4336 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4337 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4338 | /* |
| 4339 | * Various schedule()-time debugging checks and statistics: |
| 4340 | */ |
| 4341 | static inline void schedule_debug(struct task_struct *prev) |
| 4342 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4343 | /* |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 4344 | * Test if we are atomic. Since do_exit() needs to call into |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4345 | * schedule() atomically, we ignore that path for now. |
| 4346 | * Otherwise, whine if we are scheduling when we should not be. |
| 4347 | */ |
Roel Kluin | 3f33a7c | 2008-05-13 23:44:11 +0200 | [diff] [blame] | 4348 | if (unlikely(in_atomic_preempt_off() && !prev->exit_state)) |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4349 | __schedule_bug(prev); |
Paul E. McKenney | b3fbab0 | 2011-05-24 08:31:09 -0700 | [diff] [blame] | 4350 | rcu_sleep_check(); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4351 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4352 | profile_hit(SCHED_PROFILING, __builtin_return_address(0)); |
| 4353 | |
Ingo Molnar | 2d72376 | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 4354 | schedstat_inc(this_rq(), sched_count); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4355 | } |
| 4356 | |
Peter Zijlstra | 6cecd08 | 2009-11-30 13:00:37 +0100 | [diff] [blame] | 4357 | static void put_prev_task(struct rq *rq, struct task_struct *prev) |
Mike Galbraith | df1c99d | 2009-03-10 19:08:11 +0100 | [diff] [blame] | 4358 | { |
Mike Galbraith | 61eadef | 2011-04-29 08:36:50 +0200 | [diff] [blame] | 4359 | if (prev->on_rq || rq->skip_clock_update < 0) |
Mike Galbraith | a64692a | 2010-03-11 17:16:20 +0100 | [diff] [blame] | 4360 | update_rq_clock(rq); |
Peter Zijlstra | 6cecd08 | 2009-11-30 13:00:37 +0100 | [diff] [blame] | 4361 | prev->sched_class->put_prev_task(rq, prev); |
Mike Galbraith | df1c99d | 2009-03-10 19:08:11 +0100 | [diff] [blame] | 4362 | } |
| 4363 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4364 | /* |
| 4365 | * Pick up the highest-prio task: |
| 4366 | */ |
| 4367 | static inline struct task_struct * |
Wang Chen | b67802e | 2009-03-02 13:55:26 +0800 | [diff] [blame] | 4368 | pick_next_task(struct rq *rq) |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4369 | { |
Ingo Molnar | 5522d5d | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 4370 | const struct sched_class *class; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4371 | struct task_struct *p; |
| 4372 | |
| 4373 | /* |
| 4374 | * Optimization: we know that if all tasks are in |
| 4375 | * the fair class we can call that function directly: |
| 4376 | */ |
Paul Turner | 953bfcd | 2011-07-21 09:43:27 -0700 | [diff] [blame] | 4377 | if (likely(rq->nr_running == rq->cfs.h_nr_running)) { |
Ingo Molnar | fb8d472 | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 4378 | p = fair_sched_class.pick_next_task(rq); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4379 | if (likely(p)) |
| 4380 | return p; |
| 4381 | } |
| 4382 | |
Peter Zijlstra | 34f971f | 2010-09-22 13:53:15 +0200 | [diff] [blame] | 4383 | for_each_class(class) { |
Ingo Molnar | fb8d472 | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 4384 | p = class->pick_next_task(rq); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4385 | if (p) |
| 4386 | return p; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4387 | } |
Peter Zijlstra | 34f971f | 2010-09-22 13:53:15 +0200 | [diff] [blame] | 4388 | |
| 4389 | BUG(); /* the idle class will always have a runnable task */ |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4390 | } |
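/*
 * Illustrative note, not part of sched.c: in this era for_each_class()
 * walks stop_sched_class -> rt_sched_class -> fair_sched_class ->
 * idle_sched_class, so stop and real-time tasks are always considered
 * before CFS tasks, and the idle class ends the walk with the per-cpu
 * idle thread as the guaranteed runnable task that the BUG() relies on.
 */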
| 4391 | |
| 4392 | /* |
Thomas Gleixner | c259e01 | 2011-06-22 19:47:00 +0200 | [diff] [blame] | 4393 | * __schedule() is the main scheduler function. |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4394 | */ |
Thomas Gleixner | c259e01 | 2011-06-22 19:47:00 +0200 | [diff] [blame] | 4395 | static void __sched __schedule(void) |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4396 | { |
| 4397 | struct task_struct *prev, *next; |
Harvey Harrison | 67ca7bd | 2008-02-15 09:56:36 -0800 | [diff] [blame] | 4398 | unsigned long *switch_count; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4399 | struct rq *rq; |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 4400 | int cpu; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4401 | |
Peter Zijlstra | ff74334 | 2009-03-13 12:21:26 +0100 | [diff] [blame] | 4402 | need_resched: |
| 4403 | preempt_disable(); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4404 | cpu = smp_processor_id(); |
| 4405 | rq = cpu_rq(cpu); |
Paul E. McKenney | 25502a6 | 2010-04-01 17:37:01 -0700 | [diff] [blame] | 4406 | rcu_note_context_switch(cpu); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4407 | prev = rq->curr; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4408 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4409 | schedule_debug(prev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4410 | |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 4411 | if (sched_feat(HRTICK)) |
Mike Galbraith | f333fdc | 2008-05-12 21:20:55 +0200 | [diff] [blame] | 4412 | hrtick_clear(rq); |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 4413 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 4414 | raw_spin_lock_irq(&rq->lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4415 | |
Oleg Nesterov | 246d86b | 2010-05-19 14:57:11 +0200 | [diff] [blame] | 4416 | switch_count = &prev->nivcsw; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4417 | if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { |
Tejun Heo | 21aa9af | 2010-06-08 21:40:37 +0200 | [diff] [blame] | 4418 | if (unlikely(signal_pending_state(prev->state, prev))) { |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4419 | prev->state = TASK_RUNNING; |
Tejun Heo | 21aa9af | 2010-06-08 21:40:37 +0200 | [diff] [blame] | 4420 | } else { |
Peter Zijlstra | 2acca55 | 2011-04-05 17:23:50 +0200 | [diff] [blame] | 4421 | deactivate_task(rq, prev, DEQUEUE_SLEEP); |
| 4422 | prev->on_rq = 0; |
| 4423 | |
Tejun Heo | 21aa9af | 2010-06-08 21:40:37 +0200 | [diff] [blame] | 4424 | /* |
Peter Zijlstra | 2acca55 | 2011-04-05 17:23:50 +0200 | [diff] [blame] | 4425 | * If a worker went to sleep, notify and ask workqueue |
| 4426 | * whether it wants to wake up a task to maintain |
| 4427 | * concurrency. |
Tejun Heo | 21aa9af | 2010-06-08 21:40:37 +0200 | [diff] [blame] | 4428 | */ |
| 4429 | if (prev->flags & PF_WQ_WORKER) { |
| 4430 | struct task_struct *to_wakeup; |
| 4431 | |
| 4432 | to_wakeup = wq_worker_sleeping(prev, cpu); |
| 4433 | if (to_wakeup) |
| 4434 | try_to_wake_up_local(to_wakeup); |
| 4435 | } |
Tejun Heo | 21aa9af | 2010-06-08 21:40:37 +0200 | [diff] [blame] | 4436 | } |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4437 | switch_count = &prev->nvcsw; |
| 4438 | } |
| 4439 | |
Gregory Haskins | 3f029d3 | 2009-07-29 11:08:47 -0400 | [diff] [blame] | 4440 | pre_schedule(rq, prev); |
Steven Rostedt | f65eda4 | 2008-01-25 21:08:07 +0100 | [diff] [blame] | 4441 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4442 | if (unlikely(!rq->nr_running)) |
| 4443 | idle_balance(cpu, rq); |
| 4444 | |
Mike Galbraith | df1c99d | 2009-03-10 19:08:11 +0100 | [diff] [blame] | 4445 | put_prev_task(rq, prev); |
Wang Chen | b67802e | 2009-03-02 13:55:26 +0800 | [diff] [blame] | 4446 | next = pick_next_task(rq); |
Mike Galbraith | f26f9af | 2010-12-08 11:05:42 +0100 | [diff] [blame] | 4447 | clear_tsk_need_resched(prev); |
| 4448 | rq->skip_clock_update = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4449 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4450 | if (likely(prev != next)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4451 | rq->nr_switches++; |
| 4452 | rq->curr = next; |
| 4453 | ++*switch_count; |
| 4454 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4455 | context_switch(rq, prev, next); /* unlocks the rq */ |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 4456 | /* |
Oleg Nesterov | 246d86b | 2010-05-19 14:57:11 +0200 | [diff] [blame] | 4457 | * The context switch has flipped the stack from under us |
| 4458 | * and restored the local variables which were saved when |
| 4459 | * this task called schedule() in the past. prev == current |
| 4460 | * is still correct, but it can be moved to another cpu/rq. |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 4461 | */ |
| 4462 | cpu = smp_processor_id(); |
| 4463 | rq = cpu_rq(cpu); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4464 | } else |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 4465 | raw_spin_unlock_irq(&rq->lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4466 | |
Gregory Haskins | 3f029d3 | 2009-07-29 11:08:47 -0400 | [diff] [blame] | 4467 | post_schedule(rq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4468 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4469 | preempt_enable_no_resched(); |
Peter Zijlstra | ff74334 | 2009-03-13 12:21:26 +0100 | [diff] [blame] | 4470 | if (need_resched()) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4471 | goto need_resched; |
| 4472 | } |
Thomas Gleixner | c259e01 | 2011-06-22 19:47:00 +0200 | [diff] [blame] | 4473 | |
Thomas Gleixner | 9c40cef | 2011-06-22 19:47:01 +0200 | [diff] [blame] | 4474 | static inline void sched_submit_work(struct task_struct *tsk) |
| 4475 | { |
| 4476 | if (!tsk->state) |
| 4477 | return; |
| 4478 | /* |
| 4479 | * If we are going to sleep and we have plugged IO queued, |
| 4480 | * make sure to submit it to avoid deadlocks. |
| 4481 | */ |
| 4482 | if (blk_needs_flush_plug(tsk)) |
| 4483 | blk_schedule_flush_plug(tsk); |
| 4484 | } |
| 4485 | |
Simon Kirby | 6ebbe7a | 2011-09-22 17:03:46 -0700 | [diff] [blame] | 4486 | asmlinkage void __sched schedule(void) |
Thomas Gleixner | c259e01 | 2011-06-22 19:47:00 +0200 | [diff] [blame] | 4487 | { |
Thomas Gleixner | 9c40cef | 2011-06-22 19:47:01 +0200 | [diff] [blame] | 4488 | struct task_struct *tsk = current; |
| 4489 | |
| 4490 | sched_submit_work(tsk); |
Thomas Gleixner | c259e01 | 2011-06-22 19:47:00 +0200 | [diff] [blame] | 4491 | __schedule(); |
| 4492 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4493 | EXPORT_SYMBOL(schedule); |
| 4494 | |
Frederic Weisbecker | c08f782 | 2009-12-02 20:49:17 +0100 | [diff] [blame] | 4495 | #ifdef CONFIG_MUTEX_SPIN_ON_OWNER |
Peter Zijlstra | c6eb3dd | 2011-04-05 17:23:41 +0200 | [diff] [blame] | 4496 | |
| 4497 | static inline bool owner_running(struct mutex *lock, struct task_struct *owner) |
| 4498 | { |
Peter Zijlstra | c6eb3dd | 2011-04-05 17:23:41 +0200 | [diff] [blame] | 4499 | if (lock->owner != owner) |
Thomas Gleixner | 307bf98 | 2011-06-10 15:08:55 +0200 | [diff] [blame] | 4500 | return false; |
Peter Zijlstra | c6eb3dd | 2011-04-05 17:23:41 +0200 | [diff] [blame] | 4501 | |
| 4502 | /* |
 | 4503 | * Ensure we emit the owner->on_cpu dereference _after_ checking that |
 | 4504 | * lock->owner still matches owner. If that fails, owner might point |
 | 4505 | * to free()d memory; if it still matches, the rcu_read_lock() |
 | 4506 | * ensures the memory stays valid. |
| 4507 | */ |
| 4508 | barrier(); |
| 4509 | |
Thomas Gleixner | 307bf98 | 2011-06-10 15:08:55 +0200 | [diff] [blame] | 4510 | return owner->on_cpu; |
Peter Zijlstra | c6eb3dd | 2011-04-05 17:23:41 +0200 | [diff] [blame] | 4511 | } |
| 4512 | |
Peter Zijlstra | 0d66bf6 | 2009-01-12 14:01:47 +0100 | [diff] [blame] | 4513 | /* |
| 4514 | * Look out! "owner" is an entirely speculative pointer |
| 4515 | * access and not reliable. |
| 4516 | */ |
Peter Zijlstra | c6eb3dd | 2011-04-05 17:23:41 +0200 | [diff] [blame] | 4517 | int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner) |
Peter Zijlstra | 0d66bf6 | 2009-01-12 14:01:47 +0100 | [diff] [blame] | 4518 | { |
Peter Zijlstra | 0d66bf6 | 2009-01-12 14:01:47 +0100 | [diff] [blame] | 4519 | if (!sched_feat(OWNER_SPIN)) |
| 4520 | return 0; |
| 4521 | |
Thomas Gleixner | 307bf98 | 2011-06-10 15:08:55 +0200 | [diff] [blame] | 4522 | rcu_read_lock(); |
Peter Zijlstra | c6eb3dd | 2011-04-05 17:23:41 +0200 | [diff] [blame] | 4523 | while (owner_running(lock, owner)) { |
| 4524 | if (need_resched()) |
Thomas Gleixner | 307bf98 | 2011-06-10 15:08:55 +0200 | [diff] [blame] | 4525 | break; |
Peter Zijlstra | 0d66bf6 | 2009-01-12 14:01:47 +0100 | [diff] [blame] | 4526 | |
Gerald Schaefer | 335d7af | 2010-11-22 15:47:36 +0100 | [diff] [blame] | 4527 | arch_mutex_cpu_relax(); |
Peter Zijlstra | 0d66bf6 | 2009-01-12 14:01:47 +0100 | [diff] [blame] | 4528 | } |
Thomas Gleixner | 307bf98 | 2011-06-10 15:08:55 +0200 | [diff] [blame] | 4529 | rcu_read_unlock(); |
Benjamin Herrenschmidt | 4b40221 | 2010-04-16 23:20:00 +0200 | [diff] [blame] | 4530 | |
Peter Zijlstra | c6eb3dd | 2011-04-05 17:23:41 +0200 | [diff] [blame] | 4531 | /* |
Thomas Gleixner | 307bf98 | 2011-06-10 15:08:55 +0200 | [diff] [blame] | 4532 | * We break out of the loop above on need_resched() and when the |
 | 4533 | * owner changes, which is a sign of heavy contention. Return |
 | 4534 | * success only when lock->owner is NULL. |
Peter Zijlstra | c6eb3dd | 2011-04-05 17:23:41 +0200 | [diff] [blame] | 4535 | */ |
Thomas Gleixner | 307bf98 | 2011-06-10 15:08:55 +0200 | [diff] [blame] | 4536 | return lock->owner == NULL; |
Peter Zijlstra | 0d66bf6 | 2009-01-12 14:01:47 +0100 | [diff] [blame] | 4537 | } |
| 4538 | #endif |
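/*
 * Illustrative sketch, not part of sched.c: roughly how the mutex
 * slowpath in kernel/mutex.c uses mutex_spin_on_owner(). Simplified;
 * the real loop re-reads the owner and retries before giving up and
 * sleeping.
 */
static int mutex_optimistic_spin_sketch(struct mutex *lock)
{
	struct task_struct *owner = ACCESS_ONCE(lock->owner);

	/* Spin only while the current owner is running on a cpu. */
	if (owner && !mutex_spin_on_owner(lock, owner))
		return 0;	/* owner blocked (or need_resched): go sleep */

	/* Lock looks free or was just released: try to take it, 1 -> 0. */
	return atomic_cmpxchg(&lock->count, 1, 0) == 1;
}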
| 4539 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4540 | #ifdef CONFIG_PREEMPT |
| 4541 | /* |
Andreas Mohr | 2ed6e34 | 2006-07-10 04:43:52 -0700 | [diff] [blame] | 4542 | * this is the entry point to schedule() from in-kernel preemption |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 4543 | * off of preempt_enable. Kernel preemptions off of return-from-interrupt |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4544 | * occur in preempt_schedule_irq() below, which calls schedule directly. |
| 4545 | */ |
Steven Rostedt | d1f74e2 | 2010-06-02 21:52:29 -0400 | [diff] [blame] | 4546 | asmlinkage void __sched notrace preempt_schedule(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4547 | { |
| 4548 | struct thread_info *ti = current_thread_info(); |
Ingo Molnar | 6478d88 | 2008-01-25 21:08:33 +0100 | [diff] [blame] | 4549 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4550 | /* |
| 4551 | * If there is a non-zero preempt_count or interrupts are disabled, |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 4552 | * we do not want to preempt the current task. Just return. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4553 | */ |
Nick Piggin | beed33a | 2006-10-11 01:21:52 -0700 | [diff] [blame] | 4554 | if (likely(ti->preempt_count || irqs_disabled())) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4555 | return; |
| 4556 | |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4557 | do { |
Steven Rostedt | d1f74e2 | 2010-06-02 21:52:29 -0400 | [diff] [blame] | 4558 | add_preempt_count_notrace(PREEMPT_ACTIVE); |
Thomas Gleixner | c259e01 | 2011-06-22 19:47:00 +0200 | [diff] [blame] | 4559 | __schedule(); |
Steven Rostedt | d1f74e2 | 2010-06-02 21:52:29 -0400 | [diff] [blame] | 4560 | sub_preempt_count_notrace(PREEMPT_ACTIVE); |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4561 | |
| 4562 | /* |
| 4563 | * Check again in case we missed a preemption opportunity |
| 4564 | * between schedule and now. |
| 4565 | */ |
| 4566 | barrier(); |
Lai Jiangshan | 5ed0cec | 2009-03-06 19:40:20 +0800 | [diff] [blame] | 4567 | } while (need_resched()); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4568 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4569 | EXPORT_SYMBOL(preempt_schedule); |
| 4570 | |
| 4571 | /* |
Andreas Mohr | 2ed6e34 | 2006-07-10 04:43:52 -0700 | [diff] [blame] | 4572 | * this is the entry point to schedule() from kernel preemption |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4573 | * off of irq context. |
 | 4574 | * Note that this is called and returns with irqs disabled. This |
 | 4575 | * protects us against recursive calls from irq context. |
| 4576 | */ |
| 4577 | asmlinkage void __sched preempt_schedule_irq(void) |
| 4578 | { |
| 4579 | struct thread_info *ti = current_thread_info(); |
Ingo Molnar | 6478d88 | 2008-01-25 21:08:33 +0100 | [diff] [blame] | 4580 | |
Andreas Mohr | 2ed6e34 | 2006-07-10 04:43:52 -0700 | [diff] [blame] | 4581 | /* Catch callers which need to be fixed */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4582 | BUG_ON(ti->preempt_count || !irqs_disabled()); |
| 4583 | |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4584 | do { |
| 4585 | add_preempt_count(PREEMPT_ACTIVE); |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4586 | local_irq_enable(); |
Thomas Gleixner | c259e01 | 2011-06-22 19:47:00 +0200 | [diff] [blame] | 4587 | __schedule(); |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4588 | local_irq_disable(); |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4589 | sub_preempt_count(PREEMPT_ACTIVE); |
| 4590 | |
| 4591 | /* |
| 4592 | * Check again in case we missed a preemption opportunity |
| 4593 | * between schedule and now. |
| 4594 | */ |
| 4595 | barrier(); |
Lai Jiangshan | 5ed0cec | 2009-03-06 19:40:20 +0800 | [diff] [blame] | 4596 | } while (need_resched()); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4597 | } |
| 4598 | |
| 4599 | #endif /* CONFIG_PREEMPT */ |
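/*
 * Illustrative note, not part of sched.c: preempt_schedule_irq() above
 * is invoked from the architecture interrupt-return path (e.g. the x86
 * entry assembly) when returning to preemptible kernel context with
 * need_resched set; that assembly relies on the irqs-disabled
 * entry/exit contract documented above.
 */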
| 4600 | |
Peter Zijlstra | 63859d4 | 2009-09-15 19:14:42 +0200 | [diff] [blame] | 4601 | int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags, |
Ingo Molnar | 95cdf3b | 2005-09-10 00:26:11 -0700 | [diff] [blame] | 4602 | void *key) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4603 | { |
Peter Zijlstra | 63859d4 | 2009-09-15 19:14:42 +0200 | [diff] [blame] | 4604 | return try_to_wake_up(curr->private, mode, wake_flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4605 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4606 | EXPORT_SYMBOL(default_wake_function); |
| 4607 | |
| 4608 | /* |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 4609 | * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just |
 | 4610 | * wake everything up. If it's an exclusive wakeup (nr_exclusive is a small |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4611 | * positive number) then we wake all the non-exclusive tasks and one exclusive task. |
| 4612 | * |
| 4613 | * There are circumstances in which we can try to wake a task which has already |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 4614 | * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4615 | * zero in this (rare) case, and we handle it by continuing to scan the queue. |
| 4616 | */ |
Johannes Weiner | 78ddb08 | 2009-04-14 16:53:05 +0200 | [diff] [blame] | 4617 | static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, |
Peter Zijlstra | 63859d4 | 2009-09-15 19:14:42 +0200 | [diff] [blame] | 4618 | int nr_exclusive, int wake_flags, void *key) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4619 | { |
Matthias Kaehlcke | 2e45874 | 2007-10-15 17:00:02 +0200 | [diff] [blame] | 4620 | wait_queue_t *curr, *next; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4621 | |
Matthias Kaehlcke | 2e45874 | 2007-10-15 17:00:02 +0200 | [diff] [blame] | 4622 | list_for_each_entry_safe(curr, next, &q->task_list, task_list) { |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 4623 | unsigned flags = curr->flags; |
| 4624 | |
Peter Zijlstra | 63859d4 | 2009-09-15 19:14:42 +0200 | [diff] [blame] | 4625 | if (curr->func(curr, mode, wake_flags, key) && |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 4626 | (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4627 | break; |
| 4628 | } |
| 4629 | } |
| 4630 | |
| 4631 | /** |
| 4632 | * __wake_up - wake up threads blocked on a waitqueue. |
| 4633 | * @q: the waitqueue |
| 4634 | * @mode: which threads |
| 4635 | * @nr_exclusive: how many wake-one or wake-many threads to wake up |
Martin Waitz | 67be2dd | 2005-05-01 08:59:26 -0700 | [diff] [blame] | 4636 | * @key: is directly passed to the wakeup function |
David Howells | 50fa610 | 2009-04-28 15:01:38 +0100 | [diff] [blame] | 4637 | * |
| 4638 | * It may be assumed that this function implies a write memory barrier before |
| 4639 | * changing the task state if and only if any tasks are woken up. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4640 | */ |
Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 4641 | void __wake_up(wait_queue_head_t *q, unsigned int mode, |
Ingo Molnar | 95cdf3b | 2005-09-10 00:26:11 -0700 | [diff] [blame] | 4642 | int nr_exclusive, void *key) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4643 | { |
| 4644 | unsigned long flags; |
| 4645 | |
| 4646 | spin_lock_irqsave(&q->lock, flags); |
| 4647 | __wake_up_common(q, mode, nr_exclusive, 0, key); |
| 4648 | spin_unlock_irqrestore(&q->lock, flags); |
| 4649 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4650 | EXPORT_SYMBOL(__wake_up); |
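/*
 * Illustrative sketch, not part of sched.c: the usual waitqueue pattern
 * that ends up in __wake_up(). The names are made up; wake_up() is the
 * standard wrapper for __wake_up(q, TASK_NORMAL, 1, NULL).
 */
static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static int example_cond;

static void example_waiter(void)
{
	/* Sleeps until example_cond becomes true. */
	wait_event(example_wq, example_cond != 0);
}

static void example_waker(void)
{
	example_cond = 1;
	wake_up(&example_wq);	/* all non-exclusive waiters, plus one exclusive */
}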
| 4651 | |
| 4652 | /* |
| 4653 | * Same as __wake_up but called with the spinlock in wait_queue_head_t held. |
| 4654 | */ |
Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 4655 | void __wake_up_locked(wait_queue_head_t *q, unsigned int mode) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4656 | { |
| 4657 | __wake_up_common(q, mode, 1, 0, NULL); |
| 4658 | } |
Michal Nazarewicz | 22c43c8 | 2010-05-05 12:53:11 +0200 | [diff] [blame] | 4659 | EXPORT_SYMBOL_GPL(__wake_up_locked); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4660 | |
Davide Libenzi | 4ede816 | 2009-03-31 15:24:20 -0700 | [diff] [blame] | 4661 | void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key) |
| 4662 | { |
| 4663 | __wake_up_common(q, mode, 1, 0, key); |
| 4664 | } |
Trond Myklebust | bf294b4 | 2011-02-21 11:05:41 -0800 | [diff] [blame] | 4665 | EXPORT_SYMBOL_GPL(__wake_up_locked_key); |
Davide Libenzi | 4ede816 | 2009-03-31 15:24:20 -0700 | [diff] [blame] | 4666 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4667 | /** |
Davide Libenzi | 4ede816 | 2009-03-31 15:24:20 -0700 | [diff] [blame] | 4668 | * __wake_up_sync_key - wake up threads blocked on a waitqueue. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4669 | * @q: the waitqueue |
| 4670 | * @mode: which threads |
| 4671 | * @nr_exclusive: how many wake-one or wake-many threads to wake up |
Davide Libenzi | 4ede816 | 2009-03-31 15:24:20 -0700 | [diff] [blame] | 4672 | * @key: opaque value to be passed to wakeup targets |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4673 | * |
 | 4674 | * The sync wakeup differs in that the waker knows that it will schedule |
 | 4675 | * away soon, so while the target thread will be woken up, it will not |
 | 4676 | * be migrated to another CPU - i.e. the two threads are 'synchronized' |
| 4677 | * with each other. This can prevent needless bouncing between CPUs. |
| 4678 | * |
| 4679 | * On UP it can prevent extra preemption. |
David Howells | 50fa610 | 2009-04-28 15:01:38 +0100 | [diff] [blame] | 4680 | * |
| 4681 | * It may be assumed that this function implies a write memory barrier before |
| 4682 | * changing the task state if and only if any tasks are woken up. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4683 | */ |
Davide Libenzi | 4ede816 | 2009-03-31 15:24:20 -0700 | [diff] [blame] | 4684 | void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, |
| 4685 | int nr_exclusive, void *key) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4686 | { |
| 4687 | unsigned long flags; |
Peter Zijlstra | 7d47872 | 2009-09-14 19:55:44 +0200 | [diff] [blame] | 4688 | int wake_flags = WF_SYNC; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4689 | |
| 4690 | if (unlikely(!q)) |
| 4691 | return; |
| 4692 | |
| 4693 | if (unlikely(!nr_exclusive)) |
Peter Zijlstra | 7d47872 | 2009-09-14 19:55:44 +0200 | [diff] [blame] | 4694 | wake_flags = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4695 | |
| 4696 | spin_lock_irqsave(&q->lock, flags); |
Peter Zijlstra | 7d47872 | 2009-09-14 19:55:44 +0200 | [diff] [blame] | 4697 | __wake_up_common(q, mode, nr_exclusive, wake_flags, key); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4698 | spin_unlock_irqrestore(&q->lock, flags); |
| 4699 | } |
Davide Libenzi | 4ede816 | 2009-03-31 15:24:20 -0700 | [diff] [blame] | 4700 | EXPORT_SYMBOL_GPL(__wake_up_sync_key); |
| 4701 | |
| 4702 | /* |
| 4703 | * __wake_up_sync - see __wake_up_sync_key() |
| 4704 | */ |
| 4705 | void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) |
| 4706 | { |
| 4707 | __wake_up_sync_key(q, mode, nr_exclusive, NULL); |
| 4708 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4709 | EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */ |
| 4710 | |
Kevin Diggs | 65eb3dc | 2008-08-26 10:26:54 +0200 | [diff] [blame] | 4711 | /** |
| 4712 | * complete: - signals a single thread waiting on this completion |
| 4713 | * @x: holds the state of this particular completion |
| 4714 | * |
| 4715 | * This will wake up a single thread waiting on this completion. Threads will be |
| 4716 | * awakened in the same order in which they were queued. |
| 4717 | * |
| 4718 | * See also complete_all(), wait_for_completion() and related routines. |
David Howells | 50fa610 | 2009-04-28 15:01:38 +0100 | [diff] [blame] | 4719 | * |
| 4720 | * It may be assumed that this function implies a write memory barrier before |
| 4721 | * changing the task state if and only if any tasks are woken up. |
Kevin Diggs | 65eb3dc | 2008-08-26 10:26:54 +0200 | [diff] [blame] | 4722 | */ |
Ingo Molnar | b15136e | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 4723 | void complete(struct completion *x) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4724 | { |
| 4725 | unsigned long flags; |
| 4726 | |
| 4727 | spin_lock_irqsave(&x->wait.lock, flags); |
| 4728 | x->done++; |
Matthew Wilcox | d9514f6 | 2007-12-06 11:07:07 -0500 | [diff] [blame] | 4729 | __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4730 | spin_unlock_irqrestore(&x->wait.lock, flags); |
| 4731 | } |
| 4732 | EXPORT_SYMBOL(complete); |
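
/*
 * Usage sketch (illustrative; 'setup_done' is a hypothetical name): the
 * classic one-shot handshake. The waiting side does
 *
 *	static DECLARE_COMPLETION(setup_done);
 *
 *	wait_for_completion(&setup_done);
 *
 * and the signaling context, once the event has occurred, does
 *
 *	complete(&setup_done);
 */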
| 4733 | |
Kevin Diggs | 65eb3dc | 2008-08-26 10:26:54 +0200 | [diff] [blame] | 4734 | /** |
| 4735 |  * complete_all - signals all threads waiting on this completion
| 4736 | * @x: holds the state of this particular completion |
| 4737 | * |
| 4738 | * This will wake up all threads waiting on this particular completion event. |
David Howells | 50fa610 | 2009-04-28 15:01:38 +0100 | [diff] [blame] | 4739 | * |
| 4740 | * It may be assumed that this function implies a write memory barrier before |
| 4741 | * changing the task state if and only if any tasks are woken up. |
Kevin Diggs | 65eb3dc | 2008-08-26 10:26:54 +0200 | [diff] [blame] | 4742 | */ |
Ingo Molnar | b15136e | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 4743 | void complete_all(struct completion *x) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4744 | { |
| 4745 | unsigned long flags; |
| 4746 | |
| 4747 | spin_lock_irqsave(&x->wait.lock, flags); |
| 4748 | x->done += UINT_MAX/2; |
Matthew Wilcox | d9514f6 | 2007-12-06 11:07:07 -0500 | [diff] [blame] | 4749 | __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4750 | spin_unlock_irqrestore(&x->wait.lock, flags); |
| 4751 | } |
| 4752 | EXPORT_SYMBOL(complete_all); |
| 4753 | |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4754 | static inline long __sched |
| 4755 | do_wait_for_common(struct completion *x, long timeout, int state) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4756 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4757 | if (!x->done) { |
| 4758 | DECLARE_WAITQUEUE(wait, current); |
| 4759 | |
Changli Gao | a93d2f1 | 2010-05-07 14:33:26 +0800 | [diff] [blame] | 4760 | __add_wait_queue_tail_exclusive(&x->wait, &wait); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4761 | do { |
Oleg Nesterov | 94d3d82 | 2008-08-20 16:54:41 -0700 | [diff] [blame] | 4762 | if (signal_pending_state(state, current)) { |
Oleg Nesterov | ea71a54 | 2008-06-20 18:32:20 +0400 | [diff] [blame] | 4763 | timeout = -ERESTARTSYS; |
| 4764 | break; |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4765 | } |
| 4766 | __set_current_state(state); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4767 | spin_unlock_irq(&x->wait.lock); |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4768 | timeout = schedule_timeout(timeout); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4769 | spin_lock_irq(&x->wait.lock); |
Oleg Nesterov | ea71a54 | 2008-06-20 18:32:20 +0400 | [diff] [blame] | 4770 | } while (!x->done && timeout); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4771 | __remove_wait_queue(&x->wait, &wait); |
Oleg Nesterov | ea71a54 | 2008-06-20 18:32:20 +0400 | [diff] [blame] | 4772 | if (!x->done) |
| 4773 | return timeout; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4774 | } |
| 4775 | x->done--; |
Oleg Nesterov | ea71a54 | 2008-06-20 18:32:20 +0400 | [diff] [blame] | 4776 | return timeout ?: 1; |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4777 | } |
| 4778 | |
| 4779 | static long __sched |
| 4780 | wait_for_common(struct completion *x, long timeout, int state) |
| 4781 | { |
| 4782 | might_sleep(); |
| 4783 | |
| 4784 | spin_lock_irq(&x->wait.lock); |
| 4785 | timeout = do_wait_for_common(x, timeout, state); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4786 | spin_unlock_irq(&x->wait.lock); |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4787 | return timeout; |
| 4788 | } |
| 4789 | |
Kevin Diggs | 65eb3dc | 2008-08-26 10:26:54 +0200 | [diff] [blame] | 4790 | /** |
| 4791 |  * wait_for_completion - waits for completion of a task
| 4792 | * @x: holds the state of this particular completion |
| 4793 | * |
| 4794 | * This waits to be signaled for completion of a specific task. It is NOT |
| 4795 | * interruptible and there is no timeout. |
| 4796 | * |
| 4797 | * See also similar routines (i.e. wait_for_completion_timeout()) with timeout |
| 4798 | * and interrupt capability. Also see complete(). |
| 4799 | */ |
Ingo Molnar | b15136e | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 4800 | void __sched wait_for_completion(struct completion *x) |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4801 | { |
| 4802 | wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4803 | } |
| 4804 | EXPORT_SYMBOL(wait_for_completion); |
| 4805 | |
Kevin Diggs | 65eb3dc | 2008-08-26 10:26:54 +0200 | [diff] [blame] | 4806 | /** |
| 4807 |  * wait_for_completion_timeout - waits for completion of a task (w/timeout)
| 4808 | * @x: holds the state of this particular completion |
| 4809 | * @timeout: timeout value in jiffies |
| 4810 | * |
| 4811 | * This waits for either a completion of a specific task to be signaled or for a |
| 4812 | * specified timeout to expire. The timeout is in jiffies. It is not |
| 4813 | * interruptible. |
J. Bruce Fields | c6dc7f0 | 2011-10-06 15:22:46 -0400 | [diff] [blame] | 4814 | * |
| 4815 | * The return value is 0 if timed out, and positive (at least 1, or number of |
| 4816 | * jiffies left till timeout) if completed. |
Kevin Diggs | 65eb3dc | 2008-08-26 10:26:54 +0200 | [diff] [blame] | 4817 | */ |
Ingo Molnar | b15136e | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 4818 | unsigned long __sched |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4819 | wait_for_completion_timeout(struct completion *x, unsigned long timeout) |
| 4820 | { |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4821 | return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4822 | } |
| 4823 | EXPORT_SYMBOL(wait_for_completion_timeout); |
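
/*
 * Usage sketch (illustrative; handle_timeout() is a hypothetical error
 * path): bound the wait and check the returned jiffies count for expiry:
 *
 *	unsigned long left;
 *
 *	left = wait_for_completion_timeout(&done, msecs_to_jiffies(100));
 *	if (!left)
 *		handle_timeout();
 */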
| 4824 | |
Kevin Diggs | 65eb3dc | 2008-08-26 10:26:54 +0200 | [diff] [blame] | 4825 | /** |
| 4826 |  * wait_for_completion_interruptible - waits for completion of a task (w/intr)
| 4827 | * @x: holds the state of this particular completion |
| 4828 | * |
| 4829 | * This waits for completion of a specific task to be signaled. It is |
| 4830 | * interruptible. |
J. Bruce Fields | c6dc7f0 | 2011-10-06 15:22:46 -0400 | [diff] [blame] | 4831 | * |
| 4832 | * The return value is -ERESTARTSYS if interrupted, 0 if completed. |
Kevin Diggs | 65eb3dc | 2008-08-26 10:26:54 +0200 | [diff] [blame] | 4833 | */ |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4834 | int __sched wait_for_completion_interruptible(struct completion *x) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4835 | { |
Andi Kleen | 51e9799 | 2007-10-18 21:32:55 +0200 | [diff] [blame] | 4836 | long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE); |
| 4837 | if (t == -ERESTARTSYS) |
| 4838 | return t; |
| 4839 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4840 | } |
| 4841 | EXPORT_SYMBOL(wait_for_completion_interruptible); |
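
/*
 * Usage sketch (illustrative): a syscall path would typically propagate
 * the -ERESTARTSYS result instead of swallowing the signal:
 *
 *	int err;
 *
 *	err = wait_for_completion_interruptible(&done);
 *	if (err)
 *		return err;
 */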
| 4842 | |
Kevin Diggs | 65eb3dc | 2008-08-26 10:26:54 +0200 | [diff] [blame] | 4843 | /** |
| 4844 |  * wait_for_completion_interruptible_timeout - waits for completion (w/(to,intr))
| 4845 | * @x: holds the state of this particular completion |
| 4846 | * @timeout: timeout value in jiffies |
| 4847 | * |
| 4848 | * This waits for either a completion of a specific task to be signaled or for a |
| 4849 | * specified timeout to expire. It is interruptible. The timeout is in jiffies. |
J. Bruce Fields | c6dc7f0 | 2011-10-06 15:22:46 -0400 | [diff] [blame] | 4850 | * |
| 4851 | * The return value is -ERESTARTSYS if interrupted, 0 if timed out, |
| 4852 | * positive (at least 1, or number of jiffies left till timeout) if completed. |
Kevin Diggs | 65eb3dc | 2008-08-26 10:26:54 +0200 | [diff] [blame] | 4853 | */ |
NeilBrown | 6bf4123 | 2011-01-05 12:50:16 +1100 | [diff] [blame] | 4854 | long __sched |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4855 | wait_for_completion_interruptible_timeout(struct completion *x, |
| 4856 | unsigned long timeout) |
| 4857 | { |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4858 | return wait_for_common(x, timeout, TASK_INTERRUPTIBLE); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4859 | } |
| 4860 | EXPORT_SYMBOL(wait_for_completion_interruptible_timeout); |
| 4861 | |
Kevin Diggs | 65eb3dc | 2008-08-26 10:26:54 +0200 | [diff] [blame] | 4862 | /** |
| 4863 |  * wait_for_completion_killable - waits for completion of a task (killable)
| 4864 | * @x: holds the state of this particular completion |
| 4865 | * |
| 4866 | * This waits to be signaled for completion of a specific task. It can be |
| 4867 | * interrupted by a kill signal. |
J. Bruce Fields | c6dc7f0 | 2011-10-06 15:22:46 -0400 | [diff] [blame] | 4868 | * |
| 4869 | * The return value is -ERESTARTSYS if interrupted, 0 if completed. |
Kevin Diggs | 65eb3dc | 2008-08-26 10:26:54 +0200 | [diff] [blame] | 4870 | */ |
Matthew Wilcox | 009e577 | 2007-12-06 12:29:54 -0500 | [diff] [blame] | 4871 | int __sched wait_for_completion_killable(struct completion *x) |
| 4872 | { |
| 4873 | long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE); |
| 4874 | if (t == -ERESTARTSYS) |
| 4875 | return t; |
| 4876 | return 0; |
| 4877 | } |
| 4878 | EXPORT_SYMBOL(wait_for_completion_killable); |
| 4879 | |
Dave Chinner | be4de35 | 2008-08-15 00:40:44 -0700 | [diff] [blame] | 4880 | /** |
Sage Weil | 0aa12fb | 2010-05-29 09:12:30 -0700 | [diff] [blame] | 4881 |  * wait_for_completion_killable_timeout - waits for completion of a task (w/(to,killable))
| 4882 | * @x: holds the state of this particular completion |
| 4883 | * @timeout: timeout value in jiffies |
| 4884 | * |
| 4885 | * This waits for either a completion of a specific task to be |
| 4886 | * signaled or for a specified timeout to expire. It can be |
| 4887 | * interrupted by a kill signal. The timeout is in jiffies. |
J. Bruce Fields | c6dc7f0 | 2011-10-06 15:22:46 -0400 | [diff] [blame] | 4888 | * |
| 4889 | * The return value is -ERESTARTSYS if interrupted, 0 if timed out, |
| 4890 | * positive (at least 1, or number of jiffies left till timeout) if completed. |
Sage Weil | 0aa12fb | 2010-05-29 09:12:30 -0700 | [diff] [blame] | 4891 | */ |
NeilBrown | 6bf4123 | 2011-01-05 12:50:16 +1100 | [diff] [blame] | 4892 | long __sched |
Sage Weil | 0aa12fb | 2010-05-29 09:12:30 -0700 | [diff] [blame] | 4893 | wait_for_completion_killable_timeout(struct completion *x, |
| 4894 | unsigned long timeout) |
| 4895 | { |
| 4896 | return wait_for_common(x, timeout, TASK_KILLABLE); |
| 4897 | } |
| 4898 | EXPORT_SYMBOL(wait_for_completion_killable_timeout); |
| 4899 | |
| 4900 | /** |
Dave Chinner | be4de35 | 2008-08-15 00:40:44 -0700 | [diff] [blame] | 4901 | * try_wait_for_completion - try to decrement a completion without blocking |
| 4902 | * @x: completion structure |
| 4903 | * |
| 4904 | * Returns: 0 if a decrement cannot be done without blocking |
| 4905 | * 1 if a decrement succeeded. |
| 4906 | * |
| 4907 | * If a completion is being used as a counting completion, |
| 4908 | * attempt to decrement the counter without blocking. This |
| 4909 | * enables us to avoid waiting if the resource the completion |
| 4910 | * is protecting is not available. |
| 4911 | */ |
| 4912 | bool try_wait_for_completion(struct completion *x) |
| 4913 | { |
Rafael J. Wysocki | 7539a3b | 2009-12-13 00:07:30 +0100 | [diff] [blame] | 4914 | unsigned long flags; |
Dave Chinner | be4de35 | 2008-08-15 00:40:44 -0700 | [diff] [blame] | 4915 | int ret = 1; |
| 4916 | |
Rafael J. Wysocki | 7539a3b | 2009-12-13 00:07:30 +0100 | [diff] [blame] | 4917 | spin_lock_irqsave(&x->wait.lock, flags); |
Dave Chinner | be4de35 | 2008-08-15 00:40:44 -0700 | [diff] [blame] | 4918 | if (!x->done) |
| 4919 | ret = 0; |
| 4920 | else |
| 4921 | x->done--; |
Rafael J. Wysocki | 7539a3b | 2009-12-13 00:07:30 +0100 | [diff] [blame] | 4922 | spin_unlock_irqrestore(&x->wait.lock, flags); |
Dave Chinner | be4de35 | 2008-08-15 00:40:44 -0700 | [diff] [blame] | 4923 | return ret; |
| 4924 | } |
| 4925 | EXPORT_SYMBOL(try_wait_for_completion); |
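
/*
 * Usage sketch (illustrative; both helpers are hypothetical): when the
 * completion counts a resource, take the fast path if a count is
 * available and defer otherwise, without ever blocking:
 *
 *	if (try_wait_for_completion(&resource_ready))
 *		use_resource();
 *	else
 *		defer_work();
 */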
| 4926 | |
| 4927 | /** |
| 4928 | * completion_done - Test to see if a completion has any waiters |
| 4929 | * @x: completion structure |
| 4930 | * |
| 4931 | * Returns: 0 if there are waiters (wait_for_completion() in progress) |
| 4932 | * 1 if there are no waiters. |
| 4933 | * |
| 4934 | */ |
| 4935 | bool completion_done(struct completion *x) |
| 4936 | { |
Rafael J. Wysocki | 7539a3b | 2009-12-13 00:07:30 +0100 | [diff] [blame] | 4937 | unsigned long flags; |
Dave Chinner | be4de35 | 2008-08-15 00:40:44 -0700 | [diff] [blame] | 4938 | int ret = 1; |
| 4939 | |
Rafael J. Wysocki | 7539a3b | 2009-12-13 00:07:30 +0100 | [diff] [blame] | 4940 | spin_lock_irqsave(&x->wait.lock, flags); |
Dave Chinner | be4de35 | 2008-08-15 00:40:44 -0700 | [diff] [blame] | 4941 | if (!x->done) |
| 4942 | ret = 0; |
Rafael J. Wysocki | 7539a3b | 2009-12-13 00:07:30 +0100 | [diff] [blame] | 4943 | spin_unlock_irqrestore(&x->wait.lock, flags); |
Dave Chinner | be4de35 | 2008-08-15 00:40:44 -0700 | [diff] [blame] | 4944 | return ret; |
| 4945 | } |
| 4946 | EXPORT_SYMBOL(completion_done); |
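
/*
 * Usage sketch (illustrative; 'obj' and its layout are hypothetical): an
 * owner can use this to check that no wait_for_completion() is still in
 * progress on the embedded completion before tearing the object down:
 *
 *	if (completion_done(&obj->done))
 *		kfree(obj);
 */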
| 4947 | |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4948 | static long __sched |
| 4949 | sleep_on_common(wait_queue_head_t *q, int state, long timeout) |
Ingo Molnar | 0fec171 | 2007-07-09 18:52:01 +0200 | [diff] [blame] | 4950 | { |
| 4951 | unsigned long flags; |
| 4952 | wait_queue_t wait; |
| 4953 | |
| 4954 | init_waitqueue_entry(&wait, current); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4955 | |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4956 | __set_current_state(state); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4957 | |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4958 | spin_lock_irqsave(&q->lock, flags); |
| 4959 | __add_wait_queue(q, &wait); |
| 4960 | spin_unlock(&q->lock); |
| 4961 | timeout = schedule_timeout(timeout); |
| 4962 | spin_lock_irq(&q->lock); |
| 4963 | __remove_wait_queue(q, &wait); |
| 4964 | spin_unlock_irqrestore(&q->lock, flags); |
| 4965 | |
| 4966 | return timeout; |
| 4967 | } |
| 4968 | |
| 4969 | void __sched interruptible_sleep_on(wait_queue_head_t *q) |
| 4970 | { |
| 4971 | sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4972 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4973 | EXPORT_SYMBOL(interruptible_sleep_on); |
| 4974 | |
Ingo Molnar | 0fec171 | 2007-07-09 18:52:01 +0200 | [diff] [blame] | 4975 | long __sched |
Ingo Molnar | 95cdf3b | 2005-09-10 00:26:11 -0700 | [diff] [blame] | 4976 | interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4977 | { |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4978 | return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4979 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4980 | EXPORT_SYMBOL(interruptible_sleep_on_timeout); |
| 4981 | |
Ingo Molnar | 0fec171 | 2007-07-09 18:52:01 +0200 | [diff] [blame] | 4982 | void __sched sleep_on(wait_queue_head_t *q) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4983 | { |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4984 | sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4985 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4986 | EXPORT_SYMBOL(sleep_on); |
| 4987 | |
Ingo Molnar | 0fec171 | 2007-07-09 18:52:01 +0200 | [diff] [blame] | 4988 | long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4989 | { |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4990 | return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4991 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4992 | EXPORT_SYMBOL(sleep_on_timeout); |
| 4993 | |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 4994 | #ifdef CONFIG_RT_MUTEXES |
| 4995 | |
| 4996 | /* |
| 4997 | * rt_mutex_setprio - set the current priority of a task |
| 4998 | * @p: task |
| 4999 | * @prio: prio value (kernel-internal form) |
| 5000 | * |
| 5001 | * This function changes the 'effective' priority of a task. It does |
| 5002 | * not touch ->normal_prio like __setscheduler(). |
| 5003 | * |
| 5004 | * Used by the rt_mutex code to implement priority inheritance logic. |
| 5005 | */ |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 5006 | void rt_mutex_setprio(struct task_struct *p, int prio) |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 5007 | { |
Srivatsa Vaddagiri | 83b699e | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 5008 | int oldprio, on_rq, running; |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 5009 | struct rq *rq; |
Thomas Gleixner | 83ab0aa | 2010-02-17 09:05:48 +0100 | [diff] [blame] | 5010 | const struct sched_class *prev_class; |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 5011 | |
| 5012 | BUG_ON(prio < 0 || prio > MAX_PRIO); |
| 5013 | |
Peter Zijlstra | 0122ec5 | 2011-04-05 17:23:51 +0200 | [diff] [blame] | 5014 | rq = __task_rq_lock(p); |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 5015 | |
Steven Rostedt | a802707 | 2010-09-20 15:13:34 -0400 | [diff] [blame] | 5016 | trace_sched_pi_setprio(p, prio); |
Andrew Morton | d5f9f94 | 2007-05-08 20:27:06 -0700 | [diff] [blame] | 5017 | oldprio = p->prio; |
Thomas Gleixner | 83ab0aa | 2010-02-17 09:05:48 +0100 | [diff] [blame] | 5018 | prev_class = p->sched_class; |
Peter Zijlstra | fd2f441 | 2011-04-05 17:23:44 +0200 | [diff] [blame] | 5019 | on_rq = p->on_rq; |
Dmitry Adamushko | 051a1d1 | 2007-12-18 15:21:13 +0100 | [diff] [blame] | 5020 | running = task_current(rq, p); |
Hiroshi Shimamoto | 0e1f348 | 2008-03-10 11:01:20 -0700 | [diff] [blame] | 5021 | if (on_rq) |
Ingo Molnar | 69be72c | 2007-08-09 11:16:49 +0200 | [diff] [blame] | 5022 | dequeue_task(rq, p, 0); |
Hiroshi Shimamoto | 0e1f348 | 2008-03-10 11:01:20 -0700 | [diff] [blame] | 5023 | if (running) |
| 5024 | p->sched_class->put_prev_task(rq, p); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 5025 | |
| 5026 | if (rt_prio(prio)) |
| 5027 | p->sched_class = &rt_sched_class; |
| 5028 | else |
| 5029 | p->sched_class = &fair_sched_class; |
| 5030 | |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 5031 | p->prio = prio; |
| 5032 | |
Hiroshi Shimamoto | 0e1f348 | 2008-03-10 11:01:20 -0700 | [diff] [blame] | 5033 | if (running) |
| 5034 | p->sched_class->set_curr_task(rq); |
Peter Zijlstra | da7a735 | 2011-01-17 17:03:27 +0100 | [diff] [blame] | 5035 | if (on_rq) |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 5036 | enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0); |
Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 5037 | |
Peter Zijlstra | da7a735 | 2011-01-17 17:03:27 +0100 | [diff] [blame] | 5038 | check_class_changed(rq, p, prev_class, oldprio); |
Peter Zijlstra | 0122ec5 | 2011-04-05 17:23:51 +0200 | [diff] [blame] | 5039 | __task_rq_unlock(rq); |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 5040 | } |
| 5041 | |
| 5042 | #endif |
| 5043 | |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 5044 | void set_user_nice(struct task_struct *p, long nice) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5045 | { |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 5046 | int old_prio, delta, on_rq; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5047 | unsigned long flags; |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 5048 | struct rq *rq; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5049 | |
| 5050 | if (TASK_NICE(p) == nice || nice < -20 || nice > 19) |
| 5051 | return; |
| 5052 | /* |
| 5053 |  * We have to be careful: if called from sys_setpriority(),
| 5054 | * the task might be in the middle of scheduling on another CPU. |
| 5055 | */ |
| 5056 | rq = task_rq_lock(p, &flags); |
| 5057 | /* |
| 5058 | * The RT priorities are set via sched_setscheduler(), but we still |
| 5059 | * allow the 'normal' nice value to be set - but as expected |
| 5060 |  * it won't have any effect on scheduling until the task leaves
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 5061 | * SCHED_FIFO/SCHED_RR: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5062 | */ |
Ingo Molnar | e05606d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 5063 | if (task_has_rt_policy(p)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5064 | p->static_prio = NICE_TO_PRIO(nice); |
| 5065 | goto out_unlock; |
| 5066 | } |
Peter Zijlstra | fd2f441 | 2011-04-05 17:23:44 +0200 | [diff] [blame] | 5067 | on_rq = p->on_rq; |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 5068 | if (on_rq) |
Ingo Molnar | 69be72c | 2007-08-09 11:16:49 +0200 | [diff] [blame] | 5069 | dequeue_task(rq, p, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5070 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5071 | p->static_prio = NICE_TO_PRIO(nice); |
Peter Williams | 2dd73a4 | 2006-06-27 02:54:34 -0700 | [diff] [blame] | 5072 | set_load_weight(p); |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 5073 | old_prio = p->prio; |
| 5074 | p->prio = effective_prio(p); |
| 5075 | delta = p->prio - old_prio; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5076 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 5077 | if (on_rq) { |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 5078 | enqueue_task(rq, p, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5079 | /* |
Andrew Morton | d5f9f94 | 2007-05-08 20:27:06 -0700 | [diff] [blame] | 5080 | * If the task increased its priority or is running and |
| 5081 | * lowered its priority, then reschedule its CPU: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5082 | */ |
Andrew Morton | d5f9f94 | 2007-05-08 20:27:06 -0700 | [diff] [blame] | 5083 | if (delta < 0 || (delta > 0 && task_running(rq, p))) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5084 | resched_task(rq->curr); |
| 5085 | } |
| 5086 | out_unlock: |
Peter Zijlstra | 0122ec5 | 2011-04-05 17:23:51 +0200 | [diff] [blame] | 5087 | task_rq_unlock(rq, p, &flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5088 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5089 | EXPORT_SYMBOL(set_user_nice); |
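
/*
 * Usage sketch (illustrative; worker_fn and the nice value of 10 are
 * hypothetical): background kernel threads commonly deprioritize
 * themselves right after creation:
 *
 *	struct task_struct *tsk;
 *
 *	tsk = kthread_create(worker_fn, NULL, "bgworker");
 *	if (!IS_ERR(tsk)) {
 *		set_user_nice(tsk, 10);
 *		wake_up_process(tsk);
 *	}
 */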
| 5090 | |
Matt Mackall | e43379f | 2005-05-01 08:59:00 -0700 | [diff] [blame] | 5091 | /* |
| 5092 | * can_nice - check if a task can reduce its nice value |
| 5093 | * @p: task |
| 5094 | * @nice: nice value |
| 5095 | */ |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 5096 | int can_nice(const struct task_struct *p, const int nice) |
Matt Mackall | e43379f | 2005-05-01 08:59:00 -0700 | [diff] [blame] | 5097 | { |
Matt Mackall | 024f474 | 2005-08-18 11:24:19 -0700 | [diff] [blame] | 5098 | /* convert nice value [19,-20] to rlimit style value [1,40] */ |
| 5099 | int nice_rlim = 20 - nice; |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 5100 | |
Jiri Slaby | 78d7d40 | 2010-03-05 13:42:54 -0800 | [diff] [blame] | 5101 | return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || |
Matt Mackall | e43379f | 2005-05-01 08:59:00 -0700 | [diff] [blame] | 5102 | capable(CAP_SYS_NICE)); |
| 5103 | } |
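
/*
 * Worked example of the conversion above: lowering nice to -5 maps to
 * the rlimit-style value 20 - (-5) = 25, so the change is allowed only
 * if RLIMIT_NICE >= 25 or the task has CAP_SYS_NICE.
 */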
| 5104 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5105 | #ifdef __ARCH_WANT_SYS_NICE |
| 5106 | |
| 5107 | /* |
| 5108 | * sys_nice - change the priority of the current process. |
| 5109 | * @increment: priority increment |
| 5110 | * |
| 5111 | * sys_setpriority is a more generic, but much slower function that |
| 5112 | * does similar things. |
| 5113 | */ |
Heiko Carstens | 5add95d | 2009-01-14 14:14:08 +0100 | [diff] [blame] | 5114 | SYSCALL_DEFINE1(nice, int, increment) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5115 | { |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 5116 | long nice, retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5117 | |
| 5118 | /* |
| 5119 | * Setpriority might change our priority at the same moment. |
| 5120 | * We don't have to worry. Conceptually one call occurs first |
| 5121 | * and we have a single winner. |
| 5122 | */ |
Matt Mackall | e43379f | 2005-05-01 08:59:00 -0700 | [diff] [blame] | 5123 | if (increment < -40) |
| 5124 | increment = -40; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5125 | if (increment > 40) |
| 5126 | increment = 40; |
| 5127 | |
Américo Wang | 2b8f836 | 2009-02-16 18:54:21 +0800 | [diff] [blame] | 5128 | nice = TASK_NICE(current) + increment; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5129 | if (nice < -20) |
| 5130 | nice = -20; |
| 5131 | if (nice > 19) |
| 5132 | nice = 19; |
| 5133 | |
Matt Mackall | e43379f | 2005-05-01 08:59:00 -0700 | [diff] [blame] | 5134 | if (increment < 0 && !can_nice(current, nice)) |
| 5135 | return -EPERM; |
| 5136 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5137 | retval = security_task_setnice(current, nice); |
| 5138 | if (retval) |
| 5139 | return retval; |
| 5140 | |
| 5141 | set_user_nice(current, nice); |
| 5142 | return 0; |
| 5143 | } |
| 5144 | |
| 5145 | #endif |
| 5146 | |
| 5147 | /** |
| 5148 | * task_prio - return the priority value of a given task. |
| 5149 | * @p: the task in question. |
| 5150 | * |
| 5151 | * This is the priority value as seen by users in /proc. |
| 5152 | * RT tasks are offset by -200. Normal tasks are centered |
| 5153 | * around 0, value goes from -16 to +15. |
| 5154 | */ |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 5155 | int task_prio(const struct task_struct *p) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5156 | { |
| 5157 | return p->prio - MAX_RT_PRIO; |
| 5158 | } |
| 5159 | |
| 5160 | /** |
| 5161 | * task_nice - return the nice value of a given task. |
| 5162 | * @p: the task in question. |
| 5163 | */ |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 5164 | int task_nice(const struct task_struct *p) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5165 | { |
| 5166 | return TASK_NICE(p); |
| 5167 | } |
Pavel Roskin | 150d8be | 2008-03-05 16:56:37 -0500 | [diff] [blame] | 5168 | EXPORT_SYMBOL(task_nice); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5169 | |
| 5170 | /** |
| 5171 | * idle_cpu - is a given cpu idle currently? |
| 5172 | * @cpu: the processor in question. |
| 5173 | */ |
| 5174 | int idle_cpu(int cpu) |
| 5175 | { |
Thomas Gleixner | 908a328 | 2011-09-15 15:32:06 +0200 | [diff] [blame] | 5176 | struct rq *rq = cpu_rq(cpu); |
| 5177 | |
| 5178 | if (rq->curr != rq->idle) |
| 5179 | return 0; |
| 5180 | |
| 5181 | if (rq->nr_running) |
| 5182 | return 0; |
| 5183 | |
| 5184 | #ifdef CONFIG_SMP |
| 5185 | if (!llist_empty(&rq->wake_list)) |
| 5186 | return 0; |
| 5187 | #endif |
| 5188 | |
| 5189 | return 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5190 | } |
| 5191 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5192 | /** |
| 5193 | * idle_task - return the idle task for a given cpu. |
| 5194 | * @cpu: the processor in question. |
| 5195 | */ |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 5196 | struct task_struct *idle_task(int cpu) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5197 | { |
| 5198 | return cpu_rq(cpu)->idle; |
| 5199 | } |
| 5200 | |
| 5201 | /** |
| 5202 | * find_process_by_pid - find a process with a matching PID value. |
| 5203 | * @pid: the pid in question. |
| 5204 | */ |
Alexey Dobriyan | a995744 | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 5205 | static struct task_struct *find_process_by_pid(pid_t pid) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5206 | { |
Pavel Emelyanov | 228ebcb | 2007-10-18 23:40:16 -0700 | [diff] [blame] | 5207 | return pid ? find_task_by_vpid(pid) : current; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5208 | } |
| 5209 | |
| 5210 | /* Actually do priority change: must hold rq lock. */ |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 5211 | static void |
| 5212 | __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5213 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5214 | p->policy = policy; |
| 5215 | p->rt_priority = prio; |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 5216 | p->normal_prio = normal_prio(p); |
| 5217 | /* we are holding p->pi_lock already */ |
| 5218 | p->prio = rt_mutex_getprio(p); |
Peter Zijlstra | ffd44db | 2009-11-10 20:12:01 +0100 | [diff] [blame] | 5219 | if (rt_prio(p->prio)) |
| 5220 | p->sched_class = &rt_sched_class; |
| 5221 | else |
| 5222 | p->sched_class = &fair_sched_class; |
Peter Williams | 2dd73a4 | 2006-06-27 02:54:34 -0700 | [diff] [blame] | 5223 | set_load_weight(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5224 | } |
| 5225 | |
David Howells | c69e8d9 | 2008-11-14 10:39:19 +1100 | [diff] [blame] | 5226 | /* |
| 5227 | * check the target process has a UID that matches the current process's |
| 5228 | */ |
| 5229 | static bool check_same_owner(struct task_struct *p) |
| 5230 | { |
| 5231 | const struct cred *cred = current_cred(), *pcred; |
| 5232 | bool match; |
| 5233 | |
| 5234 | rcu_read_lock(); |
| 5235 | pcred = __task_cred(p); |
Serge E. Hallyn | b0e7759 | 2011-03-23 16:43:24 -0700 | [diff] [blame] | 5236 | if (cred->user->user_ns == pcred->user->user_ns) |
| 5237 | match = (cred->euid == pcred->euid || |
| 5238 | cred->euid == pcred->uid); |
| 5239 | else |
| 5240 | match = false; |
David Howells | c69e8d9 | 2008-11-14 10:39:19 +1100 | [diff] [blame] | 5241 | rcu_read_unlock(); |
| 5242 | return match; |
| 5243 | } |
| 5244 | |
Rusty Russell | 961ccdd | 2008-06-23 13:55:38 +1000 | [diff] [blame] | 5245 | static int __sched_setscheduler(struct task_struct *p, int policy, |
KOSAKI Motohiro | fe7de49 | 2010-10-20 16:01:12 -0700 | [diff] [blame] | 5246 | const struct sched_param *param, bool user) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5247 | { |
Srivatsa Vaddagiri | 83b699e | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 5248 | int retval, oldprio, oldpolicy = -1, on_rq, running; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5249 | unsigned long flags; |
Thomas Gleixner | 83ab0aa | 2010-02-17 09:05:48 +0100 | [diff] [blame] | 5250 | const struct sched_class *prev_class; |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 5251 | struct rq *rq; |
Lennart Poettering | ca94c44 | 2009-06-15 17:17:47 +0200 | [diff] [blame] | 5252 | int reset_on_fork; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5253 | |
Steven Rostedt | 66e5393 | 2006-06-27 02:54:44 -0700 | [diff] [blame] | 5254 | /* may grab non-irq protected spin_locks */ |
| 5255 | BUG_ON(in_interrupt()); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5256 | recheck: |
| 5257 | /* double check policy once rq lock held */ |
Lennart Poettering | ca94c44 | 2009-06-15 17:17:47 +0200 | [diff] [blame] | 5258 | if (policy < 0) { |
| 5259 | reset_on_fork = p->sched_reset_on_fork; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5260 | policy = oldpolicy = p->policy; |
Lennart Poettering | ca94c44 | 2009-06-15 17:17:47 +0200 | [diff] [blame] | 5261 | } else { |
| 5262 | reset_on_fork = !!(policy & SCHED_RESET_ON_FORK); |
| 5263 | policy &= ~SCHED_RESET_ON_FORK; |
| 5264 | |
| 5265 | if (policy != SCHED_FIFO && policy != SCHED_RR && |
| 5266 | policy != SCHED_NORMAL && policy != SCHED_BATCH && |
| 5267 | policy != SCHED_IDLE) |
| 5268 | return -EINVAL; |
| 5269 | } |
| 5270 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5271 | /* |
| 5272 | * Valid priorities for SCHED_FIFO and SCHED_RR are |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 5273 | * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL, |
| 5274 | * SCHED_BATCH and SCHED_IDLE is 0. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5275 | */ |
| 5276 | if (param->sched_priority < 0 || |
Ingo Molnar | 95cdf3b | 2005-09-10 00:26:11 -0700 | [diff] [blame] | 5277 | (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) || |
Steven Rostedt | d46523e | 2005-07-25 16:28:39 -0400 | [diff] [blame] | 5278 | (!p->mm && param->sched_priority > MAX_RT_PRIO-1)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5279 | return -EINVAL; |
Ingo Molnar | e05606d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 5280 | if (rt_policy(policy) != (param->sched_priority != 0)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5281 | return -EINVAL; |
| 5282 | |
Olivier Croquette | 37e4ab3 | 2005-06-25 14:57:32 -0700 | [diff] [blame] | 5283 | /* |
| 5284 | * Allow unprivileged RT tasks to decrease priority: |
| 5285 | */ |
Rusty Russell | 961ccdd | 2008-06-23 13:55:38 +1000 | [diff] [blame] | 5286 | if (user && !capable(CAP_SYS_NICE)) { |
Ingo Molnar | e05606d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 5287 | if (rt_policy(policy)) { |
Oleg Nesterov | a44702e | 2010-06-11 01:09:44 +0200 | [diff] [blame] | 5288 | unsigned long rlim_rtprio = |
| 5289 | task_rlimit(p, RLIMIT_RTPRIO); |
Oleg Nesterov | 5fe1d75 | 2006-09-29 02:00:48 -0700 | [diff] [blame] | 5290 | |
Oleg Nesterov | 8dc3e90 | 2006-09-29 02:00:50 -0700 | [diff] [blame] | 5291 | /* can't set/change the rt policy */ |
| 5292 | if (policy != p->policy && !rlim_rtprio) |
| 5293 | return -EPERM; |
| 5294 | |
| 5295 | /* can't increase priority */ |
| 5296 | if (param->sched_priority > p->rt_priority && |
| 5297 | param->sched_priority > rlim_rtprio) |
| 5298 | return -EPERM; |
| 5299 | } |
Darren Hart | c02aa73 | 2011-02-17 15:37:07 -0800 | [diff] [blame] | 5300 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 5301 | /* |
Darren Hart | c02aa73 | 2011-02-17 15:37:07 -0800 | [diff] [blame] | 5302 | * Treat SCHED_IDLE as nice 20. Only allow a switch to |
| 5303 | * SCHED_NORMAL if the RLIMIT_NICE would normally permit it. |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 5304 | */ |
Darren Hart | c02aa73 | 2011-02-17 15:37:07 -0800 | [diff] [blame] | 5305 | if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) { |
| 5306 | if (!can_nice(p, TASK_NICE(p))) |
| 5307 | return -EPERM; |
| 5308 | } |
Oleg Nesterov | 8dc3e90 | 2006-09-29 02:00:50 -0700 | [diff] [blame] | 5309 | |
Olivier Croquette | 37e4ab3 | 2005-06-25 14:57:32 -0700 | [diff] [blame] | 5310 | /* can't change other user's priorities */ |
David Howells | c69e8d9 | 2008-11-14 10:39:19 +1100 | [diff] [blame] | 5311 | if (!check_same_owner(p)) |
Olivier Croquette | 37e4ab3 | 2005-06-25 14:57:32 -0700 | [diff] [blame] | 5312 | return -EPERM; |
Lennart Poettering | ca94c44 | 2009-06-15 17:17:47 +0200 | [diff] [blame] | 5313 | |
| 5314 | /* Normal users shall not reset the sched_reset_on_fork flag */ |
| 5315 | if (p->sched_reset_on_fork && !reset_on_fork) |
| 5316 | return -EPERM; |
Olivier Croquette | 37e4ab3 | 2005-06-25 14:57:32 -0700 | [diff] [blame] | 5317 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5318 | |
Jeremy Fitzhardinge | 725aad2 | 2008-08-03 09:33:03 -0700 | [diff] [blame] | 5319 | if (user) { |
KOSAKI Motohiro | b0ae198 | 2010-10-15 04:21:18 +0900 | [diff] [blame] | 5320 | retval = security_task_setscheduler(p); |
Jeremy Fitzhardinge | 725aad2 | 2008-08-03 09:33:03 -0700 | [diff] [blame] | 5321 | if (retval) |
| 5322 | return retval; |
| 5323 | } |
| 5324 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5325 | /* |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 5326 | * make sure no PI-waiters arrive (or leave) while we are |
| 5327 | * changing the priority of the task: |
Peter Zijlstra | 0122ec5 | 2011-04-05 17:23:51 +0200 | [diff] [blame] | 5328 | * |
Lucas De Marchi | 25985ed | 2011-03-30 22:57:33 -0300 | [diff] [blame] | 5329 | * To be able to change p->policy safely, the appropriate |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5330 | * runqueue lock must be held. |
| 5331 | */ |
Peter Zijlstra | 0122ec5 | 2011-04-05 17:23:51 +0200 | [diff] [blame] | 5332 | rq = task_rq_lock(p, &flags); |
Peter Zijlstra | dc61b1d | 2010-06-08 11:40:42 +0200 | [diff] [blame] | 5333 | |
Peter Zijlstra | 34f971f | 2010-09-22 13:53:15 +0200 | [diff] [blame] | 5334 | /* |
| 5335 |  * Changing the policy of the stop threads is a very bad idea
| 5336 | */ |
| 5337 | if (p == rq->stop) { |
Peter Zijlstra | 0122ec5 | 2011-04-05 17:23:51 +0200 | [diff] [blame] | 5338 | task_rq_unlock(rq, p, &flags); |
Peter Zijlstra | 34f971f | 2010-09-22 13:53:15 +0200 | [diff] [blame] | 5339 | return -EINVAL; |
| 5340 | } |
| 5341 | |
Dario Faggioli | a51e919 | 2011-03-24 14:00:18 +0100 | [diff] [blame] | 5342 | /* |
| 5343 | * If not changing anything there's no need to proceed further: |
| 5344 | */ |
| 5345 | if (unlikely(policy == p->policy && (!rt_policy(policy) || |
| 5346 | param->sched_priority == p->rt_priority))) { |
| 5347 | |
| 5348 | __task_rq_unlock(rq); |
| 5349 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
| 5350 | return 0; |
| 5351 | } |
| 5352 | |
Peter Zijlstra | dc61b1d | 2010-06-08 11:40:42 +0200 | [diff] [blame] | 5353 | #ifdef CONFIG_RT_GROUP_SCHED |
| 5354 | if (user) { |
| 5355 | /* |
| 5356 | * Do not allow realtime tasks into groups that have no runtime |
| 5357 | * assigned. |
| 5358 | */ |
| 5359 | if (rt_bandwidth_enabled() && rt_policy(policy) && |
Mike Galbraith | f449377 | 2011-01-13 04:54:50 +0100 | [diff] [blame] | 5360 | task_group(p)->rt_bandwidth.rt_runtime == 0 && |
| 5361 | !task_group_is_autogroup(task_group(p))) { |
Peter Zijlstra | 0122ec5 | 2011-04-05 17:23:51 +0200 | [diff] [blame] | 5362 | task_rq_unlock(rq, p, &flags); |
Peter Zijlstra | dc61b1d | 2010-06-08 11:40:42 +0200 | [diff] [blame] | 5363 | return -EPERM; |
| 5364 | } |
| 5365 | } |
| 5366 | #endif |
| 5367 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5368 | /* recheck policy now with rq lock held */ |
| 5369 | if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { |
| 5370 | policy = oldpolicy = -1; |
Peter Zijlstra | 0122ec5 | 2011-04-05 17:23:51 +0200 | [diff] [blame] | 5371 | task_rq_unlock(rq, p, &flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5372 | goto recheck; |
| 5373 | } |
Peter Zijlstra | fd2f441 | 2011-04-05 17:23:44 +0200 | [diff] [blame] | 5374 | on_rq = p->on_rq; |
Dmitry Adamushko | 051a1d1 | 2007-12-18 15:21:13 +0100 | [diff] [blame] | 5375 | running = task_current(rq, p); |
Hiroshi Shimamoto | 0e1f348 | 2008-03-10 11:01:20 -0700 | [diff] [blame] | 5376 | if (on_rq) |
Ingo Molnar | 2e1cb74 | 2007-08-09 11:16:49 +0200 | [diff] [blame] | 5377 | deactivate_task(rq, p, 0); |
Hiroshi Shimamoto | 0e1f348 | 2008-03-10 11:01:20 -0700 | [diff] [blame] | 5378 | if (running) |
| 5379 | p->sched_class->put_prev_task(rq, p); |
Dmitry Adamushko | f6b5320 | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 5380 | |
Lennart Poettering | ca94c44 | 2009-06-15 17:17:47 +0200 | [diff] [blame] | 5381 | p->sched_reset_on_fork = reset_on_fork; |
| 5382 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5383 | oldprio = p->prio; |
Thomas Gleixner | 83ab0aa | 2010-02-17 09:05:48 +0100 | [diff] [blame] | 5384 | prev_class = p->sched_class; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 5385 | __setscheduler(rq, p, policy, param->sched_priority); |
Dmitry Adamushko | f6b5320 | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 5386 | |
Hiroshi Shimamoto | 0e1f348 | 2008-03-10 11:01:20 -0700 | [diff] [blame] | 5387 | if (running) |
| 5388 | p->sched_class->set_curr_task(rq); |
Peter Zijlstra | da7a735 | 2011-01-17 17:03:27 +0100 | [diff] [blame] | 5389 | if (on_rq) |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 5390 | activate_task(rq, p, 0); |
Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 5391 | |
Peter Zijlstra | da7a735 | 2011-01-17 17:03:27 +0100 | [diff] [blame] | 5392 | check_class_changed(rq, p, prev_class, oldprio); |
Peter Zijlstra | 0122ec5 | 2011-04-05 17:23:51 +0200 | [diff] [blame] | 5393 | task_rq_unlock(rq, p, &flags); |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 5394 | |
Thomas Gleixner | 95e02ca | 2006-06-27 02:55:02 -0700 | [diff] [blame] | 5395 | rt_mutex_adjust_pi(p); |
| 5396 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5397 | return 0; |
| 5398 | } |
Rusty Russell | 961ccdd | 2008-06-23 13:55:38 +1000 | [diff] [blame] | 5399 | |
| 5400 | /** |
| 5401 | * sched_setscheduler - change the scheduling policy and/or RT priority of a thread. |
| 5402 | * @p: the task in question. |
| 5403 | * @policy: new policy. |
| 5404 | * @param: structure containing the new RT priority. |
| 5405 | * |
| 5406 |  * NOTE that the task may already be dead.
| 5407 | */ |
| 5408 | int sched_setscheduler(struct task_struct *p, int policy, |
KOSAKI Motohiro | fe7de49 | 2010-10-20 16:01:12 -0700 | [diff] [blame] | 5409 | const struct sched_param *param) |
Rusty Russell | 961ccdd | 2008-06-23 13:55:38 +1000 | [diff] [blame] | 5410 | { |
| 5411 | return __sched_setscheduler(p, policy, param, true); |
| 5412 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5413 | EXPORT_SYMBOL_GPL(sched_setscheduler); |
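
/*
 * Usage sketch (illustrative; 'tsk' and the chosen priority are
 * hypothetical): promote a kernel thread to the FIFO class and warn if
 * the switch fails:
 *
 *	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 10 };
 *
 *	if (sched_setscheduler(tsk, SCHED_FIFO, &param))
 *		pr_warn("%s: could not switch to SCHED_FIFO\n", tsk->comm);
 */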
| 5414 | |
Rusty Russell | 961ccdd | 2008-06-23 13:55:38 +1000 | [diff] [blame] | 5415 | /** |
| 5416 | * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace. |
| 5417 | * @p: the task in question. |
| 5418 | * @policy: new policy. |
| 5419 | * @param: structure containing the new RT priority. |
| 5420 | * |
| 5421 | * Just like sched_setscheduler, only don't bother checking if the |
| 5422 | * current context has permission. For example, this is needed in |
| 5423 | * stop_machine(): we create temporary high priority worker threads, |
| 5424 | * but our caller might not have that capability. |
| 5425 | */ |
| 5426 | int sched_setscheduler_nocheck(struct task_struct *p, int policy, |
KOSAKI Motohiro | fe7de49 | 2010-10-20 16:01:12 -0700 | [diff] [blame] | 5427 | const struct sched_param *param) |
Rusty Russell | 961ccdd | 2008-06-23 13:55:38 +1000 | [diff] [blame] | 5428 | { |
| 5429 | return __sched_setscheduler(p, policy, param, false); |
| 5430 | } |
| 5431 | |
Ingo Molnar | 95cdf3b | 2005-09-10 00:26:11 -0700 | [diff] [blame] | 5432 | static int |
| 5433 | do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5434 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5435 | struct sched_param lparam; |
| 5436 | struct task_struct *p; |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 5437 | int retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5438 | |
| 5439 | if (!param || pid < 0) |
| 5440 | return -EINVAL; |
| 5441 | if (copy_from_user(&lparam, param, sizeof(struct sched_param))) |
| 5442 | return -EFAULT; |
Oleg Nesterov | 5fe1d75 | 2006-09-29 02:00:48 -0700 | [diff] [blame] | 5443 | |
| 5444 | rcu_read_lock(); |
| 5445 | retval = -ESRCH; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5446 | p = find_process_by_pid(pid); |
Oleg Nesterov | 5fe1d75 | 2006-09-29 02:00:48 -0700 | [diff] [blame] | 5447 | if (p != NULL) |
| 5448 | retval = sched_setscheduler(p, policy, &lparam); |
| 5449 | rcu_read_unlock(); |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 5450 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5451 | return retval; |
| 5452 | } |
| 5453 | |
| 5454 | /** |
| 5455 | * sys_sched_setscheduler - set/change the scheduler policy and RT priority |
| 5456 | * @pid: the pid in question. |
| 5457 | * @policy: new policy. |
| 5458 | * @param: structure containing the new RT priority. |
| 5459 | */ |
Heiko Carstens | 5add95d | 2009-01-14 14:14:08 +0100 | [diff] [blame] | 5460 | SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, |
| 5461 | struct sched_param __user *, param) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5462 | { |
Jason Baron | c21761f | 2006-01-18 17:43:03 -0800 | [diff] [blame] | 5463 | /* negative values for policy are not valid */ |
| 5464 | if (policy < 0) |
| 5465 | return -EINVAL; |
| 5466 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5467 | return do_sched_setscheduler(pid, policy, param); |
| 5468 | } |
| 5469 | |
| 5470 | /** |
| 5471 | * sys_sched_setparam - set/change the RT priority of a thread |
| 5472 | * @pid: the pid in question. |
| 5473 | * @param: structure containing the new RT priority. |
| 5474 | */ |
Heiko Carstens | 5add95d | 2009-01-14 14:14:08 +0100 | [diff] [blame] | 5475 | SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5476 | { |
| 5477 | return do_sched_setscheduler(pid, -1, param); |
| 5478 | } |
| 5479 | |
| 5480 | /** |
| 5481 | * sys_sched_getscheduler - get the policy (scheduling class) of a thread |
| 5482 | * @pid: the pid in question. |
| 5483 | */ |
Heiko Carstens | 5add95d | 2009-01-14 14:14:08 +0100 | [diff] [blame] | 5484 | SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5485 | { |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 5486 | struct task_struct *p; |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 5487 | int retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5488 | |
| 5489 | if (pid < 0) |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 5490 | return -EINVAL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5491 | |
| 5492 | retval = -ESRCH; |
Thomas Gleixner | 5fe85be | 2009-12-09 10:14:58 +0000 | [diff] [blame] | 5493 | rcu_read_lock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5494 | p = find_process_by_pid(pid); |
| 5495 | if (p) { |
| 5496 | retval = security_task_getscheduler(p); |
| 5497 | if (!retval) |
Lennart Poettering | ca94c44 | 2009-06-15 17:17:47 +0200 | [diff] [blame] | 5498 | retval = p->policy |
| 5499 | | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5500 | } |
Thomas Gleixner | 5fe85be | 2009-12-09 10:14:58 +0000 | [diff] [blame] | 5501 | rcu_read_unlock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5502 | return retval; |
| 5503 | } |
| 5504 | |
| 5505 | /** |
Lennart Poettering | ca94c44 | 2009-06-15 17:17:47 +0200 | [diff] [blame] | 5506 | * sys_sched_getparam - get the RT priority of a thread |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5507 | * @pid: the pid in question. |
| 5508 | * @param: structure containing the RT priority. |
| 5509 | */ |
Heiko Carstens | 5add95d | 2009-01-14 14:14:08 +0100 | [diff] [blame] | 5510 | SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5511 | { |
| 5512 | struct sched_param lp; |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 5513 | struct task_struct *p; |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 5514 | int retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5515 | |
| 5516 | if (!param || pid < 0) |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 5517 | return -EINVAL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5518 | |
Thomas Gleixner | 5fe85be | 2009-12-09 10:14:58 +0000 | [diff] [blame] | 5519 | rcu_read_lock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5520 | p = find_process_by_pid(pid); |
| 5521 | retval = -ESRCH; |
| 5522 | if (!p) |
| 5523 | goto out_unlock; |
| 5524 | |
| 5525 | retval = security_task_getscheduler(p); |
| 5526 | if (retval) |
| 5527 | goto out_unlock; |
| 5528 | |
| 5529 | lp.sched_priority = p->rt_priority; |
Thomas Gleixner | 5fe85be | 2009-12-09 10:14:58 +0000 | [diff] [blame] | 5530 | rcu_read_unlock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5531 | |
| 5532 | /* |
| 5533 | * This one might sleep, we cannot do it with a spinlock held ... |
| 5534 | */ |
| 5535 | retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0; |
| 5536 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5537 | return retval; |
| 5538 | |
| 5539 | out_unlock: |
Thomas Gleixner | 5fe85be | 2009-12-09 10:14:58 +0000 | [diff] [blame] | 5540 | rcu_read_unlock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5541 | return retval; |
| 5542 | } |
| 5543 | |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 5544 | long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5545 | { |
Rusty Russell | 5a16f3d | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 5546 | cpumask_var_t cpus_allowed, new_mask; |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 5547 | struct task_struct *p; |
| 5548 | int retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5549 | |
Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 5550 | get_online_cpus(); |
Thomas Gleixner | 23f5d14 | 2009-12-09 10:15:01 +0000 | [diff] [blame] | 5551 | rcu_read_lock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5552 | |
| 5553 | p = find_process_by_pid(pid); |
| 5554 | if (!p) { |
Thomas Gleixner | 23f5d14 | 2009-12-09 10:15:01 +0000 | [diff] [blame] | 5555 | rcu_read_unlock(); |
Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 5556 | put_online_cpus(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5557 | return -ESRCH; |
| 5558 | } |
| 5559 | |
Thomas Gleixner | 23f5d14 | 2009-12-09 10:15:01 +0000 | [diff] [blame] | 5560 | 	/* Prevent p from going away */
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5561 | get_task_struct(p); |
Thomas Gleixner | 23f5d14 | 2009-12-09 10:15:01 +0000 | [diff] [blame] | 5562 | rcu_read_unlock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5563 | |
Rusty Russell | 5a16f3d | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 5564 | if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { |
| 5565 | retval = -ENOMEM; |
| 5566 | goto out_put_task; |
| 5567 | } |
| 5568 | if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { |
| 5569 | retval = -ENOMEM; |
| 5570 | goto out_free_cpus_allowed; |
| 5571 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5572 | retval = -EPERM; |
Serge E. Hallyn | b0e7759 | 2011-03-23 16:43:24 -0700 | [diff] [blame] | 5573 | if (!check_same_owner(p) && !task_ns_capable(p, CAP_SYS_NICE)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5574 | goto out_unlock; |
| 5575 | |
KOSAKI Motohiro | b0ae198 | 2010-10-15 04:21:18 +0900 | [diff] [blame] | 5576 | retval = security_task_setscheduler(p); |
David Quigley | e7834f8 | 2006-06-23 02:03:59 -0700 | [diff] [blame] | 5577 | if (retval) |
| 5578 | goto out_unlock; |
| 5579 | |
Rusty Russell | 5a16f3d | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 5580 | cpuset_cpus_allowed(p, cpus_allowed); |
| 5581 | cpumask_and(new_mask, in_mask, cpus_allowed); |
Peter Zijlstra | 4924627 | 2010-10-17 21:46:10 +0200 | [diff] [blame] | 5582 | again: |
Rusty Russell | 5a16f3d | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 5583 | retval = set_cpus_allowed_ptr(p, new_mask); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5584 | |
Paul Menage | 8707d8b | 2007-10-18 23:40:22 -0700 | [diff] [blame] | 5585 | if (!retval) { |
Rusty Russell | 5a16f3d | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 5586 | cpuset_cpus_allowed(p, cpus_allowed); |
| 5587 | if (!cpumask_subset(new_mask, cpus_allowed)) { |
Paul Menage | 8707d8b | 2007-10-18 23:40:22 -0700 | [diff] [blame] | 5588 | /* |
| 5589 | * We must have raced with a concurrent cpuset |
| 5590 | * update. Just reset the cpus_allowed to the |
| 5591 | * cpuset's cpus_allowed |
| 5592 | */ |
Rusty Russell | 5a16f3d | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 5593 | cpumask_copy(new_mask, cpus_allowed); |
Paul Menage | 8707d8b | 2007-10-18 23:40:22 -0700 | [diff] [blame] | 5594 | goto again; |
| 5595 | } |
| 5596 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5597 | out_unlock: |
Rusty Russell | 5a16f3d | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 5598 | free_cpumask_var(new_mask); |
| 5599 | out_free_cpus_allowed: |
| 5600 | free_cpumask_var(cpus_allowed); |
| 5601 | out_put_task: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5602 | put_task_struct(p); |
Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 5603 | put_online_cpus(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5604 | return retval; |
| 5605 | } |
| 5606 | |
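/*
 * Usage sketch (illustrative; error handling trimmed): pin the task
 * identified by 'pid' onto CPU 0 from kernel code:
 *
 *	cpumask_var_t mask;
 *
 *	if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
 *		cpumask_clear(mask);
 *		cpumask_set_cpu(0, mask);
 *		sched_setaffinity(pid, mask);
 *		free_cpumask_var(mask);
 *	}
 */
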
| 5607 | static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 5608 | struct cpumask *new_mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5609 | { |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 5610 | if (len < cpumask_size()) |
| 5611 | cpumask_clear(new_mask); |
| 5612 | else if (len > cpumask_size()) |
| 5613 | len = cpumask_size(); |
| 5614 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5615 | return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; |
| 5616 | } |
| 5617 | |
| 5618 | /** |
| 5619 | * sys_sched_setaffinity - set the cpu affinity of a process |
| 5620 | * @pid: pid of the process |
| 5621 | * @len: length in bytes of the bitmask pointed to by user_mask_ptr |
| 5622 | * @user_mask_ptr: user-space pointer to the new cpu mask |
| 5623 | */ |
Heiko Carstens | 5add95d | 2009-01-14 14:14:08 +0100 | [diff] [blame] | 5624 | SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, |
| 5625 | unsigned long __user *, user_mask_ptr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5626 | { |
Rusty Russell | 5a16f3d | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 5627 | cpumask_var_t new_mask; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5628 | int retval; |
| 5629 | |
Rusty Russell | 5a16f3d | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 5630 | if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) |
| 5631 | return -ENOMEM; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5632 | |
Rusty Russell | 5a16f3d | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 5633 | retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); |
| 5634 | if (retval == 0) |
| 5635 | retval = sched_setaffinity(pid, new_mask); |
| 5636 | free_cpumask_var(new_mask); |
| 5637 | return retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5638 | } |
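/*
 * Illustrative user-space sketch (not part of the kernel): how this
 * syscall is typically reached through the glibc wrapper. The mask is
 * the same unsigned-long bitmap that get_user_cpu_mask() copies in
 * above.
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		cpu_set_t set;
 *
 *		CPU_ZERO(&set);
 *		CPU_SET(0, &set);	// pin the caller to CPU 0
 *		if (sched_setaffinity(0, sizeof(set), &set) == -1)
 *			perror("sched_setaffinity");
 *		return 0;
 *	}
 */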
| 5639 | |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 5640 | long sched_getaffinity(pid_t pid, struct cpumask *mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5641 | { |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 5642 | struct task_struct *p; |
Thomas Gleixner | 3160568 | 2009-12-08 20:24:16 +0000 | [diff] [blame] | 5643 | unsigned long flags; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5644 | int retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5645 | |
Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 5646 | get_online_cpus(); |
Thomas Gleixner | 23f5d14 | 2009-12-09 10:15:01 +0000 | [diff] [blame] | 5647 | rcu_read_lock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5648 | |
| 5649 | retval = -ESRCH; |
| 5650 | p = find_process_by_pid(pid); |
| 5651 | if (!p) |
| 5652 | goto out_unlock; |
| 5653 | |
David Quigley | e7834f8 | 2006-06-23 02:03:59 -0700 | [diff] [blame] | 5654 | retval = security_task_getscheduler(p); |
| 5655 | if (retval) |
| 5656 | goto out_unlock; |
| 5657 | |
Peter Zijlstra | 013fdb8 | 2011-04-05 17:23:45 +0200 | [diff] [blame] | 5658 | raw_spin_lock_irqsave(&p->pi_lock, flags); |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 5659 | cpumask_and(mask, &p->cpus_allowed, cpu_online_mask); |
Peter Zijlstra | 013fdb8 | 2011-04-05 17:23:45 +0200 | [diff] [blame] | 5660 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5661 | |
| 5662 | out_unlock: |
Thomas Gleixner | 23f5d14 | 2009-12-09 10:15:01 +0000 | [diff] [blame] | 5663 | rcu_read_unlock(); |
Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 5664 | put_online_cpus(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5665 | |
Ulrich Drepper | 9531b62 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 5666 | return retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5667 | } |
| 5668 | |
| 5669 | /** |
| 5670 | * sys_sched_getaffinity - get the cpu affinity of a process |
| 5671 | * @pid: pid of the process |
| 5672 | * @len: length in bytes of the bitmask pointed to by user_mask_ptr |
| 5673 | * @user_mask_ptr: user-space pointer to hold the current cpu mask |
| 5674 | */ |
Heiko Carstens | 5add95d | 2009-01-14 14:14:08 +0100 | [diff] [blame] | 5675 | SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, |
| 5676 | unsigned long __user *, user_mask_ptr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5677 | { |
| 5678 | int ret; |
Rusty Russell | f17c860 | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 5679 | cpumask_var_t mask; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5680 | |
Anton Blanchard | 84fba5e | 2010-04-06 17:02:19 +1000 | [diff] [blame] | 5681 | if ((len * BITS_PER_BYTE) < nr_cpu_ids) |
KOSAKI Motohiro | cd3d803 | 2010-03-12 16:15:36 +0900 | [diff] [blame] | 5682 | return -EINVAL; |
| 5683 | if (len & (sizeof(unsigned long)-1)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5684 | return -EINVAL; |
| 5685 | |
Rusty Russell | f17c860 | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 5686 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) |
| 5687 | return -ENOMEM; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5688 | |
Rusty Russell | f17c860 | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 5689 | ret = sched_getaffinity(pid, mask); |
| 5690 | if (ret == 0) { |
KOSAKI Motohiro | 8bc037f | 2010-03-17 09:36:58 +0900 | [diff] [blame] | 5691 | size_t retlen = min_t(size_t, len, cpumask_size()); |
KOSAKI Motohiro | cd3d803 | 2010-03-12 16:15:36 +0900 | [diff] [blame] | 5692 | |
| 5693 | if (copy_to_user(user_mask_ptr, mask, retlen)) |
Rusty Russell | f17c860 | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 5694 | ret = -EFAULT; |
| 5695 | else |
KOSAKI Motohiro | cd3d803 | 2010-03-12 16:15:36 +0900 | [diff] [blame] | 5696 | ret = retlen; |
Rusty Russell | f17c860 | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 5697 | } |
| 5698 | free_cpumask_var(mask); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5699 | |
Rusty Russell | f17c860 | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 5700 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5701 | } |
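/*
 * Illustrative user-space sketch (not part of the kernel). Note that
 * the raw syscall returns the number of bytes written to user space
 * (retlen above); the glibc wrapper hides this and returns 0 on
 * success.
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		cpu_set_t set;
 *		int cpu;
 *
 *		if (sched_getaffinity(0, sizeof(set), &set) == -1)
 *			return 1;
 *		for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
 *			if (CPU_ISSET(cpu, &set))
 *				printf("allowed: cpu %d\n", cpu);
 *		return 0;
 *	}
 */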
| 5702 | |
| 5703 | /** |
| 5704 | * sys_sched_yield - yield the current processor to other threads. |
| 5705 | * |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 5706 | * This function yields the current CPU to other tasks. If there are no |
| 5707 | * other threads running on this CPU then this function will return. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5708 | */ |
Heiko Carstens | 5add95d | 2009-01-14 14:14:08 +0100 | [diff] [blame] | 5709 | SYSCALL_DEFINE0(sched_yield) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5710 | { |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 5711 | struct rq *rq = this_rq_lock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5712 | |
Ingo Molnar | 2d72376 | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 5713 | schedstat_inc(rq, yld_count); |
Dmitry Adamushko | 4530d7a | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 5714 | current->sched_class->yield_task(rq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5715 | |
| 5716 | /* |
| 5717 | * Since we are going to call schedule() anyway, there's |
| 5718 | * no need to preempt or enable interrupts: |
| 5719 | */ |
| 5720 | __release(rq->lock); |
Ingo Molnar | 8a25d5d | 2006-07-03 00:24:54 -0700 | [diff] [blame] | 5721 | spin_release(&rq->lock.dep_map, 1, _THIS_IP_); |
Thomas Gleixner | 9828ea9 | 2009-12-03 20:55:53 +0100 | [diff] [blame] | 5722 | do_raw_spin_unlock(&rq->lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5723 | preempt_enable_no_resched(); |
| 5724 | |
| 5725 | schedule(); |
| 5726 | |
| 5727 | return 0; |
| 5728 | } |
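/*
 * Illustrative user-space sketch (not part of the kernel; try_lock()
 * is a hypothetical helper): sched_yield() is sometimes used to spin
 * politely, though a real blocking primitive is usually preferable.
 *
 *	#include <sched.h>
 *
 *	while (!try_lock(&l))
 *		sched_yield();
 */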
| 5729 | |
Peter Zijlstra | d86ee48 | 2009-07-10 14:57:57 +0200 | [diff] [blame] | 5730 | static inline int should_resched(void) |
| 5731 | { |
| 5732 | return need_resched() && !(preempt_count() & PREEMPT_ACTIVE); |
| 5733 | } |
| 5734 | |
Andrew Morton | e7b3840 | 2006-06-30 01:56:00 -0700 | [diff] [blame] | 5735 | static void __cond_resched(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5736 | { |
Frederic Weisbecker | e7aaaa6 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 5737 | add_preempt_count(PREEMPT_ACTIVE); |
Thomas Gleixner | c259e01 | 2011-06-22 19:47:00 +0200 | [diff] [blame] | 5738 | __schedule(); |
Frederic Weisbecker | e7aaaa6 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 5739 | sub_preempt_count(PREEMPT_ACTIVE); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5740 | } |
| 5741 | |
Herbert Xu | 02b67cc3 | 2008-01-25 21:08:28 +0100 | [diff] [blame] | 5742 | int __sched _cond_resched(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5743 | { |
Peter Zijlstra | d86ee48 | 2009-07-10 14:57:57 +0200 | [diff] [blame] | 5744 | if (should_resched()) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5745 | __cond_resched(); |
| 5746 | return 1; |
| 5747 | } |
| 5748 | return 0; |
| 5749 | } |
Herbert Xu | 02b67cc3 | 2008-01-25 21:08:28 +0100 | [diff] [blame] | 5750 | EXPORT_SYMBOL(_cond_resched); |
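/*
 * Usage sketch (process_one() and items are hypothetical): kernel
 * loops that can run for a long time sprinkle cond_resched() - the
 * wrapper around _cond_resched() - so that, on !CONFIG_PREEMPT
 * kernels, a pending reschedule is honored promptly.
 *
 *	for (i = 0; i < nr; i++) {
 *		process_one(&items[i]);
 *		cond_resched();
 *	}
 */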
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5751 | |
| 5752 | /* |
Frederic Weisbecker | 613afbf | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 5753 | * __cond_resched_lock() - if a reschedule is pending, drop the given lock, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5754 | * call schedule, and on return reacquire the lock. |
| 5755 | * |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 5756 | * This works OK both with and without CONFIG_PREEMPT. We do strange low-level |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5757 | * operations here to prevent schedule() from being called twice (once via |
| 5758 | * spin_unlock(), once by hand). |
| 5759 | */ |
Frederic Weisbecker | 613afbf | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 5760 | int __cond_resched_lock(spinlock_t *lock) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5761 | { |
Peter Zijlstra | d86ee48 | 2009-07-10 14:57:57 +0200 | [diff] [blame] | 5762 | int resched = should_resched(); |
Jan Kara | 6df3cec | 2005-06-13 15:52:32 -0700 | [diff] [blame] | 5763 | int ret = 0; |
| 5764 | |
Peter Zijlstra | f607c66 | 2009-07-20 19:16:29 +0200 | [diff] [blame] | 5765 | lockdep_assert_held(lock); |
| 5766 | |
Nick Piggin | 95c354f | 2008-01-30 13:31:20 +0100 | [diff] [blame] | 5767 | if (spin_needbreak(lock) || resched) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5768 | spin_unlock(lock); |
Peter Zijlstra | d86ee48 | 2009-07-10 14:57:57 +0200 | [diff] [blame] | 5769 | if (resched) |
Nick Piggin | 95c354f | 2008-01-30 13:31:20 +0100 | [diff] [blame] | 5770 | __cond_resched(); |
| 5771 | else |
| 5772 | cpu_relax(); |
Jan Kara | 6df3cec | 2005-06-13 15:52:32 -0700 | [diff] [blame] | 5773 | ret = 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5774 | spin_lock(lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5775 | } |
Jan Kara | 6df3cec | 2005-06-13 15:52:32 -0700 | [diff] [blame] | 5776 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5777 | } |
Frederic Weisbecker | 613afbf | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 5778 | EXPORT_SYMBOL(__cond_resched_lock); |
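/*
 * Usage sketch (more_work(), do_a_little(), revalidate() and mylock
 * are hypothetical): a long scan under a spinlock periodically offers
 * to drop it via the cond_resched_lock() wrapper. A nonzero return
 * means the lock was released and reacquired, so any state derived
 * while it was held must be revalidated.
 *
 *	spin_lock(&mylock);
 *	while (more_work()) {
 *		do_a_little();
 *		if (cond_resched_lock(&mylock))
 *			revalidate();
 *	}
 *	spin_unlock(&mylock);
 */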
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5779 | |
Frederic Weisbecker | 613afbf | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 5780 | int __sched __cond_resched_softirq(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5781 | { |
| 5782 | BUG_ON(!in_softirq()); |
| 5783 | |
Peter Zijlstra | d86ee48 | 2009-07-10 14:57:57 +0200 | [diff] [blame] | 5784 | if (should_resched()) { |
Thomas Gleixner | 98d82567 | 2007-05-23 13:58:18 -0700 | [diff] [blame] | 5785 | local_bh_enable(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5786 | __cond_resched(); |
| 5787 | local_bh_disable(); |
| 5788 | return 1; |
| 5789 | } |
| 5790 | return 0; |
| 5791 | } |
Frederic Weisbecker | 613afbf | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 5792 | EXPORT_SYMBOL(__cond_resched_softirq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5793 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5794 | /** |
| 5795 | * yield - yield the current processor to other threads. |
| 5796 | * |
Robert P. J. Day | 72fd4a3 | 2007-02-10 01:45:59 -0800 | [diff] [blame] | 5797 | * This is a shortcut for kernel-space yielding - it marks the |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5798 | * thread runnable and calls sys_sched_yield(). |
| 5799 | */ |
| 5800 | void __sched yield(void) |
| 5801 | { |
| 5802 | set_current_state(TASK_RUNNING); |
| 5803 | sys_sched_yield(); |
| 5804 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5805 | EXPORT_SYMBOL(yield); |
| 5806 | |
Mike Galbraith | d95f412 | 2011-02-01 09:50:51 -0500 | [diff] [blame] | 5807 | /** |
| 5808 | * yield_to - yield the current processor to another thread in |
| 5809 | * your thread group, or accelerate that thread toward the |
| 5810 | * processor it's on. |
Randy Dunlap | 16addf9 | 2011-03-18 09:34:53 -0700 | [diff] [blame] | 5811 | * @p: target task |
| 5812 | * @preempt: whether task preemption is allowed or not |
Mike Galbraith | d95f412 | 2011-02-01 09:50:51 -0500 | [diff] [blame] | 5813 | * |
| 5814 | * It's the caller's job to ensure that the target task struct |
| 5815 | * can't go away on us before we can do any checks. |
| 5816 | * |
| 5817 | * Returns true if we indeed boosted the target task. |
| 5818 | */ |
| 5819 | bool __sched yield_to(struct task_struct *p, bool preempt) |
| 5820 | { |
| 5821 | struct task_struct *curr = current; |
| 5822 | struct rq *rq, *p_rq; |
| 5823 | unsigned long flags; |
| 5824 | bool yielded = false; |
| 5825 | |
| 5826 | local_irq_save(flags); |
| 5827 | rq = this_rq(); |
| 5828 | |
| 5829 | again: |
| 5830 | p_rq = task_rq(p); |
| 5831 | double_rq_lock(rq, p_rq); |
| 5832 | while (task_rq(p) != p_rq) { |
| 5833 | double_rq_unlock(rq, p_rq); |
| 5834 | goto again; |
| 5835 | } |
| 5836 | |
| 5837 | if (!curr->sched_class->yield_to_task) |
| 5838 | goto out; |
| 5839 | |
| 5840 | if (curr->sched_class != p->sched_class) |
| 5841 | goto out; |
| 5842 | |
| 5843 | if (task_running(p_rq, p) || p->state) |
| 5844 | goto out; |
| 5845 | |
| 5846 | yielded = curr->sched_class->yield_to_task(rq, p, preempt); |
Venkatesh Pallipadi | 6d1cafd | 2011-03-01 16:28:21 -0800 | [diff] [blame] | 5847 | if (yielded) { |
Mike Galbraith | d95f412 | 2011-02-01 09:50:51 -0500 | [diff] [blame] | 5848 | schedstat_inc(rq, yld_count); |
Venkatesh Pallipadi | 6d1cafd | 2011-03-01 16:28:21 -0800 | [diff] [blame] | 5849 | /* |
| 5850 | * Make p's CPU reschedule; pick_next_entity takes care of |
| 5851 | * fairness. |
| 5852 | */ |
| 5853 | if (preempt && rq != p_rq) |
| 5854 | resched_task(p_rq->curr); |
| 5855 | } |
Mike Galbraith | d95f412 | 2011-02-01 09:50:51 -0500 | [diff] [blame] | 5856 | |
| 5857 | out: |
| 5858 | double_rq_unlock(rq, p_rq); |
| 5859 | local_irq_restore(flags); |
| 5860 | |
| 5861 | if (yielded) |
| 5862 | schedule(); |
| 5863 | |
| 5864 | return yielded; |
| 5865 | } |
| 5866 | EXPORT_SYMBOL_GPL(yield_to); |
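/*
 * Usage sketch (hypothetical caller; vcpu_task() and lock_holder are
 * illustrative, loosely modeled on paravirtual spinlock boosting): a
 * spinning thread donates its timeslice to the task it is waiting on.
 * The get/put pair satisfies the "target can't go away" rule above.
 *
 *	struct task_struct *holder;
 *
 *	rcu_read_lock();
 *	holder = vcpu_task(lock_holder);
 *	get_task_struct(holder);
 *	rcu_read_unlock();
 *
 *	if (!yield_to(holder, true))
 *		cpu_relax();	// target not boostable; just spin
 *
 *	put_task_struct(holder);
 */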
| 5867 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5868 | /* |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 5869 | * This task is about to go to sleep on IO. Increment rq->nr_iowait so |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5870 | * that process accounting knows that this is a task in IO wait state. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5871 | */ |
| 5872 | void __sched io_schedule(void) |
| 5873 | { |
Hitoshi Mitake | 54d35f2 | 2009-06-29 14:44:57 +0900 | [diff] [blame] | 5874 | struct rq *rq = raw_rq(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5875 | |
Shailabh Nagar | 0ff9224 | 2006-07-14 00:24:37 -0700 | [diff] [blame] | 5876 | delayacct_blkio_start(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5877 | atomic_inc(&rq->nr_iowait); |
Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 5878 | blk_flush_plug(current); |
Arjan van de Ven | 8f0dfc3 | 2009-07-20 11:26:58 -0700 | [diff] [blame] | 5879 | current->in_iowait = 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5880 | schedule(); |
Arjan van de Ven | 8f0dfc3 | 2009-07-20 11:26:58 -0700 | [diff] [blame] | 5881 | current->in_iowait = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5882 | atomic_dec(&rq->nr_iowait); |
Shailabh Nagar | 0ff9224 | 2006-07-14 00:24:37 -0700 | [diff] [blame] | 5883 | delayacct_blkio_end(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5884 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5885 | EXPORT_SYMBOL(io_schedule); |
| 5886 | |
| 5887 | long __sched io_schedule_timeout(long timeout) |
| 5888 | { |
Hitoshi Mitake | 54d35f2 | 2009-06-29 14:44:57 +0900 | [diff] [blame] | 5889 | struct rq *rq = raw_rq(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5890 | long ret; |
| 5891 | |
Shailabh Nagar | 0ff9224 | 2006-07-14 00:24:37 -0700 | [diff] [blame] | 5892 | delayacct_blkio_start(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5893 | atomic_inc(&rq->nr_iowait); |
Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 5894 | blk_flush_plug(current); |
Arjan van de Ven | 8f0dfc3 | 2009-07-20 11:26:58 -0700 | [diff] [blame] | 5895 | current->in_iowait = 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5896 | ret = schedule_timeout(timeout); |
Arjan van de Ven | 8f0dfc3 | 2009-07-20 11:26:58 -0700 | [diff] [blame] | 5897 | current->in_iowait = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5898 | atomic_dec(&rq->nr_iowait); |
Shailabh Nagar | 0ff9224 | 2006-07-14 00:24:37 -0700 | [diff] [blame] | 5899 | delayacct_blkio_end(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5900 | return ret; |
| 5901 | } |
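/*
 * Usage sketch (io_done() and req are hypothetical): callers about to
 * block on storage use io_schedule() instead of schedule() so the
 * sleep is charged to iowait and the plugged block requests get
 * flushed first.
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	while (!io_done(req)) {
 *		io_schedule();
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *	}
 *	__set_current_state(TASK_RUNNING);
 */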
| 5902 | |
| 5903 | /** |
| 5904 | * sys_sched_get_priority_max - return maximum RT priority. |
| 5905 | * @policy: scheduling class. |
| 5906 | * |
| 5907 | * This syscall returns the maximum rt_priority that can be used |
| 5908 | * by a given scheduling class. |
| 5909 | */ |
Heiko Carstens | 5add95d | 2009-01-14 14:14:08 +0100 | [diff] [blame] | 5910 | SYSCALL_DEFINE1(sched_get_priority_max, int, policy) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5911 | { |
| 5912 | int ret = -EINVAL; |
| 5913 | |
| 5914 | switch (policy) { |
| 5915 | case SCHED_FIFO: |
| 5916 | case SCHED_RR: |
| 5917 | ret = MAX_USER_RT_PRIO-1; |
| 5918 | break; |
| 5919 | case SCHED_NORMAL: |
Ingo Molnar | b0a9499 | 2006-01-14 13:20:41 -0800 | [diff] [blame] | 5920 | case SCHED_BATCH: |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 5921 | case SCHED_IDLE: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5922 | ret = 0; |
| 5923 | break; |
| 5924 | } |
| 5925 | return ret; |
| 5926 | } |
| 5927 | |
| 5928 | /** |
| 5929 | * sys_sched_get_priority_min - return minimum RT priority. |
| 5930 | * @policy: scheduling class. |
| 5931 | * |
| 5932 | * This syscall returns the minimum rt_priority that can be used |
| 5933 | * by a given scheduling class. |
| 5934 | */ |
Heiko Carstens | 5add95d | 2009-01-14 14:14:08 +0100 | [diff] [blame] | 5935 | SYSCALL_DEFINE1(sched_get_priority_min, int, policy) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5936 | { |
| 5937 | int ret = -EINVAL; |
| 5938 | |
| 5939 | switch (policy) { |
| 5940 | case SCHED_FIFO: |
| 5941 | case SCHED_RR: |
| 5942 | ret = 1; |
| 5943 | break; |
| 5944 | case SCHED_NORMAL: |
Ingo Molnar | b0a9499 | 2006-01-14 13:20:41 -0800 | [diff] [blame] | 5945 | case SCHED_BATCH: |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 5946 | case SCHED_IDLE: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5947 | ret = 0; |
| 5948 | } |
| 5949 | return ret; |
| 5950 | } |
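/*
 * Illustrative user-space sketch (not part of the kernel): querying
 * the valid SCHED_FIFO priority range before calling
 * sched_setscheduler().
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		int min = sched_get_priority_min(SCHED_FIFO);
 *		int max = sched_get_priority_max(SCHED_FIFO);
 *
 *		printf("SCHED_FIFO priorities: %d..%d\n", min, max);
 *		return 0;
 *	}
 */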
| 5951 | |
| 5952 | /** |
| 5953 | * sys_sched_rr_get_interval - return the default timeslice of a process. |
| 5954 | * @pid: pid of the process. |
| 5955 | * @interval: userspace pointer to the timeslice value. |
| 5956 | * |
| 5957 | * This syscall writes the default timeslice value of a given process |
| 5958 | * into the user-space timespec buffer. A value of '0' means infinity. |
| 5959 | */ |
Heiko Carstens | 17da2bd | 2009-01-14 14:14:10 +0100 | [diff] [blame] | 5960 | SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, |
Heiko Carstens | 754fe8d | 2009-01-14 14:14:09 +0100 | [diff] [blame] | 5961 | struct timespec __user *, interval) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5962 | { |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 5963 | struct task_struct *p; |
Dmitry Adamushko | a4ec24b | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 5964 | unsigned int time_slice; |
Thomas Gleixner | dba091b | 2009-12-09 09:32:03 +0100 | [diff] [blame] | 5965 | unsigned long flags; |
| 5966 | struct rq *rq; |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 5967 | int retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5968 | struct timespec t; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5969 | |
| 5970 | if (pid < 0) |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 5971 | return -EINVAL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5972 | |
| 5973 | retval = -ESRCH; |
Thomas Gleixner | 1a551ae | 2009-12-09 10:15:11 +0000 | [diff] [blame] | 5974 | rcu_read_lock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5975 | p = find_process_by_pid(pid); |
| 5976 | if (!p) |
| 5977 | goto out_unlock; |
| 5978 | |
| 5979 | retval = security_task_getscheduler(p); |
| 5980 | if (retval) |
| 5981 | goto out_unlock; |
| 5982 | |
Thomas Gleixner | dba091b | 2009-12-09 09:32:03 +0100 | [diff] [blame] | 5983 | rq = task_rq_lock(p, &flags); |
| 5984 | time_slice = p->sched_class->get_rr_interval(rq, p); |
Peter Zijlstra | 0122ec5 | 2011-04-05 17:23:51 +0200 | [diff] [blame] | 5985 | task_rq_unlock(rq, p, &flags); |
Dmitry Adamushko | a4ec24b | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 5986 | |
Thomas Gleixner | 1a551ae | 2009-12-09 10:15:11 +0000 | [diff] [blame] | 5987 | rcu_read_unlock(); |
Dmitry Adamushko | a4ec24b | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 5988 | jiffies_to_timespec(time_slice, &t); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5989 | retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5990 | return retval; |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 5991 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5992 | out_unlock: |
Thomas Gleixner | 1a551ae | 2009-12-09 10:15:11 +0000 | [diff] [blame] | 5993 | rcu_read_unlock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5994 | return retval; |
| 5995 | } |
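/*
 * Illustrative user-space sketch (not part of the kernel): reading the
 * caller's round-robin timeslice. For non-RR tasks the kernel reports
 * the class's default slice, and 0 means "no timeslice" (infinity).
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		struct timespec ts;
 *
 *		if (sched_rr_get_interval(0, &ts) == 0)
 *			printf("timeslice: %ld.%09ld s\n",
 *			       (long)ts.tv_sec, ts.tv_nsec);
 *		return 0;
 *	}
 */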
| 5996 | |
Steven Rostedt | 7c731e0 | 2008-05-12 21:20:41 +0200 | [diff] [blame] | 5997 | static const char stat_nam[] = TASK_STATE_TO_CHAR_STR; |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 5998 | |
Ingo Molnar | 82a1fcb | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 5999 | void sched_show_task(struct task_struct *p) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6000 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6001 | unsigned long free = 0; |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 6002 | unsigned state; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6003 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6004 | state = p->state ? __ffs(p->state) + 1 : 0; |
Erik Gilling | 28d0686 | 2010-11-19 18:08:51 -0800 | [diff] [blame] | 6005 | printk(KERN_INFO "%-15.15s %c", p->comm, |
Andreas Mohr | 2ed6e34 | 2006-07-10 04:43:52 -0700 | [diff] [blame] | 6006 | state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?'); |
Ingo Molnar | 4bd7732 | 2007-07-11 21:21:47 +0200 | [diff] [blame] | 6007 | #if BITS_PER_LONG == 32 |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6008 | if (state == TASK_RUNNING) |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6009 | printk(KERN_CONT " running "); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6010 | else |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6011 | printk(KERN_CONT " %08lx ", thread_saved_pc(p)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6012 | #else |
| 6013 | if (state == TASK_RUNNING) |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6014 | printk(KERN_CONT " running task "); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6015 | else |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6016 | printk(KERN_CONT " %016lx ", thread_saved_pc(p)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6017 | #endif |
| 6018 | #ifdef CONFIG_DEBUG_STACK_USAGE |
Eric Sandeen | 7c9f886 | 2008-04-22 16:38:23 -0500 | [diff] [blame] | 6019 | free = stack_not_used(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6020 | #endif |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6021 | printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free, |
David Rientjes | aa47b7e | 2009-05-04 01:38:05 -0700 | [diff] [blame] | 6022 | task_pid_nr(p), task_pid_nr(p->real_parent), |
| 6023 | (unsigned long)task_thread_info(p)->flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6024 | |
Nick Piggin | 5fb5e6d | 2008-01-25 21:08:34 +0100 | [diff] [blame] | 6025 | show_stack(p, NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6026 | } |
| 6027 | |
Ingo Molnar | e59e2ae | 2006-12-06 20:35:59 -0800 | [diff] [blame] | 6028 | void show_state_filter(unsigned long state_filter) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6029 | { |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 6030 | struct task_struct *g, *p; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6031 | |
Ingo Molnar | 4bd7732 | 2007-07-11 21:21:47 +0200 | [diff] [blame] | 6032 | #if BITS_PER_LONG == 32 |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6033 | printk(KERN_INFO |
| 6034 | "  task                PC stack   pid father\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6035 | #else |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6036 | printk(KERN_INFO |
| 6037 | "  task                        PC stack   pid father\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6038 | #endif |
Thomas Gleixner | 510f5ac | 2011-07-17 20:47:54 +0200 | [diff] [blame] | 6039 | rcu_read_lock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6040 | do_each_thread(g, p) { |
| 6041 | /* |
| 6042 | * reset the NMI-timeout, listing all tasks on a slow |
Lucas De Marchi | 25985ed | 2011-03-30 22:57:33 -0300 | [diff] [blame] | 6043 | * console might take a lot of time: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6044 | */ |
| 6045 | touch_nmi_watchdog(); |
Ingo Molnar | 39bc89f | 2007-04-25 20:50:03 -0700 | [diff] [blame] | 6046 | if (!state_filter || (p->state & state_filter)) |
Ingo Molnar | 82a1fcb | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 6047 | sched_show_task(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6048 | } while_each_thread(g, p); |
| 6049 | |
Jeremy Fitzhardinge | 04c9167 | 2007-05-08 00:28:05 -0700 | [diff] [blame] | 6050 | touch_all_softlockup_watchdogs(); |
| 6051 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 6052 | #ifdef CONFIG_SCHED_DEBUG |
| 6053 | sysrq_sched_debug_show(); |
| 6054 | #endif |
Thomas Gleixner | 510f5ac | 2011-07-17 20:47:54 +0200 | [diff] [blame] | 6055 | rcu_read_unlock(); |
Ingo Molnar | e59e2ae | 2006-12-06 20:35:59 -0800 | [diff] [blame] | 6056 | /* |
| 6057 | * Only show locks if all tasks are dumped: |
| 6058 | */ |
Shmulik Ladkani | 93335a2 | 2009-11-25 15:23:41 +0200 | [diff] [blame] | 6059 | if (!state_filter) |
Ingo Molnar | e59e2ae | 2006-12-06 20:35:59 -0800 | [diff] [blame] | 6060 | debug_show_all_locks(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6061 | } |
| 6062 | |
Ingo Molnar | 1df2105 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 6063 | void __cpuinit init_idle_bootup_task(struct task_struct *idle) |
| 6064 | { |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 6065 | idle->sched_class = &idle_sched_class; |
Ingo Molnar | 1df2105 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 6066 | } |
| 6067 | |
Ingo Molnar | f340c0d | 2005-06-28 16:40:42 +0200 | [diff] [blame] | 6068 | /** |
| 6069 | * init_idle - set up an idle thread for a given CPU |
| 6070 | * @idle: task in question |
| 6071 | * @cpu: cpu the idle task belongs to |
| 6072 | * |
| 6073 | * NOTE: this function does not set the idle thread's NEED_RESCHED |
| 6074 | * flag, to make booting more robust. |
| 6075 | */ |
Nick Piggin | 5c1e176 | 2006-10-03 01:14:04 -0700 | [diff] [blame] | 6076 | void __cpuinit init_idle(struct task_struct *idle, int cpu) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6077 | { |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 6078 | struct rq *rq = cpu_rq(cpu); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6079 | unsigned long flags; |
| 6080 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 6081 | raw_spin_lock_irqsave(&rq->lock, flags); |
Ingo Molnar | 5cbd54e | 2008-11-12 20:05:50 +0100 | [diff] [blame] | 6082 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 6083 | __sched_fork(idle); |
Peter Zijlstra | 06b83b5 | 2009-12-16 18:04:35 +0100 | [diff] [blame] | 6084 | idle->state = TASK_RUNNING; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 6085 | idle->se.exec_start = sched_clock(); |
| 6086 | |
KOSAKI Motohiro | 1e1b6c5 | 2011-05-19 15:08:58 +0900 | [diff] [blame] | 6087 | do_set_cpus_allowed(idle, cpumask_of(cpu)); |
Peter Zijlstra | 6506cf6c | 2010-09-16 17:50:31 +0200 | [diff] [blame] | 6088 | /* |
| 6089 | * We're having a chicken-and-egg problem: even though we are |
| 6090 | * holding rq->lock, the cpu isn't yet set to this cpu, so the |
| 6091 | * lockdep check in task_group() will fail. |
| 6092 | * |
| 6093 | * Similar case to sched_fork(). Alternatively, we could |
| 6094 | * use task_rq_lock() here and obtain the other rq->lock. |
| 6095 | * |
| 6096 | * Silence PROVE_RCU |
| 6097 | */ |
| 6098 | rcu_read_lock(); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 6099 | __set_task_cpu(idle, cpu); |
Peter Zijlstra | 6506cf6c | 2010-09-16 17:50:31 +0200 | [diff] [blame] | 6100 | rcu_read_unlock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6101 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6102 | rq->curr = rq->idle = idle; |
Peter Zijlstra | 3ca7a44 | 2011-04-05 17:23:40 +0200 | [diff] [blame] | 6103 | #if defined(CONFIG_SMP) |
| 6104 | idle->on_cpu = 1; |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 6105 | #endif |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 6106 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6107 | |
| 6108 | /* Set the preempt count _outside_ the spinlocks! */ |
Al Viro | a1261f5 | 2005-11-13 16:06:55 -0800 | [diff] [blame] | 6109 | task_thread_info(idle)->preempt_count = 0; |
Jonathan Corbet | 625f2a3 | 2011-04-22 11:19:10 -0600 | [diff] [blame] | 6110 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 6111 | /* |
| 6112 | * The idle tasks have their own, simple scheduling class: |
| 6113 | */ |
| 6114 | idle->sched_class = &idle_sched_class; |
Steven Rostedt | 868baf0 | 2011-02-10 21:26:13 -0500 | [diff] [blame] | 6115 | ftrace_graph_init_idle_task(idle, cpu); |
Carsten Emde | f1c6f1a | 2011-10-26 23:14:16 +0200 | [diff] [blame] | 6116 | #if defined(CONFIG_SMP) |
| 6117 | sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); |
| 6118 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6119 | } |
| 6120 | |
| 6121 | /* |
Ingo Molnar | 19978ca | 2007-11-09 22:39:38 +0100 | [diff] [blame] | 6122 | * Increase the granularity value when there are more CPUs, |
| 6123 | * because with more CPUs the 'effective latency' as visible |
| 6124 | * to users decreases. But the relationship is not linear, |
| 6125 | * so pick a second-best guess by going with the log2 of the |
| 6126 | * number of CPUs. |
| 6127 | * |
| 6128 | * This idea comes from the SD scheduler of Con Kolivas: |
| 6129 | */ |
Christian Ehrhardt | acb4a84 | 2009-11-30 12:16:48 +0100 | [diff] [blame] | 6130 | static int get_update_sysctl_factor(void) |
Christian Ehrhardt | 0bcdcf2 | 2009-11-30 12:16:46 +0100 | [diff] [blame] | 6131 | { |
Mike Galbraith | 4ca3ef7 | 2009-12-10 09:25:53 +0100 | [diff] [blame] | 6132 | unsigned int cpus = min_t(int, num_online_cpus(), 8); |
Christian Ehrhardt | 1983a92 | 2009-11-30 12:16:47 +0100 | [diff] [blame] | 6133 | unsigned int factor; |
| 6134 | |
| 6135 | switch (sysctl_sched_tunable_scaling) { |
| 6136 | case SCHED_TUNABLESCALING_NONE: |
| 6137 | factor = 1; |
| 6138 | break; |
| 6139 | case SCHED_TUNABLESCALING_LINEAR: |
| 6140 | factor = cpus; |
| 6141 | break; |
| 6142 | case SCHED_TUNABLESCALING_LOG: |
| 6143 | default: |
| 6144 | factor = 1 + ilog2(cpus); |
| 6145 | break; |
| 6146 | } |
Christian Ehrhardt | 0bcdcf2 | 2009-11-30 12:16:46 +0100 | [diff] [blame] | 6147 | |
Christian Ehrhardt | acb4a84 | 2009-11-30 12:16:48 +0100 | [diff] [blame] | 6148 | return factor; |
| 6149 | } |
| 6150 | |
| 6151 | static void update_sysctl(void) |
| 6152 | { |
| 6153 | unsigned int factor = get_update_sysctl_factor(); |
| 6154 | |
Christian Ehrhardt | 0bcdcf2 | 2009-11-30 12:16:46 +0100 | [diff] [blame] | 6155 | #define SET_SYSCTL(name) \ |
| 6156 | (sysctl_##name = (factor) * normalized_sysctl_##name) |
| 6157 | SET_SYSCTL(sched_min_granularity); |
| 6158 | SET_SYSCTL(sched_latency); |
| 6159 | SET_SYSCTL(sched_wakeup_granularity); |
Christian Ehrhardt | 0bcdcf2 | 2009-11-30 12:16:46 +0100 | [diff] [blame] | 6160 | #undef SET_SYSCTL |
| 6161 | } |
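/*
 * Worked example (numbers assumed for illustration): with the default
 * SCHED_TUNABLESCALING_LOG policy on a box with >= 8 online CPUs, the
 * min_t() cap above gives cpus = 8, so factor = 1 + ilog2(8) = 4.
 * SET_SYSCTL(sched_latency) then expands to
 *
 *	sysctl_sched_latency = 4 * normalized_sysctl_sched_latency;
 *
 * e.g. a normalized 6ms latency target becomes 24ms system-wide.
 */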
| 6162 | |
Ingo Molnar | 19978ca | 2007-11-09 22:39:38 +0100 | [diff] [blame] | 6163 | static inline void sched_init_granularity(void) |
| 6164 | { |
Christian Ehrhardt | 0bcdcf2 | 2009-11-30 12:16:46 +0100 | [diff] [blame] | 6165 | update_sysctl(); |
Ingo Molnar | 19978ca | 2007-11-09 22:39:38 +0100 | [diff] [blame] | 6166 | } |
| 6167 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6168 | #ifdef CONFIG_SMP |
KOSAKI Motohiro | 1e1b6c5 | 2011-05-19 15:08:58 +0900 | [diff] [blame] | 6169 | void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) |
| 6170 | { |
| 6171 | if (p->sched_class && p->sched_class->set_cpus_allowed) |
| 6172 | p->sched_class->set_cpus_allowed(p, new_mask); |
Peter Zijlstra | 4939602 | 2011-06-25 15:45:46 +0200 | [diff] [blame] | 6173 | |
| 6174 | cpumask_copy(&p->cpus_allowed, new_mask); |
| 6175 | p->rt.nr_cpus_allowed = cpumask_weight(new_mask); |
KOSAKI Motohiro | 1e1b6c5 | 2011-05-19 15:08:58 +0900 | [diff] [blame] | 6176 | } |
| 6177 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6178 | /* |
| 6179 | * This is how migration works: |
| 6180 | * |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 6181 | * 1) we invoke migration_cpu_stop() on the target CPU using |
| 6182 | * stop_one_cpu(). |
| 6183 | * 2) stopper starts to run (implicitly forcing the migrated thread |
| 6184 | * off the CPU) |
| 6185 | * 3) it checks whether the migrated task is still in the wrong runqueue. |
| 6186 | * 4) if it's in the wrong runqueue then the migration thread removes |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6187 | * it and puts it into the right queue. |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 6188 | * 5) stopper completes and stop_one_cpu() returns and the migration |
| 6189 | * is done. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6190 | */ |
| 6191 | |
| 6192 | /* |
| 6193 | * Change a given task's CPU affinity. Migrate the thread to a |
| 6194 | * proper CPU and schedule it away if the CPU it's executing on |
| 6195 | * is removed from the allowed bitmask. |
| 6196 | * |
| 6197 | * NOTE: the caller must have a valid reference to the task, the |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 6198 | * task must not exit() & deallocate itself prematurely. The |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6199 | * call is not atomic; no spinlocks may be held. |
| 6200 | */ |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6201 | int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6202 | { |
| 6203 | unsigned long flags; |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 6204 | struct rq *rq; |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 6205 | unsigned int dest_cpu; |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 6206 | int ret = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6207 | |
| 6208 | rq = task_rq_lock(p, &flags); |
Peter Zijlstra | e291200 | 2009-12-16 18:04:36 +0100 | [diff] [blame] | 6209 | |
Yong Zhang | db44fc0 | 2011-05-09 22:07:05 +0800 | [diff] [blame] | 6210 | if (cpumask_equal(&p->cpus_allowed, new_mask)) |
| 6211 | goto out; |
| 6212 | |
Peter Zijlstra | 6ad4c18 | 2009-11-25 13:31:39 +0100 | [diff] [blame] | 6213 | if (!cpumask_intersects(new_mask, cpu_active_mask)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6214 | ret = -EINVAL; |
| 6215 | goto out; |
| 6216 | } |
| 6217 | |
Yong Zhang | db44fc0 | 2011-05-09 22:07:05 +0800 | [diff] [blame] | 6218 | if (unlikely((p->flags & PF_THREAD_BOUND) && p != current)) { |
David Rientjes | 9985b0b | 2008-06-05 12:57:11 -0700 | [diff] [blame] | 6219 | ret = -EINVAL; |
| 6220 | goto out; |
| 6221 | } |
| 6222 | |
KOSAKI Motohiro | 1e1b6c5 | 2011-05-19 15:08:58 +0900 | [diff] [blame] | 6223 | do_set_cpus_allowed(p, new_mask); |
Gregory Haskins | 73fe6aa | 2008-01-25 21:08:07 +0100 | [diff] [blame] | 6224 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6225 | /* Can the task run on the task's current CPU? If so, we're done */ |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6226 | if (cpumask_test_cpu(task_cpu(p), new_mask)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6227 | goto out; |
| 6228 | |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 6229 | dest_cpu = cpumask_any_and(cpu_active_mask, new_mask); |
Peter Zijlstra | bd8e7dd | 2011-04-05 17:23:59 +0200 | [diff] [blame] | 6230 | if (p->on_rq) { |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 6231 | struct migration_arg arg = { p, dest_cpu }; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6232 | /* Need help from migration thread: drop lock and wait. */ |
Peter Zijlstra | 0122ec5 | 2011-04-05 17:23:51 +0200 | [diff] [blame] | 6233 | task_rq_unlock(rq, p, &flags); |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 6234 | stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6235 | tlb_migrate_finish(p->mm); |
| 6236 | return 0; |
| 6237 | } |
| 6238 | out: |
Peter Zijlstra | 0122ec5 | 2011-04-05 17:23:51 +0200 | [diff] [blame] | 6239 | task_rq_unlock(rq, p, &flags); |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 6240 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6241 | return ret; |
| 6242 | } |
Mike Travis | cd8ba7c | 2008-03-26 14:23:49 -0700 | [diff] [blame] | 6243 | EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); |
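/*
 * Usage sketch (nid and worker are hypothetical): confining a kthread
 * to one NUMA node's online CPUs. The allocation mirrors the
 * cpumask_var_t handling in the syscall path above.
 *
 *	cpumask_var_t mask;
 *
 *	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_and(mask, cpumask_of_node(nid), cpu_online_mask);
 *	if (!cpumask_empty(mask))
 *		set_cpus_allowed_ptr(worker, mask);
 *	free_cpumask_var(mask);
 */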
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6244 | |
| 6245 | /* |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 6246 | * Move a (non-current) task off this cpu, onto the dest cpu. We're doing |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6247 | * this because either it can't run here any more (set_cpus_allowed() moved |
| 6248 | * it away from this CPU, or the CPU is going down), or because we're |
| 6249 | * attempting to rebalance this task on exec (sched_exec). |
| 6250 | * |
| 6251 | * So we race with normal scheduler movements, but that's OK, as long |
| 6252 | * as the task is no longer on this CPU. |
Kirill Korotaev | efc3081 | 2006-06-27 02:54:32 -0700 | [diff] [blame] | 6253 | * |
| 6254 | * Returns non-zero if task was successfully migrated. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6255 | */ |
Kirill Korotaev | efc3081 | 2006-06-27 02:54:32 -0700 | [diff] [blame] | 6256 | static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6257 | { |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 6258 | struct rq *rq_dest, *rq_src; |
Peter Zijlstra | e291200 | 2009-12-16 18:04:36 +0100 | [diff] [blame] | 6259 | int ret = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6260 | |
Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 6261 | if (unlikely(!cpu_active(dest_cpu))) |
Kirill Korotaev | efc3081 | 2006-06-27 02:54:32 -0700 | [diff] [blame] | 6262 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6263 | |
| 6264 | rq_src = cpu_rq(src_cpu); |
| 6265 | rq_dest = cpu_rq(dest_cpu); |
| 6266 | |
Peter Zijlstra | 0122ec5 | 2011-04-05 17:23:51 +0200 | [diff] [blame] | 6267 | raw_spin_lock(&p->pi_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6268 | double_rq_lock(rq_src, rq_dest); |
| 6269 | /* Already moved. */ |
| 6270 | if (task_cpu(p) != src_cpu) |
Linus Torvalds | b1e3873 | 2008-07-10 11:25:03 -0700 | [diff] [blame] | 6271 | goto done; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6272 | /* Affinity changed (again). */ |
Peter Zijlstra | fa17b50 | 2011-06-16 12:23:22 +0200 | [diff] [blame] | 6273 | if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p))) |
Linus Torvalds | b1e3873 | 2008-07-10 11:25:03 -0700 | [diff] [blame] | 6274 | goto fail; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6275 | |
Peter Zijlstra | e291200 | 2009-12-16 18:04:36 +0100 | [diff] [blame] | 6276 | /* |
| 6277 | * If we're not on a rq, the next wake-up will ensure we're |
| 6278 | * placed properly. |
| 6279 | */ |
Peter Zijlstra | fd2f441 | 2011-04-05 17:23:44 +0200 | [diff] [blame] | 6280 | if (p->on_rq) { |
Ingo Molnar | 2e1cb74 | 2007-08-09 11:16:49 +0200 | [diff] [blame] | 6281 | deactivate_task(rq_src, p, 0); |
Peter Zijlstra | e291200 | 2009-12-16 18:04:36 +0100 | [diff] [blame] | 6282 | set_task_cpu(p, dest_cpu); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 6283 | activate_task(rq_dest, p, 0); |
Peter Zijlstra | 15afe09 | 2008-09-20 23:38:02 +0200 | [diff] [blame] | 6284 | check_preempt_curr(rq_dest, p, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6285 | } |
Linus Torvalds | b1e3873 | 2008-07-10 11:25:03 -0700 | [diff] [blame] | 6286 | done: |
Kirill Korotaev | efc3081 | 2006-06-27 02:54:32 -0700 | [diff] [blame] | 6287 | ret = 1; |
Linus Torvalds | b1e3873 | 2008-07-10 11:25:03 -0700 | [diff] [blame] | 6288 | fail: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6289 | double_rq_unlock(rq_src, rq_dest); |
Peter Zijlstra | 0122ec5 | 2011-04-05 17:23:51 +0200 | [diff] [blame] | 6290 | raw_spin_unlock(&p->pi_lock); |
Kirill Korotaev | efc3081 | 2006-06-27 02:54:32 -0700 | [diff] [blame] | 6291 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6292 | } |
| 6293 | |
| 6294 | /* |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 6295 | * migration_cpu_stop - this will be executed by a highprio stopper thread |
| 6296 | * and performs thread migration by bumping thread off CPU then |
| 6297 | * 'pushing' onto another runqueue. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6298 | */ |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 6299 | static int migration_cpu_stop(void *data) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6300 | { |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 6301 | struct migration_arg *arg = data; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6302 | |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 6303 | /* |
| 6304 | * The original target cpu might have gone down and we might |
| 6305 | * be on another cpu but it doesn't matter. |
| 6306 | */ |
| 6307 | local_irq_disable(); |
| 6308 | __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu); |
| 6309 | local_irq_enable(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6310 | return 0; |
| 6311 | } |
| 6312 | |
| 6313 | #ifdef CONFIG_HOTPLUG_CPU |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6314 | |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 6315 | /* |
| 6316 | * Ensures that the idle task is using init_mm right before its cpu goes |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6317 | * offline. |
| 6318 | */ |
| 6319 | void idle_task_exit(void) |
| 6320 | { |
| 6321 | struct mm_struct *mm = current->active_mm; |
| 6322 | |
| 6323 | BUG_ON(cpu_online(smp_processor_id())); |
| 6324 | |
| 6325 | if (mm != &init_mm) |
| 6326 | switch_mm(mm, &init_mm, current); |
| 6327 | mmdrop(mm); |
| 6328 | } |
| 6329 | |
Peter Zijlstra | 48c5cca | 2010-11-13 19:32:29 +0100 | [diff] [blame] | 6330 | /* |
| 6331 | * While a dead CPU has no uninterruptible tasks queued at this point, |
| 6332 | * it might still have a nonzero ->nr_uninterruptible counter, because |
| 6333 | * for performance reasons the counter is not strictly tracking tasks to |
| 6334 | * their home CPUs. So we just add the counter to another CPU's counter, |
| 6335 | * to keep the global sum constant after CPU-down: |
| 6336 | */ |
| 6337 | static void migrate_nr_uninterruptible(struct rq *rq_src) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6338 | { |
Peter Zijlstra | 48c5cca | 2010-11-13 19:32:29 +0100 | [diff] [blame] | 6339 | struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6340 | |
Peter Zijlstra | 48c5cca | 2010-11-13 19:32:29 +0100 | [diff] [blame] | 6341 | rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible; |
| 6342 | rq_src->nr_uninterruptible = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6343 | } |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 6344 | |
| 6345 | /* |
| 6346 | * remove the tasks which were accounted by rq from calc_load_tasks. |
| 6347 | */ |
| 6348 | static void calc_global_load_remove(struct rq *rq) |
| 6349 | { |
| 6350 | atomic_long_sub(rq->calc_load_active, &calc_load_tasks); |
Thomas Gleixner | a468d38 | 2009-07-17 14:15:46 +0200 | [diff] [blame] | 6351 | rq->calc_load_active = 0; |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 6352 | } |
Peter Zijlstra | 48c5cca | 2010-11-13 19:32:29 +0100 | [diff] [blame] | 6353 | |
Paul Turner | 8cb120d | 2011-07-21 09:43:38 -0700 | [diff] [blame] | 6354 | #ifdef CONFIG_CFS_BANDWIDTH |
| 6355 | static void unthrottle_offline_cfs_rqs(struct rq *rq) |
| 6356 | { |
| 6357 | struct cfs_rq *cfs_rq; |
| 6358 | |
| 6359 | for_each_leaf_cfs_rq(rq, cfs_rq) { |
| 6360 | struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); |
| 6361 | |
| 6362 | if (!cfs_rq->runtime_enabled) |
| 6363 | continue; |
| 6364 | |
| 6365 | /* |
| 6366 | * clock_task is not advancing so we just need to make sure |
| 6367 | * there's some valid quota amount |
| 6368 | */ |
| 6369 | cfs_rq->runtime_remaining = cfs_b->quota; |
| 6370 | if (cfs_rq_throttled(cfs_rq)) |
| 6371 | unthrottle_cfs_rq(cfs_rq); |
| 6372 | } |
| 6373 | } |
| 6374 | #else |
| 6375 | static void unthrottle_offline_cfs_rqs(struct rq *rq) {} |
| 6376 | #endif |
| 6377 | |
Peter Zijlstra | 48c5cca | 2010-11-13 19:32:29 +0100 | [diff] [blame] | 6378 | /* |
| 6379 | * Migrate all tasks from the rq, sleeping tasks will be migrated by |
| 6380 | * try_to_wake_up()->select_task_rq(). |
| 6381 | * |
| 6382 | * Called with rq->lock held even though we're in stop_machine() and |
| 6383 | * there's no concurrency possible; we hold the required locks anyway |
| 6384 | * because of lock validation efforts. |
| 6385 | */ |
| 6386 | static void migrate_tasks(unsigned int dead_cpu) |
| 6387 | { |
| 6388 | struct rq *rq = cpu_rq(dead_cpu); |
| 6389 | struct task_struct *next, *stop = rq->stop; |
| 6390 | int dest_cpu; |
| 6391 | |
| 6392 | /* |
| 6393 | * Fudge the rq selection such that the below task selection loop |
| 6394 | * doesn't get stuck on the currently eligible stop task. |
| 6395 | * |
| 6396 | * We're currently inside stop_machine() and the rq is either stuck |
| 6397 | * in the stop_machine_cpu_stop() loop, or we're executing this code, |
| 6398 | * either way we should never end up calling schedule() until we're |
| 6399 | * done here. |
| 6400 | */ |
| 6401 | rq->stop = NULL; |
| 6402 | |
Paul Turner | 8cb120d | 2011-07-21 09:43:38 -0700 | [diff] [blame] | 6403 | /* Ensure any throttled groups are reachable by pick_next_task */ |
| 6404 | unthrottle_offline_cfs_rqs(rq); |
| 6405 | |
Peter Zijlstra | 48c5cca | 2010-11-13 19:32:29 +0100 | [diff] [blame] | 6406 | for ( ; ; ) { |
| 6407 | /* |
| 6408 | * There's this thread running, bail when that's the only |
| 6409 | * remaining thread. |
| 6410 | */ |
| 6411 | if (rq->nr_running == 1) |
| 6412 | break; |
| 6413 | |
| 6414 | next = pick_next_task(rq); |
| 6415 | BUG_ON(!next); |
| 6416 | next->sched_class->put_prev_task(rq, next); |
| 6417 | |
| 6418 | /* Find suitable destination for @next, with force if needed. */ |
| 6419 | dest_cpu = select_fallback_rq(dead_cpu, next); |
| 6420 | raw_spin_unlock(&rq->lock); |
| 6421 | |
| 6422 | __migrate_task(next, dead_cpu, dest_cpu); |
| 6423 | |
| 6424 | raw_spin_lock(&rq->lock); |
| 6425 | } |
| 6426 | |
| 6427 | rq->stop = stop; |
| 6428 | } |
| 6429 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6430 | #endif /* CONFIG_HOTPLUG_CPU */ |
| 6431 | |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6432 | #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL) |
| 6433 | |
| 6434 | static struct ctl_table sd_ctl_dir[] = { |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 6435 | { |
| 6436 | .procname = "sched_domain", |
Eric W. Biederman | c57baf1 | 2007-08-23 15:18:02 +0200 | [diff] [blame] | 6437 | .mode = 0555, |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 6438 | }, |
Eric W. Biederman | 5699230 | 2009-11-05 15:38:40 -0800 | [diff] [blame] | 6439 | {} |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6440 | }; |
| 6441 | |
| 6442 | static struct ctl_table sd_ctl_root[] = { |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 6443 | { |
| 6444 | .procname = "kernel", |
Eric W. Biederman | c57baf1 | 2007-08-23 15:18:02 +0200 | [diff] [blame] | 6445 | .mode = 0555, |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 6446 | .child = sd_ctl_dir, |
| 6447 | }, |
Eric W. Biederman | 5699230 | 2009-11-05 15:38:40 -0800 | [diff] [blame] | 6448 | {} |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6449 | }; |
| 6450 | |
| 6451 | static struct ctl_table *sd_alloc_ctl_entry(int n) |
| 6452 | { |
| 6453 | struct ctl_table *entry = |
Milton Miller | 5cf9f06 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 6454 | kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL); |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6455 | |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6456 | return entry; |
| 6457 | } |
| 6458 | |
Milton Miller | 6382bc9 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 6459 | static void sd_free_ctl_entry(struct ctl_table **tablep) |
| 6460 | { |
Milton Miller | cd790076 | 2007-10-17 16:55:11 +0200 | [diff] [blame] | 6461 | struct ctl_table *entry; |
Milton Miller | 6382bc9 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 6462 | |
Milton Miller | cd790076 | 2007-10-17 16:55:11 +0200 | [diff] [blame] | 6463 | /* |
| 6464 | * In the intermediate directories, both the child directory and |
| 6465 | * procname are dynamically allocated and could fail, but the mode |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 6466 | * will always be set. In the lowest directory the names are |
Milton Miller | cd790076 | 2007-10-17 16:55:11 +0200 | [diff] [blame] | 6467 | * static strings and all have proc handlers. |
| 6468 | */ |
| 6469 | for (entry = *tablep; entry->mode; entry++) { |
Milton Miller | 6382bc9 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 6470 | if (entry->child) |
| 6471 | sd_free_ctl_entry(&entry->child); |
Milton Miller | cd790076 | 2007-10-17 16:55:11 +0200 | [diff] [blame] | 6472 | if (entry->proc_handler == NULL) |
| 6473 | kfree(entry->procname); |
| 6474 | } |
Milton Miller | 6382bc9 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 6475 | |
| 6476 | kfree(*tablep); |
| 6477 | *tablep = NULL; |
| 6478 | } |
| 6479 | |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6480 | static void |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 6481 | set_table_entry(struct ctl_table *entry, |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6482 | const char *procname, void *data, int maxlen, |
| 6483 | mode_t mode, proc_handler *proc_handler) |
| 6484 | { |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6485 | entry->procname = procname; |
| 6486 | entry->data = data; |
| 6487 | entry->maxlen = maxlen; |
| 6488 | entry->mode = mode; |
| 6489 | entry->proc_handler = proc_handler; |
| 6490 | } |
| 6491 | |
| 6492 | static struct ctl_table * |
| 6493 | sd_alloc_ctl_domain_table(struct sched_domain *sd) |
| 6494 | { |
Ingo Molnar | a5d8c34 | 2008-10-09 11:35:51 +0200 | [diff] [blame] | 6495 | struct ctl_table *table = sd_alloc_ctl_entry(13); |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6496 | |
Milton Miller | ad1cdc1 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 6497 | if (table == NULL) |
| 6498 | return NULL; |
| 6499 | |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 6500 | set_table_entry(&table[0], "min_interval", &sd->min_interval, |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6501 | sizeof(long), 0644, proc_doulongvec_minmax); |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 6502 | set_table_entry(&table[1], "max_interval", &sd->max_interval, |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6503 | sizeof(long), 0644, proc_doulongvec_minmax); |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 6504 | set_table_entry(&table[2], "busy_idx", &sd->busy_idx, |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6505 | sizeof(int), 0644, proc_dointvec_minmax); |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 6506 | set_table_entry(&table[3], "idle_idx", &sd->idle_idx, |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6507 | sizeof(int), 0644, proc_dointvec_minmax); |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 6508 | set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx, |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6509 | sizeof(int), 0644, proc_dointvec_minmax); |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 6510 | set_table_entry(&table[5], "wake_idx", &sd->wake_idx, |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6511 | sizeof(int), 0644, proc_dointvec_minmax); |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 6512 | set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx, |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6513 | sizeof(int), 0644, proc_dointvec_minmax); |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 6514 | set_table_entry(&table[7], "busy_factor", &sd->busy_factor, |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6515 | sizeof(int), 0644, proc_dointvec_minmax); |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 6516 | set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct, |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6517 | sizeof(int), 0644, proc_dointvec_minmax); |
Zou Nan hai | ace8b3d | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 6518 | set_table_entry(&table[9], "cache_nice_tries", |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6519 | &sd->cache_nice_tries, |
| 6520 | sizeof(int), 0644, proc_dointvec_minmax); |
Zou Nan hai | ace8b3d | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 6521 | set_table_entry(&table[10], "flags", &sd->flags, |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6522 | sizeof(int), 0644, proc_dointvec_minmax); |
Ingo Molnar | a5d8c34 | 2008-10-09 11:35:51 +0200 | [diff] [blame] | 6523 | set_table_entry(&table[11], "name", sd->name, |
| 6524 | CORENAME_MAX_SIZE, 0444, proc_dostring); |
| 6525 | /* &table[12] is terminator */ |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6526 | |
| 6527 | return table; |
| 6528 | } |
| 6529 | |
Ingo Molnar | 9a4e715 | 2007-11-28 15:52:56 +0100 | [diff] [blame] | 6530 | static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu) |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6531 | { |
| 6532 | struct ctl_table *entry, *table; |
| 6533 | struct sched_domain *sd; |
| 6534 | int domain_num = 0, i; |
| 6535 | char buf[32]; |
| 6536 | |
| 6537 | for_each_domain(cpu, sd) |
| 6538 | domain_num++; |
| 6539 | entry = table = sd_alloc_ctl_entry(domain_num + 1); |
Milton Miller | ad1cdc1 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 6540 | if (table == NULL) |
| 6541 | return NULL; |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6542 | |
| 6543 | i = 0; |
| 6544 | for_each_domain(cpu, sd) { |
| 6545 | snprintf(buf, 32, "domain%d", i); |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6546 | entry->procname = kstrdup(buf, GFP_KERNEL); |
Eric W. Biederman | c57baf1 | 2007-08-23 15:18:02 +0200 | [diff] [blame] | 6547 | entry->mode = 0555; |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6548 | entry->child = sd_alloc_ctl_domain_table(sd); |
| 6549 | entry++; |
| 6550 | i++; |
| 6551 | } |
| 6552 | return table; |
| 6553 | } |
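/*
 * Note: for_each_domain() walks from rq->sd up through ->parent, so
 * "domain0" is always the innermost level (e.g. SMT siblings) and the
 * highest-numbered directory is the outermost one.
 */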
| 6554 | |
| 6555 | static struct ctl_table_header *sd_sysctl_header; |
Milton Miller | 6382bc9 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 6556 | static void register_sched_domain_sysctl(void) |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6557 | { |
Peter Zijlstra | 6ad4c18 | 2009-11-25 13:31:39 +0100 | [diff] [blame] | 6558 | int i, cpu_num = num_possible_cpus(); |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6559 | struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1); |
| 6560 | char buf[32]; |
| 6561 | |
Milton Miller | 7378547 | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6562 | WARN_ON(sd_ctl_dir[0].child); |
| 6563 | sd_ctl_dir[0].child = entry; |
| 6564 | |
Milton Miller | ad1cdc1 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 6565 | if (entry == NULL) |
| 6566 | return; |
| 6567 | |
Peter Zijlstra | 6ad4c18 | 2009-11-25 13:31:39 +0100 | [diff] [blame] | 6568 | for_each_possible_cpu(i) { |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6569 | snprintf(buf, 32, "cpu%d", i); |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6570 | entry->procname = kstrdup(buf, GFP_KERNEL); |
Eric W. Biederman | c57baf1 | 2007-08-23 15:18:02 +0200 | [diff] [blame] | 6571 | entry->mode = 0555; |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6572 | entry->child = sd_alloc_ctl_cpu_table(i); |
Milton Miller | 97b6ea7 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 6573 | entry++; |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6574 | } |
Milton Miller | 7378547 | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6575 | |
| 6576 | WARN_ON(sd_sysctl_header); |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6577 | sd_sysctl_header = register_sysctl_table(sd_ctl_root); |
| 6578 | } |
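/*
 * Illustrative layout of the resulting tree for a hypothetical two-CPU
 * machine with a single domain level (the root directory is the one
 * sd_ctl_root describes, conventionally /proc/sys/kernel/sched_domain):
 *
 *   cpu0/domain0/{min_interval,max_interval,...,flags,name}
 *   cpu1/domain0/{min_interval,max_interval,...,flags,name}
 */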
Milton Miller | 6382bc9 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 6579 | |
Milton Miller | 7378547 | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6580 | /* may be called multiple times per register */ |
Milton Miller | 6382bc9 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 6581 | static void unregister_sched_domain_sysctl(void) |
| 6582 | { |
Milton Miller | 7378547 | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6583 | if (sd_sysctl_header) |
| 6584 | unregister_sysctl_table(sd_sysctl_header); |
Milton Miller | 6382bc9 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 6585 | sd_sysctl_header = NULL; |
Milton Miller | 7378547 | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6586 | if (sd_ctl_dir[0].child) |
| 6587 | sd_free_ctl_entry(&sd_ctl_dir[0].child); |
Milton Miller | 6382bc9 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 6588 | } |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6589 | #else |
Milton Miller | 6382bc9 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 6590 | static void register_sched_domain_sysctl(void) |
| 6591 | { |
| 6592 | } |
| 6593 | static void unregister_sched_domain_sysctl(void) |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 6594 | { |
| 6595 | } |
| 6596 | #endif |
| 6597 | |
Gregory Haskins | 1f11eb6 | 2008-06-04 15:04:05 -0400 | [diff] [blame] | 6598 | static void set_rq_online(struct rq *rq) |
| 6599 | { |
| 6600 | if (!rq->online) { |
| 6601 | const struct sched_class *class; |
| 6602 | |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6603 | cpumask_set_cpu(rq->cpu, rq->rd->online); |
Gregory Haskins | 1f11eb6 | 2008-06-04 15:04:05 -0400 | [diff] [blame] | 6604 | rq->online = 1; |
| 6605 | |
| 6606 | for_each_class(class) { |
| 6607 | if (class->rq_online) |
| 6608 | class->rq_online(rq); |
| 6609 | } |
| 6610 | } |
| 6611 | } |
| 6612 | |
| 6613 | static void set_rq_offline(struct rq *rq) |
| 6614 | { |
| 6615 | if (rq->online) { |
| 6616 | const struct sched_class *class; |
| 6617 | |
| 6618 | for_each_class(class) { |
| 6619 | if (class->rq_offline) |
| 6620 | class->rq_offline(rq); |
| 6621 | } |
| 6622 | |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6623 | cpumask_clear_cpu(rq->cpu, rq->rd->online); |
Gregory Haskins | 1f11eb6 | 2008-06-04 15:04:05 -0400 | [diff] [blame] | 6624 | rq->online = 0; |
| 6625 | } |
| 6626 | } |
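/*
 * Note the ordering above: set_rq_online() marks the CPU in rd->online
 * *before* calling each class's ->rq_online(), while set_rq_offline()
 * notifies the classes *before* clearing the bit, so a class callback
 * always sees its own CPU as online.
 */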
| 6627 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6628 | /* |
| 6629 |  * migration_call - callback triggered when a CPU is added or removed. |
| 6630 |  * It updates per-rq state on cpu-up and migrates tasks away on CPU_DYING. |
| 6631 | */ |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 6632 | static int __cpuinit |
| 6633 | migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6634 | { |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 6635 | int cpu = (long)hcpu; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6636 | unsigned long flags; |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 6637 | struct rq *rq = cpu_rq(cpu); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6638 | |
Peter Zijlstra | 48c5cca | 2010-11-13 19:32:29 +0100 | [diff] [blame] | 6639 | switch (action & ~CPU_TASKS_FROZEN) { |
Gautham R Shenoy | 5be9361 | 2007-05-09 02:34:04 -0700 | [diff] [blame] | 6640 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6641 | case CPU_UP_PREPARE: |
Thomas Gleixner | a468d38 | 2009-07-17 14:15:46 +0200 | [diff] [blame] | 6642 | rq->calc_load_update = calc_load_update; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6643 | break; |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 6644 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6645 | case CPU_ONLINE: |
Gregory Haskins | 1f94ef5 | 2008-03-10 16:52:41 -0400 | [diff] [blame] | 6646 | /* Update our root-domain */ |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 6647 | raw_spin_lock_irqsave(&rq->lock, flags); |
Gregory Haskins | 1f94ef5 | 2008-03-10 16:52:41 -0400 | [diff] [blame] | 6648 | if (rq->rd) { |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6649 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); |
Gregory Haskins | 1f11eb6 | 2008-06-04 15:04:05 -0400 | [diff] [blame] | 6650 | |
| 6651 | set_rq_online(rq); |
Gregory Haskins | 1f94ef5 | 2008-03-10 16:52:41 -0400 | [diff] [blame] | 6652 | } |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 6653 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6654 | break; |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 6655 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6656 | #ifdef CONFIG_HOTPLUG_CPU |
Gregory Haskins | 08f503b | 2008-03-10 17:59:11 -0400 | [diff] [blame] | 6657 | case CPU_DYING: |
Peter Zijlstra | 317f394 | 2011-04-05 17:23:58 +0200 | [diff] [blame] | 6658 | sched_ttwu_pending(); |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6659 | /* Update our root-domain */ |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 6660 | raw_spin_lock_irqsave(&rq->lock, flags); |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6661 | if (rq->rd) { |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6662 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); |
Gregory Haskins | 1f11eb6 | 2008-06-04 15:04:05 -0400 | [diff] [blame] | 6663 | set_rq_offline(rq); |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6664 | } |
Peter Zijlstra | 48c5cca | 2010-11-13 19:32:29 +0100 | [diff] [blame] | 6665 | migrate_tasks(cpu); |
| 6666 | BUG_ON(rq->nr_running != 1); /* the migration thread */ |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 6667 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
Peter Zijlstra | 48c5cca | 2010-11-13 19:32:29 +0100 | [diff] [blame] | 6668 | |
| 6669 | migrate_nr_uninterruptible(rq); |
| 6670 | calc_global_load_remove(rq); |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6671 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6672 | #endif |
| 6673 | } |
Peter Zijlstra | 49c022e | 2011-04-05 10:14:25 +0200 | [diff] [blame] | 6674 | |
| 6675 | update_max_interval(); |
| 6676 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6677 | return NOTIFY_OK; |
| 6678 | } |
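/*
 * The "action & ~CPU_TASKS_FROZEN" above folds the _FROZEN variants
 * into their plain counterparts, e.g.:
 *
 *   CPU_ONLINE_FROZEN == CPU_ONLINE | CPU_TASKS_FROZEN
 *
 * so suspend/resume takes the same paths as ordinary hotplug.
 */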
| 6679 | |
Paul Mackerras | f38b082 | 2009-06-02 21:05:16 +1000 | [diff] [blame] | 6680 | /* |
| 6681 |  * Register at high priority so that task migration (migrate_tasks) |
| 6682 | * happens before everything else. This has to be lower priority than |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6683 | * the notifier in the perf_event subsystem, though. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6684 | */ |
Chandra Seetharaman | 26c2143 | 2006-06-27 02:54:10 -0700 | [diff] [blame] | 6685 | static struct notifier_block __cpuinitdata migration_notifier = { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6686 | .notifier_call = migration_call, |
Tejun Heo | 50a323b | 2010-06-08 21:40:36 +0200 | [diff] [blame] | 6687 | .priority = CPU_PRI_MIGRATION, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6688 | }; |
| 6689 | |
Tejun Heo | 3a101d0 | 2010-06-08 21:40:36 +0200 | [diff] [blame] | 6690 | static int __cpuinit sched_cpu_active(struct notifier_block *nfb, |
| 6691 | unsigned long action, void *hcpu) |
| 6692 | { |
| 6693 | switch (action & ~CPU_TASKS_FROZEN) { |
| 6694 | case CPU_ONLINE: |
| 6695 | case CPU_DOWN_FAILED: |
| 6696 | set_cpu_active((long)hcpu, true); |
| 6697 | return NOTIFY_OK; |
| 6698 | default: |
| 6699 | return NOTIFY_DONE; |
| 6700 | } |
| 6701 | } |
| 6702 | |
| 6703 | static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb, |
| 6704 | unsigned long action, void *hcpu) |
| 6705 | { |
| 6706 | switch (action & ~CPU_TASKS_FROZEN) { |
| 6707 | case CPU_DOWN_PREPARE: |
| 6708 | set_cpu_active((long)hcpu, false); |
| 6709 | return NOTIFY_OK; |
| 6710 | default: |
| 6711 | return NOTIFY_DONE; |
| 6712 | } |
| 6713 | } |
| 6714 | |
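/*
 * The boot CPU is already online by the time initcalls run, so it never
 * sees CPU_UP_PREPARE/CPU_ONLINE through the notifier chain; the manual
 * migration_call() invocations below bring its runqueue state up to
 * date before the notifier is registered for the remaining CPUs.
 */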
Eduard - Gabriel Munteanu | 7babe8d | 2008-07-25 19:45:11 -0700 | [diff] [blame] | 6715 | static int __init migration_init(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6716 | { |
| 6717 | void *cpu = (void *)(long)smp_processor_id(); |
Akinobu Mita | 07dccf3 | 2006-09-29 02:00:22 -0700 | [diff] [blame] | 6718 | int err; |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 6719 | |
Tejun Heo | 3a101d0 | 2010-06-08 21:40:36 +0200 | [diff] [blame] | 6720 | /* Initialize migration for the boot CPU */ |
Akinobu Mita | 07dccf3 | 2006-09-29 02:00:22 -0700 | [diff] [blame] | 6721 | err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu); |
| 6722 | BUG_ON(err == NOTIFY_BAD); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6723 | migration_call(&migration_notifier, CPU_ONLINE, cpu); |
| 6724 | register_cpu_notifier(&migration_notifier); |
Eduard - Gabriel Munteanu | 7babe8d | 2008-07-25 19:45:11 -0700 | [diff] [blame] | 6725 | |
Tejun Heo | 3a101d0 | 2010-06-08 21:40:36 +0200 | [diff] [blame] | 6726 | /* Register cpu active notifiers */ |
| 6727 | cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE); |
| 6728 | cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE); |
| 6729 | |
Thomas Gleixner | a004cd4 | 2009-07-21 09:54:05 +0200 | [diff] [blame] | 6730 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6731 | } |
Eduard - Gabriel Munteanu | 7babe8d | 2008-07-25 19:45:11 -0700 | [diff] [blame] | 6732 | early_initcall(migration_init); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6733 | #endif |
| 6734 | |
| 6735 | #ifdef CONFIG_SMP |
Christoph Lameter | 476f353 | 2007-05-06 14:48:58 -0700 | [diff] [blame] | 6736 | |
Peter Zijlstra | 4cb9883 | 2011-04-07 14:09:58 +0200 | [diff] [blame] | 6737 | static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */ |
| 6738 | |
Ingo Molnar | 3e9830d | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 6739 | #ifdef CONFIG_SCHED_DEBUG |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6740 | |
Mike Travis | f663011 | 2009-11-17 18:22:15 -0600 | [diff] [blame] | 6741 | static __read_mostly int sched_domain_debug_enabled; |
| 6742 | |
| 6743 | static int __init sched_domain_debug_setup(char *str) |
| 6744 | { |
| 6745 | sched_domain_debug_enabled = 1; |
| 6746 | |
| 6747 | return 0; |
| 6748 | } |
| 6749 | early_param("sched_debug", sched_domain_debug_setup); |
| 6750 | |
Mike Travis | 7c16ec5 | 2008-04-04 18:11:11 -0700 | [diff] [blame] | 6751 | static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6752 | struct cpumask *groupmask) |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6753 | { |
| 6754 | struct sched_group *group = sd->groups; |
Mike Travis | 434d53b | 2008-04-04 18:11:04 -0700 | [diff] [blame] | 6755 | char str[256]; |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6756 | |
Rusty Russell | 968ea6d | 2008-12-13 21:55:51 +1030 | [diff] [blame] | 6757 | cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd)); |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6758 | cpumask_clear(groupmask); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6759 | |
| 6760 | printk(KERN_DEBUG "%*s domain %d: ", level, "", level); |
| 6761 | |
| 6762 | if (!(sd->flags & SD_LOAD_BALANCE)) { |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6763 | 		printk(KERN_CONT "does not load-balance\n"); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6764 | if (sd->parent) |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6765 | printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain" |
| 6766 | " has parent"); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6767 | return -1; |
| 6768 | } |
| 6769 | |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6770 | printk(KERN_CONT "span %s level %s\n", str, sd->name); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6771 | |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6772 | if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6773 | printk(KERN_ERR "ERROR: domain->span does not contain " |
| 6774 | "CPU%d\n", cpu); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6775 | } |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6776 | if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) { |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6777 | printk(KERN_ERR "ERROR: domain->groups does not contain" |
| 6778 | " CPU%d\n", cpu); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6779 | } |
| 6780 | |
| 6781 | printk(KERN_DEBUG "%*s groups:", level + 1, ""); |
| 6782 | do { |
| 6783 | if (!group) { |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6784 | 			printk(KERN_CONT "\n"); |
| 6785 | printk(KERN_ERR "ERROR: group is NULL\n"); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6786 | break; |
| 6787 | } |
| 6788 | |
Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 6789 | if (!group->sgp->power) { |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6790 | printk(KERN_CONT "\n"); |
| 6791 | printk(KERN_ERR "ERROR: domain->cpu_power not " |
| 6792 | "set\n"); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6793 | break; |
| 6794 | } |
| 6795 | |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6796 | if (!cpumask_weight(sched_group_cpus(group))) { |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6797 | printk(KERN_CONT "\n"); |
| 6798 | printk(KERN_ERR "ERROR: empty group\n"); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6799 | break; |
| 6800 | } |
| 6801 | |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6802 | if (cpumask_intersects(groupmask, sched_group_cpus(group))) { |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6803 | printk(KERN_CONT "\n"); |
| 6804 | printk(KERN_ERR "ERROR: repeated CPUs\n"); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6805 | break; |
| 6806 | } |
| 6807 | |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6808 | cpumask_or(groupmask, groupmask, sched_group_cpus(group)); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6809 | |
Rusty Russell | 968ea6d | 2008-12-13 21:55:51 +1030 | [diff] [blame] | 6810 | cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group)); |
Gautham R Shenoy | 381512c | 2009-04-14 09:09:36 +0530 | [diff] [blame] | 6811 | |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6812 | printk(KERN_CONT " %s", str); |
Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 6813 | if (group->sgp->power != SCHED_POWER_SCALE) { |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6814 | printk(KERN_CONT " (cpu_power = %d)", |
Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 6815 | group->sgp->power); |
Gautham R Shenoy | 381512c | 2009-04-14 09:09:36 +0530 | [diff] [blame] | 6816 | } |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6817 | |
| 6818 | group = group->next; |
| 6819 | } while (group != sd->groups); |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6820 | printk(KERN_CONT "\n"); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6821 | |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6822 | if (!cpumask_equal(sched_domain_span(sd), groupmask)) |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6823 | printk(KERN_ERR "ERROR: groups don't span domain->span\n"); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6824 | |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6825 | if (sd->parent && |
| 6826 | !cpumask_subset(groupmask, sched_domain_span(sd->parent))) |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6827 | printk(KERN_ERR "ERROR: parent span is not a superset " |
| 6828 | "of domain->span\n"); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6829 | return 0; |
| 6830 | } |
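/*
 * Illustrative output for a hypothetical 4-CPU box with SMT + MC
 * levels (all values invented):
 *
 *   CPU0 attaching sched-domain:
 *    domain 0: span 0-1 level SIBLING
 *     groups: 0 (cpu_power = 589) 1 (cpu_power = 589)
 *     domain 1: span 0-3 level MC
 *      groups: 0-1 (cpu_power = 1178) 2-3 (cpu_power = 1178)
 */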
| 6831 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6832 | static void sched_domain_debug(struct sched_domain *sd, int cpu) |
| 6833 | { |
| 6834 | int level = 0; |
| 6835 | |
Mike Travis | f663011 | 2009-11-17 18:22:15 -0600 | [diff] [blame] | 6836 | if (!sched_domain_debug_enabled) |
| 6837 | return; |
| 6838 | |
Nick Piggin | 41c7ce9 | 2005-06-25 14:57:24 -0700 | [diff] [blame] | 6839 | if (!sd) { |
| 6840 | printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); |
| 6841 | return; |
| 6842 | } |
| 6843 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6844 | printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); |
| 6845 | |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6846 | for (;;) { |
Peter Zijlstra | 4cb9883 | 2011-04-07 14:09:58 +0200 | [diff] [blame] | 6847 | if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6848 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6849 | level++; |
| 6850 | sd = sd->parent; |
Miguel Ojeda Sandonis | 33859f7 | 2006-12-10 02:20:38 -0800 | [diff] [blame] | 6851 | if (!sd) |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6852 | break; |
| 6853 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6854 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 6855 | #else /* !CONFIG_SCHED_DEBUG */ |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 6856 | # define sched_domain_debug(sd, cpu) do { } while (0) |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 6857 | #endif /* CONFIG_SCHED_DEBUG */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6858 | |
Dinakar Guniguntala | 1a20ff2 | 2005-06-25 14:57:33 -0700 | [diff] [blame] | 6859 | static int sd_degenerate(struct sched_domain *sd) |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 6860 | { |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6861 | if (cpumask_weight(sched_domain_span(sd)) == 1) |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 6862 | return 1; |
| 6863 | |
| 6864 | /* Following flags need at least 2 groups */ |
| 6865 | if (sd->flags & (SD_LOAD_BALANCE | |
| 6866 | SD_BALANCE_NEWIDLE | |
| 6867 | SD_BALANCE_FORK | |
Siddha, Suresh B | 89c4710 | 2006-10-03 01:14:09 -0700 | [diff] [blame] | 6868 | SD_BALANCE_EXEC | |
| 6869 | SD_SHARE_CPUPOWER | |
| 6870 | SD_SHARE_PKG_RESOURCES)) { |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 6871 | if (sd->groups != sd->groups->next) |
| 6872 | return 0; |
| 6873 | } |
| 6874 | |
| 6875 | /* Following flags don't use groups */ |
Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 6876 | if (sd->flags & (SD_WAKE_AFFINE)) |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 6877 | return 0; |
| 6878 | |
| 6879 | return 1; |
| 6880 | } |
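/*
 * Example: on a non-SMT CPU the SIBLING domain spans exactly one CPU,
 * so the cpumask_weight() == 1 test above fires and cpu_attach_domain()
 * collapses that level out of the hierarchy.
 */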
| 6881 | |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 6882 | static int |
| 6883 | sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 6884 | { |
| 6885 | unsigned long cflags = sd->flags, pflags = parent->flags; |
| 6886 | |
| 6887 | if (sd_degenerate(parent)) |
| 6888 | return 1; |
| 6889 | |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6890 | if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent))) |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 6891 | return 0; |
| 6892 | |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 6893 | /* Flags needing groups don't count if only 1 group in parent */ |
| 6894 | if (parent->groups == parent->groups->next) { |
| 6895 | pflags &= ~(SD_LOAD_BALANCE | |
| 6896 | SD_BALANCE_NEWIDLE | |
| 6897 | SD_BALANCE_FORK | |
Siddha, Suresh B | 89c4710 | 2006-10-03 01:14:09 -0700 | [diff] [blame] | 6898 | SD_BALANCE_EXEC | |
| 6899 | SD_SHARE_CPUPOWER | |
| 6900 | SD_SHARE_PKG_RESOURCES); |
Ken Chen | 5436499 | 2008-12-07 18:47:37 -0800 | [diff] [blame] | 6901 | if (nr_node_ids == 1) |
| 6902 | pflags &= ~SD_SERIALIZE; |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 6903 | } |
| 6904 | if (~cflags & pflags) |
| 6905 | return 0; |
| 6906 | |
| 6907 | return 1; |
| 6908 | } |
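/*
 * The final "~cflags & pflags" test is a bitwise subset check: it is
 * non-zero iff the parent retains at least one flag the child lacks,
 * in which case the parent still adds behaviour and must be kept.
 */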
| 6909 | |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 6910 | static void free_rootdomain(struct rcu_head *rcu) |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6911 | { |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 6912 | struct root_domain *rd = container_of(rcu, struct root_domain, rcu); |
Peter Zijlstra | 047106a | 2009-11-16 10:28:09 +0100 | [diff] [blame] | 6913 | |
Rusty Russell | 68e7456 | 2008-11-25 02:35:13 +1030 | [diff] [blame] | 6914 | cpupri_cleanup(&rd->cpupri); |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6915 | free_cpumask_var(rd->rto_mask); |
| 6916 | free_cpumask_var(rd->online); |
| 6917 | free_cpumask_var(rd->span); |
| 6918 | kfree(rd); |
| 6919 | } |
| 6920 | |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6921 | static void rq_attach_root(struct rq *rq, struct root_domain *rd) |
| 6922 | { |
Ingo Molnar | a0490fa | 2009-02-12 11:35:40 +0100 | [diff] [blame] | 6923 | struct root_domain *old_rd = NULL; |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6924 | unsigned long flags; |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6925 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 6926 | raw_spin_lock_irqsave(&rq->lock, flags); |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6927 | |
| 6928 | if (rq->rd) { |
Ingo Molnar | a0490fa | 2009-02-12 11:35:40 +0100 | [diff] [blame] | 6929 | old_rd = rq->rd; |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6930 | |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6931 | if (cpumask_test_cpu(rq->cpu, old_rd->online)) |
Gregory Haskins | 1f11eb6 | 2008-06-04 15:04:05 -0400 | [diff] [blame] | 6932 | set_rq_offline(rq); |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6933 | |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6934 | cpumask_clear_cpu(rq->cpu, old_rd->span); |
Gregory Haskins | dc93852 | 2008-01-25 21:08:26 +0100 | [diff] [blame] | 6935 | |
Ingo Molnar | a0490fa | 2009-02-12 11:35:40 +0100 | [diff] [blame] | 6936 | /* |
| 6937 | 	 * If we don't want to free the old_rd yet then |
| 6938 | * set old_rd to NULL to skip the freeing later |
| 6939 | * in this function: |
| 6940 | */ |
| 6941 | if (!atomic_dec_and_test(&old_rd->refcount)) |
| 6942 | old_rd = NULL; |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6943 | } |
| 6944 | |
| 6945 | atomic_inc(&rd->refcount); |
| 6946 | rq->rd = rd; |
| 6947 | |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6948 | cpumask_set_cpu(rq->cpu, rd->span); |
Gregory Haskins | 00aec93 | 2009-07-30 10:57:23 -0400 | [diff] [blame] | 6949 | if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) |
Gregory Haskins | 1f11eb6 | 2008-06-04 15:04:05 -0400 | [diff] [blame] | 6950 | set_rq_online(rq); |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6951 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 6952 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
Ingo Molnar | a0490fa | 2009-02-12 11:35:40 +0100 | [diff] [blame] | 6953 | |
| 6954 | if (old_rd) |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 6955 | call_rcu_sched(&old_rd->rcu, free_rootdomain); |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6956 | } |
| 6957 | |
Pekka Enberg | 68c38fc | 2010-07-15 23:18:22 +0300 | [diff] [blame] | 6958 | static int init_rootdomain(struct root_domain *rd) |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6959 | { |
| 6960 | memset(rd, 0, sizeof(*rd)); |
| 6961 | |
Pekka Enberg | 68c38fc | 2010-07-15 23:18:22 +0300 | [diff] [blame] | 6962 | if (!alloc_cpumask_var(&rd->span, GFP_KERNEL)) |
Li Zefan | 0c910d2 | 2009-01-06 17:39:06 +0800 | [diff] [blame] | 6963 | goto out; |
Pekka Enberg | 68c38fc | 2010-07-15 23:18:22 +0300 | [diff] [blame] | 6964 | if (!alloc_cpumask_var(&rd->online, GFP_KERNEL)) |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6965 | goto free_span; |
Pekka Enberg | 68c38fc | 2010-07-15 23:18:22 +0300 | [diff] [blame] | 6966 | if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6967 | goto free_online; |
Gregory Haskins | 6e0534f | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 6968 | |
Pekka Enberg | 68c38fc | 2010-07-15 23:18:22 +0300 | [diff] [blame] | 6969 | if (cpupri_init(&rd->cpupri) != 0) |
Rusty Russell | 68e7456 | 2008-11-25 02:35:13 +1030 | [diff] [blame] | 6970 | goto free_rto_mask; |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6971 | return 0; |
| 6972 | |
Rusty Russell | 68e7456 | 2008-11-25 02:35:13 +1030 | [diff] [blame] | 6973 | free_rto_mask: |
| 6974 | free_cpumask_var(rd->rto_mask); |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6975 | free_online: |
| 6976 | free_cpumask_var(rd->online); |
| 6977 | free_span: |
| 6978 | free_cpumask_var(rd->span); |
Li Zefan | 0c910d2 | 2009-01-06 17:39:06 +0800 | [diff] [blame] | 6979 | out: |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6980 | return -ENOMEM; |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6981 | } |
| 6982 | |
| 6983 | static void init_defrootdomain(void) |
| 6984 | { |
Pekka Enberg | 68c38fc | 2010-07-15 23:18:22 +0300 | [diff] [blame] | 6985 | init_rootdomain(&def_root_domain); |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6986 | |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6987 | atomic_set(&def_root_domain.refcount, 1); |
| 6988 | } |
| 6989 | |
Gregory Haskins | dc93852 | 2008-01-25 21:08:26 +0100 | [diff] [blame] | 6990 | static struct root_domain *alloc_rootdomain(void) |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6991 | { |
| 6992 | struct root_domain *rd; |
| 6993 | |
| 6994 | rd = kmalloc(sizeof(*rd), GFP_KERNEL); |
| 6995 | if (!rd) |
| 6996 | return NULL; |
| 6997 | |
Pekka Enberg | 68c38fc | 2010-07-15 23:18:22 +0300 | [diff] [blame] | 6998 | if (init_rootdomain(rd) != 0) { |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6999 | kfree(rd); |
| 7000 | return NULL; |
| 7001 | } |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 7002 | |
| 7003 | return rd; |
| 7004 | } |
| 7005 | |
Peter Zijlstra | e3589f6 | 2011-07-15 10:35:52 +0200 | [diff] [blame] | 7006 | static void free_sched_groups(struct sched_group *sg, int free_sgp) |
| 7007 | { |
| 7008 | struct sched_group *tmp, *first; |
| 7009 | |
| 7010 | if (!sg) |
| 7011 | return; |
| 7012 | |
| 7013 | first = sg; |
| 7014 | do { |
| 7015 | tmp = sg->next; |
| 7016 | |
| 7017 | if (free_sgp && atomic_dec_and_test(&sg->sgp->ref)) |
| 7018 | kfree(sg->sgp); |
| 7019 | |
| 7020 | kfree(sg); |
| 7021 | sg = tmp; |
| 7022 | } while (sg != first); |
| 7023 | } |
| 7024 | |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7025 | static void free_sched_domain(struct rcu_head *rcu) |
| 7026 | { |
| 7027 | struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu); |
Peter Zijlstra | e3589f6 | 2011-07-15 10:35:52 +0200 | [diff] [blame] | 7028 | |
| 7029 | /* |
| 7030 | 	 * If it's an overlapping domain it has private groups; iterate and |
| 7031 | * nuke them all. |
| 7032 | */ |
| 7033 | if (sd->flags & SD_OVERLAP) { |
| 7034 | free_sched_groups(sd->groups, 1); |
| 7035 | } else if (atomic_dec_and_test(&sd->groups->ref)) { |
Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 7036 | kfree(sd->groups->sgp); |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7037 | kfree(sd->groups); |
Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 7038 | } |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7039 | kfree(sd); |
| 7040 | } |
| 7041 | |
| 7042 | static void destroy_sched_domain(struct sched_domain *sd, int cpu) |
| 7043 | { |
| 7044 | call_rcu(&sd->rcu, free_sched_domain); |
| 7045 | } |
| 7046 | |
| 7047 | static void destroy_sched_domains(struct sched_domain *sd, int cpu) |
| 7048 | { |
| 7049 | for (; sd; sd = sd->parent) |
| 7050 | destroy_sched_domain(sd, cpu); |
| 7051 | } |
| 7052 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7053 | /* |
Ingo Molnar | 0eab914 | 2008-01-25 21:08:19 +0100 | [diff] [blame] | 7054 | * Attach the domain 'sd' to 'cpu' as its base domain. Callers must |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7055 | * hold the hotplug lock. |
| 7056 | */ |
Ingo Molnar | 0eab914 | 2008-01-25 21:08:19 +0100 | [diff] [blame] | 7057 | static void |
| 7058 | cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7059 | { |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 7060 | struct rq *rq = cpu_rq(cpu); |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 7061 | struct sched_domain *tmp; |
| 7062 | |
| 7063 | /* Remove the sched domains which do not contribute to scheduling. */ |
Li Zefan | f29c9b1 | 2008-11-06 09:45:16 +0800 | [diff] [blame] | 7064 | for (tmp = sd; tmp; ) { |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 7065 | struct sched_domain *parent = tmp->parent; |
| 7066 | if (!parent) |
| 7067 | break; |
Li Zefan | f29c9b1 | 2008-11-06 09:45:16 +0800 | [diff] [blame] | 7068 | |
Siddha, Suresh B | 1a84887 | 2006-10-03 01:14:08 -0700 | [diff] [blame] | 7069 | if (sd_parent_degenerate(tmp, parent)) { |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 7070 | tmp->parent = parent->parent; |
Siddha, Suresh B | 1a84887 | 2006-10-03 01:14:08 -0700 | [diff] [blame] | 7071 | if (parent->parent) |
| 7072 | parent->parent->child = tmp; |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7073 | destroy_sched_domain(parent, cpu); |
Li Zefan | f29c9b1 | 2008-11-06 09:45:16 +0800 | [diff] [blame] | 7074 | } else |
| 7075 | tmp = tmp->parent; |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 7076 | } |
| 7077 | |
Siddha, Suresh B | 1a84887 | 2006-10-03 01:14:08 -0700 | [diff] [blame] | 7078 | if (sd && sd_degenerate(sd)) { |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7079 | tmp = sd; |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 7080 | sd = sd->parent; |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7081 | destroy_sched_domain(tmp, cpu); |
Siddha, Suresh B | 1a84887 | 2006-10-03 01:14:08 -0700 | [diff] [blame] | 7082 | if (sd) |
| 7083 | sd->child = NULL; |
| 7084 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7085 | |
Peter Zijlstra | 4cb9883 | 2011-04-07 14:09:58 +0200 | [diff] [blame] | 7086 | sched_domain_debug(sd, cpu); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7087 | |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 7088 | rq_attach_root(rq, rd); |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7089 | tmp = rq->sd; |
Nick Piggin | 674311d | 2005-06-25 14:57:27 -0700 | [diff] [blame] | 7090 | rcu_assign_pointer(rq->sd, sd); |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7091 | destroy_sched_domains(tmp, cpu); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7092 | } |
| 7093 | |
| 7094 | /* cpus with isolated domains */ |
Rusty Russell | dcc30a3 | 2008-11-25 02:35:12 +1030 | [diff] [blame] | 7095 | static cpumask_var_t cpu_isolated_map; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7096 | |
| 7097 | /* Setup the mask of cpus configured for isolated domains */ |
| 7098 | static int __init isolated_cpu_setup(char *str) |
| 7099 | { |
Rusty Russell | bdddd29 | 2009-12-02 14:09:16 +1030 | [diff] [blame] | 7100 | alloc_bootmem_cpumask_var(&cpu_isolated_map); |
Rusty Russell | 968ea6d | 2008-12-13 21:55:51 +1030 | [diff] [blame] | 7101 | cpulist_parse(str, cpu_isolated_map); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7102 | return 1; |
| 7103 | } |
| 7104 | |
Ingo Molnar | 8927f49 | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 7105 | __setup("isolcpus=", isolated_cpu_setup); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7106 | |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 7107 | #ifdef CONFIG_NUMA |
akpm@osdl.org | 198e2f1 | 2006-01-12 01:05:30 -0800 | [diff] [blame] | 7108 | |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 7109 | /** |
| 7110 | * find_next_best_node - find the next node to include in a sched_domain |
| 7111 | * @node: node whose sched_domain we're building |
| 7112 | * @used_nodes: nodes already in the sched_domain |
| 7113 | * |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 7114 | * Find the next node to include in a given scheduling domain. Simply |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 7115 | * finds the closest node not already in the @used_nodes map. |
| 7116 | * |
| 7117 | * Should use nodemask_t. |
| 7118 | */ |
Mike Travis | c5f59f0 | 2008-04-04 18:11:10 -0700 | [diff] [blame] | 7119 | static int find_next_best_node(int node, nodemask_t *used_nodes) |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 7120 | { |
Hillf Danton | 7142d17 | 2011-05-05 20:53:20 +0800 | [diff] [blame] | 7121 | int i, n, val, min_val, best_node = -1; |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 7122 | |
| 7123 | min_val = INT_MAX; |
| 7124 | |
Mike Travis | 076ac2a | 2008-05-12 21:21:12 +0200 | [diff] [blame] | 7125 | for (i = 0; i < nr_node_ids; i++) { |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 7126 | /* Start at @node */ |
Mike Travis | 076ac2a | 2008-05-12 21:21:12 +0200 | [diff] [blame] | 7127 | n = (node + i) % nr_node_ids; |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 7128 | |
| 7129 | if (!nr_cpus_node(n)) |
| 7130 | continue; |
| 7131 | |
| 7132 | /* Skip already used nodes */ |
Mike Travis | c5f59f0 | 2008-04-04 18:11:10 -0700 | [diff] [blame] | 7133 | if (node_isset(n, *used_nodes)) |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 7134 | continue; |
| 7135 | |
| 7136 | /* Simple min distance search */ |
| 7137 | val = node_distance(node, n); |
| 7138 | |
| 7139 | if (val < min_val) { |
| 7140 | min_val = val; |
| 7141 | best_node = n; |
| 7142 | } |
| 7143 | } |
| 7144 | |
Hillf Danton | 7142d17 | 2011-05-05 20:53:20 +0800 | [diff] [blame] | 7145 | if (best_node != -1) |
| 7146 | node_set(best_node, *used_nodes); |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 7147 | return best_node; |
| 7148 | } |
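/*
 * Worked example with hypothetical distances: starting from node 0
 * with node_distance(0, {1,2,3}) = {20, 40, 10}, successive calls
 * return 3, then 1, then 2 (marking each in *used_nodes), and -1 once
 * every populated node has been consumed.
 */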
| 7149 | |
| 7150 | /** |
| 7151 | * sched_domain_node_span - get a cpumask for a node's sched_domain |
| 7152 | * @node: node whose cpumask we're constructing |
Randy Dunlap | 7348672 | 2008-04-22 10:07:22 -0700 | [diff] [blame] | 7153 | * @span: resulting cpumask |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 7154 | * |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 7155 | * Given a node, construct a good cpumask for its sched_domain to span. It |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 7156 | * should be one that prevents unnecessary balancing, but also spreads tasks |
| 7157 | * out optimally. |
| 7158 | */ |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 7159 | static void sched_domain_node_span(int node, struct cpumask *span) |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 7160 | { |
Mike Travis | c5f59f0 | 2008-04-04 18:11:10 -0700 | [diff] [blame] | 7161 | nodemask_t used_nodes; |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 7162 | int i; |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 7163 | |
Mike Travis | 6ca09df | 2008-12-31 18:08:45 -0800 | [diff] [blame] | 7164 | cpumask_clear(span); |
Mike Travis | c5f59f0 | 2008-04-04 18:11:10 -0700 | [diff] [blame] | 7165 | nodes_clear(used_nodes); |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 7166 | |
Mike Travis | 6ca09df | 2008-12-31 18:08:45 -0800 | [diff] [blame] | 7167 | cpumask_or(span, span, cpumask_of_node(node)); |
Mike Travis | c5f59f0 | 2008-04-04 18:11:10 -0700 | [diff] [blame] | 7168 | node_set(node, used_nodes); |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 7169 | |
| 7170 | for (i = 1; i < SD_NODES_PER_DOMAIN; i++) { |
Mike Travis | c5f59f0 | 2008-04-04 18:11:10 -0700 | [diff] [blame] | 7171 | int next_node = find_next_best_node(node, &used_nodes); |
Hillf Danton | 7142d17 | 2011-05-05 20:53:20 +0800 | [diff] [blame] | 7172 | if (next_node < 0) |
| 7173 | break; |
Mike Travis | 6ca09df | 2008-12-31 18:08:45 -0800 | [diff] [blame] | 7174 | cpumask_or(span, span, cpumask_of_node(next_node)); |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 7175 | } |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 7176 | } |
Peter Zijlstra | d3081f5 | 2011-04-07 14:09:59 +0200 | [diff] [blame] | 7177 | |
| 7178 | static const struct cpumask *cpu_node_mask(int cpu) |
| 7179 | { |
| 7180 | lockdep_assert_held(&sched_domains_mutex); |
| 7181 | |
| 7182 | sched_domain_node_span(cpu_to_node(cpu), sched_domains_tmpmask); |
| 7183 | |
| 7184 | return sched_domains_tmpmask; |
| 7185 | } |
Peter Zijlstra | 2c402dc | 2011-04-07 14:10:01 +0200 | [diff] [blame] | 7186 | |
| 7187 | static const struct cpumask *cpu_allnodes_mask(int cpu) |
| 7188 | { |
| 7189 | return cpu_possible_mask; |
| 7190 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 7191 | #endif /* CONFIG_NUMA */ |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 7192 | |
Peter Zijlstra | d3081f5 | 2011-04-07 14:09:59 +0200 | [diff] [blame] | 7193 | static const struct cpumask *cpu_cpu_mask(int cpu) |
| 7194 | { |
| 7195 | return cpumask_of_node(cpu_to_node(cpu)); |
| 7196 | } |
| 7197 | |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7198 | int sched_smt_power_savings = 0, sched_mc_power_savings = 0; |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 7199 | |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7200 | struct sd_data { |
| 7201 | struct sched_domain **__percpu sd; |
| 7202 | struct sched_group **__percpu sg; |
Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 7203 | struct sched_group_power **__percpu sgp; |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7204 | }; |
| 7205 | |
Andreas Herrmann | 49a02c5 | 2009-08-18 12:51:52 +0200 | [diff] [blame] | 7206 | struct s_data { |
Peter Zijlstra | 21d42cc | 2011-04-07 14:09:48 +0200 | [diff] [blame] | 7207 | struct sched_domain ** __percpu sd; |
Andreas Herrmann | 49a02c5 | 2009-08-18 12:51:52 +0200 | [diff] [blame] | 7208 | struct root_domain *rd; |
| 7209 | }; |
| 7210 | |
Andreas Herrmann | 2109b99 | 2009-08-18 12:53:00 +0200 | [diff] [blame] | 7211 | enum s_alloc { |
Andreas Herrmann | 2109b99 | 2009-08-18 12:53:00 +0200 | [diff] [blame] | 7212 | sa_rootdomain, |
Peter Zijlstra | 21d42cc | 2011-04-07 14:09:48 +0200 | [diff] [blame] | 7213 | sa_sd, |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7214 | sa_sd_storage, |
Andreas Herrmann | 2109b99 | 2009-08-18 12:53:00 +0200 | [diff] [blame] | 7215 | sa_none, |
| 7216 | }; |
| 7217 | |
Peter Zijlstra | 54ab4ff | 2011-04-07 14:10:03 +0200 | [diff] [blame] | 7218 | struct sched_domain_topology_level; |
| 7219 | |
| 7220 | typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu); |
Peter Zijlstra | eb7a74e6 | 2011-04-07 14:10:00 +0200 | [diff] [blame] | 7221 | typedef const struct cpumask *(*sched_domain_mask_f)(int cpu); |
| 7222 | |
Peter Zijlstra | e3589f6 | 2011-07-15 10:35:52 +0200 | [diff] [blame] | 7223 | #define SDTL_OVERLAP 0x01 |
| 7224 | |
Peter Zijlstra | eb7a74e6 | 2011-04-07 14:10:00 +0200 | [diff] [blame] | 7225 | struct sched_domain_topology_level { |
Peter Zijlstra | 2c402dc | 2011-04-07 14:10:01 +0200 | [diff] [blame] | 7226 | sched_domain_init_f init; |
| 7227 | sched_domain_mask_f mask; |
Peter Zijlstra | e3589f6 | 2011-07-15 10:35:52 +0200 | [diff] [blame] | 7228 | int flags; |
Peter Zijlstra | 54ab4ff | 2011-04-07 14:10:03 +0200 | [diff] [blame] | 7229 | struct sd_data data; |
Peter Zijlstra | eb7a74e6 | 2011-04-07 14:10:00 +0200 | [diff] [blame] | 7230 | }; |
| 7231 | |
Peter Zijlstra | e3589f6 | 2011-07-15 10:35:52 +0200 | [diff] [blame] | 7232 | static int |
| 7233 | build_overlap_sched_groups(struct sched_domain *sd, int cpu) |
| 7234 | { |
| 7235 | struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg; |
| 7236 | const struct cpumask *span = sched_domain_span(sd); |
| 7237 | struct cpumask *covered = sched_domains_tmpmask; |
| 7238 | struct sd_data *sdd = sd->private; |
| 7239 | struct sched_domain *child; |
| 7240 | int i; |
| 7241 | |
| 7242 | cpumask_clear(covered); |
| 7243 | |
| 7244 | for_each_cpu(i, span) { |
| 7245 | struct cpumask *sg_span; |
| 7246 | |
| 7247 | if (cpumask_test_cpu(i, covered)) |
| 7248 | continue; |
| 7249 | |
| 7250 | sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), |
| 7251 | GFP_KERNEL, cpu_to_node(i)); |
| 7252 | |
| 7253 | if (!sg) |
| 7254 | goto fail; |
| 7255 | |
| 7256 | sg_span = sched_group_cpus(sg); |
| 7257 | |
| 7258 | child = *per_cpu_ptr(sdd->sd, i); |
| 7259 | if (child->child) { |
| 7260 | child = child->child; |
| 7261 | cpumask_copy(sg_span, sched_domain_span(child)); |
| 7262 | } else |
| 7263 | cpumask_set_cpu(i, sg_span); |
| 7264 | |
| 7265 | cpumask_or(covered, covered, sg_span); |
| 7266 | |
| 7267 | sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span)); |
| 7268 | atomic_inc(&sg->sgp->ref); |
| 7269 | |
| 7270 | if (cpumask_test_cpu(cpu, sg_span)) |
| 7271 | groups = sg; |
| 7272 | |
| 7273 | if (!first) |
| 7274 | first = sg; |
| 7275 | if (last) |
| 7276 | last->next = sg; |
| 7277 | last = sg; |
| 7278 | last->next = first; |
| 7279 | } |
| 7280 | sd->groups = groups; |
| 7281 | |
| 7282 | return 0; |
| 7283 | |
| 7284 | fail: |
| 7285 | free_sched_groups(first, 0); |
| 7286 | |
| 7287 | return -ENOMEM; |
| 7288 | } |
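/*
 * Note that "last->next = first" is redone on every iteration above, so
 * the group list is circular at all times; a partially built list
 * handed to free_sched_groups() in the failure path is therefore always
 * safe to walk.
 */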
| 7289 | |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7290 | static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7291 | { |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7292 | struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); |
| 7293 | struct sched_domain *child = sd->child; |
| 7294 | |
| 7295 | if (child) |
| 7296 | cpu = cpumask_first(sched_domain_span(child)); |
| 7297 | |
Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 7298 | if (sg) { |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7299 | *sg = *per_cpu_ptr(sdd->sg, cpu); |
Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 7300 | (*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu); |
Peter Zijlstra | e3589f6 | 2011-07-15 10:35:52 +0200 | [diff] [blame] | 7301 | atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */ |
Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 7302 | } |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7303 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7304 | return cpu; |
| 7305 | } |
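/*
 * Note: the group is keyed on the first CPU of the child domain's span,
 * so every CPU sharing that child resolves to the same sched_group (and
 * the same sched_group_power) instance from the sd_data arrays.
 */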
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7306 | |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 7307 | /* |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7308 | * build_sched_groups will build a circular linked list of the groups |
| 7309 |  * covered by the given span, set each group's ->cpumask correctly, |
| 7310 |  * and initialize each group's ->cpu_power to 0. |
Peter Zijlstra | e3589f6 | 2011-07-15 10:35:52 +0200 | [diff] [blame] | 7311 | * |
| 7312 | * Assumes the sched_domain tree is fully constructed |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 7313 | */ |
Peter Zijlstra | e3589f6 | 2011-07-15 10:35:52 +0200 | [diff] [blame] | 7314 | static int |
| 7315 | build_sched_groups(struct sched_domain *sd, int cpu) |
Siddha, Suresh B | 1e9f28f | 2006-03-27 01:15:22 -0800 | [diff] [blame] | 7316 | { |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7317 | struct sched_group *first = NULL, *last = NULL; |
| 7318 | struct sd_data *sdd = sd->private; |
| 7319 | const struct cpumask *span = sched_domain_span(sd); |
Peter Zijlstra | f96225f | 2011-04-07 14:09:57 +0200 | [diff] [blame] | 7320 | struct cpumask *covered; |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7321 | int i; |
| 7322 | |
Peter Zijlstra | e3589f6 | 2011-07-15 10:35:52 +0200 | [diff] [blame] | 7323 | get_group(cpu, sdd, &sd->groups); |
| 7324 | atomic_inc(&sd->groups->ref); |
| 7325 | |
| 7326 | if (cpu != cpumask_first(sched_domain_span(sd))) |
| 7327 | return 0; |
| 7328 | |
Peter Zijlstra | f96225f | 2011-04-07 14:09:57 +0200 | [diff] [blame] | 7329 | lockdep_assert_held(&sched_domains_mutex); |
| 7330 | covered = sched_domains_tmpmask; |
| 7331 | |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7332 | cpumask_clear(covered); |
| 7333 | |
| 7334 | for_each_cpu(i, span) { |
| 7335 | struct sched_group *sg; |
| 7336 | int group = get_group(i, sdd, &sg); |
| 7337 | int j; |
| 7338 | |
| 7339 | if (cpumask_test_cpu(i, covered)) |
| 7340 | continue; |
| 7341 | |
| 7342 | cpumask_clear(sched_group_cpus(sg)); |
Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 7343 | sg->sgp->power = 0; |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7344 | |
| 7345 | for_each_cpu(j, span) { |
| 7346 | if (get_group(j, sdd, NULL) != group) |
| 7347 | continue; |
| 7348 | |
| 7349 | cpumask_set_cpu(j, covered); |
| 7350 | cpumask_set_cpu(j, sched_group_cpus(sg)); |
| 7351 | } |
| 7352 | |
| 7353 | if (!first) |
| 7354 | first = sg; |
| 7355 | if (last) |
| 7356 | last->next = sg; |
| 7357 | last = sg; |
| 7358 | } |
| 7359 | last->next = first; |
Peter Zijlstra | e3589f6 | 2011-07-15 10:35:52 +0200 | [diff] [blame] | 7360 | |
| 7361 | return 0; |
Siddha, Suresh B | 1e9f28f | 2006-03-27 01:15:22 -0800 | [diff] [blame] | 7362 | } |
Srivatsa Vaddagiri | 51888ca | 2006-06-27 02:54:38 -0700 | [diff] [blame] | 7363 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7364 | /* |
Siddha, Suresh B | 89c4710 | 2006-10-03 01:14:09 -0700 | [diff] [blame] | 7365 | * Initialize sched groups cpu_power. |
| 7366 | * |
| 7367 |  * cpu_power indicates the capacity of a sched group, which is used |
| 7368 |  * when distributing load between the sched groups in a sched domain. |
| 7369 |  * Typically cpu_power is the same for all groups in a domain unless |
| 7370 |  * there are asymmetries in the topology; a group with more cpu_power |
| 7371 |  * will pick up proportionally more load than a group with less. |
Siddha, Suresh B | 89c4710 | 2006-10-03 01:14:09 -0700 | [diff] [blame] | 7373 | */ |
| 7374 | static void init_sched_groups_power(int cpu, struct sched_domain *sd) |
| 7375 | { |
Peter Zijlstra | e3589f6 | 2011-07-15 10:35:52 +0200 | [diff] [blame] | 7376 | struct sched_group *sg = sd->groups; |
Siddha, Suresh B | 89c4710 | 2006-10-03 01:14:09 -0700 | [diff] [blame] | 7377 | |
Peter Zijlstra | e3589f6 | 2011-07-15 10:35:52 +0200 | [diff] [blame] | 7378 | WARN_ON(!sd || !sg); |
| 7379 | |
| 7380 | do { |
| 7381 | sg->group_weight = cpumask_weight(sched_group_cpus(sg)); |
| 7382 | sg = sg->next; |
| 7383 | } while (sg != sd->groups); |
| 7384 | |
| 7385 | if (cpu != group_first_cpu(sg)) |
Siddha, Suresh B | 89c4710 | 2006-10-03 01:14:09 -0700 | [diff] [blame] | 7386 | return; |
| 7387 | |
Peter Zijlstra | d274cb3 | 2011-04-07 14:09:43 +0200 | [diff] [blame] | 7388 | update_group_power(sd, cpu); |
Siddha, Suresh B | 89c4710 | 2006-10-03 01:14:09 -0700 | [diff] [blame] | 7389 | } |
| 7390 | |
| 7391 | /* |
Mike Travis | 7c16ec5 | 2008-04-04 18:11:11 -0700 | [diff] [blame] | 7392 |  * Initializers for sched domains |
| 7393 | * Non-inlined to reduce accumulated stack pressure in build_sched_domains() |
| 7394 | */ |
| 7395 | |
Ingo Molnar | a5d8c34 | 2008-10-09 11:35:51 +0200 | [diff] [blame] | 7396 | #ifdef CONFIG_SCHED_DEBUG |
| 7397 | # define SD_INIT_NAME(sd, type) sd->name = #type |
| 7398 | #else |
| 7399 | # define SD_INIT_NAME(sd, type) do { } while (0) |
| 7400 | #endif |
| 7401 | |
Peter Zijlstra | 54ab4ff | 2011-04-07 14:10:03 +0200 | [diff] [blame] | 7402 | #define SD_INIT_FUNC(type) \ |
| 7403 | static noinline struct sched_domain * \ |
| 7404 | sd_init_##type(struct sched_domain_topology_level *tl, int cpu) \ |
| 7405 | { \ |
| 7406 | struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu); \ |
| 7407 | *sd = SD_##type##_INIT; \ |
Peter Zijlstra | 54ab4ff | 2011-04-07 14:10:03 +0200 | [diff] [blame] | 7408 | SD_INIT_NAME(sd, type); \ |
| 7409 | sd->private = &tl->data; \ |
| 7410 | return sd; \ |
Mike Travis | 7c16ec5 | 2008-04-04 18:11:11 -0700 | [diff] [blame] | 7411 | } |
| 7412 | |
| 7413 | SD_INIT_FUNC(CPU) |
| 7414 | #ifdef CONFIG_NUMA |
| 7415 | SD_INIT_FUNC(ALLNODES) |
| 7416 | SD_INIT_FUNC(NODE) |
| 7417 | #endif |
| 7418 | #ifdef CONFIG_SCHED_SMT |
| 7419 | SD_INIT_FUNC(SIBLING) |
| 7420 | #endif |
| 7421 | #ifdef CONFIG_SCHED_MC |
| 7422 | SD_INIT_FUNC(MC) |
| 7423 | #endif |
Heiko Carstens | 01a0854 | 2010-08-31 10:28:16 +0200 | [diff] [blame] | 7424 | #ifdef CONFIG_SCHED_BOOK |
| 7425 | SD_INIT_FUNC(BOOK) |
| 7426 | #endif |
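/*
 * Editorial note: for reference, SD_INIT_FUNC(CPU) above expands to
 * roughly the following, with SD_CPU_INIT supplying the default flags and
 * tuning values for a per-package domain:
 *
 *	static noinline struct sched_domain *
 *	sd_init_CPU(struct sched_domain_topology_level *tl, int cpu)
 *	{
 *		struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
 *		*sd = SD_CPU_INIT;
 *		SD_INIT_NAME(sd, CPU);
 *		sd->private = &tl->data;
 *		return sd;
 *	}
 */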
Mike Travis | 7c16ec5 | 2008-04-04 18:11:11 -0700 | [diff] [blame] | 7427 | |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 7428 | static int default_relax_domain_level = -1; |
Peter Zijlstra | 60495e7 | 2011-04-07 14:10:04 +0200 | [diff] [blame] | 7429 | int sched_domain_level_max; |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 7430 | |
| 7431 | static int __init setup_relax_domain_level(char *str) |
| 7432 | { |
Li Zefan | 30e0e17 | 2008-05-13 10:27:17 +0800 | [diff] [blame] | 7433 | unsigned long val; |
| 7434 | |
| 7435 | val = simple_strtoul(str, NULL, 0); |
Peter Zijlstra | 60495e7 | 2011-04-07 14:10:04 +0200 | [diff] [blame] | 7436 | if (val < sched_domain_level_max) |
Li Zefan | 30e0e17 | 2008-05-13 10:27:17 +0800 | [diff] [blame] | 7437 | default_relax_domain_level = val; |
| 7438 | |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 7439 | return 1; |
| 7440 | } |
| 7441 | __setup("relax_domain_level=", setup_relax_domain_level); |
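/*
 * Editorial example: booting with "relax_domain_level=2" on the kernel
 * command line requests that domains at level 2 and below keep
 * SD_BALANCE_WAKE/SD_BALANCE_NEWIDLE, while higher (wider) domains have
 * them cleared; see set_domain_attribute() below.
 */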
| 7442 | |
| 7443 | static void set_domain_attribute(struct sched_domain *sd, |
| 7444 | struct sched_domain_attr *attr) |
| 7445 | { |
| 7446 | int request; |
| 7447 | |
| 7448 | if (!attr || attr->relax_domain_level < 0) { |
| 7449 | if (default_relax_domain_level < 0) |
| 7450 | return; |
| 7451 | else |
| 7452 | request = default_relax_domain_level; |
| 7453 | } else |
| 7454 | request = attr->relax_domain_level; |
| 7455 | if (request < sd->level) { |
| 7456 | /* turn off idle balance on this domain */ |
Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 7457 | sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 7458 | } else { |
| 7459 | /* turn on idle balance on this domain */ |
Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 7460 | sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 7461 | } |
| 7462 | } |
| 7463 | |
Peter Zijlstra | 54ab4ff | 2011-04-07 14:10:03 +0200 | [diff] [blame] | 7464 | static void __sdt_free(const struct cpumask *cpu_map); |
| 7465 | static int __sdt_alloc(const struct cpumask *cpu_map); |
| 7466 | |
Andreas Herrmann | 2109b99 | 2009-08-18 12:53:00 +0200 | [diff] [blame] | 7467 | static void __free_domain_allocs(struct s_data *d, enum s_alloc what, |
| 7468 | const struct cpumask *cpu_map) |
| 7469 | { |
| 7470 | switch (what) { |
Andreas Herrmann | 2109b99 | 2009-08-18 12:53:00 +0200 | [diff] [blame] | 7471 | case sa_rootdomain: |
Peter Zijlstra | 822ff79 | 2011-04-07 14:09:51 +0200 | [diff] [blame] | 7472 | if (!atomic_read(&d->rd->refcount)) |
| 7473 | free_rootdomain(&d->rd->rcu); /* fall through */ |
Peter Zijlstra | 21d42cc | 2011-04-07 14:09:48 +0200 | [diff] [blame] | 7474 | case sa_sd: |
| 7475 | free_percpu(d->sd); /* fall through */ |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7476 | case sa_sd_storage: |
Peter Zijlstra | 54ab4ff | 2011-04-07 14:10:03 +0200 | [diff] [blame] | 7477 | __sdt_free(cpu_map); /* fall through */ |
Andreas Herrmann | 2109b99 | 2009-08-18 12:53:00 +0200 | [diff] [blame] | 7478 | case sa_none: |
| 7479 | break; |
| 7480 | } |
| 7481 | } |
| 7482 | |
| 7483 | static enum s_alloc __visit_domain_allocation_hell(struct s_data *d, |
| 7484 | const struct cpumask *cpu_map) |
| 7485 | { |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7486 | memset(d, 0, sizeof(*d)); |
| 7487 | |
Peter Zijlstra | 54ab4ff | 2011-04-07 14:10:03 +0200 | [diff] [blame] | 7488 | if (__sdt_alloc(cpu_map)) |
| 7489 | return sa_sd_storage; |
Peter Zijlstra | 21d42cc | 2011-04-07 14:09:48 +0200 | [diff] [blame] | 7490 | d->sd = alloc_percpu(struct sched_domain *); |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7491 | if (!d->sd) |
| 7492 | return sa_sd_storage; |
Andreas Herrmann | 2109b99 | 2009-08-18 12:53:00 +0200 | [diff] [blame] | 7493 | d->rd = alloc_rootdomain(); |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7494 | if (!d->rd) |
Peter Zijlstra | 21d42cc | 2011-04-07 14:09:48 +0200 | [diff] [blame] | 7495 | return sa_sd; |
Andreas Herrmann | 2109b99 | 2009-08-18 12:53:00 +0200 | [diff] [blame] | 7496 | return sa_rootdomain; |
| 7497 | } |
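/*
 * Editorial sketch (not kernel code): the pair of functions above use a
 * staged-allocation pattern; the allocator returns how far it got, and
 * the freer switch-falls-through from that stage so only what was
 * actually allocated is unwound. A standalone model of the same idea,
 * assuming <stdlib.h> and <string.h>; all names here are hypothetical:
 */
enum demo_alloc { demo_none, demo_buf_a, demo_all };

struct demo { void *buf_a; void *buf_b; };

static void demo_free(struct demo *d, enum demo_alloc what)
{
	switch (what) {
	case demo_all:
		free(d->buf_b);		/* fall through */
	case demo_buf_a:
		free(d->buf_a);		/* fall through */
	case demo_none:
		break;
	}
}

static enum demo_alloc demo_alloc_all(struct demo *d)
{
	memset(d, 0, sizeof(*d));
	d->buf_a = malloc(32);
	if (!d->buf_a)
		return demo_none;
	d->buf_b = malloc(32);
	if (!d->buf_b)
		return demo_buf_a;	/* caller then frees buf_a only */
	return demo_all;
}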
| 7498 | |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7499 | /* |
| 7500 | * NULL the sd_data elements we've used to build the sched_domain and |
 | 7501 | * sched_group structures so that the subsequent __free_domain_allocs() |
| 7502 | * will not free the data we're using. |
| 7503 | */ |
| 7504 | static void claim_allocations(int cpu, struct sched_domain *sd) |
| 7505 | { |
| 7506 | struct sd_data *sdd = sd->private; |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7507 | |
| 7508 | WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); |
| 7509 | *per_cpu_ptr(sdd->sd, cpu) = NULL; |
| 7510 | |
Peter Zijlstra | e3589f6 | 2011-07-15 10:35:52 +0200 | [diff] [blame] | 7511 | if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref)) |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7512 | *per_cpu_ptr(sdd->sg, cpu) = NULL; |
Peter Zijlstra | e3589f6 | 2011-07-15 10:35:52 +0200 | [diff] [blame] | 7513 | |
| 7514 | if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref)) |
Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 7515 | *per_cpu_ptr(sdd->sgp, cpu) = NULL; |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7516 | } |
| 7517 | |
Andreas Herrmann | d817353 | 2009-08-18 12:57:03 +0200 | [diff] [blame] | 7518 | #ifdef CONFIG_SCHED_SMT |
Peter Zijlstra | 2c402dc | 2011-04-07 14:10:01 +0200 | [diff] [blame] | 7519 | static const struct cpumask *cpu_smt_mask(int cpu) |
| 7520 | { |
| 7521 | return topology_thread_cpumask(cpu); |
Andreas Herrmann | d817353 | 2009-08-18 12:57:03 +0200 | [diff] [blame] | 7522 | } |
Peter Zijlstra | 2c402dc | 2011-04-07 14:10:01 +0200 | [diff] [blame] | 7523 | #endif |
Andreas Herrmann | d817353 | 2009-08-18 12:57:03 +0200 | [diff] [blame] | 7524 | |
Peter Zijlstra | d069b91 | 2011-04-07 14:10:02 +0200 | [diff] [blame] | 7525 | /* |
| 7526 | * Topology list, bottom-up. |
| 7527 | */ |
Peter Zijlstra | eb7a74e6 | 2011-04-07 14:10:00 +0200 | [diff] [blame] | 7528 | static struct sched_domain_topology_level default_topology[] = { |
Peter Zijlstra | d069b91 | 2011-04-07 14:10:02 +0200 | [diff] [blame] | 7529 | #ifdef CONFIG_SCHED_SMT |
| 7530 | { sd_init_SIBLING, cpu_smt_mask, }, |
Peter Zijlstra | 2c402dc | 2011-04-07 14:10:01 +0200 | [diff] [blame] | 7531 | #endif |
| 7532 | #ifdef CONFIG_SCHED_MC |
| 7533 | { sd_init_MC, cpu_coregroup_mask, }, |
| 7534 | #endif |
Peter Zijlstra | d069b91 | 2011-04-07 14:10:02 +0200 | [diff] [blame] | 7535 | #ifdef CONFIG_SCHED_BOOK |
| 7536 | { sd_init_BOOK, cpu_book_mask, }, |
| 7537 | #endif |
| 7538 | { sd_init_CPU, cpu_cpu_mask, }, |
| 7539 | #ifdef CONFIG_NUMA |
Peter Zijlstra | e3589f6 | 2011-07-15 10:35:52 +0200 | [diff] [blame] | 7540 | { sd_init_NODE, cpu_node_mask, SDTL_OVERLAP, }, |
Peter Zijlstra | d069b91 | 2011-04-07 14:10:02 +0200 | [diff] [blame] | 7541 | { sd_init_ALLNODES, cpu_allnodes_mask, }, |
Peter Zijlstra | 2c402dc | 2011-04-07 14:10:01 +0200 | [diff] [blame] | 7542 | #endif |
Peter Zijlstra | eb7a74e6 | 2011-04-07 14:10:00 +0200 | [diff] [blame] | 7543 | { NULL, }, |
| 7544 | }; |
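/*
 * Editorial illustration: on a machine with all of the above options
 * enabled, each cpu's domain hierarchy is built bottom-up from this
 * table as
 *
 *	SIBLING (hw threads) -> MC (cores in a package) ->
 *	BOOK (s390 books) -> CPU (a package) ->
 *	NODE (NUMA node, built with overlapping groups) ->
 *	ALLNODES (the whole system),
 *
 * with build_sched_domains() stopping early once a level already spans
 * the whole cpu_map, and redundant levels degenerated at attach time.
 */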
| 7545 | |
| 7546 | static struct sched_domain_topology_level *sched_domain_topology = default_topology; |
| 7547 | |
Peter Zijlstra | 54ab4ff | 2011-04-07 14:10:03 +0200 | [diff] [blame] | 7548 | static int __sdt_alloc(const struct cpumask *cpu_map) |
| 7549 | { |
| 7550 | struct sched_domain_topology_level *tl; |
| 7551 | int j; |
| 7552 | |
| 7553 | for (tl = sched_domain_topology; tl->init; tl++) { |
| 7554 | struct sd_data *sdd = &tl->data; |
| 7555 | |
| 7556 | sdd->sd = alloc_percpu(struct sched_domain *); |
| 7557 | if (!sdd->sd) |
| 7558 | return -ENOMEM; |
| 7559 | |
| 7560 | sdd->sg = alloc_percpu(struct sched_group *); |
| 7561 | if (!sdd->sg) |
| 7562 | return -ENOMEM; |
| 7563 | |
Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 7564 | sdd->sgp = alloc_percpu(struct sched_group_power *); |
| 7565 | if (!sdd->sgp) |
| 7566 | return -ENOMEM; |
| 7567 | |
Peter Zijlstra | 54ab4ff | 2011-04-07 14:10:03 +0200 | [diff] [blame] | 7568 | for_each_cpu(j, cpu_map) { |
| 7569 | struct sched_domain *sd; |
| 7570 | struct sched_group *sg; |
Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 7571 | struct sched_group_power *sgp; |
Peter Zijlstra | 54ab4ff | 2011-04-07 14:10:03 +0200 | [diff] [blame] | 7572 | |
| 7573 | sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(), |
| 7574 | GFP_KERNEL, cpu_to_node(j)); |
| 7575 | if (!sd) |
| 7576 | return -ENOMEM; |
| 7577 | |
| 7578 | *per_cpu_ptr(sdd->sd, j) = sd; |
| 7579 | |
| 7580 | sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), |
| 7581 | GFP_KERNEL, cpu_to_node(j)); |
| 7582 | if (!sg) |
| 7583 | return -ENOMEM; |
| 7584 | |
| 7585 | *per_cpu_ptr(sdd->sg, j) = sg; |
Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 7586 | |
| 7587 | sgp = kzalloc_node(sizeof(struct sched_group_power), |
| 7588 | GFP_KERNEL, cpu_to_node(j)); |
| 7589 | if (!sgp) |
| 7590 | return -ENOMEM; |
| 7591 | |
| 7592 | *per_cpu_ptr(sdd->sgp, j) = sgp; |
Peter Zijlstra | 54ab4ff | 2011-04-07 14:10:03 +0200 | [diff] [blame] | 7593 | } |
| 7594 | } |
| 7595 | |
| 7596 | return 0; |
| 7597 | } |
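/*
 * Editorial note: the "sizeof(struct ...) + cpumask_size()" allocations
 * above rely on the trailing-storage idiom: struct sched_domain and
 * struct sched_group end in a zero-length unsigned long array, and
 * sched_domain_span()/sched_group_cpus() return a cpumask pointer into
 * that tail, so a single node-local allocation covers both the struct
 * and its mask.
 */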
| 7598 | |
| 7599 | static void __sdt_free(const struct cpumask *cpu_map) |
| 7600 | { |
| 7601 | struct sched_domain_topology_level *tl; |
| 7602 | int j; |
| 7603 | |
| 7604 | for (tl = sched_domain_topology; tl->init; tl++) { |
| 7605 | struct sd_data *sdd = &tl->data; |
| 7606 | |
| 7607 | for_each_cpu(j, cpu_map) { |
Peter Zijlstra | e3589f6 | 2011-07-15 10:35:52 +0200 | [diff] [blame] | 7608 | struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j); |
| 7609 | if (sd && (sd->flags & SD_OVERLAP)) |
| 7610 | free_sched_groups(sd->groups, 0); |
WANG Cong | feff8fa | 2011-08-18 20:36:57 +0800 | [diff] [blame] | 7611 | kfree(*per_cpu_ptr(sdd->sd, j)); |
Peter Zijlstra | 54ab4ff | 2011-04-07 14:10:03 +0200 | [diff] [blame] | 7612 | kfree(*per_cpu_ptr(sdd->sg, j)); |
Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 7613 | kfree(*per_cpu_ptr(sdd->sgp, j)); |
Peter Zijlstra | 54ab4ff | 2011-04-07 14:10:03 +0200 | [diff] [blame] | 7614 | } |
| 7615 | free_percpu(sdd->sd); |
| 7616 | free_percpu(sdd->sg); |
Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 7617 | free_percpu(sdd->sgp); |
Peter Zijlstra | 54ab4ff | 2011-04-07 14:10:03 +0200 | [diff] [blame] | 7618 | } |
| 7619 | } |
| 7620 | |
Peter Zijlstra | 2c402dc | 2011-04-07 14:10:01 +0200 | [diff] [blame] | 7621 | struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, |
| 7622 | struct s_data *d, const struct cpumask *cpu_map, |
Peter Zijlstra | d069b91 | 2011-04-07 14:10:02 +0200 | [diff] [blame] | 7623 | struct sched_domain_attr *attr, struct sched_domain *child, |
Peter Zijlstra | 2c402dc | 2011-04-07 14:10:01 +0200 | [diff] [blame] | 7624 | int cpu) |
| 7625 | { |
Peter Zijlstra | 54ab4ff | 2011-04-07 14:10:03 +0200 | [diff] [blame] | 7626 | struct sched_domain *sd = tl->init(tl, cpu); |
Peter Zijlstra | 2c402dc | 2011-04-07 14:10:01 +0200 | [diff] [blame] | 7627 | if (!sd) |
Peter Zijlstra | d069b91 | 2011-04-07 14:10:02 +0200 | [diff] [blame] | 7628 | return child; |
Peter Zijlstra | 2c402dc | 2011-04-07 14:10:01 +0200 | [diff] [blame] | 7629 | |
| 7630 | set_domain_attribute(sd, attr); |
| 7631 | cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu)); |
Peter Zijlstra | 60495e7 | 2011-04-07 14:10:04 +0200 | [diff] [blame] | 7632 | if (child) { |
| 7633 | sd->level = child->level + 1; |
| 7634 | sched_domain_level_max = max(sched_domain_level_max, sd->level); |
Peter Zijlstra | d069b91 | 2011-04-07 14:10:02 +0200 | [diff] [blame] | 7635 | child->parent = sd; |
Peter Zijlstra | 60495e7 | 2011-04-07 14:10:04 +0200 | [diff] [blame] | 7636 | } |
Peter Zijlstra | d069b91 | 2011-04-07 14:10:02 +0200 | [diff] [blame] | 7637 | sd->child = child; |
Peter Zijlstra | 2c402dc | 2011-04-07 14:10:01 +0200 | [diff] [blame] | 7638 | |
| 7639 | return sd; |
| 7640 | } |
| 7641 | |
Mike Travis | 7c16ec5 | 2008-04-04 18:11:11 -0700 | [diff] [blame] | 7642 | /* |
Dinakar Guniguntala | 1a20ff2 | 2005-06-25 14:57:33 -0700 | [diff] [blame] | 7643 | * Build sched domains for a given set of cpus and attach the sched domains |
 | 7644 | * to the individual cpus. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7645 | */ |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7646 | static int build_sched_domains(const struct cpumask *cpu_map, |
| 7647 | struct sched_domain_attr *attr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7648 | { |
Andreas Herrmann | 2109b99 | 2009-08-18 12:53:00 +0200 | [diff] [blame] | 7649 | enum s_alloc alloc_state = sa_none; |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7650 | struct sched_domain *sd; |
Andreas Herrmann | 49a02c5 | 2009-08-18 12:51:52 +0200 | [diff] [blame] | 7651 | struct s_data d; |
Peter Zijlstra | 822ff79 | 2011-04-07 14:09:51 +0200 | [diff] [blame] | 7652 | int i, ret = -ENOMEM; |
Rusty Russell | 3404c8d | 2008-11-25 02:35:03 +1030 | [diff] [blame] | 7653 | |
Andreas Herrmann | 2109b99 | 2009-08-18 12:53:00 +0200 | [diff] [blame] | 7654 | alloc_state = __visit_domain_allocation_hell(&d, cpu_map); |
| 7655 | if (alloc_state != sa_rootdomain) |
| 7656 | goto error; |
Mike Travis | 7c16ec5 | 2008-04-04 18:11:11 -0700 | [diff] [blame] | 7657 | |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7658 | /* Set up domains for cpus specified by the cpu_map. */ |
Rusty Russell | abcd083 | 2008-11-25 02:35:02 +1030 | [diff] [blame] | 7659 | for_each_cpu(i, cpu_map) { |
Peter Zijlstra | eb7a74e6 | 2011-04-07 14:10:00 +0200 | [diff] [blame] | 7660 | struct sched_domain_topology_level *tl; |
| 7661 | |
Peter Zijlstra | 3bd65a8 | 2011-04-07 14:09:54 +0200 | [diff] [blame] | 7662 | sd = NULL; |
Peter Zijlstra | e3589f6 | 2011-07-15 10:35:52 +0200 | [diff] [blame] | 7663 | for (tl = sched_domain_topology; tl->init; tl++) { |
Peter Zijlstra | 2c402dc | 2011-04-07 14:10:01 +0200 | [diff] [blame] | 7664 | sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i); |
Peter Zijlstra | e3589f6 | 2011-07-15 10:35:52 +0200 | [diff] [blame] | 7665 | if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP)) |
| 7666 | sd->flags |= SD_OVERLAP; |
Peter Zijlstra | d110235 | 2011-07-20 18:42:57 +0200 | [diff] [blame] | 7667 | if (cpumask_equal(cpu_map, sched_domain_span(sd))) |
| 7668 | break; |
Peter Zijlstra | e3589f6 | 2011-07-15 10:35:52 +0200 | [diff] [blame] | 7669 | } |
Peter Zijlstra | d274cb3 | 2011-04-07 14:09:43 +0200 | [diff] [blame] | 7670 | |
Peter Zijlstra | d069b91 | 2011-04-07 14:10:02 +0200 | [diff] [blame] | 7671 | while (sd->child) |
| 7672 | sd = sd->child; |
| 7673 | |
Peter Zijlstra | 21d42cc | 2011-04-07 14:09:48 +0200 | [diff] [blame] | 7674 | *per_cpu_ptr(d.sd, i) = sd; |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7675 | } |
Peter Zijlstra | 21d42cc | 2011-04-07 14:09:48 +0200 | [diff] [blame] | 7676 | |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7677 | /* Build the groups for the domains */ |
| 7678 | for_each_cpu(i, cpu_map) { |
| 7679 | for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { |
| 7680 | sd->span_weight = cpumask_weight(sched_domain_span(sd)); |
Peter Zijlstra | e3589f6 | 2011-07-15 10:35:52 +0200 | [diff] [blame] | 7681 | if (sd->flags & SD_OVERLAP) { |
| 7682 | if (build_overlap_sched_groups(sd, i)) |
| 7683 | goto error; |
| 7684 | } else { |
| 7685 | if (build_sched_groups(sd, i)) |
| 7686 | goto error; |
| 7687 | } |
Peter Zijlstra | 1cf51902 | 2011-04-07 14:09:47 +0200 | [diff] [blame] | 7688 | } |
Peter Zijlstra | a06dadb | 2011-04-07 14:09:44 +0200 | [diff] [blame] | 7689 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7690 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7691 | /* Calculate CPU power for physical packages and nodes */ |
Peter Zijlstra | a9c9a9b | 2011-04-07 14:09:49 +0200 | [diff] [blame] | 7692 | for (i = nr_cpumask_bits-1; i >= 0; i--) { |
| 7693 | if (!cpumask_test_cpu(i, cpu_map)) |
| 7694 | continue; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7695 | |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7696 | for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { |
| 7697 | claim_allocations(i, sd); |
Peter Zijlstra | cd4ea6a | 2011-04-07 14:09:45 +0200 | [diff] [blame] | 7698 | init_sched_groups_power(i, sd); |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7699 | } |
Siddha, Suresh B | f712c0c | 2006-07-30 03:02:59 -0700 | [diff] [blame] | 7700 | } |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 7701 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7702 | /* Attach the domains */ |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7703 | rcu_read_lock(); |
Rusty Russell | abcd083 | 2008-11-25 02:35:02 +1030 | [diff] [blame] | 7704 | for_each_cpu(i, cpu_map) { |
Peter Zijlstra | 21d42cc | 2011-04-07 14:09:48 +0200 | [diff] [blame] | 7705 | sd = *per_cpu_ptr(d.sd, i); |
Andreas Herrmann | 49a02c5 | 2009-08-18 12:51:52 +0200 | [diff] [blame] | 7706 | cpu_attach_domain(sd, d.rd, i); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7707 | } |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7708 | rcu_read_unlock(); |
Srivatsa Vaddagiri | 51888ca | 2006-06-27 02:54:38 -0700 | [diff] [blame] | 7709 | |
Peter Zijlstra | 822ff79 | 2011-04-07 14:09:51 +0200 | [diff] [blame] | 7710 | ret = 0; |
Srivatsa Vaddagiri | 51888ca | 2006-06-27 02:54:38 -0700 | [diff] [blame] | 7711 | error: |
Andreas Herrmann | 2109b99 | 2009-08-18 12:53:00 +0200 | [diff] [blame] | 7712 | __free_domain_allocs(&d, alloc_state, cpu_map); |
Peter Zijlstra | 822ff79 | 2011-04-07 14:09:51 +0200 | [diff] [blame] | 7713 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7714 | } |
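/*
 * Editorial note: on success the fall-through into the error label runs
 * the same __free_domain_allocs(&d, sa_rootdomain, cpu_map) as on
 * failure; this is safe because cpu_attach_domain() has taken references
 * on d.rd and claim_allocations() has NULLed every per-cpu pointer that
 * is still in use, so only the unused scaffolding is released.
 */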
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7715 | |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7716 | static cpumask_var_t *doms_cur; /* current sched domains */ |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7717 | static int ndoms_cur; /* number of sched domains in 'doms_cur' */ |
Ingo Molnar | 4285f594 | 2008-05-16 17:47:14 +0200 | [diff] [blame] | 7718 | static struct sched_domain_attr *dattr_cur; |
 | 7719 | /* attributes of custom domains in 'doms_cur' */ |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7720 | |
| 7721 | /* |
| 7722 | * Special case: If a kmalloc of a doms_cur partition (array of |
Rusty Russell | 4212823 | 2008-11-25 02:35:12 +1030 | [diff] [blame] | 7723 | * cpumask) fails, then fall back to a single sched domain, |
| 7724 | * as determined by the single cpumask fallback_doms. |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7725 | */ |
Rusty Russell | 4212823 | 2008-11-25 02:35:12 +1030 | [diff] [blame] | 7726 | static cpumask_var_t fallback_doms; |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7727 | |
Heiko Carstens | ee79d1b | 2008-12-09 18:49:50 +0100 | [diff] [blame] | 7728 | /* |
| 7729 | * arch_update_cpu_topology lets virtualized architectures update the |
| 7730 | * cpu core maps. It is supposed to return 1 if the topology changed |
| 7731 | * or 0 if it stayed the same. |
| 7732 | */ |
| 7733 | int __attribute__((weak)) arch_update_cpu_topology(void) |
Heiko Carstens | 22e52b0 | 2008-03-12 18:31:59 +0100 | [diff] [blame] | 7734 | { |
Heiko Carstens | ee79d1b | 2008-12-09 18:49:50 +0100 | [diff] [blame] | 7735 | return 0; |
Heiko Carstens | 22e52b0 | 2008-03-12 18:31:59 +0100 | [diff] [blame] | 7736 | } |
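/*
 * Editorial example: an architecture with hypervisor-managed topology
 * (s390 does this) can override the weak stub above, e.g. (where
 * machine_topology_changed() is a hypothetical helper):
 *
 *	int arch_update_cpu_topology(void)
 *	{
 *		return machine_topology_changed() ? 1 : 0;
 *	}
 */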
| 7737 | |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7738 | cpumask_var_t *alloc_sched_domains(unsigned int ndoms) |
| 7739 | { |
| 7740 | int i; |
| 7741 | cpumask_var_t *doms; |
| 7742 | |
| 7743 | doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL); |
| 7744 | if (!doms) |
| 7745 | return NULL; |
| 7746 | for (i = 0; i < ndoms; i++) { |
| 7747 | if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) { |
| 7748 | free_sched_domains(doms, i); |
| 7749 | return NULL; |
| 7750 | } |
| 7751 | } |
| 7752 | return doms; |
| 7753 | } |
| 7754 | |
| 7755 | void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms) |
| 7756 | { |
| 7757 | unsigned int i; |
| 7758 | for (i = 0; i < ndoms; i++) |
| 7759 | free_cpumask_var(doms[i]); |
| 7760 | kfree(doms); |
| 7761 | } |
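/*
 * Editorial usage sketch for the pair above (hypothetical caller):
 *
 *	cpumask_var_t *doms = alloc_sched_domains(2);
 *	if (doms) {
 *		cpumask_copy(doms[0], some_mask);
 *		cpumask_copy(doms[1], other_mask);
 *		partition_sched_domains(2, doms, NULL);
 *	}
 *
 * Ownership of 'doms' passes to partition_sched_domains(), which calls
 * free_sched_domains() on it once it is no longer the current set;
 * some_mask/other_mask stand for non-overlapping cpumasks.
 */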
| 7762 | |
Dinakar Guniguntala | 1a20ff2 | 2005-06-25 14:57:33 -0700 | [diff] [blame] | 7763 | /* |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 7764 | * Set up scheduler domains and groups. Callers must hold the hotplug lock. |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7765 | * For now this just excludes isolated cpus, but could be used to |
| 7766 | * exclude other special cases in the future. |
Dinakar Guniguntala | 1a20ff2 | 2005-06-25 14:57:33 -0700 | [diff] [blame] | 7767 | */ |
Peter Zijlstra | c4a8849 | 2011-04-07 14:09:42 +0200 | [diff] [blame] | 7768 | static int init_sched_domains(const struct cpumask *cpu_map) |
Dinakar Guniguntala | 1a20ff2 | 2005-06-25 14:57:33 -0700 | [diff] [blame] | 7769 | { |
Milton Miller | 7378547 | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 7770 | int err; |
| 7771 | |
Heiko Carstens | 22e52b0 | 2008-03-12 18:31:59 +0100 | [diff] [blame] | 7772 | arch_update_cpu_topology(); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7773 | ndoms_cur = 1; |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7774 | doms_cur = alloc_sched_domains(ndoms_cur); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7775 | if (!doms_cur) |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7776 | doms_cur = &fallback_doms; |
| 7777 | cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map); |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 7778 | dattr_cur = NULL; |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7779 | err = build_sched_domains(doms_cur[0], NULL); |
Milton Miller | 6382bc9 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 7780 | register_sched_domain_sysctl(); |
Milton Miller | 7378547 | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 7781 | |
| 7782 | return err; |
Dinakar Guniguntala | 1a20ff2 | 2005-06-25 14:57:33 -0700 | [diff] [blame] | 7783 | } |
| 7784 | |
Dinakar Guniguntala | 1a20ff2 | 2005-06-25 14:57:33 -0700 | [diff] [blame] | 7785 | /* |
 | 7786 | * Detach sched domains from a group of cpus specified in cpu_map. |
 | 7787 | * These cpus will now be attached to the NULL domain. |
| 7788 | */ |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 7789 | static void detach_destroy_domains(const struct cpumask *cpu_map) |
Dinakar Guniguntala | 1a20ff2 | 2005-06-25 14:57:33 -0700 | [diff] [blame] | 7790 | { |
| 7791 | int i; |
| 7792 | |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7793 | rcu_read_lock(); |
Rusty Russell | abcd083 | 2008-11-25 02:35:02 +1030 | [diff] [blame] | 7794 | for_each_cpu(i, cpu_map) |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 7795 | cpu_attach_domain(NULL, &def_root_domain, i); |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7796 | rcu_read_unlock(); |
Dinakar Guniguntala | 1a20ff2 | 2005-06-25 14:57:33 -0700 | [diff] [blame] | 7797 | } |
| 7798 | |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 7799 | /* handle null as "default" */ |
| 7800 | static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, |
| 7801 | struct sched_domain_attr *new, int idx_new) |
| 7802 | { |
| 7803 | struct sched_domain_attr tmp; |
| 7804 | |
| 7805 | /* fast path */ |
| 7806 | if (!new && !cur) |
| 7807 | return 1; |
| 7808 | |
| 7809 | tmp = SD_ATTR_INIT; |
| 7810 | return !memcmp(cur ? (cur + idx_cur) : &tmp, |
| 7811 | new ? (new + idx_new) : &tmp, |
| 7812 | sizeof(struct sched_domain_attr)); |
| 7813 | } |
| 7814 | |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7815 | /* |
| 7816 | * Partition sched domains as specified by the 'ndoms_new' |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 7817 | * cpumasks in the array doms_new[]. This compares |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7818 | * doms_new[] to the current sched domain partitioning, doms_cur[]. |
| 7819 | * It destroys each deleted domain and builds each new domain. |
| 7820 | * |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7821 | * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'. |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 7822 | * The masks don't intersect (don't overlap). We should set up one |
| 7823 | * sched domain for each mask. CPUs not in any of the cpumasks will |
| 7824 | * not be load balanced. If the same cpumask appears both in the |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7825 | * current 'doms_cur' domains and in the new 'doms_new', we can leave |
| 7826 | * it as it is. |
| 7827 | * |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7828 | * The passed in 'doms_new' should be allocated using |
| 7829 | * alloc_sched_domains. This routine takes ownership of it and will |
| 7830 | * free_sched_domains it when done with it. If the caller failed the |
| 7831 | * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1, |
 | 7832 | * and partition_sched_domains() will fall back to the single partition |
 | 7833 | * 'fallback_doms'; this also forces the domains to be rebuilt. |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7834 | * |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 7835 | * If doms_new == NULL it will be replaced with cpu_online_mask. |
Li Zefan | 700018e | 2008-11-18 14:02:03 +0800 | [diff] [blame] | 7836 | * ndoms_new == 0 is a special case for destroying existing domains, |
| 7837 | * and it will not create the default domain. |
Max Krasnyansky | dfb512e | 2008-08-29 13:11:41 -0700 | [diff] [blame] | 7838 | * |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7839 | * Call with hotplug lock held |
| 7840 | */ |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7841 | void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 7842 | struct sched_domain_attr *dattr_new) |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7843 | { |
Max Krasnyansky | dfb512e | 2008-08-29 13:11:41 -0700 | [diff] [blame] | 7844 | int i, j, n; |
Heiko Carstens | d65bd5e | 2008-12-09 18:49:51 +0100 | [diff] [blame] | 7845 | int new_topology; |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7846 | |
Heiko Carstens | 712555e | 2008-04-28 11:33:07 +0200 | [diff] [blame] | 7847 | mutex_lock(&sched_domains_mutex); |
Srivatsa Vaddagiri | a183561 | 2008-01-25 21:08:00 +0100 | [diff] [blame] | 7848 | |
Milton Miller | 7378547 | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 7849 | /* always unregister in case we don't destroy any domains */ |
| 7850 | unregister_sched_domain_sysctl(); |
| 7851 | |
Heiko Carstens | d65bd5e | 2008-12-09 18:49:51 +0100 | [diff] [blame] | 7852 | /* Let architecture update cpu core mappings. */ |
| 7853 | new_topology = arch_update_cpu_topology(); |
| 7854 | |
Max Krasnyansky | dfb512e | 2008-08-29 13:11:41 -0700 | [diff] [blame] | 7855 | n = doms_new ? ndoms_new : 0; |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7856 | |
| 7857 | /* Destroy deleted domains */ |
| 7858 | for (i = 0; i < ndoms_cur; i++) { |
Heiko Carstens | d65bd5e | 2008-12-09 18:49:51 +0100 | [diff] [blame] | 7859 | for (j = 0; j < n && !new_topology; j++) { |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7860 | if (cpumask_equal(doms_cur[i], doms_new[j]) |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 7861 | && dattrs_equal(dattr_cur, i, dattr_new, j)) |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7862 | goto match1; |
| 7863 | } |
| 7864 | /* no match - a current sched domain not in new doms_new[] */ |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7865 | detach_destroy_domains(doms_cur[i]); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7866 | match1: |
| 7867 | ; |
| 7868 | } |
| 7869 | |
Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 7870 | if (doms_new == NULL) { |
| 7871 | ndoms_cur = 0; |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7872 | doms_new = &fallback_doms; |
Peter Zijlstra | 6ad4c18 | 2009-11-25 13:31:39 +0100 | [diff] [blame] | 7873 | cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map); |
Li Zefan | faa2f98 | 2008-11-04 16:20:23 +0800 | [diff] [blame] | 7874 | WARN_ON_ONCE(dattr_new); |
Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 7875 | } |
| 7876 | |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7877 | /* Build new domains */ |
| 7878 | for (i = 0; i < ndoms_new; i++) { |
Heiko Carstens | d65bd5e | 2008-12-09 18:49:51 +0100 | [diff] [blame] | 7879 | for (j = 0; j < ndoms_cur && !new_topology; j++) { |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7880 | if (cpumask_equal(doms_new[i], doms_cur[j]) |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 7881 | && dattrs_equal(dattr_new, i, dattr_cur, j)) |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7882 | goto match2; |
| 7883 | } |
| 7884 | /* no match - add a new doms_new */ |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 7885 | build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7886 | match2: |
| 7887 | ; |
| 7888 | } |
| 7889 | |
| 7890 | /* Remember the new sched domains */ |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7891 | if (doms_cur != &fallback_doms) |
| 7892 | free_sched_domains(doms_cur, ndoms_cur); |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 7893 | kfree(dattr_cur); /* kfree(NULL) is safe */ |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7894 | doms_cur = doms_new; |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 7895 | dattr_cur = dattr_new; |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7896 | ndoms_cur = ndoms_new; |
Milton Miller | 7378547 | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 7897 | |
| 7898 | register_sched_domain_sysctl(); |
Srivatsa Vaddagiri | a183561 | 2008-01-25 21:08:00 +0100 | [diff] [blame] | 7899 | |
Heiko Carstens | 712555e | 2008-04-28 11:33:07 +0200 | [diff] [blame] | 7900 | mutex_unlock(&sched_domains_mutex); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7901 | } |
| 7902 | |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7903 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) |
Peter Zijlstra | c4a8849 | 2011-04-07 14:09:42 +0200 | [diff] [blame] | 7904 | static void reinit_sched_domains(void) |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7905 | { |
Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 7906 | get_online_cpus(); |
Max Krasnyansky | dfb512e | 2008-08-29 13:11:41 -0700 | [diff] [blame] | 7907 | |
| 7908 | /* Destroy domains first to force the rebuild */ |
| 7909 | partition_sched_domains(0, NULL, NULL); |
| 7910 | |
Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 7911 | rebuild_sched_domains(); |
Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 7912 | put_online_cpus(); |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7913 | } |
| 7914 | |
| 7915 | static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt) |
| 7916 | { |
Gautham R Shenoy | afb8a9b | 2008-12-18 23:26:09 +0530 | [diff] [blame] | 7917 | unsigned int level = 0; |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7918 | |
Gautham R Shenoy | afb8a9b | 2008-12-18 23:26:09 +0530 | [diff] [blame] | 7919 | if (sscanf(buf, "%u", &level) != 1) |
| 7920 | return -EINVAL; |
| 7921 | |
| 7922 | /* |
 | 7923 | * level is always positive, so don't check for |
 | 7924 | * level < POWERSAVINGS_BALANCE_NONE (which is 0). |
 | 7925 | * On a 0- or 1-byte write, do we also |
 | 7926 | * need to check count? |
| 7927 | */ |
| 7928 | |
| 7929 | if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS) |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7930 | return -EINVAL; |
| 7931 | |
| 7932 | if (smt) |
Gautham R Shenoy | afb8a9b | 2008-12-18 23:26:09 +0530 | [diff] [blame] | 7933 | sched_smt_power_savings = level; |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7934 | else |
Gautham R Shenoy | afb8a9b | 2008-12-18 23:26:09 +0530 | [diff] [blame] | 7935 | sched_mc_power_savings = level; |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7936 | |
Peter Zijlstra | c4a8849 | 2011-04-07 14:09:42 +0200 | [diff] [blame] | 7937 | reinit_sched_domains(); |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7938 | |
Li Zefan | c70f22d | 2009-01-05 19:07:50 +0800 | [diff] [blame] | 7939 | return count; |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7940 | } |
| 7941 | |
Adrian Bunk | 6707de00 | 2007-08-12 18:08:19 +0200 | [diff] [blame] | 7942 | #ifdef CONFIG_SCHED_MC |
Andi Kleen | f718cd4 | 2008-07-29 22:33:52 -0700 | [diff] [blame] | 7943 | static ssize_t sched_mc_power_savings_show(struct sysdev_class *class, |
Andi Kleen | c9be0a3 | 2010-01-05 12:47:58 +0100 | [diff] [blame] | 7944 | struct sysdev_class_attribute *attr, |
Andi Kleen | f718cd4 | 2008-07-29 22:33:52 -0700 | [diff] [blame] | 7945 | char *page) |
Adrian Bunk | 6707de00 | 2007-08-12 18:08:19 +0200 | [diff] [blame] | 7946 | { |
| 7947 | return sprintf(page, "%u\n", sched_mc_power_savings); |
| 7948 | } |
Andi Kleen | f718cd4 | 2008-07-29 22:33:52 -0700 | [diff] [blame] | 7949 | static ssize_t sched_mc_power_savings_store(struct sysdev_class *class, |
Andi Kleen | c9be0a3 | 2010-01-05 12:47:58 +0100 | [diff] [blame] | 7950 | struct sysdev_class_attribute *attr, |
Adrian Bunk | 6707de00 | 2007-08-12 18:08:19 +0200 | [diff] [blame] | 7951 | const char *buf, size_t count) |
| 7952 | { |
| 7953 | return sched_power_savings_store(buf, count, 0); |
| 7954 | } |
Andi Kleen | f718cd4 | 2008-07-29 22:33:52 -0700 | [diff] [blame] | 7955 | static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644, |
| 7956 | sched_mc_power_savings_show, |
| 7957 | sched_mc_power_savings_store); |
Adrian Bunk | 6707de00 | 2007-08-12 18:08:19 +0200 | [diff] [blame] | 7958 | #endif |
| 7959 | |
| 7960 | #ifdef CONFIG_SCHED_SMT |
Andi Kleen | f718cd4 | 2008-07-29 22:33:52 -0700 | [diff] [blame] | 7961 | static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev, |
Andi Kleen | c9be0a3 | 2010-01-05 12:47:58 +0100 | [diff] [blame] | 7962 | struct sysdev_class_attribute *attr, |
Andi Kleen | f718cd4 | 2008-07-29 22:33:52 -0700 | [diff] [blame] | 7963 | char *page) |
Adrian Bunk | 6707de00 | 2007-08-12 18:08:19 +0200 | [diff] [blame] | 7964 | { |
| 7965 | return sprintf(page, "%u\n", sched_smt_power_savings); |
| 7966 | } |
Andi Kleen | f718cd4 | 2008-07-29 22:33:52 -0700 | [diff] [blame] | 7967 | static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev, |
Andi Kleen | c9be0a3 | 2010-01-05 12:47:58 +0100 | [diff] [blame] | 7968 | struct sysdev_class_attribute *attr, |
Adrian Bunk | 6707de00 | 2007-08-12 18:08:19 +0200 | [diff] [blame] | 7969 | const char *buf, size_t count) |
| 7970 | { |
| 7971 | return sched_power_savings_store(buf, count, 1); |
| 7972 | } |
Andi Kleen | f718cd4 | 2008-07-29 22:33:52 -0700 | [diff] [blame] | 7973 | static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644, |
| 7974 | sched_smt_power_savings_show, |
Adrian Bunk | 6707de00 | 2007-08-12 18:08:19 +0200 | [diff] [blame] | 7975 | sched_smt_power_savings_store); |
| 7976 | #endif |
| 7977 | |
Li Zefan | 39aac64 | 2009-01-05 19:18:02 +0800 | [diff] [blame] | 7978 | int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls) |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7979 | { |
| 7980 | int err = 0; |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 7981 | |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7982 | #ifdef CONFIG_SCHED_SMT |
| 7983 | if (smt_capable()) |
| 7984 | err = sysfs_create_file(&cls->kset.kobj, |
| 7985 | &attr_sched_smt_power_savings.attr); |
| 7986 | #endif |
| 7987 | #ifdef CONFIG_SCHED_MC |
| 7988 | if (!err && mc_capable()) |
| 7989 | err = sysfs_create_file(&cls->kset.kobj, |
| 7990 | &attr_sched_mc_power_savings.attr); |
| 7991 | #endif |
| 7992 | return err; |
| 7993 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 7994 | #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */ |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7995 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7996 | /* |
Tejun Heo | 3a101d0 | 2010-06-08 21:40:36 +0200 | [diff] [blame] | 7997 | * Update cpusets according to cpu_active mask. If cpusets are |
| 7998 | * disabled, cpuset_update_active_cpus() becomes a simple wrapper |
| 7999 | * around partition_sched_domains(). |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8000 | */ |
Tejun Heo | 0b2e918 | 2010-06-21 23:53:31 +0200 | [diff] [blame] | 8001 | static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action, |
| 8002 | void *hcpu) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8003 | { |
Tejun Heo | 3a101d0 | 2010-06-08 21:40:36 +0200 | [diff] [blame] | 8004 | switch (action & ~CPU_TASKS_FROZEN) { |
Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 8005 | case CPU_ONLINE: |
Peter Zijlstra | 6ad4c18 | 2009-11-25 13:31:39 +0100 | [diff] [blame] | 8006 | case CPU_DOWN_FAILED: |
Tejun Heo | 3a101d0 | 2010-06-08 21:40:36 +0200 | [diff] [blame] | 8007 | cpuset_update_active_cpus(); |
Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 8008 | return NOTIFY_OK; |
Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 8009 | default: |
| 8010 | return NOTIFY_DONE; |
| 8011 | } |
| 8012 | } |
Tejun Heo | 3a101d0 | 2010-06-08 21:40:36 +0200 | [diff] [blame] | 8013 | |
Tejun Heo | 0b2e918 | 2010-06-21 23:53:31 +0200 | [diff] [blame] | 8014 | static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action, |
| 8015 | void *hcpu) |
Tejun Heo | 3a101d0 | 2010-06-08 21:40:36 +0200 | [diff] [blame] | 8016 | { |
| 8017 | switch (action & ~CPU_TASKS_FROZEN) { |
| 8018 | case CPU_DOWN_PREPARE: |
| 8019 | cpuset_update_active_cpus(); |
| 8020 | return NOTIFY_OK; |
| 8021 | default: |
| 8022 | return NOTIFY_DONE; |
| 8023 | } |
| 8024 | } |
Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 8025 | |
| 8026 | static int update_runtime(struct notifier_block *nfb, |
| 8027 | unsigned long action, void *hcpu) |
| 8028 | { |
Peter Zijlstra | 7def2be | 2008-06-05 14:49:58 +0200 | [diff] [blame] | 8029 | int cpu = (int)(long)hcpu; |
| 8030 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8031 | switch (action) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8032 | case CPU_DOWN_PREPARE: |
Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 8033 | case CPU_DOWN_PREPARE_FROZEN: |
Peter Zijlstra | 7def2be | 2008-06-05 14:49:58 +0200 | [diff] [blame] | 8034 | disable_runtime(cpu_rq(cpu)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8035 | return NOTIFY_OK; |
| 8036 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8037 | case CPU_DOWN_FAILED: |
Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 8038 | case CPU_DOWN_FAILED_FROZEN: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8039 | case CPU_ONLINE: |
Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 8040 | case CPU_ONLINE_FROZEN: |
Peter Zijlstra | 7def2be | 2008-06-05 14:49:58 +0200 | [diff] [blame] | 8041 | enable_runtime(cpu_rq(cpu)); |
Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 8042 | return NOTIFY_OK; |
| 8043 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8044 | default: |
| 8045 | return NOTIFY_DONE; |
| 8046 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8047 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8048 | |
| 8049 | void __init sched_init_smp(void) |
| 8050 | { |
Rusty Russell | dcc30a3 | 2008-11-25 02:35:12 +1030 | [diff] [blame] | 8051 | cpumask_var_t non_isolated_cpus; |
| 8052 | |
| 8053 | alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL); |
Yong Zhang | cb5fd13 | 2009-09-14 20:20:16 +0800 | [diff] [blame] | 8054 | alloc_cpumask_var(&fallback_doms, GFP_KERNEL); |
Nick Piggin | 5c1e176 | 2006-10-03 01:14:04 -0700 | [diff] [blame] | 8055 | |
Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 8056 | get_online_cpus(); |
Heiko Carstens | 712555e | 2008-04-28 11:33:07 +0200 | [diff] [blame] | 8057 | mutex_lock(&sched_domains_mutex); |
Peter Zijlstra | c4a8849 | 2011-04-07 14:09:42 +0200 | [diff] [blame] | 8058 | init_sched_domains(cpu_active_mask); |
Rusty Russell | dcc30a3 | 2008-11-25 02:35:12 +1030 | [diff] [blame] | 8059 | cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); |
| 8060 | if (cpumask_empty(non_isolated_cpus)) |
| 8061 | cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); |
Heiko Carstens | 712555e | 2008-04-28 11:33:07 +0200 | [diff] [blame] | 8062 | mutex_unlock(&sched_domains_mutex); |
Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 8063 | put_online_cpus(); |
Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 8064 | |
Tejun Heo | 3a101d0 | 2010-06-08 21:40:36 +0200 | [diff] [blame] | 8065 | hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE); |
| 8066 | hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE); |
Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 8067 | |
| 8068 | /* RT runtime code needs to handle some hotplug events */ |
| 8069 | hotcpu_notifier(update_runtime, 0); |
| 8070 | |
Peter Zijlstra | b328ca1 | 2008-04-29 10:02:46 +0200 | [diff] [blame] | 8071 | init_hrtick(); |
Nick Piggin | 5c1e176 | 2006-10-03 01:14:04 -0700 | [diff] [blame] | 8072 | |
| 8073 | /* Move init over to a non-isolated CPU */ |
Rusty Russell | dcc30a3 | 2008-11-25 02:35:12 +1030 | [diff] [blame] | 8074 | if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0) |
Nick Piggin | 5c1e176 | 2006-10-03 01:14:04 -0700 | [diff] [blame] | 8075 | BUG(); |
Ingo Molnar | 19978ca | 2007-11-09 22:39:38 +0100 | [diff] [blame] | 8076 | sched_init_granularity(); |
Rusty Russell | dcc30a3 | 2008-11-25 02:35:12 +1030 | [diff] [blame] | 8077 | free_cpumask_var(non_isolated_cpus); |
Rusty Russell | 4212823 | 2008-11-25 02:35:12 +1030 | [diff] [blame] | 8078 | |
Rusty Russell | 0e3900e | 2008-11-25 02:35:13 +1030 | [diff] [blame] | 8079 | init_sched_rt_class(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8080 | } |
| 8081 | #else |
| 8082 | void __init sched_init_smp(void) |
| 8083 | { |
Ingo Molnar | 19978ca | 2007-11-09 22:39:38 +0100 | [diff] [blame] | 8084 | sched_init_granularity(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8085 | } |
| 8086 | #endif /* CONFIG_SMP */ |
| 8087 | |
Arun R Bharadwaj | cd1bb94 | 2009-04-16 12:15:34 +0530 | [diff] [blame] | 8088 | const_debug unsigned int sysctl_timer_migration = 1; |
| 8089 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8090 | int in_sched_functions(unsigned long addr) |
| 8091 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8092 | return in_lock_functions(addr) || |
| 8093 | (addr >= (unsigned long)__sched_text_start |
| 8094 | && addr < (unsigned long)__sched_text_end); |
| 8095 | } |
| 8096 | |
Jan H. Schönherr | acb5a9b | 2011-07-14 18:32:43 +0200 | [diff] [blame] | 8097 | static void init_cfs_rq(struct cfs_rq *cfs_rq) |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 8098 | { |
| 8099 | cfs_rq->tasks_timeline = RB_ROOT; |
Peter Zijlstra | 4a55bd5 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 8100 | INIT_LIST_HEAD(&cfs_rq->tasks); |
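	/*
	 * Editorial note: the large negative starting offset below is
	 * believed to be there so that u64 wraparound bugs in vruntime
	 * comparisons surface early rather than after long uptimes.
	 */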
Peter Zijlstra | 67e9fb2 | 2007-10-15 17:00:10 +0200 | [diff] [blame] | 8101 | cfs_rq->min_vruntime = (u64)(-(1LL << 20)); |
Peter Zijlstra | c64be78 | 2011-07-11 16:28:50 +0200 | [diff] [blame] | 8102 | #ifndef CONFIG_64BIT |
| 8103 | cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime; |
| 8104 | #endif |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 8105 | } |
| 8106 | |
Peter Zijlstra | fa85ae2 | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 8107 | static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq) |
| 8108 | { |
| 8109 | struct rt_prio_array *array; |
| 8110 | int i; |
| 8111 | |
| 8112 | array = &rt_rq->active; |
| 8113 | for (i = 0; i < MAX_RT_PRIO; i++) { |
| 8114 | INIT_LIST_HEAD(array->queue + i); |
| 8115 | __clear_bit(i, array->bitmap); |
| 8116 | } |
| 8117 | /* delimiter for bitsearch: */ |
| 8118 | __set_bit(MAX_RT_PRIO, array->bitmap); |
| 8119 | |
Jan H. Schönherr | acb5a9b | 2011-07-14 18:32:43 +0200 | [diff] [blame] | 8120 | #if defined CONFIG_SMP |
Gregory Haskins | e864c49 | 2008-12-29 09:39:49 -0500 | [diff] [blame] | 8121 | rt_rq->highest_prio.curr = MAX_RT_PRIO; |
| 8122 | rt_rq->highest_prio.next = MAX_RT_PRIO; |
Peter Zijlstra | fa85ae2 | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 8123 | rt_rq->rt_nr_migratory = 0; |
Peter Zijlstra | fa85ae2 | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 8124 | rt_rq->overloaded = 0; |
Dima Zavin | 732375c | 2011-07-07 17:27:59 -0700 | [diff] [blame] | 8125 | plist_head_init(&rt_rq->pushable_tasks); |
Peter Zijlstra | fa85ae2 | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 8126 | #endif |
| 8127 | |
| 8128 | rt_rq->rt_time = 0; |
| 8129 | rt_rq->rt_throttled = 0; |
Peter Zijlstra | ac086bc | 2008-04-19 19:44:58 +0200 | [diff] [blame] | 8130 | rt_rq->rt_runtime = 0; |
Thomas Gleixner | 0986b11 | 2009-11-17 15:32:06 +0100 | [diff] [blame] | 8131 | raw_spin_lock_init(&rt_rq->rt_runtime_lock); |
Peter Zijlstra | fa85ae2 | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 8132 | } |
| 8133 | |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8134 | #ifdef CONFIG_FAIR_GROUP_SCHED |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8135 | static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, |
Peter Zijlstra | 3d4b47b | 2010-11-15 15:47:01 -0800 | [diff] [blame] | 8136 | struct sched_entity *se, int cpu, |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8137 | struct sched_entity *parent) |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8138 | { |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8139 | struct rq *rq = cpu_rq(cpu); |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8140 | |
Jan H. Schönherr | acb5a9b | 2011-07-14 18:32:43 +0200 | [diff] [blame] | 8141 | cfs_rq->tg = tg; |
| 8142 | cfs_rq->rq = rq; |
| 8143 | #ifdef CONFIG_SMP |
| 8144 | /* allow initial update_cfs_load() to truncate */ |
| 8145 | cfs_rq->load_stamp = 1; |
| 8146 | #endif |
Paul Turner | ab84d31 | 2011-07-21 09:43:28 -0700 | [diff] [blame] | 8147 | init_cfs_rq_runtime(cfs_rq); |
Jan H. Schönherr | acb5a9b | 2011-07-14 18:32:43 +0200 | [diff] [blame] | 8148 | |
| 8149 | tg->cfs_rq[cpu] = cfs_rq; |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8150 | tg->se[cpu] = se; |
Jan H. Schönherr | acb5a9b | 2011-07-14 18:32:43 +0200 | [diff] [blame] | 8151 | |
Yong Zhang | 07e06b0 | 2011-01-07 15:17:36 +0800 | [diff] [blame] | 8152 | /* se could be NULL for root_task_group */ |
Dhaval Giani | 354d60c | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8153 | if (!se) |
| 8154 | return; |
| 8155 | |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8156 | if (!parent) |
| 8157 | se->cfs_rq = &rq->cfs; |
| 8158 | else |
| 8159 | se->cfs_rq = parent->my_q; |
| 8160 | |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8161 | se->my_q = cfs_rq; |
Paul Turner | 9437178 | 2010-11-15 15:47:10 -0800 | [diff] [blame] | 8162 | update_load_set(&se->load, 0); |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8163 | se->parent = parent; |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8164 | } |
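/*
 * Editorial illustration: after init_tg_cfs_entry() runs for a non-root
 * group on one cpu, the linkage is
 *
 *	se->cfs_rq -> the parent group's cfs_rq (where se is enqueued)
 *	se->my_q   -> this group's own cfs_rq   (the queue se represents)
 *
 * so a group appears to its parent as a single schedulable entity.
 */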
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8165 | #endif |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8166 | |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8167 | #ifdef CONFIG_RT_GROUP_SCHED |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8168 | static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, |
Peter Zijlstra | 3d4b47b | 2010-11-15 15:47:01 -0800 | [diff] [blame] | 8169 | struct sched_rt_entity *rt_se, int cpu, |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8170 | struct sched_rt_entity *parent) |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8171 | { |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8172 | struct rq *rq = cpu_rq(cpu); |
| 8173 | |
Jan H. Schönherr | acb5a9b | 2011-07-14 18:32:43 +0200 | [diff] [blame] | 8174 | rt_rq->highest_prio.curr = MAX_RT_PRIO; |
| 8175 | rt_rq->rt_nr_boosted = 0; |
| 8176 | rt_rq->rq = rq; |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8177 | rt_rq->tg = tg; |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8178 | |
Jan H. Schönherr | acb5a9b | 2011-07-14 18:32:43 +0200 | [diff] [blame] | 8179 | tg->rt_rq[cpu] = rt_rq; |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8180 | tg->rt_se[cpu] = rt_se; |
Jan H. Schönherr | acb5a9b | 2011-07-14 18:32:43 +0200 | [diff] [blame] | 8181 | |
Dhaval Giani | 354d60c | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8182 | if (!rt_se) |
| 8183 | return; |
| 8184 | |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8185 | if (!parent) |
| 8186 | rt_se->rt_rq = &rq->rt; |
| 8187 | else |
| 8188 | rt_se->rt_rq = parent->my_q; |
| 8189 | |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8190 | rt_se->my_q = rt_rq; |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8191 | rt_se->parent = parent; |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8192 | INIT_LIST_HEAD(&rt_se->run_list); |
| 8193 | } |
| 8194 | #endif |
| 8195 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8196 | void __init sched_init(void) |
| 8197 | { |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 8198 | int i, j; |
Mike Travis | 434d53b | 2008-04-04 18:11:04 -0700 | [diff] [blame] | 8199 | unsigned long alloc_size = 0, ptr; |
| 8200 | |
| 8201 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 8202 | alloc_size += 2 * nr_cpu_ids * sizeof(void **); |
| 8203 | #endif |
| 8204 | #ifdef CONFIG_RT_GROUP_SCHED |
| 8205 | alloc_size += 2 * nr_cpu_ids * sizeof(void **); |
| 8206 | #endif |
Rusty Russell | df7c8e8 | 2009-03-19 15:22:20 +1030 | [diff] [blame] | 8207 | #ifdef CONFIG_CPUMASK_OFFSTACK |
Rusty Russell | 8c083f0 | 2009-03-19 15:22:20 +1030 | [diff] [blame] | 8208 | alloc_size += num_possible_cpus() * cpumask_size(); |
Rusty Russell | df7c8e8 | 2009-03-19 15:22:20 +1030 | [diff] [blame] | 8209 | #endif |
Mike Travis | 434d53b | 2008-04-04 18:11:04 -0700 | [diff] [blame] | 8210 | if (alloc_size) { |
Pekka Enberg | 36b7b6d | 2009-06-10 23:42:36 +0300 | [diff] [blame] | 8211 | ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT); |
Mike Travis | 434d53b | 2008-04-04 18:11:04 -0700 | [diff] [blame] | 8212 | |
| 8213 | #ifdef CONFIG_FAIR_GROUP_SCHED |
Yong Zhang | 07e06b0 | 2011-01-07 15:17:36 +0800 | [diff] [blame] | 8214 | root_task_group.se = (struct sched_entity **)ptr; |
Mike Travis | 434d53b | 2008-04-04 18:11:04 -0700 | [diff] [blame] | 8215 | ptr += nr_cpu_ids * sizeof(void **); |
| 8216 | |
Yong Zhang | 07e06b0 | 2011-01-07 15:17:36 +0800 | [diff] [blame] | 8217 | root_task_group.cfs_rq = (struct cfs_rq **)ptr; |
Mike Travis | 434d53b | 2008-04-04 18:11:04 -0700 | [diff] [blame] | 8218 | ptr += nr_cpu_ids * sizeof(void **); |
Peter Zijlstra | eff766a | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 8219 | |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 8220 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
Mike Travis | 434d53b | 2008-04-04 18:11:04 -0700 | [diff] [blame] | 8221 | #ifdef CONFIG_RT_GROUP_SCHED |
Yong Zhang | 07e06b0 | 2011-01-07 15:17:36 +0800 | [diff] [blame] | 8222 | root_task_group.rt_se = (struct sched_rt_entity **)ptr; |
Mike Travis | 434d53b | 2008-04-04 18:11:04 -0700 | [diff] [blame] | 8223 | ptr += nr_cpu_ids * sizeof(void **); |
| 8224 | |
Yong Zhang | 07e06b0 | 2011-01-07 15:17:36 +0800 | [diff] [blame] | 8225 | root_task_group.rt_rq = (struct rt_rq **)ptr; |
Peter Zijlstra | eff766a | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 8226 | ptr += nr_cpu_ids * sizeof(void **); |
| 8227 | |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 8228 | #endif /* CONFIG_RT_GROUP_SCHED */ |
Rusty Russell | df7c8e8 | 2009-03-19 15:22:20 +1030 | [diff] [blame] | 8229 | #ifdef CONFIG_CPUMASK_OFFSTACK |
| 8230 | for_each_possible_cpu(i) { |
| 8231 | per_cpu(load_balance_tmpmask, i) = (void *)ptr; |
| 8232 | ptr += cpumask_size(); |
| 8233 | } |
| 8234 | #endif /* CONFIG_CPUMASK_OFFSTACK */ |
Mike Travis | 434d53b | 2008-04-04 18:11:04 -0700 | [diff] [blame] | 8235 | } |
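/*
 * Editorial note: the single kzalloc() above is deliberately carved up by
 * advancing 'ptr' past each nr_cpu_ids-sized pointer array in turn, so
 * one boot-time allocation backs all of the root_task_group arrays (and
 * the offstack cpumasks) instead of several small ones.
 */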
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 8236 | |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 8237 | #ifdef CONFIG_SMP |
| 8238 | init_defrootdomain(); |
| 8239 | #endif |
| 8240 | |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8241 | init_rt_bandwidth(&def_rt_bandwidth, |
| 8242 | global_rt_period(), global_rt_runtime()); |
| 8243 | |
| 8244 | #ifdef CONFIG_RT_GROUP_SCHED |
Yong Zhang | 07e06b0 | 2011-01-07 15:17:36 +0800 | [diff] [blame] | 8245 | init_rt_bandwidth(&root_task_group.rt_bandwidth, |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8246 | global_rt_period(), global_rt_runtime()); |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 8247 | #endif /* CONFIG_RT_GROUP_SCHED */ |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8248 | |
Dhaval Giani | 7c94143 | 2010-01-20 13:26:18 +0100 | [diff] [blame] | 8249 | #ifdef CONFIG_CGROUP_SCHED |
Yong Zhang | 07e06b0 | 2011-01-07 15:17:36 +0800 | [diff] [blame] | 8250 | list_add(&root_task_group.list, &task_groups); |
| 8251 | INIT_LIST_HEAD(&root_task_group.children); |
Mike Galbraith | 5091faa | 2010-11-30 14:18:03 +0100 | [diff] [blame] | 8252 | autogroup_init(&init_task); |
Dhaval Giani | 7c94143 | 2010-01-20 13:26:18 +0100 | [diff] [blame] | 8253 | #endif /* CONFIG_CGROUP_SCHED */ |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8254 | |
KAMEZAWA Hiroyuki | 0a94502 | 2006-03-28 01:56:37 -0800 | [diff] [blame] | 8255 | for_each_possible_cpu(i) { |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 8256 | struct rq *rq; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8257 | |
| 8258 | rq = cpu_rq(i); |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 8259 | raw_spin_lock_init(&rq->lock); |
Nick Piggin | 7897986 | 2005-06-25 14:57:13 -0700 | [diff] [blame] | 8260 | rq->nr_running = 0; |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 8261 | rq->calc_load_active = 0; |
| 8262 | rq->calc_load_update = jiffies + LOAD_FREQ; |
Jan H. Schönherr | acb5a9b | 2011-07-14 18:32:43 +0200 | [diff] [blame] | 8263 | init_cfs_rq(&rq->cfs); |
Peter Zijlstra | fa85ae2 | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 8264 | init_rt_rq(&rq->rt, rq); |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8265 | #ifdef CONFIG_FAIR_GROUP_SCHED |
Yong Zhang | 07e06b0 | 2011-01-07 15:17:36 +0800 | [diff] [blame] | 8266 | root_task_group.shares = root_task_group_load; |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8267 | INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); |
Dhaval Giani | 354d60c | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8268 | /* |
Yong Zhang | 07e06b0 | 2011-01-07 15:17:36 +0800 | [diff] [blame] | 8269 | * How much cpu bandwidth does root_task_group get? |
Dhaval Giani | 354d60c | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8270 | * |
| 8271 | 		 * In case of task-groups formed through the cgroup filesystem, it
| 8272 | * gets 100% of the cpu resources in the system. This overall |
| 8273 | * system cpu resource is divided among the tasks of |
Yong Zhang | 07e06b0 | 2011-01-07 15:17:36 +0800 | [diff] [blame] | 8274 | * root_task_group and its child task-groups in a fair manner, |
Dhaval Giani | 354d60c | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8275 | * based on each entity's (task or task-group's) weight |
| 8276 | * (se->load.weight). |
| 8277 | * |
Yong Zhang | 07e06b0 | 2011-01-07 15:17:36 +0800 | [diff] [blame] | 8278 | 		 * In other words, if root_task_group has 10 tasks (of weight
Dhaval Giani | 354d60c | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8279 | * 1024) and two child groups A0 and A1 (of weight 1024 each), |
| 8280 | * then A0's share of the cpu resource is: |
| 8281 | * |
Ingo Molnar | 0d905bc | 2009-05-04 19:13:30 +0200 | [diff] [blame] | 8282 | * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33% |
Dhaval Giani | 354d60c | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8283 | * |
Yong Zhang | 07e06b0 | 2011-01-07 15:17:36 +0800 | [diff] [blame] | 8284 | * We achieve this by letting root_task_group's tasks sit |
| 8285 | * directly in rq->cfs (i.e root_task_group->se[] = NULL). |
Dhaval Giani | 354d60c | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8286 | */ |
Paul Turner | ab84d31 | 2011-07-21 09:43:28 -0700 | [diff] [blame] | 8287 | init_cfs_bandwidth(&root_task_group.cfs_bandwidth); |
Yong Zhang | 07e06b0 | 2011-01-07 15:17:36 +0800 | [diff] [blame] | 8288 | init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL); |
Dhaval Giani | 354d60c | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8289 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
| 8290 | |
| 8291 | rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8292 | #ifdef CONFIG_RT_GROUP_SCHED |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8293 | INIT_LIST_HEAD(&rq->leaf_rt_rq_list); |
Yong Zhang | 07e06b0 | 2011-01-07 15:17:36 +0800 | [diff] [blame] | 8294 | init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8295 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8296 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 8297 | for (j = 0; j < CPU_LOAD_IDX_MAX; j++) |
| 8298 | rq->cpu_load[j] = 0; |
Venkatesh Pallipadi | fdf3e95 | 2010-05-17 18:14:43 -0700 | [diff] [blame] | 8299 | |
| 8300 | rq->last_load_update_tick = jiffies; |
| 8301 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8302 | #ifdef CONFIG_SMP |
Nick Piggin | 41c7ce9 | 2005-06-25 14:57:24 -0700 | [diff] [blame] | 8303 | rq->sd = NULL; |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 8304 | rq->rd = NULL; |
Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 8305 | rq->cpu_power = SCHED_POWER_SCALE; |
Gregory Haskins | 3f029d3 | 2009-07-29 11:08:47 -0400 | [diff] [blame] | 8306 | rq->post_schedule = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8307 | rq->active_balance = 0; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 8308 | rq->next_balance = jiffies; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8309 | rq->push_cpu = 0; |
Christoph Lameter | 0a2966b | 2006-09-25 23:30:51 -0700 | [diff] [blame] | 8310 | rq->cpu = i; |
Gregory Haskins | 1f11eb6 | 2008-06-04 15:04:05 -0400 | [diff] [blame] | 8311 | rq->online = 0; |
Mike Galbraith | eae0c9d | 2009-11-10 03:50:02 +0100 | [diff] [blame] | 8312 | rq->idle_stamp = 0; |
| 8313 | rq->avg_idle = 2*sysctl_sched_migration_cost; |
Gregory Haskins | dc93852 | 2008-01-25 21:08:26 +0100 | [diff] [blame] | 8314 | rq_attach_root(rq, &def_root_domain); |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 8315 | #ifdef CONFIG_NO_HZ |
| 8316 | rq->nohz_balance_kick = 0; |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 8317 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8318 | #endif |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 8319 | init_rq_hrtick(rq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8320 | atomic_set(&rq->nr_iowait, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8321 | } |
| 8322 | |
Peter Williams | 2dd73a4 | 2006-06-27 02:54:34 -0700 | [diff] [blame] | 8323 | set_load_weight(&init_task); |
Heiko Carstens | b50f60c | 2006-07-30 03:03:52 -0700 | [diff] [blame] | 8324 | |
Avi Kivity | e107be3 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 8325 | #ifdef CONFIG_PREEMPT_NOTIFIERS |
| 8326 | INIT_HLIST_HEAD(&init_task.preempt_notifiers); |
| 8327 | #endif |
| 8328 | |
Christoph Lameter | c9819f4 | 2006-12-10 02:20:25 -0800 | [diff] [blame] | 8329 | #ifdef CONFIG_SMP |
Carlos R. Mafra | 962cf36 | 2008-05-15 11:15:37 -0300 | [diff] [blame] | 8330 | open_softirq(SCHED_SOFTIRQ, run_rebalance_domains); |
Christoph Lameter | c9819f4 | 2006-12-10 02:20:25 -0800 | [diff] [blame] | 8331 | #endif |
| 8332 | |
Heiko Carstens | b50f60c | 2006-07-30 03:03:52 -0700 | [diff] [blame] | 8333 | #ifdef CONFIG_RT_MUTEXES |
Dima Zavin | 732375c | 2011-07-07 17:27:59 -0700 | [diff] [blame] | 8334 | plist_head_init(&init_task.pi_waiters); |
Heiko Carstens | b50f60c | 2006-07-30 03:03:52 -0700 | [diff] [blame] | 8335 | #endif |
| 8336 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8337 | /* |
| 8338 | * The boot idle thread does lazy MMU switching as well: |
| 8339 | */ |
| 8340 | atomic_inc(&init_mm.mm_count); |
| 8341 | enter_lazy_tlb(&init_mm, current); |
| 8342 | |
| 8343 | /* |
| 8344 | * Make us the idle thread. Technically, schedule() should not be |
| 8345 | 	 * called from this thread; however, somewhere below it might be,
| 8346 | 	 * and because we are the idle thread, we just pick up running again
| 8347 | * when this runqueue becomes "idle". |
| 8348 | */ |
| 8349 | init_idle(current, smp_processor_id()); |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 8350 | |
| 8351 | calc_load_update = jiffies + LOAD_FREQ; |
| 8352 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 8353 | /* |
| 8354 | * During early bootup we pretend to be a normal task: |
| 8355 | */ |
| 8356 | current->sched_class = &fair_sched_class; |
Ingo Molnar | 6892b75 | 2008-02-13 14:02:36 +0100 | [diff] [blame] | 8357 | |
Rusty Russell | bf4d83f | 2008-11-25 09:57:51 +1030 | [diff] [blame] | 8358 | #ifdef CONFIG_SMP |
Peter Zijlstra | 4cb9883 | 2011-04-07 14:09:58 +0200 | [diff] [blame] | 8359 | zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT); |
Rusty Russell | 7d1e6a9 | 2008-11-25 02:35:09 +1030 | [diff] [blame] | 8360 | #ifdef CONFIG_NO_HZ |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 8361 | zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT); |
| 8362 | alloc_cpumask_var(&nohz.grp_idle_mask, GFP_NOWAIT); |
| 8363 | atomic_set(&nohz.load_balancer, nr_cpu_ids); |
| 8364 | atomic_set(&nohz.first_pick_cpu, nr_cpu_ids); |
| 8365 | atomic_set(&nohz.second_pick_cpu, nr_cpu_ids); |
Rusty Russell | 7d1e6a9 | 2008-11-25 02:35:09 +1030 | [diff] [blame] | 8366 | #endif |
Rusty Russell | bdddd29 | 2009-12-02 14:09:16 +1030 | [diff] [blame] | 8367 | 	/* May already have been allocated at "isolcpus=" cmdline parse time */
| 8368 | if (cpu_isolated_map == NULL) |
| 8369 | zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); |
Rusty Russell | bf4d83f | 2008-11-25 09:57:51 +1030 | [diff] [blame] | 8370 | #endif /* SMP */ |
Rusty Russell | 6a7b3dc | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 8371 | |
Ingo Molnar | 6892b75 | 2008-02-13 14:02:36 +0100 | [diff] [blame] | 8372 | scheduler_running = 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8373 | } |
| 8374 | |
Frederic Weisbecker | d902db1 | 2011-06-08 19:31:56 +0200 | [diff] [blame] | 8375 | #ifdef CONFIG_DEBUG_ATOMIC_SLEEP |
Frederic Weisbecker | e4aafea | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 8376 | static inline int preempt_count_equals(int preempt_offset) |
| 8377 | { |
Frederic Weisbecker | 234da7b | 2009-12-16 20:21:05 +0100 | [diff] [blame] | 8378 | int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth(); |
Frederic Weisbecker | e4aafea | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 8379 | |
Arnd Bergmann | 4ba8216 | 2011-01-25 22:52:22 +0100 | [diff] [blame] | 8380 | return (nested == preempt_offset); |
Frederic Weisbecker | e4aafea | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 8381 | } |
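| |	/*
| |	 * Example: with no spinlocks held and no RCU read-side section
| |	 * entered, both preempt_count() (ignoring PREEMPT_ACTIVE) and
| |	 * rcu_preempt_depth() are 0, so preempt_count_equals(0) is true
| |	 * and sleeping is permitted.
| |	 */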
| 8382 | |
Simon Kagstrom | d894837 | 2009-12-23 11:08:18 +0100 | [diff] [blame] | 8383 | void __might_sleep(const char *file, int line, int preempt_offset) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8384 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8385 | static unsigned long prev_jiffy; /* ratelimiting */ |
| 8386 | |
Paul E. McKenney | b3fbab0 | 2011-05-24 08:31:09 -0700 | [diff] [blame] | 8387 | rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */ |
Frederic Weisbecker | e4aafea | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 8388 | if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) || |
| 8389 | system_state != SYSTEM_RUNNING || oops_in_progress) |
Ingo Molnar | aef745f | 2008-08-28 11:34:43 +0200 | [diff] [blame] | 8390 | return; |
| 8391 | if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) |
| 8392 | return; |
| 8393 | prev_jiffy = jiffies; |
| 8394 | |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 8395 | printk(KERN_ERR |
| 8396 | "BUG: sleeping function called from invalid context at %s:%d\n", |
| 8397 | file, line); |
| 8398 | printk(KERN_ERR |
| 8399 | "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", |
| 8400 | in_atomic(), irqs_disabled(), |
| 8401 | current->pid, current->comm); |
Ingo Molnar | aef745f | 2008-08-28 11:34:43 +0200 | [diff] [blame] | 8402 | |
| 8403 | debug_show_held_locks(current); |
| 8404 | if (irqs_disabled()) |
| 8405 | print_irqtrace_events(current); |
| 8406 | dump_stack(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8407 | } |
| 8408 | EXPORT_SYMBOL(__might_sleep); |
| 8409 | #endif |
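| | /*
| |  * A sketch of the usual caller pattern (my_driver_op() and its lock
| |  * argument are hypothetical): code that may block announces the fact
| |  * via might_sleep(), which funnels into __might_sleep() above when
| |  * CONFIG_DEBUG_ATOMIC_SLEEP is enabled:
| |  *
| |  *	static void my_driver_op(struct mutex *lock)
| |  *	{
| |  *		might_sleep();		// warns if called in atomic context
| |  *		mutex_lock(lock);	// may block
| |  *	}
| |  */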
| 8410 | |
| 8411 | #ifdef CONFIG_MAGIC_SYSRQ |
Andi Kleen | 3a5e4dc | 2007-10-15 17:00:15 +0200 | [diff] [blame] | 8412 | static void normalize_task(struct rq *rq, struct task_struct *p) |
| 8413 | { |
Peter Zijlstra | da7a735 | 2011-01-17 17:03:27 +0100 | [diff] [blame] | 8414 | const struct sched_class *prev_class = p->sched_class; |
| 8415 | int old_prio = p->prio; |
Andi Kleen | 3a5e4dc | 2007-10-15 17:00:15 +0200 | [diff] [blame] | 8416 | int on_rq; |
Peter Zijlstra | 3e51f33 | 2008-05-03 18:29:28 +0200 | [diff] [blame] | 8417 | |
Peter Zijlstra | fd2f441 | 2011-04-05 17:23:44 +0200 | [diff] [blame] | 8418 | on_rq = p->on_rq; |
Andi Kleen | 3a5e4dc | 2007-10-15 17:00:15 +0200 | [diff] [blame] | 8419 | if (on_rq) |
| 8420 | deactivate_task(rq, p, 0); |
| 8421 | __setscheduler(rq, p, SCHED_NORMAL, 0); |
| 8422 | if (on_rq) { |
| 8423 | activate_task(rq, p, 0); |
| 8424 | resched_task(rq->curr); |
| 8425 | } |
Peter Zijlstra | da7a735 | 2011-01-17 17:03:27 +0100 | [diff] [blame] | 8426 | |
| 8427 | check_class_changed(rq, p, prev_class, old_prio); |
Andi Kleen | 3a5e4dc | 2007-10-15 17:00:15 +0200 | [diff] [blame] | 8428 | } |
| 8429 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8430 | void normalize_rt_tasks(void) |
| 8431 | { |
Ingo Molnar | a0f98a1 | 2007-06-17 18:37:45 +0200 | [diff] [blame] | 8432 | struct task_struct *g, *p; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8433 | unsigned long flags; |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 8434 | struct rq *rq; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8435 | |
Peter Zijlstra | 4cf5d77 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8436 | read_lock_irqsave(&tasklist_lock, flags); |
Ingo Molnar | a0f98a1 | 2007-06-17 18:37:45 +0200 | [diff] [blame] | 8437 | do_each_thread(g, p) { |
Ingo Molnar | 178be79 | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 8438 | /* |
| 8439 | * Only normalize user tasks: |
| 8440 | */ |
| 8441 | if (!p->mm) |
| 8442 | continue; |
| 8443 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 8444 | p->se.exec_start = 0; |
Ingo Molnar | 6cfb0d5 | 2007-08-02 17:41:40 +0200 | [diff] [blame] | 8445 | #ifdef CONFIG_SCHEDSTATS |
Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 8446 | p->se.statistics.wait_start = 0; |
| 8447 | p->se.statistics.sleep_start = 0; |
| 8448 | p->se.statistics.block_start = 0; |
Ingo Molnar | 6cfb0d5 | 2007-08-02 17:41:40 +0200 | [diff] [blame] | 8449 | #endif |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 8450 | |
| 8451 | if (!rt_task(p)) { |
| 8452 | /* |
| 8453 | * Renice negative nice level userspace |
| 8454 | * tasks back to 0: |
| 8455 | */ |
| 8456 | if (TASK_NICE(p) < 0 && p->mm) |
| 8457 | set_user_nice(p, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8458 | continue; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 8459 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8460 | |
Thomas Gleixner | 1d61548 | 2009-11-17 14:54:03 +0100 | [diff] [blame] | 8461 | raw_spin_lock(&p->pi_lock); |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 8462 | rq = __task_rq_lock(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8463 | |
Ingo Molnar | 178be79 | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 8464 | normalize_task(rq, p); |
Andi Kleen | 3a5e4dc | 2007-10-15 17:00:15 +0200 | [diff] [blame] | 8465 | |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 8466 | __task_rq_unlock(rq); |
Thomas Gleixner | 1d61548 | 2009-11-17 14:54:03 +0100 | [diff] [blame] | 8467 | raw_spin_unlock(&p->pi_lock); |
Ingo Molnar | a0f98a1 | 2007-06-17 18:37:45 +0200 | [diff] [blame] | 8468 | } while_each_thread(g, p); |
| 8469 | |
Peter Zijlstra | 4cf5d77 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8470 | read_unlock_irqrestore(&tasklist_lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8471 | } |
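| | /*
| |  * normalize_rt_tasks() is reached via the magic SysRq 'n' key (see
| |  * drivers/tty/sysrq.c), giving an operator a console-level way to
| |  * demote runaway RT tasks back to SCHED_NORMAL.
| |  */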
| 8472 | |
| 8473 | #endif /* CONFIG_MAGIC_SYSRQ */ |
Linus Torvalds | 1df5c10 | 2005-09-12 07:59:21 -0700 | [diff] [blame] | 8474 | |
Jason Wessel | 67fc4e0 | 2010-05-20 21:04:21 -0500 | [diff] [blame] | 8475 | #if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) |
Linus Torvalds | 1df5c10 | 2005-09-12 07:59:21 -0700 | [diff] [blame] | 8476 | /* |
Jason Wessel | 67fc4e0 | 2010-05-20 21:04:21 -0500 | [diff] [blame] | 8477 |  * These functions are only useful for IA64 MCA handling or kdb.
Linus Torvalds | 1df5c10 | 2005-09-12 07:59:21 -0700 | [diff] [blame] | 8478 | * |
| 8479 | * They can only be called when the whole system has been |
| 8480 | * stopped - every CPU needs to be quiescent, and no scheduling |
| 8481 | * activity can take place. Using them for anything else would |
| 8482 | * be a serious bug, and as a result, they aren't even visible |
| 8483 | * under any other configuration. |
| 8484 | */ |
| 8485 | |
| 8486 | /** |
| 8487 | * curr_task - return the current task for a given cpu. |
| 8488 | * @cpu: the processor in question. |
| 8489 | * |
| 8490 | * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! |
| 8491 | */ |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 8492 | struct task_struct *curr_task(int cpu) |
Linus Torvalds | 1df5c10 | 2005-09-12 07:59:21 -0700 | [diff] [blame] | 8493 | { |
| 8494 | return cpu_curr(cpu); |
| 8495 | } |
| 8496 | |
Jason Wessel | 67fc4e0 | 2010-05-20 21:04:21 -0500 | [diff] [blame] | 8497 | #endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */ |
| 8498 | |
| 8499 | #ifdef CONFIG_IA64 |
Linus Torvalds | 1df5c10 | 2005-09-12 07:59:21 -0700 | [diff] [blame] | 8500 | /** |
| 8501 | * set_curr_task - set the current task for a given cpu. |
| 8502 | * @cpu: the processor in question. |
| 8503 | * @p: the task pointer to set. |
| 8504 | * |
| 8505 | * Description: This function must only be used when non-maskable interrupts |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 8506 | * are serviced on a separate stack. It allows the architecture to switch the |
| 8507 | * notion of the current task on a cpu in a non-blocking manner. This function |
Linus Torvalds | 1df5c10 | 2005-09-12 07:59:21 -0700 | [diff] [blame] | 8508 |  * must be called with all CPUs synchronized and interrupts disabled; the
| 8509 |  * caller must save the original value of the current task (see
| 8510 | * curr_task() above) and restore that value before reenabling interrupts and |
| 8511 | * re-starting the system. |
| 8512 | * |
| 8513 | * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! |
| 8514 | */ |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 8515 | void set_curr_task(int cpu, struct task_struct *p) |
Linus Torvalds | 1df5c10 | 2005-09-12 07:59:21 -0700 | [diff] [blame] | 8516 | { |
| 8517 | cpu_curr(cpu) = p; |
| 8518 | } |
| 8519 | |
| 8520 | #endif |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8521 | |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8522 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 8523 | static void free_fair_sched_group(struct task_group *tg) |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8524 | { |
| 8525 | int i; |
| 8526 | |
Paul Turner | ab84d31 | 2011-07-21 09:43:28 -0700 | [diff] [blame] | 8527 | destroy_cfs_bandwidth(tg_cfs_bandwidth(tg)); |
| 8528 | |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8529 | for_each_possible_cpu(i) { |
| 8530 | if (tg->cfs_rq) |
| 8531 | kfree(tg->cfs_rq[i]); |
| 8532 | if (tg->se) |
| 8533 | kfree(tg->se[i]); |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8534 | } |
| 8535 | |
| 8536 | kfree(tg->cfs_rq); |
| 8537 | kfree(tg->se); |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8538 | } |
| 8539 | |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8540 | static |
| 8541 | int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8542 | { |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8543 | struct cfs_rq *cfs_rq; |
Li Zefan | eab1722 | 2008-10-29 17:03:22 +0800 | [diff] [blame] | 8544 | struct sched_entity *se; |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8545 | int i; |
| 8546 | |
Mike Travis | 434d53b | 2008-04-04 18:11:04 -0700 | [diff] [blame] | 8547 | tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8548 | if (!tg->cfs_rq) |
| 8549 | goto err; |
Mike Travis | 434d53b | 2008-04-04 18:11:04 -0700 | [diff] [blame] | 8550 | tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8551 | if (!tg->se) |
| 8552 | goto err; |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8553 | |
| 8554 | tg->shares = NICE_0_LOAD; |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8555 | |
Paul Turner | ab84d31 | 2011-07-21 09:43:28 -0700 | [diff] [blame] | 8556 | init_cfs_bandwidth(tg_cfs_bandwidth(tg)); |
| 8557 | |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8558 | for_each_possible_cpu(i) { |
Li Zefan | eab1722 | 2008-10-29 17:03:22 +0800 | [diff] [blame] | 8559 | cfs_rq = kzalloc_node(sizeof(struct cfs_rq), |
| 8560 | GFP_KERNEL, cpu_to_node(i)); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8561 | if (!cfs_rq) |
| 8562 | goto err; |
| 8563 | |
Li Zefan | eab1722 | 2008-10-29 17:03:22 +0800 | [diff] [blame] | 8564 | se = kzalloc_node(sizeof(struct sched_entity), |
| 8565 | GFP_KERNEL, cpu_to_node(i)); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8566 | if (!se) |
Phil Carmody | dfc12eb | 2009-12-10 14:29:37 +0200 | [diff] [blame] | 8567 | goto err_free_rq; |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8568 | |
Jan H. Schönherr | acb5a9b | 2011-07-14 18:32:43 +0200 | [diff] [blame] | 8569 | init_cfs_rq(cfs_rq); |
Peter Zijlstra | 3d4b47b | 2010-11-15 15:47:01 -0800 | [diff] [blame] | 8570 | init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8571 | } |
| 8572 | |
| 8573 | return 1; |
| 8574 | |
Peter Zijlstra | 4924627 | 2010-10-17 21:46:10 +0200 | [diff] [blame] | 8575 | err_free_rq: |
Phil Carmody | dfc12eb | 2009-12-10 14:29:37 +0200 | [diff] [blame] | 8576 | kfree(cfs_rq); |
Peter Zijlstra | 4924627 | 2010-10-17 21:46:10 +0200 | [diff] [blame] | 8577 | err: |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8578 | return 0; |
| 8579 | } |
| 8580 | |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8581 | static inline void unregister_fair_sched_group(struct task_group *tg, int cpu) |
| 8582 | { |
Peter Zijlstra | 3d4b47b | 2010-11-15 15:47:01 -0800 | [diff] [blame] | 8583 | struct rq *rq = cpu_rq(cpu); |
| 8584 | unsigned long flags; |
Peter Zijlstra | 3d4b47b | 2010-11-15 15:47:01 -0800 | [diff] [blame] | 8585 | |
| 8586 | /* |
| 8587 | 	 * Only empty task groups can be destroyed, so we can speculatively
| 8588 | * check on_list without danger of it being re-added. |
| 8589 | */ |
| 8590 | if (!tg->cfs_rq[cpu]->on_list) |
| 8591 | return; |
| 8592 | |
| 8593 | raw_spin_lock_irqsave(&rq->lock, flags); |
Paul Turner | 822bc18 | 2010-11-29 16:55:40 -0800 | [diff] [blame] | 8594 | list_del_leaf_cfs_rq(tg->cfs_rq[cpu]); |
Peter Zijlstra | 3d4b47b | 2010-11-15 15:47:01 -0800 | [diff] [blame] | 8595 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8596 | } |
Jan Schoenherr | 5f817d6 | 2011-07-13 20:13:31 +0200 | [diff] [blame] | 8597 | #else /* !CONFIG_FAIR_GROUP_SCHED */ |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8598 | static inline void free_fair_sched_group(struct task_group *tg) |
| 8599 | { |
| 8600 | } |
| 8601 | |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8602 | static inline |
| 8603 | int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8604 | { |
| 8605 | return 1; |
| 8606 | } |
| 8607 | |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8608 | static inline void unregister_fair_sched_group(struct task_group *tg, int cpu) |
| 8609 | { |
| 8610 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 8611 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8612 | |
| 8613 | #ifdef CONFIG_RT_GROUP_SCHED |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8614 | static void free_rt_sched_group(struct task_group *tg) |
| 8615 | { |
| 8616 | int i; |
| 8617 | |
Bianca Lutz | 99bc524 | 2011-07-13 20:13:36 +0200 | [diff] [blame] | 8618 | if (tg->rt_se) |
| 8619 | destroy_rt_bandwidth(&tg->rt_bandwidth); |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8620 | |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8621 | for_each_possible_cpu(i) { |
| 8622 | if (tg->rt_rq) |
| 8623 | kfree(tg->rt_rq[i]); |
| 8624 | if (tg->rt_se) |
| 8625 | kfree(tg->rt_se[i]); |
| 8626 | } |
| 8627 | |
| 8628 | kfree(tg->rt_rq); |
| 8629 | kfree(tg->rt_se); |
| 8630 | } |
| 8631 | |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8632 | static |
| 8633 | int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8634 | { |
| 8635 | struct rt_rq *rt_rq; |
Li Zefan | eab1722 | 2008-10-29 17:03:22 +0800 | [diff] [blame] | 8636 | struct sched_rt_entity *rt_se; |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8637 | int i; |
| 8638 | |
Mike Travis | 434d53b | 2008-04-04 18:11:04 -0700 | [diff] [blame] | 8639 | tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL); |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8640 | if (!tg->rt_rq) |
| 8641 | goto err; |
Mike Travis | 434d53b | 2008-04-04 18:11:04 -0700 | [diff] [blame] | 8642 | tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL); |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8643 | if (!tg->rt_se) |
| 8644 | goto err; |
| 8645 | |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8646 | init_rt_bandwidth(&tg->rt_bandwidth, |
| 8647 | ktime_to_ns(def_rt_bandwidth.rt_period), 0); |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8648 | |
| 8649 | for_each_possible_cpu(i) { |
Li Zefan | eab1722 | 2008-10-29 17:03:22 +0800 | [diff] [blame] | 8650 | rt_rq = kzalloc_node(sizeof(struct rt_rq), |
| 8651 | GFP_KERNEL, cpu_to_node(i)); |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8652 | if (!rt_rq) |
| 8653 | goto err; |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8654 | |
Li Zefan | eab1722 | 2008-10-29 17:03:22 +0800 | [diff] [blame] | 8655 | rt_se = kzalloc_node(sizeof(struct sched_rt_entity), |
| 8656 | GFP_KERNEL, cpu_to_node(i)); |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8657 | if (!rt_se) |
Phil Carmody | dfc12eb | 2009-12-10 14:29:37 +0200 | [diff] [blame] | 8658 | goto err_free_rq; |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8659 | |
Jan H. Schönherr | acb5a9b | 2011-07-14 18:32:43 +0200 | [diff] [blame] | 8660 | init_rt_rq(rt_rq, cpu_rq(i)); |
| 8661 | rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime; |
Peter Zijlstra | 3d4b47b | 2010-11-15 15:47:01 -0800 | [diff] [blame] | 8662 | init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8663 | } |
| 8664 | |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8665 | return 1; |
| 8666 | |
Peter Zijlstra | 4924627 | 2010-10-17 21:46:10 +0200 | [diff] [blame] | 8667 | err_free_rq: |
Phil Carmody | dfc12eb | 2009-12-10 14:29:37 +0200 | [diff] [blame] | 8668 | kfree(rt_rq); |
Peter Zijlstra | 4924627 | 2010-10-17 21:46:10 +0200 | [diff] [blame] | 8669 | err: |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8670 | return 0; |
| 8671 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 8672 | #else /* !CONFIG_RT_GROUP_SCHED */ |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8673 | static inline void free_rt_sched_group(struct task_group *tg) |
| 8674 | { |
| 8675 | } |
| 8676 | |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8677 | static inline |
| 8678 | int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8679 | { |
| 8680 | return 1; |
| 8681 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 8682 | #endif /* CONFIG_RT_GROUP_SCHED */ |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8683 | |
Dhaval Giani | 7c94143 | 2010-01-20 13:26:18 +0100 | [diff] [blame] | 8684 | #ifdef CONFIG_CGROUP_SCHED |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8685 | static void free_sched_group(struct task_group *tg) |
| 8686 | { |
| 8687 | free_fair_sched_group(tg); |
| 8688 | free_rt_sched_group(tg); |
Mike Galbraith | e9aa1dd | 2011-01-05 11:11:25 +0100 | [diff] [blame] | 8689 | autogroup_free(tg); |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8690 | kfree(tg); |
| 8691 | } |
| 8692 | |
| 8693 | /* allocate runqueue etc for a new task group */ |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8694 | struct task_group *sched_create_group(struct task_group *parent) |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8695 | { |
| 8696 | struct task_group *tg; |
| 8697 | unsigned long flags; |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8698 | |
| 8699 | tg = kzalloc(sizeof(*tg), GFP_KERNEL); |
| 8700 | if (!tg) |
| 8701 | return ERR_PTR(-ENOMEM); |
| 8702 | |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8703 | if (!alloc_fair_sched_group(tg, parent)) |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8704 | goto err; |
| 8705 | |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8706 | if (!alloc_rt_sched_group(tg, parent)) |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8707 | goto err; |
| 8708 | |
Peter Zijlstra | 8ed3699 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8709 | spin_lock_irqsave(&task_group_lock, flags); |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8710 | list_add_rcu(&tg->list, &task_groups); |
Peter Zijlstra | f473aa5 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 8711 | |
| 8712 | WARN_ON(!parent); /* root should already exist */ |
| 8713 | |
| 8714 | tg->parent = parent; |
Peter Zijlstra | f473aa5 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 8715 | INIT_LIST_HEAD(&tg->children); |
Zhang, Yanmin | 09f2724 | 2008-08-14 15:56:40 +0800 | [diff] [blame] | 8716 | 	list_add_rcu(&tg->siblings, &parent->children);
Peter Zijlstra | 8ed3699 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8717 | spin_unlock_irqrestore(&task_group_lock, flags); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8718 | |
Srivatsa Vaddagiri | 9b5b775 | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 8719 | return tg; |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8720 | |
| 8721 | err: |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8722 | free_sched_group(tg); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8723 | return ERR_PTR(-ENOMEM); |
| 8724 | } |
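| | /*
| |  * Illustrative call sequence (mirroring cpu_cgroup_create() below):
| |  *
| |  *	struct task_group *tg = sched_create_group(&root_task_group);
| |  *	if (IS_ERR(tg))
| |  *		return ERR_PTR(-ENOMEM);
| |  */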
| 8725 | |
Srivatsa Vaddagiri | 9b5b775 | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 8726 | /* rcu callback to free various structures associated with a task group */ |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8727 | static void free_sched_group_rcu(struct rcu_head *rhp) |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8728 | { |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8729 | /* now it should be safe to free those cfs_rqs */ |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8730 | free_sched_group(container_of(rhp, struct task_group, rcu)); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8731 | } |
| 8732 | |
Srivatsa Vaddagiri | 9b5b775 | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 8733 | /* Destroy runqueue etc associated with a task group */ |
Ingo Molnar | 4cf86d7 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 8734 | void sched_destroy_group(struct task_group *tg) |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8735 | { |
Peter Zijlstra | 8ed3699 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8736 | unsigned long flags; |
Srivatsa Vaddagiri | 9b5b775 | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 8737 | int i; |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8738 | |
Peter Zijlstra | 3d4b47b | 2010-11-15 15:47:01 -0800 | [diff] [blame] | 8739 | /* end participation in shares distribution */ |
| 8740 | for_each_possible_cpu(i) |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8741 | unregister_fair_sched_group(tg, i); |
Peter Zijlstra | 3d4b47b | 2010-11-15 15:47:01 -0800 | [diff] [blame] | 8742 | |
| 8743 | spin_lock_irqsave(&task_group_lock, flags); |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8744 | list_del_rcu(&tg->list); |
Peter Zijlstra | f473aa5 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 8745 | list_del_rcu(&tg->siblings); |
Peter Zijlstra | 8ed3699 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8746 | spin_unlock_irqrestore(&task_group_lock, flags); |
Srivatsa Vaddagiri | 9b5b775 | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 8747 | |
Srivatsa Vaddagiri | 9b5b775 | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 8748 | 	/* wait for possible concurrent references to cfs_rqs to complete */
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8749 | call_rcu(&tg->rcu, free_sched_group_rcu); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8750 | } |
| 8751 | |
Srivatsa Vaddagiri | 9b5b775 | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 8752 | /* Change a task's runqueue when it moves between groups.
Ingo Molnar | 3a25201 | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 8753 | * The caller of this function should have put the task in its new group |
| 8754 | * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to |
| 8755 | * reflect its new group. |
Srivatsa Vaddagiri | 9b5b775 | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 8756 | */ |
| 8757 | void sched_move_task(struct task_struct *tsk) |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8758 | { |
| 8759 | int on_rq, running; |
| 8760 | unsigned long flags; |
| 8761 | struct rq *rq; |
| 8762 | |
| 8763 | rq = task_rq_lock(tsk, &flags); |
| 8764 | |
Dmitry Adamushko | 051a1d1 | 2007-12-18 15:21:13 +0100 | [diff] [blame] | 8765 | running = task_current(rq, tsk); |
Peter Zijlstra | fd2f441 | 2011-04-05 17:23:44 +0200 | [diff] [blame] | 8766 | on_rq = tsk->on_rq; |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8767 | |
Hiroshi Shimamoto | 0e1f348 | 2008-03-10 11:01:20 -0700 | [diff] [blame] | 8768 | if (on_rq) |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8769 | dequeue_task(rq, tsk, 0); |
Hiroshi Shimamoto | 0e1f348 | 2008-03-10 11:01:20 -0700 | [diff] [blame] | 8770 | if (unlikely(running)) |
| 8771 | tsk->sched_class->put_prev_task(rq, tsk); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8772 | |
Peter Zijlstra | 810b381 | 2008-02-29 15:21:01 -0500 | [diff] [blame] | 8773 | #ifdef CONFIG_FAIR_GROUP_SCHED |
Peter Zijlstra | b2b5ce0 | 2010-10-15 15:24:15 +0200 | [diff] [blame] | 8774 | if (tsk->sched_class->task_move_group) |
| 8775 | tsk->sched_class->task_move_group(tsk, on_rq); |
| 8776 | else |
Peter Zijlstra | 810b381 | 2008-02-29 15:21:01 -0500 | [diff] [blame] | 8777 | #endif |
Peter Zijlstra | b2b5ce0 | 2010-10-15 15:24:15 +0200 | [diff] [blame] | 8778 | set_task_rq(tsk, task_cpu(tsk)); |
Peter Zijlstra | 810b381 | 2008-02-29 15:21:01 -0500 | [diff] [blame] | 8779 | |
Hiroshi Shimamoto | 0e1f348 | 2008-03-10 11:01:20 -0700 | [diff] [blame] | 8780 | if (unlikely(running)) |
| 8781 | tsk->sched_class->set_curr_task(rq); |
| 8782 | if (on_rq) |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 8783 | enqueue_task(rq, tsk, 0); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8784 | |
Peter Zijlstra | 0122ec5 | 2011-04-05 17:23:51 +0200 | [diff] [blame] | 8785 | task_rq_unlock(rq, tsk, &flags); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8786 | } |
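| | /*
| |  * Note the dequeue/put_prev -> regroup -> set_curr/enqueue sequence
| |  * above: a queued or running task is taken out of the runqueue
| |  * structures before its group changes and requeued afterwards, the
| |  * same shape used by normalize_task() earlier in this file.
| |  */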
Dhaval Giani | 7c94143 | 2010-01-20 13:26:18 +0100 | [diff] [blame] | 8787 | #endif /* CONFIG_CGROUP_SCHED */ |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8788 | |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8789 | #ifdef CONFIG_FAIR_GROUP_SCHED |
Peter Zijlstra | 8ed3699 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8790 | static DEFINE_MUTEX(shares_mutex); |
| 8791 | |
Ingo Molnar | 4cf86d7 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 8792 | int sched_group_set_shares(struct task_group *tg, unsigned long shares) |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8793 | { |
| 8794 | int i; |
Peter Zijlstra | 8ed3699 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8795 | unsigned long flags; |
Ingo Molnar | c61935f | 2008-01-22 11:24:58 +0100 | [diff] [blame] | 8796 | |
Peter Zijlstra | 62fb185 | 2008-02-25 17:34:02 +0100 | [diff] [blame] | 8797 | /* |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8798 | * We can't change the weight of the root cgroup. |
| 8799 | */ |
| 8800 | if (!tg->se[0]) |
| 8801 | return -EINVAL; |
| 8802 | |
Mike Galbraith | cd62287 | 2011-06-04 15:03:20 +0200 | [diff] [blame] | 8803 | shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES)); |
Peter Zijlstra | 62fb185 | 2008-02-25 17:34:02 +0100 | [diff] [blame] | 8804 | |
Peter Zijlstra | 8ed3699 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8805 | mutex_lock(&shares_mutex); |
Srivatsa Vaddagiri | 9b5b775 | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 8806 | if (tg->shares == shares) |
Dhaval Giani | 5cb350b | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 8807 | goto done; |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8808 | |
Srivatsa Vaddagiri | 6b2d770 | 2008-01-25 21:08:00 +0100 | [diff] [blame] | 8809 | tg->shares = shares; |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 8810 | for_each_possible_cpu(i) { |
Paul Turner | 9437178 | 2010-11-15 15:47:10 -0800 | [diff] [blame] | 8811 | struct rq *rq = cpu_rq(i); |
| 8812 | struct sched_entity *se; |
| 8813 | |
| 8814 | se = tg->se[i]; |
| 8815 | /* Propagate contribution to hierarchy */ |
| 8816 | raw_spin_lock_irqsave(&rq->lock, flags); |
| 8817 | for_each_sched_entity(se) |
Paul Turner | 6d5ab29 | 2011-01-21 20:45:01 -0800 | [diff] [blame] | 8818 | update_cfs_shares(group_cfs_rq(se)); |
Paul Turner | 9437178 | 2010-11-15 15:47:10 -0800 | [diff] [blame] | 8819 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 8820 | } |
Srivatsa Vaddagiri | 6b2d770 | 2008-01-25 21:08:00 +0100 | [diff] [blame] | 8821 | |
Dhaval Giani | 5cb350b | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 8822 | done: |
Peter Zijlstra | 8ed3699 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8823 | mutex_unlock(&shares_mutex); |
Srivatsa Vaddagiri | 9b5b775 | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 8824 | return 0; |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8825 | } |
| 8826 | |
Dhaval Giani | 5cb350b | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 8827 | unsigned long sched_group_shares(struct task_group *tg) |
| 8828 | { |
| 8829 | return tg->shares; |
| 8830 | } |
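| | /*
| |  * These two are the backend for the cpu cgroup's "cpu.shares" file;
| |  * the NICE_0_LOAD default corresponds to the value 1024 seen from
| |  * userspace, so e.g. writing 2048 gives a group twice the default
| |  * weight (subject to the MIN_SHARES/MAX_SHARES clamp above).
| |  */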
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8831 | #endif |
Dhaval Giani | 5cb350b | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 8832 | |
Paul Turner | a790de9 | 2011-07-21 09:43:29 -0700 | [diff] [blame] | 8833 | #if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_CFS_BANDWIDTH) |
Peter Zijlstra | 9f0c1e5 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8834 | static unsigned long to_ratio(u64 period, u64 runtime) |
| 8835 | { |
| 8836 | if (runtime == RUNTIME_INF) |
Peter Zijlstra | 9a7e0b1 | 2008-08-19 12:33:06 +0200 | [diff] [blame] | 8837 | return 1ULL << 20; |
Peter Zijlstra | 9f0c1e5 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8838 | |
Peter Zijlstra | 9a7e0b1 | 2008-08-19 12:33:06 +0200 | [diff] [blame] | 8839 | return div64_u64(runtime << 20, period); |
Peter Zijlstra | 9f0c1e5 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8840 | } |
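| | /*
| |  * Worked example: the ratio is a 20-bit fixed-point fraction. With the
| |  * default global limits of period = 1s and runtime = 0.95s (expressed
| |  * in ns here), to_ratio() yields (950000000 << 20) / 1000000000 ==
| |  * 996147, i.e. ~0.95, while RUNTIME_INF maps to 1 << 20 == 1048576,
| |  * i.e. a full 1.0.
| |  */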
Paul Turner | a790de9 | 2011-07-21 09:43:29 -0700 | [diff] [blame] | 8841 | #endif |
| 8842 | |
| 8843 | #ifdef CONFIG_RT_GROUP_SCHED |
| 8844 | /* |
| 8845 |  * Ensure that the real-time constraints are schedulable.
| 8846 | */ |
| 8847 | static DEFINE_MUTEX(rt_constraints_mutex); |
Peter Zijlstra | 9f0c1e5 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8848 | |
Dhaval Giani | 521f1a24 | 2008-02-28 15:21:56 +0530 | [diff] [blame] | 8849 | /* Must be called with tasklist_lock held */ |
| 8850 | static inline int tg_has_rt_tasks(struct task_group *tg) |
| 8851 | { |
| 8852 | struct task_struct *g, *p; |
Peter Zijlstra | 9a7e0b1 | 2008-08-19 12:33:06 +0200 | [diff] [blame] | 8853 | |
Dhaval Giani | 521f1a24 | 2008-02-28 15:21:56 +0530 | [diff] [blame] | 8854 | do_each_thread(g, p) { |
| 8855 | if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg) |
| 8856 | return 1; |
| 8857 | } while_each_thread(g, p); |
Peter Zijlstra | 9a7e0b1 | 2008-08-19 12:33:06 +0200 | [diff] [blame] | 8858 | |
Dhaval Giani | 521f1a24 | 2008-02-28 15:21:56 +0530 | [diff] [blame] | 8859 | return 0; |
| 8860 | } |
| 8861 | |
Peter Zijlstra | 9a7e0b1 | 2008-08-19 12:33:06 +0200 | [diff] [blame] | 8862 | struct rt_schedulable_data { |
| 8863 | struct task_group *tg; |
| 8864 | u64 rt_period; |
| 8865 | u64 rt_runtime; |
| 8866 | }; |
| 8867 | |
Paul Turner | a790de9 | 2011-07-21 09:43:29 -0700 | [diff] [blame] | 8868 | static int tg_rt_schedulable(struct task_group *tg, void *data) |
Peter Zijlstra | 9a7e0b1 | 2008-08-19 12:33:06 +0200 | [diff] [blame] | 8869 | { |
| 8870 | struct rt_schedulable_data *d = data; |
| 8871 | struct task_group *child; |
| 8872 | unsigned long total, sum = 0; |
| 8873 | u64 period, runtime; |
| 8874 | |
| 8875 | period = ktime_to_ns(tg->rt_bandwidth.rt_period); |
| 8876 | runtime = tg->rt_bandwidth.rt_runtime; |
| 8877 | |
| 8878 | if (tg == d->tg) { |
| 8879 | period = d->rt_period; |
| 8880 | runtime = d->rt_runtime; |
| 8881 | } |
| 8882 | |
Peter Zijlstra | 4653f80 | 2008-09-23 15:33:44 +0200 | [diff] [blame] | 8883 | /* |
| 8884 | * Cannot have more runtime than the period. |
| 8885 | */ |
| 8886 | if (runtime > period && runtime != RUNTIME_INF) |
| 8887 | return -EINVAL; |
| 8888 | |
| 8889 | /* |
| 8890 | * Ensure we don't starve existing RT tasks. |
| 8891 | */ |
Peter Zijlstra | 9a7e0b1 | 2008-08-19 12:33:06 +0200 | [diff] [blame] | 8892 | if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg)) |
| 8893 | return -EBUSY; |
| 8894 | |
| 8895 | total = to_ratio(period, runtime); |
| 8896 | |
Peter Zijlstra | 4653f80 | 2008-09-23 15:33:44 +0200 | [diff] [blame] | 8897 | /* |
| 8898 | * Nobody can have more than the global setting allows. |
| 8899 | */ |
| 8900 | if (total > to_ratio(global_rt_period(), global_rt_runtime())) |
| 8901 | return -EINVAL; |
| 8902 | |
| 8903 | /* |
| 8904 | * The sum of our children's runtime should not exceed our own. |
| 8905 | */ |
Peter Zijlstra | 9a7e0b1 | 2008-08-19 12:33:06 +0200 | [diff] [blame] | 8906 | list_for_each_entry_rcu(child, &tg->children, siblings) { |
| 8907 | period = ktime_to_ns(child->rt_bandwidth.rt_period); |
| 8908 | runtime = child->rt_bandwidth.rt_runtime; |
| 8909 | |
| 8910 | if (child == d->tg) { |
| 8911 | period = d->rt_period; |
| 8912 | runtime = d->rt_runtime; |
| 8913 | } |
| 8914 | |
| 8915 | sum += to_ratio(period, runtime); |
| 8916 | } |
| 8917 | |
| 8918 | if (sum > total) |
| 8919 | return -EINVAL; |
| 8920 | |
| 8921 | return 0; |
| 8922 | } |
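| | /*
| |  * Example of the check above: a group whose own runtime/period ratio
| |  * is 0.5 (total == 524288) cannot host two children each asking for
| |  * 0.3 of their period (sum == 2 * 314572 == 629144 > 524288), so such
| |  * a configuration is rejected with -EINVAL.
| |  */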
| 8923 | |
| 8924 | static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) |
| 8925 | { |
Paul Turner | 8277434 | 2011-07-21 09:43:35 -0700 | [diff] [blame] | 8926 | int ret; |
| 8927 | |
Peter Zijlstra | 9a7e0b1 | 2008-08-19 12:33:06 +0200 | [diff] [blame] | 8928 | struct rt_schedulable_data data = { |
| 8929 | .tg = tg, |
| 8930 | .rt_period = period, |
| 8931 | .rt_runtime = runtime, |
| 8932 | }; |
| 8933 | |
Paul Turner | 8277434 | 2011-07-21 09:43:35 -0700 | [diff] [blame] | 8934 | rcu_read_lock(); |
| 8935 | ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data); |
| 8936 | rcu_read_unlock(); |
| 8937 | |
| 8938 | return ret; |
Peter Zijlstra | 9a7e0b1 | 2008-08-19 12:33:06 +0200 | [diff] [blame] | 8939 | } |
| 8940 | |
Paul Turner | ab84d31 | 2011-07-21 09:43:28 -0700 | [diff] [blame] | 8941 | static int tg_set_rt_bandwidth(struct task_group *tg, |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8942 | u64 rt_period, u64 rt_runtime) |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8943 | { |
Peter Zijlstra | ac086bc | 2008-04-19 19:44:58 +0200 | [diff] [blame] | 8944 | int i, err = 0; |
Peter Zijlstra | 9f0c1e5 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8945 | |
Peter Zijlstra | 9f0c1e5 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8946 | mutex_lock(&rt_constraints_mutex); |
Dhaval Giani | 521f1a24 | 2008-02-28 15:21:56 +0530 | [diff] [blame] | 8947 | read_lock(&tasklist_lock); |
Peter Zijlstra | 9a7e0b1 | 2008-08-19 12:33:06 +0200 | [diff] [blame] | 8948 | err = __rt_schedulable(tg, rt_period, rt_runtime); |
| 8949 | if (err) |
Dhaval Giani | 521f1a24 | 2008-02-28 15:21:56 +0530 | [diff] [blame] | 8950 | goto unlock; |
Peter Zijlstra | ac086bc | 2008-04-19 19:44:58 +0200 | [diff] [blame] | 8951 | |
Thomas Gleixner | 0986b11 | 2009-11-17 15:32:06 +0100 | [diff] [blame] | 8952 | raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8953 | tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); |
| 8954 | tg->rt_bandwidth.rt_runtime = rt_runtime; |
Peter Zijlstra | ac086bc | 2008-04-19 19:44:58 +0200 | [diff] [blame] | 8955 | |
| 8956 | for_each_possible_cpu(i) { |
| 8957 | struct rt_rq *rt_rq = tg->rt_rq[i]; |
| 8958 | |
Thomas Gleixner | 0986b11 | 2009-11-17 15:32:06 +0100 | [diff] [blame] | 8959 | raw_spin_lock(&rt_rq->rt_runtime_lock); |
Peter Zijlstra | ac086bc | 2008-04-19 19:44:58 +0200 | [diff] [blame] | 8960 | rt_rq->rt_runtime = rt_runtime; |
Thomas Gleixner | 0986b11 | 2009-11-17 15:32:06 +0100 | [diff] [blame] | 8961 | raw_spin_unlock(&rt_rq->rt_runtime_lock); |
Peter Zijlstra | ac086bc | 2008-04-19 19:44:58 +0200 | [diff] [blame] | 8962 | } |
Thomas Gleixner | 0986b11 | 2009-11-17 15:32:06 +0100 | [diff] [blame] | 8963 | raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); |
Peter Zijlstra | 4924627 | 2010-10-17 21:46:10 +0200 | [diff] [blame] | 8964 | unlock: |
Dhaval Giani | 521f1a24 | 2008-02-28 15:21:56 +0530 | [diff] [blame] | 8965 | read_unlock(&tasklist_lock); |
Peter Zijlstra | 9f0c1e5 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8966 | mutex_unlock(&rt_constraints_mutex); |
| 8967 | |
| 8968 | return err; |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8969 | } |
| 8970 | |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8971 | int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us) |
| 8972 | { |
| 8973 | u64 rt_runtime, rt_period; |
| 8974 | |
| 8975 | rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period); |
| 8976 | rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC; |
| 8977 | if (rt_runtime_us < 0) |
| 8978 | rt_runtime = RUNTIME_INF; |
| 8979 | |
Paul Turner | ab84d31 | 2011-07-21 09:43:28 -0700 | [diff] [blame] | 8980 | return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8981 | } |
| 8982 | |
Peter Zijlstra | 9f0c1e5 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8983 | long sched_group_rt_runtime(struct task_group *tg) |
| 8984 | { |
| 8985 | u64 rt_runtime_us; |
| 8986 | |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8987 | if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF) |
Peter Zijlstra | 9f0c1e5 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8988 | return -1; |
| 8989 | |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8990 | rt_runtime_us = tg->rt_bandwidth.rt_runtime; |
Peter Zijlstra | 9f0c1e5 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8991 | do_div(rt_runtime_us, NSEC_PER_USEC); |
| 8992 | return rt_runtime_us; |
| 8993 | } |
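| | /*
| |  * sched_group_set_rt_runtime()/sched_group_rt_runtime() sit behind the
| |  * cpu cgroup's "cpu.rt_runtime_us" file; e.g. (illustrative mount point
| |  * and group name)
| |  *
| |  *	echo 300000 > /sys/fs/cgroup/cpu/mygroup/cpu.rt_runtime_us
| |  *
| |  * grants 'mygroup' 0.3s of RT runtime per period, while -1 selects
| |  * RUNTIME_INF (unlimited).
| |  */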
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8994 | |
| 8995 | int sched_group_set_rt_period(struct task_group *tg, long rt_period_us) |
| 8996 | { |
| 8997 | u64 rt_runtime, rt_period; |
| 8998 | |
| 8999 | rt_period = (u64)rt_period_us * NSEC_PER_USEC; |
| 9000 | rt_runtime = tg->rt_bandwidth.rt_runtime; |
| 9001 | |
Raistlin | 619b048 | 2008-06-26 18:54:09 +0200 | [diff] [blame] | 9002 | if (rt_period == 0) |
| 9003 | return -EINVAL; |
| 9004 | |
Paul Turner | ab84d31 | 2011-07-21 09:43:28 -0700 | [diff] [blame] | 9005 | return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 9006 | } |
| 9007 | |
| 9008 | long sched_group_rt_period(struct task_group *tg) |
| 9009 | { |
| 9010 | u64 rt_period_us; |
| 9011 | |
| 9012 | rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period); |
| 9013 | do_div(rt_period_us, NSEC_PER_USEC); |
| 9014 | return rt_period_us; |
| 9015 | } |
| 9016 | |
| 9017 | static int sched_rt_global_constraints(void) |
| 9018 | { |
Peter Zijlstra | 4653f80 | 2008-09-23 15:33:44 +0200 | [diff] [blame] | 9019 | u64 runtime, period; |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 9020 | int ret = 0; |
| 9021 | |
Hiroshi Shimamoto | ec5d498 | 2008-09-10 17:00:19 -0700 | [diff] [blame] | 9022 | if (sysctl_sched_rt_period <= 0) |
| 9023 | return -EINVAL; |
| 9024 | |
Peter Zijlstra | 4653f80 | 2008-09-23 15:33:44 +0200 | [diff] [blame] | 9025 | runtime = global_rt_runtime(); |
| 9026 | period = global_rt_period(); |
| 9027 | |
| 9028 | /* |
| 9029 | * Sanity check on the sysctl variables. |
| 9030 | */ |
| 9031 | if (runtime > period && runtime != RUNTIME_INF) |
| 9032 | return -EINVAL; |
Peter Zijlstra | 10b612f | 2008-06-19 14:22:27 +0200 | [diff] [blame] | 9033 | |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 9034 | mutex_lock(&rt_constraints_mutex); |
Peter Zijlstra | 9a7e0b1 | 2008-08-19 12:33:06 +0200 | [diff] [blame] | 9035 | read_lock(&tasklist_lock); |
Peter Zijlstra | 4653f80 | 2008-09-23 15:33:44 +0200 | [diff] [blame] | 9036 | ret = __rt_schedulable(NULL, 0, 0); |
Peter Zijlstra | 9a7e0b1 | 2008-08-19 12:33:06 +0200 | [diff] [blame] | 9037 | read_unlock(&tasklist_lock); |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 9038 | mutex_unlock(&rt_constraints_mutex); |
| 9039 | |
| 9040 | return ret; |
| 9041 | } |
Dhaval Giani | 54e9912 | 2009-02-27 15:13:54 +0530 | [diff] [blame] | 9042 | |
| 9043 | int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk) |
| 9044 | { |
| 9045 | /* Don't accept realtime tasks when there is no way for them to run */ |
| 9046 | if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0) |
| 9047 | return 0; |
| 9048 | |
| 9049 | return 1; |
| 9050 | } |
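| | /*
| |  * Used by the cpu cgroup's can_attach hook: moving an RT task into a
| |  * group whose rt_runtime is 0 would leave it unable to run at all, so
| |  * the move is refused.
| |  */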
| 9051 | |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 9052 | #else /* !CONFIG_RT_GROUP_SCHED */ |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 9053 | static int sched_rt_global_constraints(void) |
| 9054 | { |
Peter Zijlstra | ac086bc | 2008-04-19 19:44:58 +0200 | [diff] [blame] | 9055 | unsigned long flags; |
| 9056 | int i; |
| 9057 | |
Hiroshi Shimamoto | ec5d498 | 2008-09-10 17:00:19 -0700 | [diff] [blame] | 9058 | if (sysctl_sched_rt_period <= 0) |
| 9059 | return -EINVAL; |
| 9060 | |
Peter Zijlstra | 60aa605 | 2009-05-05 17:50:21 +0200 | [diff] [blame] | 9061 | /* |
| 9062 | 	 * There are always some RT tasks in the root group
| 9063 | 	 * -- migration, kstopmachine etc.
| 9064 | */ |
| 9065 | if (sysctl_sched_rt_runtime == 0) |
| 9066 | return -EBUSY; |
| 9067 | |
Thomas Gleixner | 0986b11 | 2009-11-17 15:32:06 +0100 | [diff] [blame] | 9068 | raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); |
Peter Zijlstra | ac086bc | 2008-04-19 19:44:58 +0200 | [diff] [blame] | 9069 | for_each_possible_cpu(i) { |
| 9070 | struct rt_rq *rt_rq = &cpu_rq(i)->rt; |
| 9071 | |
Thomas Gleixner | 0986b11 | 2009-11-17 15:32:06 +0100 | [diff] [blame] | 9072 | raw_spin_lock(&rt_rq->rt_runtime_lock); |
Peter Zijlstra | ac086bc | 2008-04-19 19:44:58 +0200 | [diff] [blame] | 9073 | rt_rq->rt_runtime = global_rt_runtime(); |
Thomas Gleixner | 0986b11 | 2009-11-17 15:32:06 +0100 | [diff] [blame] | 9074 | raw_spin_unlock(&rt_rq->rt_runtime_lock); |
Peter Zijlstra | ac086bc | 2008-04-19 19:44:58 +0200 | [diff] [blame] | 9075 | } |
Thomas Gleixner | 0986b11 | 2009-11-17 15:32:06 +0100 | [diff] [blame] | 9076 | raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); |
Peter Zijlstra | ac086bc | 2008-04-19 19:44:58 +0200 | [diff] [blame] | 9077 | |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 9078 | return 0; |
| 9079 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 9080 | #endif /* CONFIG_RT_GROUP_SCHED */ |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 9081 | |
| 9082 | int sched_rt_handler(struct ctl_table *table, int write, |
Alexey Dobriyan | 8d65af7 | 2009-09-23 15:57:19 -0700 | [diff] [blame] | 9083 | void __user *buffer, size_t *lenp, |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 9084 | loff_t *ppos) |
| 9085 | { |
| 9086 | int ret; |
| 9087 | int old_period, old_runtime; |
| 9088 | static DEFINE_MUTEX(mutex); |
| 9089 | |
| 9090 | mutex_lock(&mutex); |
| 9091 | old_period = sysctl_sched_rt_period; |
| 9092 | old_runtime = sysctl_sched_rt_runtime; |
| 9093 | |
Alexey Dobriyan | 8d65af7 | 2009-09-23 15:57:19 -0700 | [diff] [blame] | 9094 | ret = proc_dointvec(table, write, buffer, lenp, ppos); |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 9095 | |
| 9096 | if (!ret && write) { |
| 9097 | ret = sched_rt_global_constraints(); |
| 9098 | if (ret) { |
| 9099 | sysctl_sched_rt_period = old_period; |
| 9100 | sysctl_sched_rt_runtime = old_runtime; |
| 9101 | } else { |
| 9102 | def_rt_bandwidth.rt_runtime = global_rt_runtime(); |
| 9103 | def_rt_bandwidth.rt_period = |
| 9104 | ns_to_ktime(global_rt_period()); |
| 9105 | } |
| 9106 | } |
| 9107 | mutex_unlock(&mutex); |
| 9108 | |
| 9109 | return ret; |
| 9110 | } |
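/*
 * This handler sits behind /proc/sys/kernel/sched_rt_period_us and
 * /proc/sys/kernel/sched_rt_runtime_us: the pair is validated as a
 * whole and rolled back wholesale on failure. A disabled, illustrative
 * sketch of that save/validate/restore pattern (the example_* name is
 * hypothetical):
 */
#if 0
static void example_rt_sysctl_rollback(void)
{
	int old_runtime = sysctl_sched_rt_runtime;

	sysctl_sched_rt_runtime = 0;	/* would starve kernel RT threads */
	if (sched_rt_global_constraints())	/* -EBUSY / -EINVAL */
		sysctl_sched_rt_runtime = old_runtime;	/* undo, as the handler does */
}
#endif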
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9111 | |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 9112 | #ifdef CONFIG_CGROUP_SCHED |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9113 | |
| 9114 | /* return the task_group corresponding to a cgroup */ |
Paul Menage | 2b01dfe | 2007-10-24 18:23:50 +0200 | [diff] [blame] | 9115 | static inline struct task_group *cgroup_tg(struct cgroup *cgrp) |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9116 | { |
Paul Menage | 2b01dfe | 2007-10-24 18:23:50 +0200 | [diff] [blame] | 9117 | return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id), |
| 9118 | struct task_group, css); |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9119 | } |
| 9120 | |
| 9121 | static struct cgroup_subsys_state * |
Paul Menage | 2b01dfe | 2007-10-24 18:23:50 +0200 | [diff] [blame] | 9122 | cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp) |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9123 | { |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 9124 | struct task_group *tg, *parent; |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9125 | |
Paul Menage | 2b01dfe | 2007-10-24 18:23:50 +0200 | [diff] [blame] | 9126 | if (!cgrp->parent) { |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9127 | /* This is early initialization for the top cgroup */ |
Yong Zhang | 07e06b0 | 2011-01-07 15:17:36 +0800 | [diff] [blame] | 9128 | return &root_task_group.css; |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9129 | } |
| 9130 | |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 9131 | parent = cgroup_tg(cgrp->parent); |
| 9132 | tg = sched_create_group(parent); |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9133 | if (IS_ERR(tg)) |
| 9134 | return ERR_PTR(-ENOMEM); |
| 9135 | |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9136 | return &tg->css; |
| 9137 | } |
| 9138 | |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 9139 | static void |
| 9140 | cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp) |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9141 | { |
Paul Menage | 2b01dfe | 2007-10-24 18:23:50 +0200 | [diff] [blame] | 9142 | struct task_group *tg = cgroup_tg(cgrp); |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9143 | |
| 9144 | sched_destroy_group(tg); |
| 9145 | } |
| 9146 | |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 9147 | static int |
Ben Blum | be367d0 | 2009-09-23 15:56:31 -0700 | [diff] [blame] | 9148 | cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk) |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9149 | { |
Peter Zijlstra | b68aa23 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 9150 | #ifdef CONFIG_RT_GROUP_SCHED |
Dhaval Giani | 54e9912 | 2009-02-27 15:13:54 +0530 | [diff] [blame] | 9151 | if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk)) |
Peter Zijlstra | b68aa23 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 9152 | return -EINVAL; |
| 9153 | #else |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9154 | /* We don't support RT-tasks being in separate groups */ |
| 9155 | if (tsk->sched_class != &fair_sched_class) |
| 9156 | return -EINVAL; |
Peter Zijlstra | b68aa23 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 9157 | #endif |
Ben Blum | be367d0 | 2009-09-23 15:56:31 -0700 | [diff] [blame] | 9158 | return 0; |
| 9159 | } |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9160 | |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9161 | static void |
Ben Blum | f780bdb | 2011-05-26 16:25:19 -0700 | [diff] [blame] | 9162 | cpu_cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk) |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9163 | { |
| 9164 | sched_move_task(tsk); |
| 9165 | } |
| 9166 | |
Peter Zijlstra | 068c5cc | 2011-01-19 12:26:11 +0100 | [diff] [blame] | 9167 | static void |
Peter Zijlstra | d41d5a0 | 2011-02-07 17:02:20 +0100 | [diff] [blame] | 9168 | cpu_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp, |
| 9169 | struct cgroup *old_cgrp, struct task_struct *task) |
Peter Zijlstra | 068c5cc | 2011-01-19 12:26:11 +0100 | [diff] [blame] | 9170 | { |
| 9171 | /* |
| 9172 | * cgroup_exit() is called in the copy_process() failure path. |
| 9173 |  * Ignore this case since the task hasn't run yet; this avoids |
| 9174 |  * trying to poke a half-freed task state from generic code. |
| 9175 | */ |
| 9176 | if (!(task->flags & PF_EXITING)) |
| 9177 | return; |
| 9178 | |
| 9179 | sched_move_task(task); |
| 9180 | } |
| 9181 | |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 9182 | #ifdef CONFIG_FAIR_GROUP_SCHED |
Paul Menage | f4c753b | 2008-04-29 00:59:56 -0700 | [diff] [blame] | 9183 | static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype, |
Paul Menage | 2b01dfe | 2007-10-24 18:23:50 +0200 | [diff] [blame] | 9184 | u64 shareval) |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9185 | { |
Nikhil Rao | c8b2811 | 2011-05-18 14:37:48 -0700 | [diff] [blame] | 9186 | return sched_group_set_shares(cgroup_tg(cgrp), scale_load(shareval)); |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9187 | } |
| 9188 | |
Paul Menage | f4c753b | 2008-04-29 00:59:56 -0700 | [diff] [blame] | 9189 | static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft) |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9190 | { |
Paul Menage | 2b01dfe | 2007-10-24 18:23:50 +0200 | [diff] [blame] | 9191 | struct task_group *tg = cgroup_tg(cgrp); |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9192 | |
Nikhil Rao | c8b2811 | 2011-05-18 14:37:48 -0700 | [diff] [blame] | 9193 | return (u64) scale_load_down(tg->shares); |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9194 | } |
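/*
 * scale_load()/scale_load_down() convert between the units visible in
 * the cpu.shares cgroup file and the higher-resolution fixed point
 * used internally, so a written value reads back unchanged. Disabled
 * sketch (the example_* name is hypothetical):
 */
#if 0
static void example_shares_round_trip(struct cgroup *cgrp)
{
	cpu_shares_write_u64(cgrp, NULL, 2048);	/* stores scale_load(2048) */
	WARN_ON(cpu_shares_read_u64(cgrp, NULL) != 2048);
}
#endif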
Paul Turner | ab84d31 | 2011-07-21 09:43:28 -0700 | [diff] [blame] | 9195 | |
| 9196 | #ifdef CONFIG_CFS_BANDWIDTH |
Paul Turner | a790de9 | 2011-07-21 09:43:29 -0700 | [diff] [blame] | 9197 | static DEFINE_MUTEX(cfs_constraints_mutex); |
| 9198 | |
Paul Turner | ab84d31 | 2011-07-21 09:43:28 -0700 | [diff] [blame] | 9199 | const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */ |
| 9200 | const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */ |
| 9201 | |
Paul Turner | a790de9 | 2011-07-21 09:43:29 -0700 | [diff] [blame] | 9202 | static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime); |
| 9203 | |
Paul Turner | ab84d31 | 2011-07-21 09:43:28 -0700 | [diff] [blame] | 9204 | static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) |
| 9205 | { |
Paul Turner | 58088ad | 2011-07-21 09:43:31 -0700 | [diff] [blame] | 9206 | int i, ret = 0, runtime_enabled; |
Paul Turner | ab84d31 | 2011-07-21 09:43:28 -0700 | [diff] [blame] | 9207 | struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg); |
Paul Turner | ab84d31 | 2011-07-21 09:43:28 -0700 | [diff] [blame] | 9208 | |
| 9209 | if (tg == &root_task_group) |
| 9210 | return -EINVAL; |
| 9211 | |
| 9212 | /* |
| 9213 |  * Ensure we have at least some amount of bandwidth every period. This is |
| 9214 | * to prevent reaching a state of large arrears when throttled via |
| 9215 | * entity_tick() resulting in prolonged exit starvation. |
| 9216 | */ |
| 9217 | if (quota < min_cfs_quota_period || period < min_cfs_quota_period) |
| 9218 | return -EINVAL; |
| 9219 | |
| 9220 | /* |
| 9221 |  * Likewise, bound things on the other side by preventing insane quota |
| 9222 | * periods. This also allows us to normalize in computing quota |
| 9223 | * feasibility. |
| 9224 | */ |
| 9225 | if (period > max_cfs_quota_period) |
| 9226 | return -EINVAL; |
| 9227 | |
Paul Turner | a790de9 | 2011-07-21 09:43:29 -0700 | [diff] [blame] | 9228 | mutex_lock(&cfs_constraints_mutex); |
| 9229 | ret = __cfs_schedulable(tg, period, quota); |
| 9230 | if (ret) |
| 9231 | goto out_unlock; |
| 9232 | |
Paul Turner | 58088ad | 2011-07-21 09:43:31 -0700 | [diff] [blame] | 9233 | runtime_enabled = quota != RUNTIME_INF; |
Paul Turner | ab84d31 | 2011-07-21 09:43:28 -0700 | [diff] [blame] | 9234 | raw_spin_lock_irq(&cfs_b->lock); |
| 9235 | cfs_b->period = ns_to_ktime(period); |
| 9236 | cfs_b->quota = quota; |
Paul Turner | 58088ad | 2011-07-21 09:43:31 -0700 | [diff] [blame] | 9237 | |
Paul Turner | a9cf55b | 2011-07-21 09:43:32 -0700 | [diff] [blame] | 9238 | __refill_cfs_bandwidth_runtime(cfs_b); |
Paul Turner | 58088ad | 2011-07-21 09:43:31 -0700 | [diff] [blame] | 9239 | /* restart the period timer (if active) to handle new period expiry */ |
| 9240 | if (runtime_enabled && cfs_b->timer_active) { |
| 9241 | /* force a reprogram */ |
| 9242 | cfs_b->timer_active = 0; |
| 9243 | __start_cfs_bandwidth(cfs_b); |
| 9244 | } |
Paul Turner | ab84d31 | 2011-07-21 09:43:28 -0700 | [diff] [blame] | 9245 | raw_spin_unlock_irq(&cfs_b->lock); |
| 9246 | |
| 9247 | for_each_possible_cpu(i) { |
| 9248 | struct cfs_rq *cfs_rq = tg->cfs_rq[i]; |
| 9249 | struct rq *rq = rq_of(cfs_rq); |
| 9250 | |
| 9251 | raw_spin_lock_irq(&rq->lock); |
Paul Turner | 58088ad | 2011-07-21 09:43:31 -0700 | [diff] [blame] | 9252 | cfs_rq->runtime_enabled = runtime_enabled; |
Paul Turner | ab84d31 | 2011-07-21 09:43:28 -0700 | [diff] [blame] | 9253 | cfs_rq->runtime_remaining = 0; |
Paul Turner | 671fd9d | 2011-07-21 09:43:34 -0700 | [diff] [blame] | 9254 | |
| 9255 | if (cfs_rq_throttled(cfs_rq)) |
| 9256 | unthrottle_cfs_rq(cfs_rq); |
Paul Turner | ab84d31 | 2011-07-21 09:43:28 -0700 | [diff] [blame] | 9257 | raw_spin_unlock_irq(&rq->lock); |
| 9258 | } |
Paul Turner | a790de9 | 2011-07-21 09:43:29 -0700 | [diff] [blame] | 9259 | out_unlock: |
| 9260 | mutex_unlock(&cfs_constraints_mutex); |
Paul Turner | ab84d31 | 2011-07-21 09:43:28 -0700 | [diff] [blame] | 9261 | |
Paul Turner | a790de9 | 2011-07-21 09:43:29 -0700 | [diff] [blame] | 9262 | return ret; |
Paul Turner | ab84d31 | 2011-07-21 09:43:28 -0700 | [diff] [blame] | 9263 | } |
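/*
 * Bounds sketch (disabled; assumes a non-root tg): both quota and
 * period must be at least min_cfs_quota_period (1ms), and the period
 * at most max_cfs_quota_period (1s).
 */
#if 0
static void example_cfs_bandwidth_bounds(struct task_group *tg)
{
	/* rejected: quota and period below the 1ms floor */
	WARN_ON(tg_set_cfs_bandwidth(tg, 500 * NSEC_PER_USEC,
				     500 * NSEC_PER_USEC) != -EINVAL);
	/* rejected: period above the 1s ceiling */
	WARN_ON(tg_set_cfs_bandwidth(tg, 2 * NSEC_PER_SEC,
				     NSEC_PER_SEC) != -EINVAL);
	/* accepted: 50ms of runtime every 100ms, i.e. half a CPU */
	tg_set_cfs_bandwidth(tg, 100 * NSEC_PER_MSEC, 50 * NSEC_PER_MSEC);
}
#endif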
| 9264 | |
| 9265 | int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) |
| 9266 | { |
| 9267 | u64 quota, period; |
| 9268 | |
| 9269 | period = ktime_to_ns(tg_cfs_bandwidth(tg)->period); |
| 9270 | if (cfs_quota_us < 0) |
| 9271 | quota = RUNTIME_INF; |
| 9272 | else |
| 9273 | quota = (u64)cfs_quota_us * NSEC_PER_USEC; |
| 9274 | |
| 9275 | return tg_set_cfs_bandwidth(tg, period, quota); |
| 9276 | } |
| 9277 | |
| 9278 | long tg_get_cfs_quota(struct task_group *tg) |
| 9279 | { |
| 9280 | u64 quota_us; |
| 9281 | |
| 9282 | if (tg_cfs_bandwidth(tg)->quota == RUNTIME_INF) |
| 9283 | return -1; |
| 9284 | |
| 9285 | quota_us = tg_cfs_bandwidth(tg)->quota; |
| 9286 | do_div(quota_us, NSEC_PER_USEC); |
| 9287 | |
| 9288 | return quota_us; |
| 9289 | } |
| 9290 | |
| 9291 | int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) |
| 9292 | { |
| 9293 | u64 quota, period; |
| 9294 | |
| 9295 | period = (u64)cfs_period_us * NSEC_PER_USEC; |
| 9296 | quota = tg_cfs_bandwidth(tg)->quota; |
| 9297 | |
| 9298 | if (period <= 0) |
| 9299 | return -EINVAL; |
| 9300 | |
| 9301 | return tg_set_cfs_bandwidth(tg, period, quota); |
| 9302 | } |
| 9303 | |
| 9304 | long tg_get_cfs_period(struct task_group *tg) |
| 9305 | { |
| 9306 | u64 cfs_period_us; |
| 9307 | |
| 9308 | cfs_period_us = ktime_to_ns(tg_cfs_bandwidth(tg)->period); |
| 9309 | do_div(cfs_period_us, NSEC_PER_USEC); |
| 9310 | |
| 9311 | return cfs_period_us; |
| 9312 | } |
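/*
 * Unit sketch (disabled; the example_* name is hypothetical): the *_us
 * setters/getters convert between the microseconds exposed through the
 * cgroup files and the nanoseconds kept in struct cfs_bandwidth, with
 * -1 mapping to RUNTIME_INF.
 */
#if 0
static void example_cfs_quota_units(struct task_group *tg)
{
	tg_set_cfs_quota(tg, 250000);		/* 250ms, stored in ns */
	WARN_ON(tg_get_cfs_quota(tg) != 250000);

	tg_set_cfs_quota(tg, -1);		/* unlimited */
	WARN_ON(tg_get_cfs_quota(tg) != -1);
}
#endif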
| 9313 | |
| 9314 | static s64 cpu_cfs_quota_read_s64(struct cgroup *cgrp, struct cftype *cft) |
| 9315 | { |
| 9316 | return tg_get_cfs_quota(cgroup_tg(cgrp)); |
| 9317 | } |
| 9318 | |
| 9319 | static int cpu_cfs_quota_write_s64(struct cgroup *cgrp, struct cftype *cftype, |
| 9320 | s64 cfs_quota_us) |
| 9321 | { |
| 9322 | return tg_set_cfs_quota(cgroup_tg(cgrp), cfs_quota_us); |
| 9323 | } |
| 9324 | |
| 9325 | static u64 cpu_cfs_period_read_u64(struct cgroup *cgrp, struct cftype *cft) |
| 9326 | { |
| 9327 | return tg_get_cfs_period(cgroup_tg(cgrp)); |
| 9328 | } |
| 9329 | |
| 9330 | static int cpu_cfs_period_write_u64(struct cgroup *cgrp, struct cftype *cftype, |
| 9331 | u64 cfs_period_us) |
| 9332 | { |
| 9333 | return tg_set_cfs_period(cgroup_tg(cgrp), cfs_period_us); |
| 9334 | } |
| 9335 | |
Paul Turner | a790de9 | 2011-07-21 09:43:29 -0700 | [diff] [blame] | 9336 | struct cfs_schedulable_data { |
| 9337 | struct task_group *tg; |
| 9338 | u64 period, quota; |
| 9339 | }; |
| 9340 | |
| 9341 | /* |
| 9342 | * normalize group quota/period to be quota/max_period |
| 9343 | * note: units are usecs |
| 9344 | */ |
| 9345 | static u64 normalize_cfs_quota(struct task_group *tg, |
| 9346 | struct cfs_schedulable_data *d) |
| 9347 | { |
| 9348 | u64 quota, period; |
| 9349 | |
| 9350 | if (tg == d->tg) { |
| 9351 | period = d->period; |
| 9352 | quota = d->quota; |
| 9353 | } else { |
| 9354 | period = tg_get_cfs_period(tg); |
| 9355 | quota = tg_get_cfs_quota(tg); |
| 9356 | } |
| 9357 | |
| 9358 | /* note: these should typically be equivalent */ |
| 9359 | if (quota == RUNTIME_INF || quota == -1) |
| 9360 | return RUNTIME_INF; |
| 9361 | |
| 9362 | return to_ratio(period, quota); |
| 9363 | } |
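/*
 * Worked example: with usec units, 25000/100000 and 250000/1000000
 * both normalize to the same to_ratio() value (a quarter of a CPU),
 * which is why parent and child groups may use different periods in
 * the feasibility walk below.
 */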
| 9364 | |
| 9365 | static int tg_cfs_schedulable_down(struct task_group *tg, void *data) |
| 9366 | { |
| 9367 | struct cfs_schedulable_data *d = data; |
| 9368 | struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg); |
| 9369 | s64 quota = 0, parent_quota = -1; |
| 9370 | |
| 9371 | if (!tg->parent) { |
| 9372 | quota = RUNTIME_INF; |
| 9373 | } else { |
| 9374 | struct cfs_bandwidth *parent_b = tg_cfs_bandwidth(tg->parent); |
| 9375 | |
| 9376 | quota = normalize_cfs_quota(tg, d); |
| 9377 | parent_quota = parent_b->hierarchal_quota; |
| 9378 | |
| 9379 | /* |
| 9380 | * ensure max(child_quota) <= parent_quota, inherit when no |
| 9381 | * limit is set |
| 9382 | */ |
| 9383 | if (quota == RUNTIME_INF) |
| 9384 | quota = parent_quota; |
| 9385 | else if (parent_quota != RUNTIME_INF && quota > parent_quota) |
| 9386 | return -EINVAL; |
| 9387 | } |
| 9388 | cfs_b->hierarchal_quota = quota; |
| 9389 | |
| 9390 | return 0; |
| 9391 | } |
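/*
 * Hypothetical walk of the check above: with a parent at 100ms quota
 * per 100ms period (ratio 1.0 CPU), a child asking for 60ms/100ms
 * passes, a child left at RUNTIME_INF inherits the parent's ratio,
 * and a child asking for 150ms/100ms fails max(child) <= parent with
 * -EINVAL.
 */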
| 9392 | |
| 9393 | static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota) |
| 9394 | { |
Paul Turner | 8277434 | 2011-07-21 09:43:35 -0700 | [diff] [blame] | 9395 | int ret; |
Paul Turner | a790de9 | 2011-07-21 09:43:29 -0700 | [diff] [blame] | 9396 | struct cfs_schedulable_data data = { |
| 9397 | .tg = tg, |
| 9398 | .period = period, |
| 9399 | .quota = quota, |
| 9400 | }; |
| 9401 | |
| 9402 | if (quota != RUNTIME_INF) { |
| 9403 | do_div(data.period, NSEC_PER_USEC); |
| 9404 | do_div(data.quota, NSEC_PER_USEC); |
| 9405 | } |
| 9406 | |
Paul Turner | 8277434 | 2011-07-21 09:43:35 -0700 | [diff] [blame] | 9407 | rcu_read_lock(); |
| 9408 | ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data); |
| 9409 | rcu_read_unlock(); |
| 9410 | |
| 9411 | return ret; |
Paul Turner | a790de9 | 2011-07-21 09:43:29 -0700 | [diff] [blame] | 9412 | } |
Nikhil Rao | e8da1b1 | 2011-07-21 09:43:40 -0700 | [diff] [blame] | 9413 | |
| 9414 | static int cpu_stats_show(struct cgroup *cgrp, struct cftype *cft, |
| 9415 | struct cgroup_map_cb *cb) |
| 9416 | { |
| 9417 | struct task_group *tg = cgroup_tg(cgrp); |
| 9418 | struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg); |
| 9419 | |
| 9420 | cb->fill(cb, "nr_periods", cfs_b->nr_periods); |
| 9421 | cb->fill(cb, "nr_throttled", cfs_b->nr_throttled); |
| 9422 | cb->fill(cb, "throttled_time", cfs_b->throttled_time); |
| 9423 | |
| 9424 | return 0; |
| 9425 | } |
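/*
 * These keys surface in the cgroup file "cpu.stat"; illustrative
 * (made-up) values:
 *
 *	nr_periods 1042
 *	nr_throttled 13
 *	throttled_time 92337193
 */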
Paul Turner | ab84d31 | 2011-07-21 09:43:28 -0700 | [diff] [blame] | 9426 | #endif /* CONFIG_CFS_BANDWIDTH */ |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 9427 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9428 | |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 9429 | #ifdef CONFIG_RT_GROUP_SCHED |
Mirco Tischler | 0c70814 | 2008-05-14 16:05:46 -0700 | [diff] [blame] | 9430 | static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft, |
Paul Menage | 06ecb27 | 2008-04-29 01:00:06 -0700 | [diff] [blame] | 9431 | s64 val) |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 9432 | { |
Paul Menage | 06ecb27 | 2008-04-29 01:00:06 -0700 | [diff] [blame] | 9433 | return sched_group_set_rt_runtime(cgroup_tg(cgrp), val); |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 9434 | } |
| 9435 | |
Paul Menage | 06ecb27 | 2008-04-29 01:00:06 -0700 | [diff] [blame] | 9436 | static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft) |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 9437 | { |
Paul Menage | 06ecb27 | 2008-04-29 01:00:06 -0700 | [diff] [blame] | 9438 | return sched_group_rt_runtime(cgroup_tg(cgrp)); |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 9439 | } |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 9440 | |
| 9441 | static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype, |
| 9442 | u64 rt_period_us) |
| 9443 | { |
| 9444 | return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us); |
| 9445 | } |
| 9446 | |
| 9447 | static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft) |
| 9448 | { |
| 9449 | return sched_group_rt_period(cgroup_tg(cgrp)); |
| 9450 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 9451 | #endif /* CONFIG_RT_GROUP_SCHED */ |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 9452 | |
Paul Menage | fe5c7cc | 2007-10-29 21:18:11 +0100 | [diff] [blame] | 9453 | static struct cftype cpu_files[] = { |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 9454 | #ifdef CONFIG_FAIR_GROUP_SCHED |
Paul Menage | fe5c7cc | 2007-10-29 21:18:11 +0100 | [diff] [blame] | 9455 | { |
| 9456 | .name = "shares", |
Paul Menage | f4c753b | 2008-04-29 00:59:56 -0700 | [diff] [blame] | 9457 | .read_u64 = cpu_shares_read_u64, |
| 9458 | .write_u64 = cpu_shares_write_u64, |
Paul Menage | fe5c7cc | 2007-10-29 21:18:11 +0100 | [diff] [blame] | 9459 | }, |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 9460 | #endif |
Paul Turner | ab84d31 | 2011-07-21 09:43:28 -0700 | [diff] [blame] | 9461 | #ifdef CONFIG_CFS_BANDWIDTH |
| 9462 | { |
| 9463 | .name = "cfs_quota_us", |
| 9464 | .read_s64 = cpu_cfs_quota_read_s64, |
| 9465 | .write_s64 = cpu_cfs_quota_write_s64, |
| 9466 | }, |
| 9467 | { |
| 9468 | .name = "cfs_period_us", |
| 9469 | .read_u64 = cpu_cfs_period_read_u64, |
| 9470 | .write_u64 = cpu_cfs_period_write_u64, |
| 9471 | }, |
Nikhil Rao | e8da1b1 | 2011-07-21 09:43:40 -0700 | [diff] [blame] | 9472 | { |
| 9473 | .name = "stat", |
| 9474 | .read_map = cpu_stats_show, |
| 9475 | }, |
Paul Turner | ab84d31 | 2011-07-21 09:43:28 -0700 | [diff] [blame] | 9476 | #endif |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 9477 | #ifdef CONFIG_RT_GROUP_SCHED |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 9478 | { |
Peter Zijlstra | 9f0c1e5 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 9479 | .name = "rt_runtime_us", |
Paul Menage | 06ecb27 | 2008-04-29 01:00:06 -0700 | [diff] [blame] | 9480 | .read_s64 = cpu_rt_runtime_read, |
| 9481 | .write_s64 = cpu_rt_runtime_write, |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 9482 | }, |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 9483 | { |
| 9484 | .name = "rt_period_us", |
Paul Menage | f4c753b | 2008-04-29 00:59:56 -0700 | [diff] [blame] | 9485 | .read_u64 = cpu_rt_period_read_uint, |
| 9486 | .write_u64 = cpu_rt_period_write_uint, |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 9487 | }, |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 9488 | #endif |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9489 | }; |
| 9490 | |
| 9491 | static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont) |
| 9492 | { |
Paul Menage | fe5c7cc | 2007-10-29 21:18:11 +0100 | [diff] [blame] | 9493 | return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files)); |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9494 | } |
| 9495 | |
| 9496 | struct cgroup_subsys cpu_cgroup_subsys = { |
Ingo Molnar | 38605ca | 2007-10-29 21:18:11 +0100 | [diff] [blame] | 9497 | .name = "cpu", |
| 9498 | .create = cpu_cgroup_create, |
| 9499 | .destroy = cpu_cgroup_destroy, |
Ben Blum | f780bdb | 2011-05-26 16:25:19 -0700 | [diff] [blame] | 9500 | .can_attach_task = cpu_cgroup_can_attach_task, |
| 9501 | .attach_task = cpu_cgroup_attach_task, |
Peter Zijlstra | 068c5cc | 2011-01-19 12:26:11 +0100 | [diff] [blame] | 9502 | .exit = cpu_cgroup_exit, |
Ingo Molnar | 38605ca | 2007-10-29 21:18:11 +0100 | [diff] [blame] | 9503 | .populate = cpu_cgroup_populate, |
| 9504 | .subsys_id = cpu_cgroup_subsys_id, |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 9505 | .early_init = 1, |
| 9506 | }; |
| 9507 | |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 9508 | #endif /* CONFIG_CGROUP_SCHED */ |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9509 | |
| 9510 | #ifdef CONFIG_CGROUP_CPUACCT |
| 9511 | |
| 9512 | /* |
| 9513 | * CPU accounting code for task groups. |
| 9514 | * |
| 9515 | * Based on the work by Paul Menage (menage@google.com) and Balbir Singh |
| 9516 | * (balbir@in.ibm.com). |
| 9517 | */ |
| 9518 | |
Bharata B Rao | 934352f | 2008-11-10 20:41:13 +0530 | [diff] [blame] | 9519 | /* track cpu usage of a group of tasks and its child groups */ |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9520 | struct cpuacct { |
| 9521 | struct cgroup_subsys_state css; |
| 9522 | /* cpuusage holds pointer to a u64-type object on every cpu */ |
Tejun Heo | 43cf38e | 2010-02-02 14:38:57 +0900 | [diff] [blame] | 9523 | u64 __percpu *cpuusage; |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 9524 | struct percpu_counter cpustat[CPUACCT_STAT_NSTATS]; |
Bharata B Rao | 934352f | 2008-11-10 20:41:13 +0530 | [diff] [blame] | 9525 | struct cpuacct *parent; |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9526 | }; |
| 9527 | |
| 9528 | struct cgroup_subsys cpuacct_subsys; |
| 9529 | |
| 9530 | /* return cpu accounting group corresponding to this container */ |
Dhaval Giani | 32cd756 | 2008-02-29 10:02:43 +0530 | [diff] [blame] | 9531 | static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp) |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9532 | { |
Dhaval Giani | 32cd756 | 2008-02-29 10:02:43 +0530 | [diff] [blame] | 9533 | return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id), |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9534 | struct cpuacct, css); |
| 9535 | } |
| 9536 | |
| 9537 | /* return cpu accounting group to which this task belongs */ |
| 9538 | static inline struct cpuacct *task_ca(struct task_struct *tsk) |
| 9539 | { |
| 9540 | return container_of(task_subsys_state(tsk, cpuacct_subsys_id), |
| 9541 | struct cpuacct, css); |
| 9542 | } |
| 9543 | |
| 9544 | /* create a new cpu accounting group */ |
| 9545 | static struct cgroup_subsys_state *cpuacct_create( |
Dhaval Giani | 32cd756 | 2008-02-29 10:02:43 +0530 | [diff] [blame] | 9546 | struct cgroup_subsys *ss, struct cgroup *cgrp) |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9547 | { |
| 9548 | struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL); |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 9549 | int i; |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9550 | |
| 9551 | if (!ca) |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 9552 | goto out; |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9553 | |
| 9554 | ca->cpuusage = alloc_percpu(u64); |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 9555 | if (!ca->cpuusage) |
| 9556 | goto out_free_ca; |
| 9557 | |
| 9558 | for (i = 0; i < CPUACCT_STAT_NSTATS; i++) |
| 9559 | if (percpu_counter_init(&ca->cpustat[i], 0)) |
| 9560 | goto out_free_counters; |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9561 | |
Bharata B Rao | 934352f | 2008-11-10 20:41:13 +0530 | [diff] [blame] | 9562 | if (cgrp->parent) |
| 9563 | ca->parent = cgroup_ca(cgrp->parent); |
| 9564 | |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9565 | return &ca->css; |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 9566 | |
| 9567 | out_free_counters: |
| 9568 | while (--i >= 0) |
| 9569 | percpu_counter_destroy(&ca->cpustat[i]); |
| 9570 | free_percpu(ca->cpuusage); |
| 9571 | out_free_ca: |
| 9572 | kfree(ca); |
| 9573 | out: |
| 9574 | return ERR_PTR(-ENOMEM); |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9575 | } |
| 9576 | |
| 9577 | /* destroy an existing cpu accounting group */ |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 9578 | static void |
Dhaval Giani | 32cd756 | 2008-02-29 10:02:43 +0530 | [diff] [blame] | 9579 | cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp) |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9580 | { |
Dhaval Giani | 32cd756 | 2008-02-29 10:02:43 +0530 | [diff] [blame] | 9581 | struct cpuacct *ca = cgroup_ca(cgrp); |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 9582 | int i; |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9583 | |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 9584 | for (i = 0; i < CPUACCT_STAT_NSTATS; i++) |
| 9585 | percpu_counter_destroy(&ca->cpustat[i]); |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9586 | free_percpu(ca->cpuusage); |
| 9587 | kfree(ca); |
| 9588 | } |
| 9589 | |
Ken Chen | 720f549 | 2008-12-15 22:02:01 -0800 | [diff] [blame] | 9590 | static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu) |
| 9591 | { |
Rusty Russell | b36128c | 2009-02-20 16:29:08 +0900 | [diff] [blame] | 9592 | u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); |
Ken Chen | 720f549 | 2008-12-15 22:02:01 -0800 | [diff] [blame] | 9593 | u64 data; |
| 9594 | |
| 9595 | #ifndef CONFIG_64BIT |
| 9596 | /* |
| 9597 | * Take rq->lock to make 64-bit read safe on 32-bit platforms. |
| 9598 | */ |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 9599 | raw_spin_lock_irq(&cpu_rq(cpu)->lock); |
Ken Chen | 720f549 | 2008-12-15 22:02:01 -0800 | [diff] [blame] | 9600 | data = *cpuusage; |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 9601 | raw_spin_unlock_irq(&cpu_rq(cpu)->lock); |
Ken Chen | 720f549 | 2008-12-15 22:02:01 -0800 | [diff] [blame] | 9602 | #else |
| 9603 | data = *cpuusage; |
| 9604 | #endif |
| 9605 | |
| 9606 | return data; |
| 9607 | } |
| 9608 | |
| 9609 | static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val) |
| 9610 | { |
Rusty Russell | b36128c | 2009-02-20 16:29:08 +0900 | [diff] [blame] | 9611 | u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); |
Ken Chen | 720f549 | 2008-12-15 22:02:01 -0800 | [diff] [blame] | 9612 | |
| 9613 | #ifndef CONFIG_64BIT |
| 9614 | /* |
| 9615 | * Take rq->lock to make 64-bit write safe on 32-bit platforms. |
| 9616 | */ |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 9617 | raw_spin_lock_irq(&cpu_rq(cpu)->lock); |
Ken Chen | 720f549 | 2008-12-15 22:02:01 -0800 | [diff] [blame] | 9618 | *cpuusage = val; |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 9619 | raw_spin_unlock_irq(&cpu_rq(cpu)->lock); |
Ken Chen | 720f549 | 2008-12-15 22:02:01 -0800 | [diff] [blame] | 9620 | #else |
| 9621 | *cpuusage = val; |
| 9622 | #endif |
| 9623 | } |
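/*
 * Why the lock matters (sketch): without it a 32-bit reader could pair
 * the stale low word with the new high word across a carry, e.g.
 * observing 0x1ffffffff while the counter moves from 0xffffffff to
 * 0x100000000.
 */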
| 9624 | |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9625 | /* return total cpu usage (in nanoseconds) of a group */ |
Dhaval Giani | 32cd756 | 2008-02-29 10:02:43 +0530 | [diff] [blame] | 9626 | static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft) |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9627 | { |
Dhaval Giani | 32cd756 | 2008-02-29 10:02:43 +0530 | [diff] [blame] | 9628 | struct cpuacct *ca = cgroup_ca(cgrp); |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9629 | u64 totalcpuusage = 0; |
| 9630 | int i; |
| 9631 | |
Ken Chen | 720f549 | 2008-12-15 22:02:01 -0800 | [diff] [blame] | 9632 | for_each_present_cpu(i) |
| 9633 | totalcpuusage += cpuacct_cpuusage_read(ca, i); |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9634 | |
| 9635 | return totalcpuusage; |
| 9636 | } |
| 9637 | |
Dhaval Giani | 0297b80 | 2008-02-29 10:02:44 +0530 | [diff] [blame] | 9638 | static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype, |
| 9639 | u64 reset) |
| 9640 | { |
| 9641 | struct cpuacct *ca = cgroup_ca(cgrp); |
| 9642 | int err = 0; |
| 9643 | int i; |
| 9644 | |
| 9645 | if (reset) { |
| 9646 | err = -EINVAL; |
| 9647 | goto out; |
| 9648 | } |
| 9649 | |
Ken Chen | 720f549 | 2008-12-15 22:02:01 -0800 | [diff] [blame] | 9650 | for_each_present_cpu(i) |
| 9651 | cpuacct_cpuusage_write(ca, i, 0); |
Dhaval Giani | 0297b80 | 2008-02-29 10:02:44 +0530 | [diff] [blame] | 9652 | |
Dhaval Giani | 0297b80 | 2008-02-29 10:02:44 +0530 | [diff] [blame] | 9653 | out: |
| 9654 | return err; |
| 9655 | } |
| 9656 | |
Ken Chen | e9515c3 | 2008-12-15 22:04:15 -0800 | [diff] [blame] | 9657 | static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft, |
| 9658 | struct seq_file *m) |
| 9659 | { |
| 9660 | struct cpuacct *ca = cgroup_ca(cgroup); |
| 9661 | u64 percpu; |
| 9662 | int i; |
| 9663 | |
| 9664 | for_each_present_cpu(i) { |
| 9665 | percpu = cpuacct_cpuusage_read(ca, i); |
| 9666 | seq_printf(m, "%llu ", (unsigned long long) percpu); |
| 9667 | } |
| 9668 | seq_printf(m, "\n"); |
| 9669 | return 0; |
| 9670 | } |
| 9671 | |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 9672 | static const char *cpuacct_stat_desc[] = { |
| 9673 | [CPUACCT_STAT_USER] = "user", |
| 9674 | [CPUACCT_STAT_SYSTEM] = "system", |
| 9675 | }; |
| 9676 | |
| 9677 | static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft, |
| 9678 | struct cgroup_map_cb *cb) |
| 9679 | { |
| 9680 | struct cpuacct *ca = cgroup_ca(cgrp); |
| 9681 | int i; |
| 9682 | |
| 9683 | for (i = 0; i < CPUACCT_STAT_NSTATS; i++) { |
| 9684 | s64 val = percpu_counter_read(&ca->cpustat[i]); |
| 9685 | val = cputime64_to_clock_t(val); |
| 9686 | cb->fill(cb, cpuacct_stat_desc[i], val); |
| 9687 | } |
| 9688 | return 0; |
| 9689 | } |
| 9690 | |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9691 | static struct cftype files[] = { |
| 9692 | { |
| 9693 | .name = "usage", |
Paul Menage | f4c753b | 2008-04-29 00:59:56 -0700 | [diff] [blame] | 9694 | .read_u64 = cpuusage_read, |
| 9695 | .write_u64 = cpuusage_write, |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9696 | }, |
Ken Chen | e9515c3 | 2008-12-15 22:04:15 -0800 | [diff] [blame] | 9697 | { |
| 9698 | .name = "usage_percpu", |
| 9699 | .read_seq_string = cpuacct_percpu_seq_read, |
| 9700 | }, |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 9701 | { |
| 9702 | .name = "stat", |
| 9703 | .read_map = cpuacct_stats_show, |
| 9704 | }, |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9705 | }; |
| 9706 | |
Dhaval Giani | 32cd756 | 2008-02-29 10:02:43 +0530 | [diff] [blame] | 9707 | static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9708 | { |
Dhaval Giani | 32cd756 | 2008-02-29 10:02:43 +0530 | [diff] [blame] | 9709 | return cgroup_add_files(cgrp, ss, files, ARRAY_SIZE(files)); |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9710 | } |
| 9711 | |
| 9712 | /* |
| 9713 | * charge this task's execution time to its accounting group. |
| 9714 | * |
| 9715 | * called with rq->lock held. |
| 9716 | */ |
| 9717 | static void cpuacct_charge(struct task_struct *tsk, u64 cputime) |
| 9718 | { |
| 9719 | struct cpuacct *ca; |
Bharata B Rao | 934352f | 2008-11-10 20:41:13 +0530 | [diff] [blame] | 9720 | int cpu; |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9721 | |
Li Zefan | c40c6f8 | 2009-02-26 15:40:15 +0800 | [diff] [blame] | 9722 | if (unlikely(!cpuacct_subsys.active)) |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9723 | return; |
| 9724 | |
Bharata B Rao | 934352f | 2008-11-10 20:41:13 +0530 | [diff] [blame] | 9725 | cpu = task_cpu(tsk); |
Bharata B Rao | a18b83b | 2009-03-23 10:02:53 +0530 | [diff] [blame] | 9726 | |
| 9727 | rcu_read_lock(); |
| 9728 | |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9729 | ca = task_ca(tsk); |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9730 | |
Bharata B Rao | 934352f | 2008-11-10 20:41:13 +0530 | [diff] [blame] | 9731 | for (; ca; ca = ca->parent) { |
Rusty Russell | b36128c | 2009-02-20 16:29:08 +0900 | [diff] [blame] | 9732 | u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9733 | *cpuusage += cputime; |
| 9734 | } |
Bharata B Rao | a18b83b | 2009-03-23 10:02:53 +0530 | [diff] [blame] | 9735 | |
| 9736 | rcu_read_unlock(); |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9737 | } |
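/*
 * Propagation sketch: with a hierarchy root <- A <- B, time executed
 * by a task in B is added to the per-cpu usage of B, A and the root in
 * turn, since the loop above follows ca->parent to the top.
 */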
| 9738 | |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 9739 | /* |
Anton Blanchard | fa535a7 | 2010-02-02 14:46:13 -0800 | [diff] [blame] | 9740 | * When CONFIG_VIRT_CPU_ACCOUNTING is enabled one jiffy can be very large |
| 9741 | * in cputime_t units. As a result, cpuacct_update_stats calls |
| 9742 | * percpu_counter_add with values large enough to always overflow the |
| 9743 | * per cpu batch limit causing bad SMP scalability. |
| 9744 | * |
| 9745 | * To fix this we scale percpu_counter_batch by cputime_one_jiffy so we |
| 9746 | * batch the same amount of time with CONFIG_VIRT_CPU_ACCOUNTING disabled |
| 9747 | * and enabled. We cap it at INT_MAX which is the largest allowed batch value. |
| 9748 | */ |
| 9749 | #ifdef CONFIG_SMP |
| 9750 | #define CPUACCT_BATCH \ |
| 9751 | min_t(long, percpu_counter_batch * cputime_one_jiffy, INT_MAX) |
| 9752 | #else |
| 9753 | #define CPUACCT_BATCH 0 |
| 9754 | #endif |
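/*
 * Worked example (hypothetical numbers): if cputime_one_jiffy were
 * 1000000 and percpu_counter_batch 64, the effective batch becomes
 * 64000000, well under the INT_MAX cap applied by the min_t() above.
 */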
| 9755 | |
| 9756 | /* |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 9757 | * Charge the system/user time to the task's accounting group. |
| 9758 | */ |
| 9759 | static void cpuacct_update_stats(struct task_struct *tsk, |
| 9760 | enum cpuacct_stat_index idx, cputime_t val) |
| 9761 | { |
| 9762 | struct cpuacct *ca; |
Anton Blanchard | fa535a7 | 2010-02-02 14:46:13 -0800 | [diff] [blame] | 9763 | int batch = CPUACCT_BATCH; |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 9764 | |
| 9765 | if (unlikely(!cpuacct_subsys.active)) |
| 9766 | return; |
| 9767 | |
| 9768 | rcu_read_lock(); |
| 9769 | ca = task_ca(tsk); |
| 9770 | |
| 9771 | do { |
Anton Blanchard | fa535a7 | 2010-02-02 14:46:13 -0800 | [diff] [blame] | 9772 | __percpu_counter_add(&ca->cpustat[idx], val, batch); |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 9773 | ca = ca->parent; |
| 9774 | } while (ca); |
| 9775 | rcu_read_unlock(); |
| 9776 | } |
| 9777 | |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9778 | struct cgroup_subsys cpuacct_subsys = { |
| 9779 | .name = "cpuacct", |
| 9780 | .create = cpuacct_create, |
| 9781 | .destroy = cpuacct_destroy, |
| 9782 | .populate = cpuacct_populate, |
| 9783 | .subsys_id = cpuacct_subsys_id, |
| 9784 | }; |
| 9785 | #endif /* CONFIG_CGROUP_CPUACCT */ |