/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/latencytop.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/profile.h>
#include <linux/interrupt.h>

#include <trace/events/sched.h>

#include "sched.h"

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 */
unsigned int sysctl_sched_latency = 6000000ULL;
unsigned int normalized_sysctl_sched_latency = 6000000ULL;

/*
 * The initial- and re-scaling of tunables is configurable
 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
 *
 * Options are:
 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
 * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
 * SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
 */
enum sched_tunable_scaling sysctl_sched_tunable_scaling
	= SCHED_TUNABLESCALING_LOG;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 750000ULL;
unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;

/*
 * sched_nr_latency is kept in sync with
 * sysctl_sched_latency / sysctl_sched_min_granularity
 */
static unsigned int sched_nr_latency = 8;

/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

/*
 * The exponential sliding window over which load is averaged for shares
 * distribution.
 * (default: 10msec)
 */
unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;

#ifdef CONFIG_CFS_BANDWIDTH
/*
 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
 * each time a cfs_rq requests quota.
 *
 * Note: in the case that the slice exceeds the runtime remaining (either due
 * to consumption or the quota being specified to be smaller than the slice)
 * we will always only issue the remaining available time.
 *
 * default: 5 msec, units: microseconds
 */
unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
#endif

/*
 * Increase the granularity value when there are more CPUs,
 * because with more CPUs the 'effective latency' as visible
 * to users decreases. But the relationship is not linear,
 * so pick a second-best guess by going with the log2 of the
 * number of CPUs.
 *
 * This idea comes from the SD scheduler of Con Kolivas:
 */
static int get_update_sysctl_factor(void)
{
	unsigned int cpus = min_t(int, num_online_cpus(), 8);
	unsigned int factor;

	switch (sysctl_sched_tunable_scaling) {
	case SCHED_TUNABLESCALING_NONE:
		factor = 1;
		break;
	case SCHED_TUNABLESCALING_LINEAR:
		factor = cpus;
		break;
	case SCHED_TUNABLESCALING_LOG:
	default:
		factor = 1 + ilog2(cpus);
		break;
	}

	return factor;
}

static void update_sysctl(void)
{
	unsigned int factor = get_update_sysctl_factor();

#define SET_SYSCTL(name) \
	(sysctl_##name = (factor) * normalized_sysctl_##name)
	SET_SYSCTL(sched_min_granularity);
	SET_SYSCTL(sched_latency);
	SET_SYSCTL(sched_wakeup_granularity);
#undef SET_SYSCTL
}

void sched_init_granularity(void)
{
	update_sysctl();
}
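
/*
 * Worked example of the scaling above (illustrative only): on a 4-CPU
 * machine with the default SCHED_TUNABLESCALING_LOG policy the factor is
 * 1 + ilog2(4) = 3, so the effective tunables become:
 *
 *   sysctl_sched_latency            = 3 * 6ms    = 18ms
 *   sysctl_sched_min_granularity    = 3 * 0.75ms = 2.25ms
 *   sysctl_sched_wakeup_granularity = 3 * 1ms    = 3ms
 *
 * Since the CPU count is clamped to 8, the default (LOG) factor never
 * exceeds 1 + ilog2(8) = 4.
 */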

#if BITS_PER_LONG == 32
# define WMULT_CONST	(~0UL)
#else
# define WMULT_CONST	(1UL << 32)
#endif

#define WMULT_SHIFT	32

/*
 * Shift right and round:
 */
#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))

/*
 * delta *= weight / lw
 */
static unsigned long
calc_delta_mine(unsigned long delta_exec, unsigned long weight,
		struct load_weight *lw)
{
	u64 tmp;

	/*
	 * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
	 * entities since MIN_SHARES = 2. Treat weight as 1 if less than
	 * 2^SCHED_LOAD_RESOLUTION.
	 */
	if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
		tmp = (u64)delta_exec * scale_load_down(weight);
	else
		tmp = (u64)delta_exec;

	if (!lw->inv_weight) {
		unsigned long w = scale_load_down(lw->weight);

		if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
			lw->inv_weight = 1;
		else if (unlikely(!w))
			lw->inv_weight = WMULT_CONST;
		else
			lw->inv_weight = WMULT_CONST / w;
	}

	/*
	 * Check whether we'd overflow the 64-bit multiplication:
	 */
	if (unlikely(tmp > WMULT_CONST))
		tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
			WMULT_SHIFT/2);
	else
		tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);

	return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
}
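
/*
 * Example of the fixed-point math above (assuming no extra load scaling,
 * i.e. SCHED_LOAD_RESOLUTION == 0): for a nice-0 entity of weight 1024 on
 * a queue whose total weight lw->weight is 2048,
 *
 *   inv_weight = 2^32 / 2048
 *   delta      = (delta_exec * 1024 * inv_weight) >> 32 = delta_exec / 2
 *
 * i.e. the entity is credited with half of the elapsed wall time.
 */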

const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

static inline struct task_struct *task_of(struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!entity_is_task(se));
#endif
	return container_of(se, struct task_struct, se);
}

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
				       int force_update);

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	if (!cfs_rq->on_list) {
		/*
		 * Ensure we either appear before our parent (if already
		 * enqueued) or force our parent to appear after us when it is
		 * enqueued. The fact that we always enqueue bottom-up
		 * reduces this to two cases.
		 */
		if (cfs_rq->tg->parent &&
		    cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
			list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
				&rq_of(cfs_rq)->leaf_cfs_rq_list);
		} else {
			list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
				&rq_of(cfs_rq)->leaf_cfs_rq_list);
		}

		cfs_rq->on_list = 1;
		/* We should have no load, but we need to update last_decay. */
		update_cfs_rq_blocked_load(cfs_rq, 0);
	}
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	if (cfs_rq->on_list) {
		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
		cfs_rq->on_list = 0;
	}
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group? */
static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	if (se->cfs_rq == pse->cfs_rq)
		return 1;

	return 0;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return se->parent;
}

/* return depth at which a sched entity is present in the hierarchy */
static inline int depth_se(struct sched_entity *se)
{
	int depth = 0;

	for_each_sched_entity(se)
		depth++;

	return depth;
}

static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
	int se_depth, pse_depth;

	/*
	 * The preemption test can be made between sibling entities that are
	 * in the same cfs_rq, i.e. that have a common parent. Walk up the
	 * hierarchy of both tasks until we find their ancestors that are
	 * siblings under that common parent.
	 */

	/* First walk up until both entities are at same depth */
	se_depth = depth_se(*se);
	pse_depth = depth_se(*pse);

	while (se_depth > pse_depth) {
		se_depth--;
		*se = parent_entity(*se);
	}

	while (pse_depth > se_depth) {
		pse_depth--;
		*pse = parent_entity(*pse);
	}

	while (!is_same_group(*se, *pse)) {
		*se = parent_entity(*se);
		*pse = parent_entity(*pse);
	}
}
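
/*
 * Illustration with a hypothetical hierarchy: for a task in group /A/B
 * (depth 3: task se, B's group se, A's group se) and a task directly in
 * /A (depth 2), find_matching_se() first lifts the deeper entity to B's
 * group se; both entities now live in A's cfs_rq, so the walk stops and
 * the caller can compare their vruntimes directly.
 */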

#else	/* !CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)	1

#define for_each_sched_entity(se) \
		for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	return 1;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return NULL;
}

static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

#endif	/* CONFIG_FAIR_GROUP_SCHED */

static __always_inline
void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec);

/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta > 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline int entity_before(struct sched_entity *a,
				struct sched_entity *b)
{
	return (s64)(a->vruntime - b->vruntime) < 0;
}

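/*
 * Track the smallest vruntime on the queue: take the lesser of the current
 * task's vruntime and the leftmost queued entity's vruntime, but never let
 * cfs_rq->min_vruntime move backwards.
 */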
static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
	u64 vruntime = cfs_rq->min_vruntime;

	if (cfs_rq->curr)
		vruntime = cfs_rq->curr->vruntime;

	if (cfs_rq->rb_leftmost) {
		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
						   struct sched_entity,
						   run_node);

		if (!cfs_rq->curr)
			vruntime = se->vruntime;
		else
			vruntime = min_vruntime(vruntime, se->vruntime);
	}

	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
#ifndef CONFIG_64BIT
	smp_wmb();
	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct rb_node *parent = NULL;
	struct sched_entity *entry;
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same key stay together.
		 */
		if (entity_before(se, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Maintain a cache of leftmost tree entries (it is frequently
	 * used):
	 */
	if (leftmost)
		cfs_rq->rb_leftmost = &se->run_node;

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->rb_leftmost == &se->run_node) {
		struct rb_node *next_node;

		next_node = rb_next(&se->run_node);
		cfs_rq->rb_leftmost = next_node;
	}

	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *left = cfs_rq->rb_leftmost;

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_entity, run_node);
}

static struct sched_entity *__pick_next_entity(struct sched_entity *se)
{
	struct rb_node *next = rb_next(&se->run_node);

	if (!next)
		return NULL;

	return rb_entry(next, struct sched_entity, run_node);
}

#ifdef CONFIG_SCHED_DEBUG
struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);

	if (!last)
		return NULL;

	return rb_entry(last, struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

int sched_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	int factor = get_update_sysctl_factor();

	if (ret || !write)
		return ret;

	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
					sysctl_sched_min_granularity);

#define WRT_SYSCTL(name) \
	(normalized_sysctl_##name = sysctl_##name / (factor))
	WRT_SYSCTL(sched_min_granularity);
	WRT_SYSCTL(sched_latency);
	WRT_SYSCTL(sched_wakeup_granularity);
#undef WRT_SYSCTL

	return 0;
}
#endif

/*
 * delta /= w
 */
static inline unsigned long
calc_delta_fair(unsigned long delta, struct sched_entity *se)
{
	if (unlikely(se->load.weight != NICE_0_LOAD))
		delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);

	return delta;
}

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (sched_nr_latency) we have to stretch
 * this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
	u64 period = sysctl_sched_latency;
	unsigned long nr_latency = sched_nr_latency;

	if (unlikely(nr_running > nr_latency)) {
		period = sysctl_sched_min_granularity;
		period *= nr_running;
	}

	return period;
}
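
/*
 * Example with the (unscaled) defaults above: sched_nr_latency is 8, so
 * with up to 8 runnable tasks the period stays at sysctl_sched_latency
 * (6ms); with 12 runnable tasks it stretches to
 * 12 * sysctl_sched_min_granularity = 12 * 0.75ms = 9ms.
 */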

/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);

	for_each_sched_entity(se) {
		struct load_weight *load;
		struct load_weight lw;

		cfs_rq = cfs_rq_of(se);
		load = &cfs_rq->load;

		if (unlikely(!se->on_rq)) {
			lw = cfs_rq->load;

			update_load_add(&lw, se->load.weight);
			load = &lw;
		}
		slice = calc_delta_mine(slice, se->load.weight, load);
	}
	return slice;
}
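
/*
 * Example: two runnable nice-0 tasks (weight 1024 each) on one cfs_rq
 * share a 6ms period, so each receives a 3ms wall-time slice; a lower
 * priority task on the same queue would get a proportionally smaller
 * share of the period.
 */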

/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return calc_delta_fair(sched_slice(cfs_rq, se), se);
}

static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update);
static void update_cfs_shares(struct cfs_rq *cfs_rq);

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
	      unsigned long delta_exec)
{
	unsigned long delta_exec_weighted;

	schedstat_set(curr->statistics.exec_max,
		      max((u64)delta_exec, curr->statistics.exec_max));

	curr->sum_exec_runtime += delta_exec;
	schedstat_add(cfs_rq, exec_clock, delta_exec);
	delta_exec_weighted = calc_delta_fair(delta_exec, curr);

	curr->vruntime += delta_exec_weighted;
	update_min_vruntime(cfs_rq);

#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
	cfs_rq->load_unacc_exec_time += delta_exec;
#endif
}

static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_of(cfs_rq)->clock_task;
	unsigned long delta_exec;

	if (unlikely(!curr))
		return;

	/*
	 * Get the amount of time the current task was running
	 * since the last time we changed load (this cannot
	 * overflow on 32 bits):
	 */
	delta_exec = (unsigned long)(now - curr->exec_start);
	if (!delta_exec)
		return;

	__update_curr(cfs_rq, curr, delta_exec);
	curr->exec_start = now;

	if (entity_is_task(curr)) {
		struct task_struct *curtask = task_of(curr);

		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
		cpuacct_charge(curtask, delta_exec);
		account_group_exec_runtime(curtask, delta_exec);
	}

	account_cfs_rq_runtime(cfs_rq, delta_exec);
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->statistics.wait_start, rq_of(cfs_rq)->clock);
}

/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * a dequeue/enqueue event is a NOP)
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_start(cfs_rq, se);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
			rq_of(cfs_rq)->clock - se->statistics.wait_start));
	schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
	schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
			rq_of(cfs_rq)->clock - se->statistics.wait_start);
#ifdef CONFIG_SCHEDSTATS
	if (entity_is_task(se)) {
		trace_sched_stat_wait(task_of(se),
			rq_of(cfs_rq)->clock - se->statistics.wait_start);
	}
#endif
	schedstat_set(se->statistics.wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * We are starting a new run period:
	 */
	se->exec_start = rq_of(cfs_rq)->clock_task;
}

/**************************************************************
 * Scheduling class queueing methods:
 */

static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_add(&cfs_rq->load, se->load.weight);
	if (!parent_entity(se))
		update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
#ifdef CONFIG_SMP
	if (entity_is_task(se))
		list_add(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
#endif
	cfs_rq->nr_running++;
}

static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_sub(&cfs_rq->load, se->load.weight);
	if (!parent_entity(se))
		update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
	if (entity_is_task(se))
		list_del_init(&se->group_node);
	cfs_rq->nr_running--;
}

#ifdef CONFIG_FAIR_GROUP_SCHED
/* we need this in update_cfs_load and load-balance functions below */
static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
# ifdef CONFIG_SMP
static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq,
					    int global_update)
{
	struct task_group *tg = cfs_rq->tg;
	long load_avg;

	load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
	load_avg -= cfs_rq->load_contribution;

	if (global_update || abs(load_avg) > cfs_rq->load_contribution / 8) {
		atomic_add(load_avg, &tg->load_weight);
		cfs_rq->load_contribution += load_avg;
	}
}

static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
{
	u64 period = sysctl_sched_shares_window;
	u64 now, delta;
	unsigned long load = cfs_rq->load.weight;

	if (cfs_rq->tg == &root_task_group || throttled_hierarchy(cfs_rq))
		return;

	now = rq_of(cfs_rq)->clock_task;
	delta = now - cfs_rq->load_stamp;

	/* truncate load history at 4 idle periods */
	if (cfs_rq->load_stamp > cfs_rq->load_last &&
	    now - cfs_rq->load_last > 4 * period) {
		cfs_rq->load_period = 0;
		cfs_rq->load_avg = 0;
		delta = period - 1;
	}

	cfs_rq->load_stamp = now;
	cfs_rq->load_unacc_exec_time = 0;
	cfs_rq->load_period += delta;
	if (load) {
		cfs_rq->load_last = now;
		cfs_rq->load_avg += delta * load;
	}

	/* consider updating load contribution on each fold or truncate */
	if (global_update || cfs_rq->load_period > period
	    || !cfs_rq->load_period)
		update_cfs_rq_load_contribution(cfs_rq, global_update);

	while (cfs_rq->load_period > period) {
		/*
		 * Inline assembly required to prevent the compiler
		 * optimising this loop into a divmod call.
		 * See __iter_div_u64_rem() for another example of this.
		 */
		asm("" : "+rm" (cfs_rq->load_period));
		cfs_rq->load_period /= 2;
		cfs_rq->load_avg /= 2;
	}

	if (!cfs_rq->curr && !cfs_rq->nr_running && !cfs_rq->load_avg)
		list_del_leaf_cfs_rq(cfs_rq);
}

static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
{
	long tg_weight;

	/*
	 * Use this CPU's actual weight instead of the last load_contribution
	 * to gain a more accurate current total weight. See
	 * update_cfs_rq_load_contribution().
	 */
	tg_weight = atomic_read(&tg->load_weight);
	tg_weight -= cfs_rq->load_contribution;
	tg_weight += cfs_rq->load.weight;

	return tg_weight;
}

static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
{
	long tg_weight, load, shares;

	tg_weight = calc_tg_weight(tg, cfs_rq);
	load = cfs_rq->load.weight;

	shares = (tg->shares * load);
	if (tg_weight)
		shares /= tg_weight;

	if (shares < MIN_SHARES)
		shares = MIN_SHARES;
	if (shares > tg->shares)
		shares = tg->shares;

	return shares;
}
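
/*
 * Example with illustrative numbers: a group with tg->shares = 1024 whose
 * queue on this CPU carries load 512 out of a total group weight of 2048
 * is assigned 1024 * 512 / 2048 = 256, clamped to the
 * [MIN_SHARES, tg->shares] range.
 */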

static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
{
	if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
		update_cfs_load(cfs_rq, 0);
		update_cfs_shares(cfs_rq);
	}
}
# else /* CONFIG_SMP */
static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
{
}

static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
{
	return tg->shares;
}

static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
{
}
# endif /* CONFIG_SMP */
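
/*
 * Change an entity's weight, temporarily taking it out of the load
 * accounting (and committing any outstanding runtime first) if it is
 * currently enqueued, so that the per-rq load sums stay consistent.
 */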
static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
			    unsigned long weight)
{
	if (se->on_rq) {
		/* commit outstanding execution time */
		if (cfs_rq->curr == se)
			update_curr(cfs_rq);
		account_entity_dequeue(cfs_rq, se);
	}

	update_load_set(&se->load, weight);

	if (se->on_rq)
		account_entity_enqueue(cfs_rq, se);
}

static void update_cfs_shares(struct cfs_rq *cfs_rq)
{
	struct task_group *tg;
	struct sched_entity *se;
	long shares;

	tg = cfs_rq->tg;
	se = tg->se[cpu_of(rq_of(cfs_rq))];
	if (!se || throttled_hierarchy(cfs_rq))
		return;
#ifndef CONFIG_SMP
	if (likely(se->load.weight == tg->shares))
		return;
#endif
	shares = calc_cfs_shares(cfs_rq, tg);

	reweight_entity(cfs_rq_of(se), se, shares);
}
#else /* CONFIG_FAIR_GROUP_SCHED */
static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
{
}

static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
{
}

static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
{
}
#endif /* CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_SMP
/*
 * Approximate:
 *   val * y^n,    where y^32 ~= 0.5 (~1 scheduling period)
 */
static __always_inline u64 decay_load(u64 val, u64 n)
{
	for (; n && val; n--) {
		val *= 4008;
		val >>= 12;
	}

	return val;
}
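
/*
 * The multiply/shift above approximates y = 0.5^(1/32): 4008/4096 is
 * roughly 0.97857 and 0.97857^32 is roughly 0.5, so for example
 * decay_load(1024, 32) comes out a little under 512 (integer truncation
 * loses a few units along the way).
 */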

/*
 * We can represent the historical contribution to runnable average as the
 * coefficients of a geometric series.  To do this we sub-divide our runnable
 * history into segments of approximately 1ms (1024us); label the segment that
 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
 *
 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
 *       p0            p1           p2
 *      (now)       (~1ms ago)  (~2ms ago)
 *
 * Let u_i denote the fraction of p_i that the entity was runnable.
 *
 * We then designate the fractions u_i as our co-efficients, yielding the
 * following representation of historical load:
 *   u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
 *
 * We choose y based on the width of a reasonable scheduling period, fixing:
| 1011 | * y^32 = 0.5 |
| 1012 | * |
| 1013 | * This means that the contribution to load ~32ms ago (u_32) will be weighted |
| 1014 | * approximately half as much as the contribution to load within the last ms |
| 1015 | * (u_0). |
| 1016 | * |
| 1017 | * When a period "rolls over" and we have new u_0`, multiplying the previous |
| 1018 | * sum again by y is sufficient to update: |
| 1019 | * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... ) |
| 1020 | * = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}] |
| 1021 | */ |
| 1022 | static __always_inline int __update_entity_runnable_avg(u64 now, |
| 1023 | struct sched_avg *sa, |
| 1024 | int runnable) |
| 1025 | { |
| 1026 | u64 delta; |
| 1027 | int delta_w, decayed = 0; |
| 1028 | |
| 1029 | delta = now - sa->last_runnable_update; |
| 1030 | /* |
| 1031 | * This should only happen when time goes backwards, which it |
| 1032 | * unfortunately does during sched clock init when we swap over to TSC. |
| 1033 | */ |
| 1034 | if ((s64)delta < 0) { |
| 1035 | sa->last_runnable_update = now; |
| 1036 | return 0; |
| 1037 | } |
| 1038 | |
| 1039 | /* |
| 1040 | * Use 1024ns as the unit of measurement since it's a reasonable |
| 1041 | * approximation of 1us and fast to compute. |
| 1042 | */ |
| 1043 | delta >>= 10; |
| 1044 | if (!delta) |
| 1045 | return 0; |
| 1046 | sa->last_runnable_update = now; |
| 1047 | |
| 1048 | /* delta_w is the amount already accumulated against our next period */ |
| 1049 | delta_w = sa->runnable_avg_period % 1024; |
| 1050 | if (delta + delta_w >= 1024) { |
| 1051 | /* period roll-over */ |
| 1052 | decayed = 1; |
| 1053 | |
| 1054 | /* |
| 1055 | * Now that we know we're crossing a period boundary, figure |
| 1056 | * out how much of delta we need to complete the current |
| 1057 | * period and accrue it. |
| 1058 | */ |
| 1059 | delta_w = 1024 - delta_w; |
| 1060 | BUG_ON(delta_w > delta); |
| 1061 | do { |
| 1062 | if (runnable) |
| 1063 | sa->runnable_avg_sum += delta_w; |
| 1064 | sa->runnable_avg_period += delta_w; |
| 1065 | |
| 1066 | /* |
| 1067 | * Remainder of delta initiates a new period, roll over |
| 1068 | * the previous. |
| 1069 | */ |
| 1070 | sa->runnable_avg_sum = |
| 1071 | decay_load(sa->runnable_avg_sum, 1); |
| 1072 | sa->runnable_avg_period = |
| 1073 | decay_load(sa->runnable_avg_period, 1); |
| 1074 | |
| 1075 | delta -= delta_w; |
| 1076 | /* New period is empty */ |
| 1077 | delta_w = 1024; |
| 1078 | } while (delta >= 1024); |
| 1079 | } |
| 1080 | |
| 1081 | /* Remainder of delta accrued against u_0` */ |
| 1082 | if (runnable) |
| 1083 | sa->runnable_avg_sum += delta; |
| 1084 | sa->runnable_avg_period += delta; |
| 1085 | |
| 1086 | return decayed; |
| 1087 | } |
| 1088 | |
Paul Turner | 9ee474f | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1089 | /* Synchronize an entity's decay with its parenting cfs_rq. */ |
Paul Turner | aff3e49 | 2012-10-04 13:18:30 +0200 | [diff] [blame^] | 1090 | static inline u64 __synchronize_entity_decay(struct sched_entity *se) |
Paul Turner | 9ee474f | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1091 | { |
| 1092 | struct cfs_rq *cfs_rq = cfs_rq_of(se); |
| 1093 | u64 decays = atomic64_read(&cfs_rq->decay_counter); |
| 1094 | |
| 1095 | decays -= se->avg.decay_count; |
| 1096 | if (!decays) |
Paul Turner | aff3e49 | 2012-10-04 13:18:30 +0200 | [diff] [blame^] | 1097 | return 0; |
Paul Turner | 9ee474f | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1098 | |
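| | /* apply the decay periods the cfs_rq accumulated since this entity last synced */ |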
| 1099 | se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays); |
| 1100 | se->avg.decay_count = 0; |
Paul Turner | aff3e49 | 2012-10-04 13:18:30 +0200 | [diff] [blame^] | 1101 | |
| 1102 | return decays; |
Paul Turner | 9ee474f | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1103 | } |
| 1104 | |
Paul Turner | 2dac754 | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1105 | /* Compute the current contribution to load_avg by se, return any delta */ |
| 1106 | static long __update_entity_load_avg_contrib(struct sched_entity *se) |
| 1107 | { |
| 1108 | long old_contrib = se->avg.load_avg_contrib; |
| 1109 | |
| 1110 | if (!entity_is_task(se)) |
| 1111 | return 0; |
| 1112 | |
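| | /* |
| | * contrib ~= se->load.weight * (runnable_avg_sum / runnable_avg_period); |
| | * the +1 keeps the divisor non-zero for a freshly zeroed average. |
| | */ |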
| 1113 | se->avg.load_avg_contrib = div64_u64(se->avg.runnable_avg_sum * |
| 1114 | se->load.weight, |
| 1115 | se->avg.runnable_avg_period + 1); |
| 1116 | |
| 1117 | return se->avg.load_avg_contrib - old_contrib; |
| 1118 | } |
| 1119 | |
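| | /* |
| | * blocked_load_avg is unsigned and the tracked contributions decay |
| | * independently, so clamp the subtraction at zero rather than let a |
| | * slightly-too-large contribution wrap the aggregate. |
| | */ |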
Paul Turner | 9ee474f | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1120 | static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq, |
| 1121 | long load_contrib) |
| 1122 | { |
| 1123 | if (likely(load_contrib < cfs_rq->blocked_load_avg)) |
| 1124 | cfs_rq->blocked_load_avg -= load_contrib; |
| 1125 | else |
| 1126 | cfs_rq->blocked_load_avg = 0; |
| 1127 | } |
| 1128 | |
Paul Turner | 9d85f21 | 2012-10-04 13:18:29 +0200 | [diff] [blame] | 1129 | /* Update a sched_entity's runnable average */ |
Paul Turner | 9ee474f | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1130 | static inline void update_entity_load_avg(struct sched_entity *se, |
| 1131 | int update_cfs_rq) |
Paul Turner | 9d85f21 | 2012-10-04 13:18:29 +0200 | [diff] [blame] | 1132 | { |
Paul Turner | 2dac754 | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1133 | struct cfs_rq *cfs_rq = cfs_rq_of(se); |
| 1134 | long contrib_delta; |
| 1135 | |
| 1136 | if (!__update_entity_runnable_avg(rq_of(cfs_rq)->clock_task, &se->avg, |
| 1137 | se->on_rq)) |
| 1138 | return; |
| 1139 | |
| 1140 | contrib_delta = __update_entity_load_avg_contrib(se); |
Paul Turner | 9ee474f | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1141 | |
| 1142 | if (!update_cfs_rq) |
| 1143 | return; |
| 1144 | |
Paul Turner | 2dac754 | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1145 | if (se->on_rq) |
| 1146 | cfs_rq->runnable_load_avg += contrib_delta; |
Paul Turner | 9ee474f | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1147 | else |
| 1148 | subtract_blocked_load_contrib(cfs_rq, -contrib_delta); |
| 1149 | } |
| 1150 | |
| 1151 | /* |
| 1152 | * Decay the load contributed by all blocked children and account this so that |
| 1153 | * their contribution may be appropriately discounted when they wake up. |
| 1154 | */ |
Paul Turner | aff3e49 | 2012-10-04 13:18:30 +0200 | [diff] [blame^] | 1155 | static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update) |
Paul Turner | 9ee474f | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1156 | { |
| 1157 | u64 now = rq_of(cfs_rq)->clock_task >> 20; |
| 1158 | u64 decays; |
| 1159 | |
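| | /* clock_task is in ns, so now is in ~1ms (1<<20 ns) units and decays |
| | * below counts whole decay periods elapsed since last_decay. */ |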
| 1160 | decays = now - cfs_rq->last_decay; |
Paul Turner | aff3e49 | 2012-10-04 13:18:30 +0200 | [diff] [blame^] | 1161 | if (!decays && !force_update) |
Paul Turner | 9ee474f | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1162 | return; |
| 1163 | |
Paul Turner | aff3e49 | 2012-10-04 13:18:30 +0200 | [diff] [blame^] | 1164 | if (atomic64_read(&cfs_rq->removed_load)) { |
| 1165 | u64 removed_load = atomic64_xchg(&cfs_rq->removed_load, 0); |
| 1166 | subtract_blocked_load_contrib(cfs_rq, removed_load); |
| 1167 | } |
Paul Turner | 9ee474f | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1168 | |
Paul Turner | aff3e49 | 2012-10-04 13:18:30 +0200 | [diff] [blame^] | 1169 | if (decays) { |
| 1170 | cfs_rq->blocked_load_avg = decay_load(cfs_rq->blocked_load_avg, |
| 1171 | decays); |
| 1172 | atomic64_add(decays, &cfs_rq->decay_counter); |
| 1173 | cfs_rq->last_decay = now; |
| 1174 | } |
Paul Turner | 9d85f21 | 2012-10-04 13:18:29 +0200 | [diff] [blame] | 1175 | } |
Ben Segall | 18bf280 | 2012-10-04 12:51:20 +0200 | [diff] [blame] | 1176 | |
| 1177 | static inline void update_rq_runnable_avg(struct rq *rq, int runnable) |
| 1178 | { |
| 1179 | __update_entity_runnable_avg(rq->clock_task, &rq->avg, runnable); |
| 1180 | } |
Paul Turner | 2dac754 | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1181 | |
| 1182 | /* Add the load generated by se into cfs_rq's child load-average */ |
| 1183 | static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq, |
Paul Turner | 9ee474f | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1184 | struct sched_entity *se, |
| 1185 | int wakeup) |
Paul Turner | 2dac754 | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1186 | { |
Paul Turner | aff3e49 | 2012-10-04 13:18:30 +0200 | [diff] [blame^] | 1187 | /* |
| 1188 | * We track migrations using entity decay_count <= 0; on a wake-up |
| 1189 | * migration we use a negative decay count to track the remote decays |
| 1190 | * accumulated while sleeping. |
| 1191 | */ |
| 1192 | if (unlikely(se->avg.decay_count <= 0)) { |
Paul Turner | 9ee474f | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1193 | se->avg.last_runnable_update = rq_of(cfs_rq)->clock_task; |
Paul Turner | aff3e49 | 2012-10-04 13:18:30 +0200 | [diff] [blame^] | 1194 | if (se->avg.decay_count) { |
| 1195 | /* |
| 1196 | * In a wake-up migration we have to approximate the |
| 1197 | * time sleeping. This is because we can't synchronize |
| 1198 | * clock_task between the two cpus, and it is not |
| 1199 | * guaranteed to be read-safe. Instead, we can |
| 1200 | * approximate this using our carried decays, which are |
| 1201 | * explicitly atomically readable. |
| 1202 | */ |
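| | /* each carried decay period is ~1ms (1<<20 ns), hence the shift back to ns */ |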
| 1203 | se->avg.last_runnable_update -= (-se->avg.decay_count) |
| 1204 | << 20; |
| 1205 | update_entity_load_avg(se, 0); |
| 1206 | /* Indicate that we're now synchronized and on-rq */ |
| 1207 | se->avg.decay_count = 0; |
| 1208 | } |
Paul Turner | 9ee474f | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1209 | wakeup = 0; |
| 1210 | } else { |
| 1211 | __synchronize_entity_decay(se); |
| 1212 | } |
| 1213 | |
Paul Turner | aff3e49 | 2012-10-04 13:18:30 +0200 | [diff] [blame^] | 1214 | /* migrated tasks did not contribute to our blocked load */ |
| 1215 | if (wakeup) { |
Paul Turner | 9ee474f | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1216 | subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib); |
Paul Turner | aff3e49 | 2012-10-04 13:18:30 +0200 | [diff] [blame^] | 1217 | update_entity_load_avg(se, 0); |
| 1218 | } |
Paul Turner | 9ee474f | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1219 | |
Paul Turner | 2dac754 | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1220 | cfs_rq->runnable_load_avg += se->avg.load_avg_contrib; |
Paul Turner | aff3e49 | 2012-10-04 13:18:30 +0200 | [diff] [blame^] | 1221 | /* we force update consideration on load-balancer moves */ |
| 1222 | update_cfs_rq_blocked_load(cfs_rq, !wakeup); |
Paul Turner | 2dac754 | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1223 | } |
| 1224 | |
Paul Turner | 9ee474f | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1225 | /* |
| 1226 | * Remove se's load from this cfs_rq child load-average; if the entity is |
| 1227 | * transitioning to a blocked state we track its projected decay using |
| 1228 | * blocked_load_avg. |
| 1229 | */ |
Paul Turner | 2dac754 | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1230 | static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq, |
Paul Turner | 9ee474f | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1231 | struct sched_entity *se, |
| 1232 | int sleep) |
Paul Turner | 2dac754 | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1233 | { |
Paul Turner | 9ee474f | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1234 | update_entity_load_avg(se, 1); |
Paul Turner | aff3e49 | 2012-10-04 13:18:30 +0200 | [diff] [blame^] | 1235 | /* we force update consideration on load-balancer moves */ |
| 1236 | update_cfs_rq_blocked_load(cfs_rq, !sleep); |
Paul Turner | 9ee474f | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1237 | |
Paul Turner | 2dac754 | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1238 | cfs_rq->runnable_load_avg -= se->avg.load_avg_contrib; |
Paul Turner | 9ee474f | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1239 | if (sleep) { |
| 1240 | cfs_rq->blocked_load_avg += se->avg.load_avg_contrib; |
| 1241 | se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter); |
| 1242 | } /* migrations, e.g. sleep=0 leave decay_count == 0 */ |
Paul Turner | 2dac754 | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1243 | } |
Paul Turner | 9d85f21 | 2012-10-04 13:18:29 +0200 | [diff] [blame] | 1244 | #else |
Paul Turner | 9ee474f | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1245 | static inline void update_entity_load_avg(struct sched_entity *se, |
| 1246 | int update_cfs_rq) {} |
Ben Segall | 18bf280 | 2012-10-04 12:51:20 +0200 | [diff] [blame] | 1247 | static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {} |
Paul Turner | 2dac754 | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1248 | static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq, |
Paul Turner | 9ee474f | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1249 | struct sched_entity *se, |
| 1250 | int wakeup) {} |
Paul Turner | 2dac754 | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1251 | static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq, |
Paul Turner | 9ee474f | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1252 | struct sched_entity *se, |
| 1253 | int sleep) {} |
Paul Turner | aff3e49 | 2012-10-04 13:18:30 +0200 | [diff] [blame^] | 1254 | static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, |
| 1255 | int force_update) {} |
Paul Turner | 9d85f21 | 2012-10-04 13:18:29 +0200 | [diff] [blame] | 1256 | #endif |
| 1257 | |
Ingo Molnar | 2396af6 | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 1258 | static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1259 | { |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1260 | #ifdef CONFIG_SCHEDSTATS |
Peter Zijlstra | e414314 | 2009-07-23 20:13:26 +0200 | [diff] [blame] | 1261 | struct task_struct *tsk = NULL; |
| 1262 | |
| 1263 | if (entity_is_task(se)) |
| 1264 | tsk = task_of(se); |
| 1265 | |
Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 1266 | if (se->statistics.sleep_start) { |
| 1267 | u64 delta = rq_of(cfs_rq)->clock - se->statistics.sleep_start; |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1268 | |
| 1269 | if ((s64)delta < 0) |
| 1270 | delta = 0; |
| 1271 | |
Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 1272 | if (unlikely(delta > se->statistics.sleep_max)) |
| 1273 | se->statistics.sleep_max = delta; |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1274 | |
Peter Zijlstra | 8c79a04 | 2012-01-30 14:51:37 +0100 | [diff] [blame] | 1275 | se->statistics.sleep_start = 0; |
Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 1276 | se->statistics.sum_sleep_runtime += delta; |
Arjan van de Ven | 9745512 | 2008-01-25 21:08:34 +0100 | [diff] [blame] | 1277 | |
Peter Zijlstra | 768d0c2 | 2009-07-23 20:13:26 +0200 | [diff] [blame] | 1278 | if (tsk) { |
Peter Zijlstra | e414314 | 2009-07-23 20:13:26 +0200 | [diff] [blame] | 1279 | account_scheduler_latency(tsk, delta >> 10, 1); |
Peter Zijlstra | 768d0c2 | 2009-07-23 20:13:26 +0200 | [diff] [blame] | 1280 | trace_sched_stat_sleep(tsk, delta); |
| 1281 | } |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1282 | } |
Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 1283 | if (se->statistics.block_start) { |
| 1284 | u64 delta = rq_of(cfs_rq)->clock - se->statistics.block_start; |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1285 | |
| 1286 | if ((s64)delta < 0) |
| 1287 | delta = 0; |
| 1288 | |
Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 1289 | if (unlikely(delta > se->statistics.block_max)) |
| 1290 | se->statistics.block_max = delta; |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1291 | |
Peter Zijlstra | 8c79a04 | 2012-01-30 14:51:37 +0100 | [diff] [blame] | 1292 | se->statistics.block_start = 0; |
Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 1293 | se->statistics.sum_sleep_runtime += delta; |
Ingo Molnar | 30084fb | 2007-10-02 14:13:08 +0200 | [diff] [blame] | 1294 | |
Peter Zijlstra | e414314 | 2009-07-23 20:13:26 +0200 | [diff] [blame] | 1295 | if (tsk) { |
Arjan van de Ven | 8f0dfc3 | 2009-07-20 11:26:58 -0700 | [diff] [blame] | 1296 | if (tsk->in_iowait) { |
Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 1297 | se->statistics.iowait_sum += delta; |
| 1298 | se->statistics.iowait_count++; |
Peter Zijlstra | 768d0c2 | 2009-07-23 20:13:26 +0200 | [diff] [blame] | 1299 | trace_sched_stat_iowait(tsk, delta); |
Arjan van de Ven | 8f0dfc3 | 2009-07-20 11:26:58 -0700 | [diff] [blame] | 1300 | } |
| 1301 | |
Andrew Vagin | b781a60 | 2011-11-28 12:03:35 +0300 | [diff] [blame] | 1302 | trace_sched_stat_blocked(tsk, delta); |
| 1303 | |
Peter Zijlstra | e414314 | 2009-07-23 20:13:26 +0200 | [diff] [blame] | 1304 | /* |
| 1305 | * Blocking time is in units of nanosecs, so shift by |
| 1306 | * 20 to get a milliseconds-range estimation of the |
| 1307 | * amount of time that the task spent sleeping: |
| 1308 | */ |
| 1309 | if (unlikely(prof_on == SLEEP_PROFILING)) { |
| 1310 | profile_hits(SLEEP_PROFILING, |
| 1311 | (void *)get_wchan(tsk), |
| 1312 | delta >> 20); |
| 1313 | } |
| 1314 | account_scheduler_latency(tsk, delta >> 10, 0); |
Ingo Molnar | 30084fb | 2007-10-02 14:13:08 +0200 | [diff] [blame] | 1315 | } |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1316 | } |
| 1317 | #endif |
| 1318 | } |
| 1319 | |
Peter Zijlstra | ddc9729 | 2007-10-15 17:00:10 +0200 | [diff] [blame] | 1320 | static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se) |
| 1321 | { |
| 1322 | #ifdef CONFIG_SCHED_DEBUG |
| 1323 | s64 d = se->vruntime - cfs_rq->min_vruntime; |
| 1324 | |
| 1325 | if (d < 0) |
| 1326 | d = -d; |
| 1327 | |
| 1328 | if (d > 3*sysctl_sched_latency) |
| 1329 | schedstat_inc(cfs_rq, nr_spread_over); |
| 1330 | #endif |
| 1331 | } |
| 1332 | |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1333 | static void |
Peter Zijlstra | aeb73b0 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 1334 | place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) |
| 1335 | { |
Peter Zijlstra | 1af5f73 | 2008-10-24 11:06:13 +0200 | [diff] [blame] | 1336 | u64 vruntime = cfs_rq->min_vruntime; |
Peter Zijlstra | 94dfb5e | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 1337 | |
Peter Zijlstra | 2cb8600 | 2007-11-09 22:39:37 +0100 | [diff] [blame] | 1338 | /* |
| 1339 | * The 'current' period is already promised to the current tasks; |
| 1340 | * however, the extra weight of the new task will slow them down a |
| 1341 | * little. Place the new task so that it fits in the slot that |
| 1342 | * stays open at the end. |
| 1343 | */ |
Peter Zijlstra | 94dfb5e | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 1344 | if (initial && sched_feat(START_DEBIT)) |
Peter Zijlstra | f9c0b09 | 2008-10-17 19:27:04 +0200 | [diff] [blame] | 1345 | vruntime += sched_vslice(cfs_rq, se); |
Peter Zijlstra | aeb73b0 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 1346 | |
Mike Galbraith | a2e7a7e | 2009-09-18 09:19:25 +0200 | [diff] [blame] | 1347 | /* sleeps up to a single latency don't count. */ |
Mike Galbraith | 5ca9880 | 2010-03-11 17:17:17 +0100 | [diff] [blame] | 1348 | if (!initial) { |
Mike Galbraith | a2e7a7e | 2009-09-18 09:19:25 +0200 | [diff] [blame] | 1349 | unsigned long thresh = sysctl_sched_latency; |
Peter Zijlstra | a7be37a | 2008-06-27 13:41:11 +0200 | [diff] [blame] | 1350 | |
Mike Galbraith | a2e7a7e | 2009-09-18 09:19:25 +0200 | [diff] [blame] | 1351 | /* |
Mike Galbraith | a2e7a7e | 2009-09-18 09:19:25 +0200 | [diff] [blame] | 1352 | * Halve their sleep time's effect, to allow |
| 1353 | * for a gentler effect of sleepers: |
| 1354 | */ |
| 1355 | if (sched_feat(GENTLE_FAIR_SLEEPERS)) |
| 1356 | thresh >>= 1; |
Ingo Molnar | 51e0304 | 2009-09-16 08:54:45 +0200 | [diff] [blame] | 1357 | |
Mike Galbraith | a2e7a7e | 2009-09-18 09:19:25 +0200 | [diff] [blame] | 1358 | vruntime -= thresh; |
Peter Zijlstra | aeb73b0 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 1359 | } |
| 1360 | |
Mike Galbraith | b5d9d73 | 2009-09-08 11:12:28 +0200 | [diff] [blame] | 1361 | /* ensure we never gain time by being placed backwards. */ |
| 1362 | vruntime = max_vruntime(se->vruntime, vruntime); |
| 1363 | |
Peter Zijlstra | 67e9fb2 | 2007-10-15 17:00:10 +0200 | [diff] [blame] | 1364 | se->vruntime = vruntime; |
Peter Zijlstra | aeb73b0 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 1365 | } |
| 1366 | |
Paul Turner | d3d9dc3 | 2011-07-21 09:43:39 -0700 | [diff] [blame] | 1367 | static void check_enqueue_throttle(struct cfs_rq *cfs_rq); |
| 1368 | |
Peter Zijlstra | aeb73b0 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 1369 | static void |
Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 1370 | enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1371 | { |
| 1372 | /* |
Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 1373 | * Update the normalized vruntime before updating min_vruntime |
| 1374 | * through calling update_curr(). |
| 1375 | */ |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1376 | if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING)) |
Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 1377 | se->vruntime += cfs_rq->min_vruntime; |
| 1378 | |
| 1379 | /* |
Dmitry Adamushko | a2a2d68 | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 1380 | * Update run-time statistics of the 'current'. |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1381 | */ |
Ingo Molnar | b7cc089 | 2007-08-09 11:16:47 +0200 | [diff] [blame] | 1382 | update_curr(cfs_rq); |
Paul Turner | d6b5591 | 2010-11-15 15:47:09 -0800 | [diff] [blame] | 1383 | update_cfs_load(cfs_rq, 0); |
Paul Turner | 9ee474f | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1384 | enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP); |
Peter Zijlstra | a992241 | 2008-05-05 23:56:17 +0200 | [diff] [blame] | 1385 | account_entity_enqueue(cfs_rq, se); |
Paul Turner | 6d5ab29 | 2011-01-21 20:45:01 -0800 | [diff] [blame] | 1386 | update_cfs_shares(cfs_rq); |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1387 | |
Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 1388 | if (flags & ENQUEUE_WAKEUP) { |
Peter Zijlstra | aeb73b0 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 1389 | place_entity(cfs_rq, se, 0); |
Ingo Molnar | 2396af6 | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 1390 | enqueue_sleeper(cfs_rq, se); |
Ingo Molnar | e9acbff | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 1391 | } |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1392 | |
Ingo Molnar | d2417e5 | 2007-08-09 11:16:47 +0200 | [diff] [blame] | 1393 | update_stats_enqueue(cfs_rq, se); |
Peter Zijlstra | ddc9729 | 2007-10-15 17:00:10 +0200 | [diff] [blame] | 1394 | check_spread(cfs_rq, se); |
Srivatsa Vaddagiri | 83b699e | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 1395 | if (se != cfs_rq->curr) |
| 1396 | __enqueue_entity(cfs_rq, se); |
Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 1397 | se->on_rq = 1; |
Peter Zijlstra | 3d4b47b | 2010-11-15 15:47:01 -0800 | [diff] [blame] | 1398 | |
Paul Turner | d3d9dc3 | 2011-07-21 09:43:39 -0700 | [diff] [blame] | 1399 | if (cfs_rq->nr_running == 1) { |
Peter Zijlstra | 3d4b47b | 2010-11-15 15:47:01 -0800 | [diff] [blame] | 1400 | list_add_leaf_cfs_rq(cfs_rq); |
Paul Turner | d3d9dc3 | 2011-07-21 09:43:39 -0700 | [diff] [blame] | 1401 | check_enqueue_throttle(cfs_rq); |
| 1402 | } |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1403 | } |
| 1404 | |
Rik van Riel | 2c13c919 | 2011-02-01 09:48:37 -0500 | [diff] [blame] | 1405 | static void __clear_buddies_last(struct sched_entity *se) |
Peter Zijlstra | 2002c69 | 2008-11-11 11:52:33 +0100 | [diff] [blame] | 1406 | { |
Rik van Riel | 2c13c919 | 2011-02-01 09:48:37 -0500 | [diff] [blame] | 1407 | for_each_sched_entity(se) { |
| 1408 | struct cfs_rq *cfs_rq = cfs_rq_of(se); |
| 1409 | if (cfs_rq->last == se) |
| 1410 | cfs_rq->last = NULL; |
| 1411 | else |
| 1412 | break; |
| 1413 | } |
| 1414 | } |
Peter Zijlstra | 2002c69 | 2008-11-11 11:52:33 +0100 | [diff] [blame] | 1415 | |
Rik van Riel | 2c13c919 | 2011-02-01 09:48:37 -0500 | [diff] [blame] | 1416 | static void __clear_buddies_next(struct sched_entity *se) |
| 1417 | { |
| 1418 | for_each_sched_entity(se) { |
| 1419 | struct cfs_rq *cfs_rq = cfs_rq_of(se); |
| 1420 | if (cfs_rq->next == se) |
| 1421 | cfs_rq->next = NULL; |
| 1422 | else |
| 1423 | break; |
| 1424 | } |
Peter Zijlstra | 2002c69 | 2008-11-11 11:52:33 +0100 | [diff] [blame] | 1425 | } |
| 1426 | |
Rik van Riel | ac53db5 | 2011-02-01 09:51:03 -0500 | [diff] [blame] | 1427 | static void __clear_buddies_skip(struct sched_entity *se) |
| 1428 | { |
| 1429 | for_each_sched_entity(se) { |
| 1430 | struct cfs_rq *cfs_rq = cfs_rq_of(se); |
| 1431 | if (cfs_rq->skip == se) |
| 1432 | cfs_rq->skip = NULL; |
| 1433 | else |
| 1434 | break; |
| 1435 | } |
| 1436 | } |
| 1437 | |
Peter Zijlstra | a571bbe | 2009-01-28 14:51:40 +0100 | [diff] [blame] | 1438 | static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) |
| 1439 | { |
Rik van Riel | 2c13c919 | 2011-02-01 09:48:37 -0500 | [diff] [blame] | 1440 | if (cfs_rq->last == se) |
| 1441 | __clear_buddies_last(se); |
| 1442 | |
| 1443 | if (cfs_rq->next == se) |
| 1444 | __clear_buddies_next(se); |
Rik van Riel | ac53db5 | 2011-02-01 09:51:03 -0500 | [diff] [blame] | 1445 | |
| 1446 | if (cfs_rq->skip == se) |
| 1447 | __clear_buddies_skip(se); |
Peter Zijlstra | a571bbe | 2009-01-28 14:51:40 +0100 | [diff] [blame] | 1448 | } |
| 1449 | |
Peter Zijlstra | 6c16a6d | 2012-03-21 13:07:16 -0700 | [diff] [blame] | 1450 | static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq); |
Paul Turner | d8b4986 | 2011-07-21 09:43:41 -0700 | [diff] [blame] | 1451 | |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1452 | static void |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1453 | dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1454 | { |
Dmitry Adamushko | a2a2d68 | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 1455 | /* |
| 1456 | * Update run-time statistics of the 'current'. |
| 1457 | */ |
| 1458 | update_curr(cfs_rq); |
Paul Turner | 9ee474f | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1459 | dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP); |
Dmitry Adamushko | a2a2d68 | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 1460 | |
Ingo Molnar | 19b6a2e | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 1461 | update_stats_dequeue(cfs_rq, se); |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1462 | if (flags & DEQUEUE_SLEEP) { |
Peter Zijlstra | 67e9fb2 | 2007-10-15 17:00:10 +0200 | [diff] [blame] | 1463 | #ifdef CONFIG_SCHEDSTATS |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1464 | if (entity_is_task(se)) { |
| 1465 | struct task_struct *tsk = task_of(se); |
| 1466 | |
| 1467 | if (tsk->state & TASK_INTERRUPTIBLE) |
Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 1468 | se->statistics.sleep_start = rq_of(cfs_rq)->clock; |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1469 | if (tsk->state & TASK_UNINTERRUPTIBLE) |
Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 1470 | se->statistics.block_start = rq_of(cfs_rq)->clock; |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1471 | } |
Dmitry Adamushko | db36cc7 | 2007-10-15 17:00:06 +0200 | [diff] [blame] | 1472 | #endif |
Peter Zijlstra | 67e9fb2 | 2007-10-15 17:00:10 +0200 | [diff] [blame] | 1473 | } |
| 1474 | |
Peter Zijlstra | 2002c69 | 2008-11-11 11:52:33 +0100 | [diff] [blame] | 1475 | clear_buddies(cfs_rq, se); |
Peter Zijlstra | 4793241 | 2008-11-04 21:25:09 +0100 | [diff] [blame] | 1476 | |
Srivatsa Vaddagiri | 83b699e | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 1477 | if (se != cfs_rq->curr) |
Dmitry Adamushko | 30cfdcf | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 1478 | __dequeue_entity(cfs_rq, se); |
Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 1479 | se->on_rq = 0; |
Paul Turner | d6b5591 | 2010-11-15 15:47:09 -0800 | [diff] [blame] | 1480 | update_cfs_load(cfs_rq, 0); |
Dmitry Adamushko | 30cfdcf | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 1481 | account_entity_dequeue(cfs_rq, se); |
Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 1482 | |
| 1483 | /* |
| 1484 | * Normalize the entity after updating the min_vruntime because the |
| 1485 | * update can refer to the ->curr item and we need to reflect this |
| 1486 | * movement in our normalized position. |
| 1487 | */ |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1488 | if (!(flags & DEQUEUE_SLEEP)) |
Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 1489 | se->vruntime -= cfs_rq->min_vruntime; |
Peter Zijlstra | 1e87623 | 2011-05-17 16:21:10 -0700 | [diff] [blame] | 1490 | |
Paul Turner | d8b4986 | 2011-07-21 09:43:41 -0700 | [diff] [blame] | 1491 | /* return excess runtime on last dequeue */ |
| 1492 | return_cfs_rq_runtime(cfs_rq); |
| 1493 | |
Peter Zijlstra | 1e87623 | 2011-05-17 16:21:10 -0700 | [diff] [blame] | 1494 | update_min_vruntime(cfs_rq); |
| 1495 | update_cfs_shares(cfs_rq); |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1496 | } |
| 1497 | |
| 1498 | /* |
| 1499 | * Preempt the current task with a newly woken task if needed: |
| 1500 | */ |
Peter Zijlstra | 7c92e54 | 2007-09-05 14:32:49 +0200 | [diff] [blame] | 1501 | static void |
Ingo Molnar | 2e09bf5 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 1502 | check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1503 | { |
Peter Zijlstra | 1169783 | 2007-09-05 14:32:49 +0200 | [diff] [blame] | 1504 | unsigned long ideal_runtime, delta_exec; |
Wang Xingchao | f4cfb33 | 2011-09-16 13:35:52 -0400 | [diff] [blame] | 1505 | struct sched_entity *se; |
| 1506 | s64 delta; |
Peter Zijlstra | 1169783 | 2007-09-05 14:32:49 +0200 | [diff] [blame] | 1507 | |
Peter Zijlstra | 6d0f0ebd | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 1508 | ideal_runtime = sched_slice(cfs_rq, curr); |
Peter Zijlstra | 1169783 | 2007-09-05 14:32:49 +0200 | [diff] [blame] | 1509 | delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; |
Mike Galbraith | a9f3e2b | 2009-01-28 14:51:39 +0100 | [diff] [blame] | 1510 | if (delta_exec > ideal_runtime) { |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1511 | resched_task(rq_of(cfs_rq)->curr); |
Mike Galbraith | a9f3e2b | 2009-01-28 14:51:39 +0100 | [diff] [blame] | 1512 | /* |
| 1513 | * The current task ran long enough; ensure it doesn't get |
| 1514 | * re-elected due to buddy favours. |
| 1515 | */ |
| 1516 | clear_buddies(cfs_rq, curr); |
Mike Galbraith | f685cea | 2009-10-23 23:09:22 +0200 | [diff] [blame] | 1517 | return; |
| 1518 | } |
| 1519 | |
| 1520 | /* |
| 1521 | * Ensure that a task that missed wakeup preemption by a |
| 1522 | * narrow margin doesn't have to wait for a full slice. |
| 1523 | * This also mitigates buddy induced latencies under load. |
| 1524 | */ |
Mike Galbraith | f685cea | 2009-10-23 23:09:22 +0200 | [diff] [blame] | 1525 | if (delta_exec < sysctl_sched_min_granularity) |
| 1526 | return; |
| 1527 | |
Wang Xingchao | f4cfb33 | 2011-09-16 13:35:52 -0400 | [diff] [blame] | 1528 | se = __pick_first_entity(cfs_rq); |
| 1529 | delta = curr->vruntime - se->vruntime; |
Mike Galbraith | f685cea | 2009-10-23 23:09:22 +0200 | [diff] [blame] | 1530 | |
Wang Xingchao | f4cfb33 | 2011-09-16 13:35:52 -0400 | [diff] [blame] | 1531 | if (delta < 0) |
| 1532 | return; |
Mike Galbraith | d7d8294 | 2011-01-05 05:41:17 +0100 | [diff] [blame] | 1533 | |
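| | /* |
| | * curr's vruntime has pulled more than a full slice ahead of the |
| | * leftmost entity; preempt so the lead does not keep growing. |
| | */ |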
Wang Xingchao | f4cfb33 | 2011-09-16 13:35:52 -0400 | [diff] [blame] | 1534 | if (delta > ideal_runtime) |
| 1535 | resched_task(rq_of(cfs_rq)->curr); |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1536 | } |
| 1537 | |
Srivatsa Vaddagiri | 83b699e | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 1538 | static void |
Ingo Molnar | 8494f41 | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 1539 | set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1540 | { |
Srivatsa Vaddagiri | 83b699e | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 1541 | /* 'current' is not kept within the tree. */ |
| 1542 | if (se->on_rq) { |
| 1543 | /* |
| 1544 | * Any task has to be enqueued before it gets to execute on |
| 1545 | * a CPU. So account for the time it spent waiting on the |
| 1546 | * runqueue. |
| 1547 | */ |
| 1548 | update_stats_wait_end(cfs_rq, se); |
| 1549 | __dequeue_entity(cfs_rq, se); |
| 1550 | } |
| 1551 | |
Ingo Molnar | 79303e9 | 2007-08-09 11:16:47 +0200 | [diff] [blame] | 1552 | update_stats_curr_start(cfs_rq, se); |
Ingo Molnar | 429d43b | 2007-10-15 17:00:03 +0200 | [diff] [blame] | 1553 | cfs_rq->curr = se; |
Ingo Molnar | eba1ed4 | 2007-10-15 17:00:02 +0200 | [diff] [blame] | 1554 | #ifdef CONFIG_SCHEDSTATS |
| 1555 | /* |
| 1556 | * Track our maximum slice length, if the CPU's load is at |
| 1557 | * least twice that of our own weight (i.e. don't track it |
| 1558 | * when there are only lesser-weight tasks around): |
| 1559 | */ |
Dmitry Adamushko | 495eca4 | 2007-10-15 17:00:06 +0200 | [diff] [blame] | 1560 | if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) { |
Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 1561 | se->statistics.slice_max = max(se->statistics.slice_max, |
Ingo Molnar | eba1ed4 | 2007-10-15 17:00:02 +0200 | [diff] [blame] | 1562 | se->sum_exec_runtime - se->prev_sum_exec_runtime); |
| 1563 | } |
| 1564 | #endif |
Peter Zijlstra | 4a55b45 | 2007-09-05 14:32:49 +0200 | [diff] [blame] | 1565 | se->prev_sum_exec_runtime = se->sum_exec_runtime; |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1566 | } |
| 1567 | |
Peter Zijlstra | 3f3a490 | 2008-10-24 11:06:16 +0200 | [diff] [blame] | 1568 | static int |
| 1569 | wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se); |
| 1570 | |
Rik van Riel | ac53db5 | 2011-02-01 09:51:03 -0500 | [diff] [blame] | 1571 | /* |
| 1572 | * Pick the next process, keeping these things in mind, in this order: |
| 1573 | * 1) keep things fair between processes/task groups |
| 1574 | * 2) pick the "next" process, since someone really wants that to run |
| 1575 | * 3) pick the "last" process, for cache locality |
| 1576 | * 4) do not run the "skip" process, if something else is available |
| 1577 | */ |
Peter Zijlstra | f4b6755 | 2008-11-04 21:25:07 +0100 | [diff] [blame] | 1578 | static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq) |
Peter Zijlstra | aa2ac25 | 2008-03-14 21:12:12 +0100 | [diff] [blame] | 1579 | { |
Rik van Riel | ac53db5 | 2011-02-01 09:51:03 -0500 | [diff] [blame] | 1580 | struct sched_entity *se = __pick_first_entity(cfs_rq); |
Mike Galbraith | f685cea | 2009-10-23 23:09:22 +0200 | [diff] [blame] | 1581 | struct sched_entity *left = se; |
Peter Zijlstra | f4b6755 | 2008-11-04 21:25:07 +0100 | [diff] [blame] | 1582 | |
Rik van Riel | ac53db5 | 2011-02-01 09:51:03 -0500 | [diff] [blame] | 1583 | /* |
| 1584 | * Avoid running the skip buddy, if running something else can |
| 1585 | * be done without getting too unfair. |
| 1586 | */ |
| 1587 | if (cfs_rq->skip == se) { |
| 1588 | struct sched_entity *second = __pick_next_entity(se); |
| 1589 | if (second && wakeup_preempt_entity(second, left) < 1) |
| 1590 | se = second; |
| 1591 | } |
Peter Zijlstra | aa2ac25 | 2008-03-14 21:12:12 +0100 | [diff] [blame] | 1592 | |
Mike Galbraith | f685cea | 2009-10-23 23:09:22 +0200 | [diff] [blame] | 1593 | /* |
| 1594 | * Prefer last buddy, try to return the CPU to a preempted task. |
| 1595 | */ |
| 1596 | if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1) |
| 1597 | se = cfs_rq->last; |
| 1598 | |
Rik van Riel | ac53db5 | 2011-02-01 09:51:03 -0500 | [diff] [blame] | 1599 | /* |
| 1600 | * Someone really wants this to run. If it's not unfair, run it. |
| 1601 | */ |
| 1602 | if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1) |
| 1603 | se = cfs_rq->next; |
| 1604 | |
Mike Galbraith | f685cea | 2009-10-23 23:09:22 +0200 | [diff] [blame] | 1605 | clear_buddies(cfs_rq, se); |
Peter Zijlstra | 4793241 | 2008-11-04 21:25:09 +0100 | [diff] [blame] | 1606 | |
| 1607 | return se; |
Peter Zijlstra | aa2ac25 | 2008-03-14 21:12:12 +0100 | [diff] [blame] | 1608 | } |
| 1609 | |
Paul Turner | d3d9dc3 | 2011-07-21 09:43:39 -0700 | [diff] [blame] | 1610 | static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq); |
| 1611 | |
Ingo Molnar | ab6cde2 | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 1612 | static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1613 | { |
| 1614 | /* |
| 1615 | * If still on the runqueue then deactivate_task() |
| 1616 | * was not called and update_curr() has to be done: |
| 1617 | */ |
| 1618 | if (prev->on_rq) |
Ingo Molnar | b7cc089 | 2007-08-09 11:16:47 +0200 | [diff] [blame] | 1619 | update_curr(cfs_rq); |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1620 | |
Paul Turner | d3d9dc3 | 2011-07-21 09:43:39 -0700 | [diff] [blame] | 1621 | /* throttle cfs_rqs exceeding runtime */ |
| 1622 | check_cfs_rq_runtime(cfs_rq); |
| 1623 | |
Peter Zijlstra | ddc9729 | 2007-10-15 17:00:10 +0200 | [diff] [blame] | 1624 | check_spread(cfs_rq, prev); |
Dmitry Adamushko | 30cfdcf | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 1625 | if (prev->on_rq) { |
Ingo Molnar | 5870db5 | 2007-08-09 11:16:47 +0200 | [diff] [blame] | 1626 | update_stats_wait_start(cfs_rq, prev); |
Dmitry Adamushko | 30cfdcf | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 1627 | /* Put 'current' back into the tree. */ |
| 1628 | __enqueue_entity(cfs_rq, prev); |
Paul Turner | 9d85f21 | 2012-10-04 13:18:29 +0200 | [diff] [blame] | 1629 | /* in !on_rq case, update occurred at dequeue */ |
Paul Turner | 9ee474f | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1630 | update_entity_load_avg(prev, 1); |
Dmitry Adamushko | 30cfdcf | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 1631 | } |
Ingo Molnar | 429d43b | 2007-10-15 17:00:03 +0200 | [diff] [blame] | 1632 | cfs_rq->curr = NULL; |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1633 | } |
| 1634 | |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1635 | static void |
| 1636 | entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1637 | { |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1638 | /* |
Dmitry Adamushko | 30cfdcf | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 1639 | * Update run-time statistics of the 'current'. |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1640 | */ |
Dmitry Adamushko | 30cfdcf | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 1641 | update_curr(cfs_rq); |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1642 | |
Paul Turner | 43365bd | 2010-12-15 19:10:17 -0800 | [diff] [blame] | 1643 | /* |
Paul Turner | 9d85f21 | 2012-10-04 13:18:29 +0200 | [diff] [blame] | 1644 | * Ensure that runnable average is periodically updated. |
| 1645 | */ |
Paul Turner | 9ee474f | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 1646 | update_entity_load_avg(curr, 1); |
Paul Turner | aff3e49 | 2012-10-04 13:18:30 +0200 | [diff] [blame^] | 1647 | update_cfs_rq_blocked_load(cfs_rq, 1); |
Paul Turner | 9d85f21 | 2012-10-04 13:18:29 +0200 | [diff] [blame] | 1648 | |
| 1649 | /* |
Paul Turner | 43365bd | 2010-12-15 19:10:17 -0800 | [diff] [blame] | 1650 | * Update share accounting for long-running entities. |
| 1651 | */ |
| 1652 | update_entity_shares_tick(cfs_rq); |
| 1653 | |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1654 | #ifdef CONFIG_SCHED_HRTICK |
| 1655 | /* |
| 1656 | * queued ticks are scheduled to match the slice, so don't bother |
| 1657 | * validating it and just reschedule. |
| 1658 | */ |
Harvey Harrison | 983ed7a | 2008-04-24 18:17:55 -0700 | [diff] [blame] | 1659 | if (queued) { |
| 1660 | resched_task(rq_of(cfs_rq)->curr); |
| 1661 | return; |
| 1662 | } |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1663 | /* |
| 1664 | * don't let the period tick interfere with the hrtick preemption |
| 1665 | */ |
| 1666 | if (!sched_feat(DOUBLE_TICK) && |
| 1667 | hrtimer_active(&rq_of(cfs_rq)->hrtick_timer)) |
| 1668 | return; |
| 1669 | #endif |
| 1670 | |
Yong Zhang | 2c2efae | 2011-07-29 16:20:33 +0800 | [diff] [blame] | 1671 | if (cfs_rq->nr_running > 1) |
Ingo Molnar | 2e09bf5 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 1672 | check_preempt_tick(cfs_rq, curr); |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1673 | } |
| 1674 | |
Paul Turner | ab84d31 | 2011-07-21 09:43:28 -0700 | [diff] [blame] | 1675 | |
| 1676 | /************************************************** |
| 1677 | * CFS bandwidth control machinery |
| 1678 | */ |
| 1679 | |
| 1680 | #ifdef CONFIG_CFS_BANDWIDTH |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1681 | |
| 1682 | #ifdef HAVE_JUMP_LABEL |
Ingo Molnar | c5905af | 2012-02-24 08:31:31 +0100 | [diff] [blame] | 1683 | static struct static_key __cfs_bandwidth_used; |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1684 | |
| 1685 | static inline bool cfs_bandwidth_used(void) |
| 1686 | { |
Ingo Molnar | c5905af | 2012-02-24 08:31:31 +0100 | [diff] [blame] | 1687 | return static_key_false(&__cfs_bandwidth_used); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1688 | } |
| 1689 | |
| 1690 | void account_cfs_bandwidth_used(int enabled, int was_enabled) |
| 1691 | { |
| 1692 | /* only need to count groups transitioning between enabled/!enabled */ |
| 1693 | if (enabled && !was_enabled) |
Ingo Molnar | c5905af | 2012-02-24 08:31:31 +0100 | [diff] [blame] | 1694 | static_key_slow_inc(&__cfs_bandwidth_used); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1695 | else if (!enabled && was_enabled) |
Ingo Molnar | c5905af | 2012-02-24 08:31:31 +0100 | [diff] [blame] | 1696 | static_key_slow_dec(&__cfs_bandwidth_used); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1697 | } |
| 1698 | #else /* HAVE_JUMP_LABEL */ |
| 1699 | static bool cfs_bandwidth_used(void) |
| 1700 | { |
| 1701 | return true; |
| 1702 | } |
| 1703 | |
| 1704 | void account_cfs_bandwidth_used(int enabled, int was_enabled) {} |
| 1705 | #endif /* HAVE_JUMP_LABEL */ |
| 1706 | |
Paul Turner | ab84d31 | 2011-07-21 09:43:28 -0700 | [diff] [blame] | 1707 | /* |
| 1708 | * default period for cfs group bandwidth. |
| 1709 | * default: 0.1s, units: nanoseconds |
| 1710 | */ |
| 1711 | static inline u64 default_cfs_period(void) |
| 1712 | { |
| 1713 | return 100000000ULL; |
| 1714 | } |
Paul Turner | ec12cb7 | 2011-07-21 09:43:30 -0700 | [diff] [blame] | 1715 | |
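| | /* |
| | * Amount of runtime a cfs_rq requests from the global pool at a time; |
| | * the sysctl value is in microseconds and is converted to nanoseconds. |
| | */ |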
| 1716 | static inline u64 sched_cfs_bandwidth_slice(void) |
| 1717 | { |
| 1718 | return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC; |
| 1719 | } |
| 1720 | |
Paul Turner | a9cf55b | 2011-07-21 09:43:32 -0700 | [diff] [blame] | 1721 | /* |
| 1722 | * Replenish runtime according to assigned quota and update expiration time. |
| 1723 | * We use sched_clock_cpu directly instead of rq->clock to avoid adding |
| 1724 | * additional synchronization around rq->lock. |
| 1725 | * |
| 1726 | * requires cfs_b->lock |
| 1727 | */ |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1728 | void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b) |
Paul Turner | a9cf55b | 2011-07-21 09:43:32 -0700 | [diff] [blame] | 1729 | { |
| 1730 | u64 now; |
| 1731 | |
| 1732 | if (cfs_b->quota == RUNTIME_INF) |
| 1733 | return; |
| 1734 | |
| 1735 | now = sched_clock_cpu(smp_processor_id()); |
| 1736 | cfs_b->runtime = cfs_b->quota; |
| 1737 | cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period); |
| 1738 | } |
| 1739 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1740 | static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) |
| 1741 | { |
| 1742 | return &tg->cfs_bandwidth; |
| 1743 | } |
| 1744 | |
Paul Turner | 85dac90 | 2011-07-21 09:43:33 -0700 | [diff] [blame] | 1745 | /* returns 0 on failure to allocate runtime */ |
| 1746 | static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) |
Paul Turner | ec12cb7 | 2011-07-21 09:43:30 -0700 | [diff] [blame] | 1747 | { |
| 1748 | struct task_group *tg = cfs_rq->tg; |
| 1749 | struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg); |
Paul Turner | a9cf55b | 2011-07-21 09:43:32 -0700 | [diff] [blame] | 1750 | u64 amount = 0, min_amount, expires; |
Paul Turner | ec12cb7 | 2011-07-21 09:43:30 -0700 | [diff] [blame] | 1751 | |
| 1752 | /* note: this is a positive sum as runtime_remaining <= 0 */ |
| 1753 | min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining; |
| 1754 | |
| 1755 | raw_spin_lock(&cfs_b->lock); |
| 1756 | if (cfs_b->quota == RUNTIME_INF) |
| 1757 | amount = min_amount; |
Paul Turner | 58088ad | 2011-07-21 09:43:31 -0700 | [diff] [blame] | 1758 | else { |
Paul Turner | a9cf55b | 2011-07-21 09:43:32 -0700 | [diff] [blame] | 1759 | /* |
| 1760 | * If the bandwidth pool has become inactive, then at least one |
| 1761 | * period must have elapsed since the last consumption. |
| 1762 | * Refresh the global state and ensure the bandwidth timer becomes |
| 1763 | * active. |
| 1764 | */ |
| 1765 | if (!cfs_b->timer_active) { |
| 1766 | __refill_cfs_bandwidth_runtime(cfs_b); |
Paul Turner | 58088ad | 2011-07-21 09:43:31 -0700 | [diff] [blame] | 1767 | __start_cfs_bandwidth(cfs_b); |
Paul Turner | a9cf55b | 2011-07-21 09:43:32 -0700 | [diff] [blame] | 1768 | } |
Paul Turner | 58088ad | 2011-07-21 09:43:31 -0700 | [diff] [blame] | 1769 | |
| 1770 | if (cfs_b->runtime > 0) { |
| 1771 | amount = min(cfs_b->runtime, min_amount); |
| 1772 | cfs_b->runtime -= amount; |
| 1773 | cfs_b->idle = 0; |
| 1774 | } |
Paul Turner | ec12cb7 | 2011-07-21 09:43:30 -0700 | [diff] [blame] | 1775 | } |
Paul Turner | a9cf55b | 2011-07-21 09:43:32 -0700 | [diff] [blame] | 1776 | expires = cfs_b->runtime_expires; |
Paul Turner | ec12cb7 | 2011-07-21 09:43:30 -0700 | [diff] [blame] | 1777 | raw_spin_unlock(&cfs_b->lock); |
| 1778 | |
| 1779 | cfs_rq->runtime_remaining += amount; |
Paul Turner | a9cf55b | 2011-07-21 09:43:32 -0700 | [diff] [blame] | 1780 | /* |
| 1781 | * we may have advanced our local expiration to account for allowed |
| 1782 | * spread between our sched_clock and the one on which runtime was |
| 1783 | * issued. |
| 1784 | */ |
| 1785 | if ((s64)(expires - cfs_rq->runtime_expires) > 0) |
| 1786 | cfs_rq->runtime_expires = expires; |
Paul Turner | 85dac90 | 2011-07-21 09:43:33 -0700 | [diff] [blame] | 1787 | |
| 1788 | return cfs_rq->runtime_remaining > 0; |
Paul Turner | a9cf55b | 2011-07-21 09:43:32 -0700 | [diff] [blame] | 1789 | } |
| 1790 | |
| 1791 | /* |
| 1792 | * Note: This depends on the synchronization provided by sched_clock and the |
| 1793 | * fact that rq->clock snapshots this value. |
| 1794 | */ |
| 1795 | static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq) |
| 1796 | { |
| 1797 | struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); |
| 1798 | struct rq *rq = rq_of(cfs_rq); |
| 1799 | |
| 1800 | /* if the deadline is ahead of our clock, nothing to do */ |
| 1801 | if (likely((s64)(rq->clock - cfs_rq->runtime_expires) < 0)) |
| 1802 | return; |
| 1803 | |
| 1804 | if (cfs_rq->runtime_remaining < 0) |
| 1805 | return; |
| 1806 | |
| 1807 | /* |
| 1808 | * If the local deadline has passed we have to consider the |
| 1809 | * possibility that our sched_clock is 'fast' and the global deadline |
| 1810 | * has not truly expired. |
| 1811 | * |
| 1812 | * Fortunately we can determine whether this is the case by checking |
| 1813 | * whether the global deadline has advanced. |
| 1814 | */ |
| 1815 | |
| 1816 | if ((s64)(cfs_rq->runtime_expires - cfs_b->runtime_expires) >= 0) { |
| 1817 | /* extend local deadline, drift is bounded above by 2 ticks */ |
| 1818 | cfs_rq->runtime_expires += TICK_NSEC; |
| 1819 | } else { |
| 1820 | /* global deadline is ahead, expiration has passed */ |
| 1821 | cfs_rq->runtime_remaining = 0; |
| 1822 | } |
Paul Turner | ec12cb7 | 2011-07-21 09:43:30 -0700 | [diff] [blame] | 1823 | } |
| 1824 | |
| 1825 | static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, |
| 1826 | unsigned long delta_exec) |
| 1827 | { |
Paul Turner | a9cf55b | 2011-07-21 09:43:32 -0700 | [diff] [blame] | 1828 | /* dock delta_exec before expiring quota (as it could span periods) */ |
Paul Turner | ec12cb7 | 2011-07-21 09:43:30 -0700 | [diff] [blame] | 1829 | cfs_rq->runtime_remaining -= delta_exec; |
Paul Turner | a9cf55b | 2011-07-21 09:43:32 -0700 | [diff] [blame] | 1830 | expire_cfs_rq_runtime(cfs_rq); |
| 1831 | |
| 1832 | if (likely(cfs_rq->runtime_remaining > 0)) |
Paul Turner | ec12cb7 | 2011-07-21 09:43:30 -0700 | [diff] [blame] | 1833 | return; |
| 1834 | |
Paul Turner | 85dac90 | 2011-07-21 09:43:33 -0700 | [diff] [blame] | 1835 | /* |
| 1836 | * if we're unable to extend our runtime we resched so that the active |
| 1837 | * hierarchy can be throttled |
| 1838 | */ |
| 1839 | if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) |
| 1840 | resched_task(rq_of(cfs_rq)->curr); |
Paul Turner | ec12cb7 | 2011-07-21 09:43:30 -0700 | [diff] [blame] | 1841 | } |
| 1842 | |
Peter Zijlstra | 6c16a6d | 2012-03-21 13:07:16 -0700 | [diff] [blame] | 1843 | static __always_inline |
| 1844 | void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec) |
Paul Turner | ec12cb7 | 2011-07-21 09:43:30 -0700 | [diff] [blame] | 1845 | { |
Paul Turner | 56f570e | 2011-11-07 20:26:33 -0800 | [diff] [blame] | 1846 | if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled) |
Paul Turner | ec12cb7 | 2011-07-21 09:43:30 -0700 | [diff] [blame] | 1847 | return; |
| 1848 | |
| 1849 | __account_cfs_rq_runtime(cfs_rq, delta_exec); |
| 1850 | } |
| 1851 | |
Paul Turner | 85dac90 | 2011-07-21 09:43:33 -0700 | [diff] [blame] | 1852 | static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) |
| 1853 | { |
Paul Turner | 56f570e | 2011-11-07 20:26:33 -0800 | [diff] [blame] | 1854 | return cfs_bandwidth_used() && cfs_rq->throttled; |
Paul Turner | 85dac90 | 2011-07-21 09:43:33 -0700 | [diff] [blame] | 1855 | } |
| 1856 | |
Paul Turner | 64660c8 | 2011-07-21 09:43:36 -0700 | [diff] [blame] | 1857 | /* check whether cfs_rq, or any parent, is throttled */ |
| 1858 | static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) |
| 1859 | { |
Paul Turner | 56f570e | 2011-11-07 20:26:33 -0800 | [diff] [blame] | 1860 | return cfs_bandwidth_used() && cfs_rq->throttle_count; |
Paul Turner | 64660c8 | 2011-07-21 09:43:36 -0700 | [diff] [blame] | 1861 | } |
| 1862 | |
| 1863 | /* |
| 1864 | * Ensure that neither the group entity corresponding to src_cpu nor the |
| 1865 | * one for dest_cpu is a member of a throttled hierarchy when performing group |
| 1866 | * load-balance operations. |
| 1867 | */ |
| 1868 | static inline int throttled_lb_pair(struct task_group *tg, |
| 1869 | int src_cpu, int dest_cpu) |
| 1870 | { |
| 1871 | struct cfs_rq *src_cfs_rq, *dest_cfs_rq; |
| 1872 | |
| 1873 | src_cfs_rq = tg->cfs_rq[src_cpu]; |
| 1874 | dest_cfs_rq = tg->cfs_rq[dest_cpu]; |
| 1875 | |
| 1876 | return throttled_hierarchy(src_cfs_rq) || |
| 1877 | throttled_hierarchy(dest_cfs_rq); |
| 1878 | } |
| 1879 | |
| 1880 | /* updated child weight may affect parent so we have to do this bottom up */ |
| 1881 | static int tg_unthrottle_up(struct task_group *tg, void *data) |
| 1882 | { |
| 1883 | struct rq *rq = data; |
| 1884 | struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; |
| 1885 | |
| 1886 | cfs_rq->throttle_count--; |
| 1887 | #ifdef CONFIG_SMP |
| 1888 | if (!cfs_rq->throttle_count) { |
| 1889 | u64 delta = rq->clock_task - cfs_rq->load_stamp; |
| 1890 | |
| 1891 | /* leaving throttled state, advance shares averaging windows */ |
| 1892 | cfs_rq->load_stamp += delta; |
| 1893 | cfs_rq->load_last += delta; |
| 1894 | |
| 1895 | /* update entity weight now that we are on_rq again */ |
| 1896 | update_cfs_shares(cfs_rq); |
| 1897 | } |
| 1898 | #endif |
| 1899 | |
| 1900 | return 0; |
| 1901 | } |
| 1902 | |
| 1903 | static int tg_throttle_down(struct task_group *tg, void *data) |
| 1904 | { |
| 1905 | struct rq *rq = data; |
| 1906 | struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; |
| 1907 | |
| 1908 | /* group is entering throttled state, record last load */ |
| 1909 | if (!cfs_rq->throttle_count) |
| 1910 | update_cfs_load(cfs_rq, 0); |
| 1911 | cfs_rq->throttle_count++; |
| 1912 | |
| 1913 | return 0; |
| 1914 | } |
| 1915 | |
Paul Turner | d3d9dc3 | 2011-07-21 09:43:39 -0700 | [diff] [blame] | 1916 | static void throttle_cfs_rq(struct cfs_rq *cfs_rq) |
Paul Turner | 85dac90 | 2011-07-21 09:43:33 -0700 | [diff] [blame] | 1917 | { |
| 1918 | struct rq *rq = rq_of(cfs_rq); |
| 1919 | struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); |
| 1920 | struct sched_entity *se; |
| 1921 | long task_delta, dequeue = 1; |
| 1922 | |
| 1923 | se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; |
| 1924 | |
| 1925 | /* account load preceding throttle */ |
Paul Turner | 64660c8 | 2011-07-21 09:43:36 -0700 | [diff] [blame] | 1926 | rcu_read_lock(); |
| 1927 | walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq); |
| 1928 | rcu_read_unlock(); |
Paul Turner | 85dac90 | 2011-07-21 09:43:33 -0700 | [diff] [blame] | 1929 | |
| 1930 | task_delta = cfs_rq->h_nr_running; |
| 1931 | for_each_sched_entity(se) { |
| 1932 | struct cfs_rq *qcfs_rq = cfs_rq_of(se); |
| 1933 | /* throttled entity or throttle-on-deactivate */ |
| 1934 | if (!se->on_rq) |
| 1935 | break; |
| 1936 | |
| 1937 | if (dequeue) |
| 1938 | dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP); |
| 1939 | qcfs_rq->h_nr_running -= task_delta; |
| 1940 | |
| 1941 | if (qcfs_rq->load.weight) |
| 1942 | dequeue = 0; |
| 1943 | } |
| 1944 | |
| 1945 | if (!se) |
| 1946 | rq->nr_running -= task_delta; |
| 1947 | |
| 1948 | cfs_rq->throttled = 1; |
Nikhil Rao | e8da1b1 | 2011-07-21 09:43:40 -0700 | [diff] [blame] | 1949 | cfs_rq->throttled_timestamp = rq->clock; |
Paul Turner | 85dac90 | 2011-07-21 09:43:33 -0700 | [diff] [blame] | 1950 | raw_spin_lock(&cfs_b->lock); |
| 1951 | list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq); |
| 1952 | raw_spin_unlock(&cfs_b->lock); |
| 1953 | } |
| 1954 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1955 | void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) |
Paul Turner | 671fd9d | 2011-07-21 09:43:34 -0700 | [diff] [blame] | 1956 | { |
| 1957 | struct rq *rq = rq_of(cfs_rq); |
| 1958 | struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); |
| 1959 | struct sched_entity *se; |
| 1960 | int enqueue = 1; |
| 1961 | long task_delta; |
| 1962 | |
| 1963 | se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; |
| 1964 | |
| 1965 | cfs_rq->throttled = 0; |
| 1966 | raw_spin_lock(&cfs_b->lock); |
Nikhil Rao | e8da1b1 | 2011-07-21 09:43:40 -0700 | [diff] [blame] | 1967 | cfs_b->throttled_time += rq->clock - cfs_rq->throttled_timestamp; |
Paul Turner | 671fd9d | 2011-07-21 09:43:34 -0700 | [diff] [blame] | 1968 | list_del_rcu(&cfs_rq->throttled_list); |
| 1969 | raw_spin_unlock(&cfs_b->lock); |
Nikhil Rao | e8da1b1 | 2011-07-21 09:43:40 -0700 | [diff] [blame] | 1970 | cfs_rq->throttled_timestamp = 0; |
Paul Turner | 671fd9d | 2011-07-21 09:43:34 -0700 | [diff] [blame] | 1971 | |
Paul Turner | 64660c8 | 2011-07-21 09:43:36 -0700 | [diff] [blame] | 1972 | update_rq_clock(rq); |
| 1973 | /* update hierarchical throttle state */ |
| 1974 | walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq); |
| 1975 | |
Paul Turner | 671fd9d | 2011-07-21 09:43:34 -0700 | [diff] [blame] | 1976 | if (!cfs_rq->load.weight) |
| 1977 | return; |
| 1978 | |
| 1979 | task_delta = cfs_rq->h_nr_running; |
| 1980 | for_each_sched_entity(se) { |
| 1981 | if (se->on_rq) |
| 1982 | enqueue = 0; |
| 1983 | |
| 1984 | cfs_rq = cfs_rq_of(se); |
| 1985 | if (enqueue) |
| 1986 | enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP); |
| 1987 | cfs_rq->h_nr_running += task_delta; |
| 1988 | |
| 1989 | if (cfs_rq_throttled(cfs_rq)) |
| 1990 | break; |
| 1991 | } |
| 1992 | |
| 1993 | if (!se) |
| 1994 | rq->nr_running += task_delta; |
| 1995 | |
| 1996 | /* determine whether we need to wake up potentially idle cpu */ |
| 1997 | if (rq->curr == rq->idle && rq->cfs.nr_running) |
| 1998 | resched_task(rq->curr); |
| 1999 | } |
| 2000 | |
| 2001 | static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, |
| 2002 | u64 remaining, u64 expires) |
| 2003 | { |
| 2004 | struct cfs_rq *cfs_rq; |
| 2005 | u64 runtime = remaining; |
| 2006 | |
| 2007 | rcu_read_lock(); |
| 2008 | list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq, |
| 2009 | throttled_list) { |
| 2010 | struct rq *rq = rq_of(cfs_rq); |
| 2011 | |
| 2012 | raw_spin_lock(&rq->lock); |
| 2013 | if (!cfs_rq_throttled(cfs_rq)) |
| 2014 | goto next; |
| 2015 | |
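| | /* grant just enough (+1 ns) to push runtime_remaining above zero so |
| | * that this cfs_rq can be unthrottled below */ |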
| 2016 | runtime = -cfs_rq->runtime_remaining + 1; |
| 2017 | if (runtime > remaining) |
| 2018 | runtime = remaining; |
| 2019 | remaining -= runtime; |
| 2020 | |
| 2021 | cfs_rq->runtime_remaining += runtime; |
| 2022 | cfs_rq->runtime_expires = expires; |
| 2023 | |
| 2024 | /* we check whether we're throttled above */ |
| 2025 | if (cfs_rq->runtime_remaining > 0) |
| 2026 | unthrottle_cfs_rq(cfs_rq); |
| 2027 | |
| 2028 | next: |
| 2029 | raw_spin_unlock(&rq->lock); |
| 2030 | |
| 2031 | if (!remaining) |
| 2032 | break; |
| 2033 | } |
| 2034 | rcu_read_unlock(); |
| 2035 | |
| 2036 | return remaining; |
| 2037 | } |
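/*
 * Illustrative note (added commentary, not part of the original source):
 * each throttled cfs_rq above is topped up only just past zero so the pool
 * stretches across as many groups as possible.  Assuming a cfs_rq that is
 * 3ms in debt and 10ms left in the pool:
 *
 *	runtime           = -(-3ms) + 1  =  3ms + 1ns	(the grant)
 *	remaining         = 10ms - grant ~= 7ms		(left for others)
 *	runtime_remaining = +1ns			-> unthrottle_cfs_rq()
 *
 * The freshly unthrottled group then re-acquires a full slice through the
 * normal account_cfs_rq_runtime() path before it can run for long.
 */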
| 2038 | |
Paul Turner | 58088ad | 2011-07-21 09:43:31 -0700 | [diff] [blame] | 2039 | /* |
| 2040 | * Responsible for refilling a task_group's bandwidth and unthrottling its |
| 2041 | * cfs_rqs as appropriate. If there has been no activity within the last |
| 2042 | * period the timer is deactivated until scheduling resumes; cfs_b->idle is |
| 2043 | * used to track this state. |
| 2044 | */ |
| 2045 | static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun) |
| 2046 | { |
Paul Turner | 671fd9d | 2011-07-21 09:43:34 -0700 | [diff] [blame] | 2047 | u64 runtime, runtime_expires; |
| 2048 | int idle = 1, throttled; |
Paul Turner | 58088ad | 2011-07-21 09:43:31 -0700 | [diff] [blame] | 2049 | |
| 2050 | raw_spin_lock(&cfs_b->lock); |
| 2051 | /* no need to continue the timer with no bandwidth constraint */ |
| 2052 | if (cfs_b->quota == RUNTIME_INF) |
| 2053 | goto out_unlock; |
| 2054 | |
Paul Turner | 671fd9d | 2011-07-21 09:43:34 -0700 | [diff] [blame] | 2055 | throttled = !list_empty(&cfs_b->throttled_cfs_rq); |
| 2056 | /* idle depends on !throttled (for the case of a large deficit) */ |
| 2057 | idle = cfs_b->idle && !throttled; |
Nikhil Rao | e8da1b1 | 2011-07-21 09:43:40 -0700 | [diff] [blame] | 2058 | cfs_b->nr_periods += overrun; |
Paul Turner | 671fd9d | 2011-07-21 09:43:34 -0700 | [diff] [blame] | 2059 | |
Paul Turner | a9cf55b | 2011-07-21 09:43:32 -0700 | [diff] [blame] | 2060 | /* if we're going inactive then everything else can be deferred */ |
| 2061 | if (idle) |
| 2062 | goto out_unlock; |
| 2063 | |
| 2064 | __refill_cfs_bandwidth_runtime(cfs_b); |
| 2065 | |
Paul Turner | 671fd9d | 2011-07-21 09:43:34 -0700 | [diff] [blame] | 2066 | if (!throttled) { |
| 2067 | /* mark as potentially idle for the upcoming period */ |
| 2068 | cfs_b->idle = 1; |
| 2069 | goto out_unlock; |
| 2070 | } |
Paul Turner | 58088ad | 2011-07-21 09:43:31 -0700 | [diff] [blame] | 2071 | |
Nikhil Rao | e8da1b1 | 2011-07-21 09:43:40 -0700 | [diff] [blame] | 2072 | /* account preceding periods in which throttling occurred */ |
| 2073 | cfs_b->nr_throttled += overrun; |
| 2074 | |
Paul Turner | 671fd9d | 2011-07-21 09:43:34 -0700 | [diff] [blame] | 2075 | /* |
| 2076 | * There are throttled entities so we must first use the new bandwidth |
| 2077 | * to unthrottle them before making it generally available. This |
| 2078 | * ensures that all existing debts will be paid before a new cfs_rq is |
| 2079 | * allowed to run. |
| 2080 | */ |
| 2081 | runtime = cfs_b->runtime; |
| 2082 | runtime_expires = cfs_b->runtime_expires; |
| 2083 | cfs_b->runtime = 0; |
| 2084 | |
| 2085 | /* |
| 2086 | * This check is repeated as we are holding onto the new bandwidth |
| 2087 | * while we unthrottle. This can potentially race with an unthrottled |
| 2088 | * group trying to acquire new bandwidth from the global pool. |
| 2089 | */ |
| 2090 | while (throttled && runtime > 0) { |
| 2091 | raw_spin_unlock(&cfs_b->lock); |
| 2092 | /* we can't nest cfs_b->lock while distributing bandwidth */ |
| 2093 | runtime = distribute_cfs_runtime(cfs_b, runtime, |
| 2094 | runtime_expires); |
| 2095 | raw_spin_lock(&cfs_b->lock); |
| 2096 | |
| 2097 | throttled = !list_empty(&cfs_b->throttled_cfs_rq); |
| 2098 | } |
| 2099 | |
| 2100 | /* return (any) remaining runtime */ |
| 2101 | cfs_b->runtime = runtime; |
| 2102 | /* |
| 2103 | 	 * Although we are guaranteed activity in the period following an |
| 2104 | * unthrottle, this also covers the case in which the new bandwidth is |
| 2105 | * insufficient to cover the existing bandwidth deficit. (Forcing the |
| 2106 | * timer to remain active while there are any throttled entities.) |
| 2107 | */ |
| 2108 | cfs_b->idle = 0; |
Paul Turner | 58088ad | 2011-07-21 09:43:31 -0700 | [diff] [blame] | 2109 | out_unlock: |
| 2110 | if (idle) |
| 2111 | cfs_b->timer_active = 0; |
| 2112 | raw_spin_unlock(&cfs_b->lock); |
| 2113 | |
| 2114 | return idle; |
| 2115 | } |
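/*
 * Illustrative walk-through with assumed numbers (not from the original
 * source): quota = 20ms per 100ms period, one cfs_rq throttled 3ms in debt.
 * A period expiry then roughly does:
 *
 *	__refill_cfs_bandwidth_runtime()  -> cfs_b->runtime = 20ms
 *	distribute_cfs_runtime(20ms, ..)  -> pays the 3ms debt, unthrottles
 *	cfs_b->runtime ~= 17ms            -> kept for future local slices
 *	cfs_b->idle = 0                   -> period timer stays active
 *
 * Only when nothing was throttled and nothing ran in the previous period
 * does the timer go idle (cfs_b->timer_active = 0 above).
 */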
Paul Turner | d3d9dc3 | 2011-07-21 09:43:39 -0700 | [diff] [blame] | 2116 | |
Paul Turner | d8b4986 | 2011-07-21 09:43:41 -0700 | [diff] [blame] | 2117 | /* a cfs_rq won't donate quota below this amount */ |
| 2118 | static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC; |
| 2119 | /* minimum remaining period time to redistribute slack quota */ |
| 2120 | static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC; |
| 2121 | /* how long we wait to gather additional slack before distributing */ |
| 2122 | static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC; |
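/*
 * Rough timeline sketch (illustrative, not from the original source): a
 * cfs_rq going idle at time t with more than min_cfs_rq_runtime left donates
 * the excess; if the global pool then exceeds a slice and some group is
 * throttled, the slack timer is armed for t + 5ms.  The timer is skipped
 * entirely when the period timer will refresh quota within the next
 * 5ms + 2ms (slack period plus min_bandwidth_expiration), since a full
 * refill is imminent anyway.
 */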
| 2123 | |
| 2124 | /* are we near the end of the current quota period? */ |
| 2125 | static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire) |
| 2126 | { |
| 2127 | struct hrtimer *refresh_timer = &cfs_b->period_timer; |
| 2128 | u64 remaining; |
| 2129 | |
| 2130 | 	/* if the call-back is running, a quota refresh is already occurring */ |
| 2131 | if (hrtimer_callback_running(refresh_timer)) |
| 2132 | return 1; |
| 2133 | |
| 2134 | /* is a quota refresh about to occur? */ |
| 2135 | remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer)); |
| 2136 | if (remaining < min_expire) |
| 2137 | return 1; |
| 2138 | |
| 2139 | return 0; |
| 2140 | } |
| 2141 | |
| 2142 | static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b) |
| 2143 | { |
| 2144 | u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration; |
| 2145 | |
| 2146 | /* if there's a quota refresh soon don't bother with slack */ |
| 2147 | if (runtime_refresh_within(cfs_b, min_left)) |
| 2148 | return; |
| 2149 | |
| 2150 | start_bandwidth_timer(&cfs_b->slack_timer, |
| 2151 | ns_to_ktime(cfs_bandwidth_slack_period)); |
| 2152 | } |
| 2153 | |
| 2154 | /* we know any runtime found here is valid as update_curr() precedes return */ |
| 2155 | static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq) |
| 2156 | { |
| 2157 | struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); |
| 2158 | s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime; |
| 2159 | |
| 2160 | if (slack_runtime <= 0) |
| 2161 | return; |
| 2162 | |
| 2163 | raw_spin_lock(&cfs_b->lock); |
| 2164 | if (cfs_b->quota != RUNTIME_INF && |
| 2165 | cfs_rq->runtime_expires == cfs_b->runtime_expires) { |
| 2166 | cfs_b->runtime += slack_runtime; |
| 2167 | |
| 2168 | /* we are under rq->lock, defer unthrottling using a timer */ |
| 2169 | if (cfs_b->runtime > sched_cfs_bandwidth_slice() && |
| 2170 | !list_empty(&cfs_b->throttled_cfs_rq)) |
| 2171 | start_cfs_slack_bandwidth(cfs_b); |
| 2172 | } |
| 2173 | raw_spin_unlock(&cfs_b->lock); |
| 2174 | |
| 2175 | /* even if it's not valid for return we don't want to try again */ |
| 2176 | cfs_rq->runtime_remaining -= slack_runtime; |
| 2177 | } |
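/*
 * Illustrative arithmetic (not part of the original source), using the
 * min_cfs_rq_runtime = 1ms defined above:
 *
 *	runtime_remaining = 4ms   -> slack_runtime = 3ms donated to cfs_b,
 *	                             1ms kept for an immediate local wakeup
 *	runtime_remaining = 0.5ms -> slack_runtime <= 0, nothing is returned
 *
 * When slack_runtime is positive, runtime_remaining is decremented even if
 * the global pool rejects the donation (stale expiry), so the same slack is
 * never offered twice.
 */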
| 2178 | |
| 2179 | static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) |
| 2180 | { |
Paul Turner | 56f570e | 2011-11-07 20:26:33 -0800 | [diff] [blame] | 2181 | if (!cfs_bandwidth_used()) |
| 2182 | return; |
| 2183 | |
Paul Turner | fccfdc6 | 2011-11-07 20:26:34 -0800 | [diff] [blame] | 2184 | if (!cfs_rq->runtime_enabled || cfs_rq->nr_running) |
Paul Turner | d8b4986 | 2011-07-21 09:43:41 -0700 | [diff] [blame] | 2185 | return; |
| 2186 | |
| 2187 | __return_cfs_rq_runtime(cfs_rq); |
| 2188 | } |
| 2189 | |
| 2190 | /* |
| 2191 | * This is done with a timer (instead of inline with bandwidth return) since |
| 2192 | * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs. |
| 2193 | */ |
| 2194 | static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) |
| 2195 | { |
| 2196 | u64 runtime = 0, slice = sched_cfs_bandwidth_slice(); |
| 2197 | u64 expires; |
| 2198 | |
| 2199 | /* confirm we're still not at a refresh boundary */ |
| 2200 | if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) |
| 2201 | return; |
| 2202 | |
| 2203 | raw_spin_lock(&cfs_b->lock); |
| 2204 | if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) { |
| 2205 | runtime = cfs_b->runtime; |
| 2206 | cfs_b->runtime = 0; |
| 2207 | } |
| 2208 | expires = cfs_b->runtime_expires; |
| 2209 | raw_spin_unlock(&cfs_b->lock); |
| 2210 | |
| 2211 | if (!runtime) |
| 2212 | return; |
| 2213 | |
| 2214 | runtime = distribute_cfs_runtime(cfs_b, runtime, expires); |
| 2215 | |
| 2216 | raw_spin_lock(&cfs_b->lock); |
| 2217 | if (expires == cfs_b->runtime_expires) |
| 2218 | cfs_b->runtime = runtime; |
| 2219 | raw_spin_unlock(&cfs_b->lock); |
| 2220 | } |
| 2221 | |
Paul Turner | d3d9dc3 | 2011-07-21 09:43:39 -0700 | [diff] [blame] | 2222 | /* |
| 2223 | * When a group wakes up we want to make sure that its quota is not already |
| 2224 | * expired/exceeded, otherwise it may be allowed to steal additional ticks of |
| 2225 |  * runtime, as update_curr() throttling cannot trigger until it's on-rq. |
| 2226 | */ |
| 2227 | static void check_enqueue_throttle(struct cfs_rq *cfs_rq) |
| 2228 | { |
Paul Turner | 56f570e | 2011-11-07 20:26:33 -0800 | [diff] [blame] | 2229 | if (!cfs_bandwidth_used()) |
| 2230 | return; |
| 2231 | |
Paul Turner | d3d9dc3 | 2011-07-21 09:43:39 -0700 | [diff] [blame] | 2232 | /* an active group must be handled by the update_curr()->put() path */ |
| 2233 | if (!cfs_rq->runtime_enabled || cfs_rq->curr) |
| 2234 | return; |
| 2235 | |
| 2236 | /* ensure the group is not already throttled */ |
| 2237 | if (cfs_rq_throttled(cfs_rq)) |
| 2238 | return; |
| 2239 | |
| 2240 | /* update runtime allocation */ |
| 2241 | account_cfs_rq_runtime(cfs_rq, 0); |
| 2242 | if (cfs_rq->runtime_remaining <= 0) |
| 2243 | throttle_cfs_rq(cfs_rq); |
| 2244 | } |
| 2245 | |
| 2246 | /* conditionally throttle active cfs_rq's from put_prev_entity() */ |
| 2247 | static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) |
| 2248 | { |
Paul Turner | 56f570e | 2011-11-07 20:26:33 -0800 | [diff] [blame] | 2249 | if (!cfs_bandwidth_used()) |
| 2250 | return; |
| 2251 | |
Paul Turner | d3d9dc3 | 2011-07-21 09:43:39 -0700 | [diff] [blame] | 2252 | if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0)) |
| 2253 | return; |
| 2254 | |
| 2255 | /* |
| 2256 | * it's possible for a throttled entity to be forced into a running |
| 2257 | 	 * state (e.g. set_curr_task); in this case we're finished. |
| 2258 | */ |
| 2259 | if (cfs_rq_throttled(cfs_rq)) |
| 2260 | return; |
| 2261 | |
| 2262 | throttle_cfs_rq(cfs_rq); |
| 2263 | } |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 2264 | |
| 2265 | static inline u64 default_cfs_period(void); |
| 2266 | static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun); |
| 2267 | static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b); |
| 2268 | |
| 2269 | static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer) |
| 2270 | { |
| 2271 | struct cfs_bandwidth *cfs_b = |
| 2272 | container_of(timer, struct cfs_bandwidth, slack_timer); |
| 2273 | do_sched_cfs_slack_timer(cfs_b); |
| 2274 | |
| 2275 | return HRTIMER_NORESTART; |
| 2276 | } |
| 2277 | |
| 2278 | static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) |
| 2279 | { |
| 2280 | struct cfs_bandwidth *cfs_b = |
| 2281 | container_of(timer, struct cfs_bandwidth, period_timer); |
| 2282 | ktime_t now; |
| 2283 | int overrun; |
| 2284 | int idle = 0; |
| 2285 | |
| 2286 | for (;;) { |
| 2287 | now = hrtimer_cb_get_time(timer); |
| 2288 | overrun = hrtimer_forward(timer, now, cfs_b->period); |
| 2289 | |
| 2290 | if (!overrun) |
| 2291 | break; |
| 2292 | |
| 2293 | idle = do_sched_cfs_period_timer(cfs_b, overrun); |
| 2294 | } |
| 2295 | |
| 2296 | return idle ? HRTIMER_NORESTART : HRTIMER_RESTART; |
| 2297 | } |
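/*
 * Illustrative note (not from the original source): hrtimer_forward()
 * advances the expiry in whole periods until it lies in the future and
 * returns how many periods were skipped.  If this callback runs 250ms after
 * the programmed expiry with a 100ms period, the first pass returns
 * overrun = 3 and do_sched_cfs_period_timer() accounts all three at once
 * (cfs_b->nr_periods += overrun); the second pass returns 0 and breaks out.
 */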
| 2298 | |
| 2299 | void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) |
| 2300 | { |
| 2301 | raw_spin_lock_init(&cfs_b->lock); |
| 2302 | cfs_b->runtime = 0; |
| 2303 | cfs_b->quota = RUNTIME_INF; |
| 2304 | cfs_b->period = ns_to_ktime(default_cfs_period()); |
| 2305 | |
| 2306 | INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq); |
| 2307 | hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
| 2308 | cfs_b->period_timer.function = sched_cfs_period_timer; |
| 2309 | hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
| 2310 | cfs_b->slack_timer.function = sched_cfs_slack_timer; |
| 2311 | } |
| 2312 | |
| 2313 | static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) |
| 2314 | { |
| 2315 | cfs_rq->runtime_enabled = 0; |
| 2316 | INIT_LIST_HEAD(&cfs_rq->throttled_list); |
| 2317 | } |
| 2318 | |
| 2319 | /* requires cfs_b->lock, may release to reprogram timer */ |
| 2320 | void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b) |
| 2321 | { |
| 2322 | /* |
| 2323 | * The timer may be active because we're trying to set a new bandwidth |
| 2324 | * period or because we're racing with the tear-down path |
| 2325 | * (timer_active==0 becomes visible before the hrtimer call-back |
| 2326 | 	 * terminates). In either case we ensure that it's re-programmed. |
| 2327 | */ |
| 2328 | while (unlikely(hrtimer_active(&cfs_b->period_timer))) { |
| 2329 | raw_spin_unlock(&cfs_b->lock); |
| 2330 | /* ensure cfs_b->lock is available while we wait */ |
| 2331 | hrtimer_cancel(&cfs_b->period_timer); |
| 2332 | |
| 2333 | raw_spin_lock(&cfs_b->lock); |
| 2334 | /* if someone else restarted the timer then we're done */ |
| 2335 | if (cfs_b->timer_active) |
| 2336 | return; |
| 2337 | } |
| 2338 | |
| 2339 | cfs_b->timer_active = 1; |
| 2340 | start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period); |
| 2341 | } |
| 2342 | |
| 2343 | static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) |
| 2344 | { |
| 2345 | hrtimer_cancel(&cfs_b->period_timer); |
| 2346 | hrtimer_cancel(&cfs_b->slack_timer); |
| 2347 | } |
| 2348 | |
Peter Boonstoppel | a4c96ae | 2012-08-09 15:34:47 -0700 | [diff] [blame] | 2349 | static void unthrottle_offline_cfs_rqs(struct rq *rq) |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 2350 | { |
| 2351 | struct cfs_rq *cfs_rq; |
| 2352 | |
| 2353 | for_each_leaf_cfs_rq(rq, cfs_rq) { |
| 2354 | struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); |
| 2355 | |
| 2356 | if (!cfs_rq->runtime_enabled) |
| 2357 | continue; |
| 2358 | |
| 2359 | /* |
| 2360 | * clock_task is not advancing so we just need to make sure |
| 2361 | * there's some valid quota amount |
| 2362 | */ |
| 2363 | cfs_rq->runtime_remaining = cfs_b->quota; |
| 2364 | if (cfs_rq_throttled(cfs_rq)) |
| 2365 | unthrottle_cfs_rq(cfs_rq); |
| 2366 | } |
| 2367 | } |
| 2368 | |
| 2369 | #else /* CONFIG_CFS_BANDWIDTH */ |
Peter Zijlstra | 6c16a6d | 2012-03-21 13:07:16 -0700 | [diff] [blame] | 2370 | static __always_inline |
| 2371 | void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec) {} |
Paul Turner | d3d9dc3 | 2011-07-21 09:43:39 -0700 | [diff] [blame] | 2372 | static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} |
| 2373 | static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {} |
Peter Zijlstra | 6c16a6d | 2012-03-21 13:07:16 -0700 | [diff] [blame] | 2374 | static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} |
Paul Turner | 85dac90 | 2011-07-21 09:43:33 -0700 | [diff] [blame] | 2375 | |
| 2376 | static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) |
| 2377 | { |
| 2378 | return 0; |
| 2379 | } |
Paul Turner | 64660c8 | 2011-07-21 09:43:36 -0700 | [diff] [blame] | 2380 | |
| 2381 | static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) |
| 2382 | { |
| 2383 | return 0; |
| 2384 | } |
| 2385 | |
| 2386 | static inline int throttled_lb_pair(struct task_group *tg, |
| 2387 | int src_cpu, int dest_cpu) |
| 2388 | { |
| 2389 | return 0; |
| 2390 | } |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 2391 | |
| 2392 | void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} |
| 2393 | |
| 2394 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 2395 | static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} |
Paul Turner | ab84d31 | 2011-07-21 09:43:28 -0700 | [diff] [blame] | 2396 | #endif |
| 2397 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 2398 | static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) |
| 2399 | { |
| 2400 | return NULL; |
| 2401 | } |
| 2402 | static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} |
Peter Boonstoppel | a4c96ae | 2012-08-09 15:34:47 -0700 | [diff] [blame] | 2403 | static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {} |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 2404 | |
| 2405 | #endif /* CONFIG_CFS_BANDWIDTH */ |
| 2406 | |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2407 | /************************************************** |
| 2408 | * CFS operations on tasks: |
| 2409 | */ |
| 2410 | |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 2411 | #ifdef CONFIG_SCHED_HRTICK |
| 2412 | static void hrtick_start_fair(struct rq *rq, struct task_struct *p) |
| 2413 | { |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 2414 | struct sched_entity *se = &p->se; |
| 2415 | struct cfs_rq *cfs_rq = cfs_rq_of(se); |
| 2416 | |
| 2417 | WARN_ON(task_rq(p) != rq); |
| 2418 | |
Mike Galbraith | b39e66e | 2011-11-22 15:20:07 +0100 | [diff] [blame] | 2419 | if (cfs_rq->nr_running > 1) { |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 2420 | u64 slice = sched_slice(cfs_rq, se); |
| 2421 | u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime; |
| 2422 | s64 delta = slice - ran; |
| 2423 | |
| 2424 | if (delta < 0) { |
| 2425 | if (rq->curr == p) |
| 2426 | resched_task(p); |
| 2427 | return; |
| 2428 | } |
| 2429 | |
| 2430 | /* |
| 2431 | 		 * Don't schedule slices shorter than 10000ns; that just |
| 2432 | * doesn't make sense. Rely on vruntime for fairness. |
| 2433 | */ |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 2434 | if (rq->curr != p) |
Peter Zijlstra | 157124c | 2008-07-28 11:53:11 +0200 | [diff] [blame] | 2435 | delta = max_t(s64, 10000LL, delta); |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 2436 | |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 2437 | hrtick_start(rq, delta); |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 2438 | } |
| 2439 | } |
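/*
 * Worked example (illustrative, not from the original source): if
 * sched_slice() yields a 12ms slice and the task has already run 3ms of it,
 * the hrtick is programmed 9ms out so preemption lands exactly at slice
 * expiry instead of waiting for the next regular tick.  If the slice is
 * already overrun (delta < 0), a currently running task is resched'ed
 * immediately and no hrtick is armed; for a task that is not currently
 * running, the delay is clamped to at least 10000ns.
 */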
Peter Zijlstra | a4c2f00 | 2008-10-17 19:27:03 +0200 | [diff] [blame] | 2440 | |
| 2441 | /* |
| 2442 | * called from enqueue/dequeue and updates the hrtick when the |
| 2443 | * current task is from our class and nr_running is low enough |
| 2444 | * to matter. |
| 2445 | */ |
| 2446 | static void hrtick_update(struct rq *rq) |
| 2447 | { |
| 2448 | struct task_struct *curr = rq->curr; |
| 2449 | |
Mike Galbraith | b39e66e | 2011-11-22 15:20:07 +0100 | [diff] [blame] | 2450 | if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class) |
Peter Zijlstra | a4c2f00 | 2008-10-17 19:27:03 +0200 | [diff] [blame] | 2451 | return; |
| 2452 | |
| 2453 | if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency) |
| 2454 | hrtick_start_fair(rq, curr); |
| 2455 | } |
Dhaval Giani | 55e12e5 | 2008-06-24 23:39:43 +0530 | [diff] [blame] | 2456 | #else /* !CONFIG_SCHED_HRTICK */ |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 2457 | static inline void |
| 2458 | hrtick_start_fair(struct rq *rq, struct task_struct *p) |
| 2459 | { |
| 2460 | } |
Peter Zijlstra | a4c2f00 | 2008-10-17 19:27:03 +0200 | [diff] [blame] | 2461 | |
| 2462 | static inline void hrtick_update(struct rq *rq) |
| 2463 | { |
| 2464 | } |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 2465 | #endif |
| 2466 | |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2467 | /* |
| 2468 | * The enqueue_task method is called before nr_running is |
| 2469 | * increased. Here we update the fair scheduling stats and |
| 2470 | * then put the task into the rbtree: |
| 2471 | */ |
Thomas Gleixner | ea87bb7 | 2010-01-20 20:58:57 +0000 | [diff] [blame] | 2472 | static void |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 2473 | enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2474 | { |
| 2475 | struct cfs_rq *cfs_rq; |
Peter Zijlstra | 62fb185 | 2008-02-25 17:34:02 +0100 | [diff] [blame] | 2476 | struct sched_entity *se = &p->se; |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2477 | |
| 2478 | for_each_sched_entity(se) { |
Peter Zijlstra | 62fb185 | 2008-02-25 17:34:02 +0100 | [diff] [blame] | 2479 | if (se->on_rq) |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2480 | break; |
| 2481 | cfs_rq = cfs_rq_of(se); |
Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 2482 | enqueue_entity(cfs_rq, se, flags); |
Paul Turner | 85dac90 | 2011-07-21 09:43:33 -0700 | [diff] [blame] | 2483 | |
| 2484 | /* |
| 2485 | * end evaluation on encountering a throttled cfs_rq |
| 2486 | * |
| 2487 | * note: in the case of encountering a throttled cfs_rq we will |
| 2488 | * post the final h_nr_running increment below. |
| 2489 | */ |
| 2490 | if (cfs_rq_throttled(cfs_rq)) |
| 2491 | break; |
Paul Turner | 953bfcd | 2011-07-21 09:43:27 -0700 | [diff] [blame] | 2492 | cfs_rq->h_nr_running++; |
Paul Turner | 85dac90 | 2011-07-21 09:43:33 -0700 | [diff] [blame] | 2493 | |
Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 2494 | flags = ENQUEUE_WAKEUP; |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2495 | } |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 2496 | |
Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 2497 | for_each_sched_entity(se) { |
Lin Ming | 0f31714 | 2011-07-22 09:14:31 +0800 | [diff] [blame] | 2498 | cfs_rq = cfs_rq_of(se); |
Paul Turner | 953bfcd | 2011-07-21 09:43:27 -0700 | [diff] [blame] | 2499 | cfs_rq->h_nr_running++; |
Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 2500 | |
Paul Turner | 85dac90 | 2011-07-21 09:43:33 -0700 | [diff] [blame] | 2501 | if (cfs_rq_throttled(cfs_rq)) |
| 2502 | break; |
| 2503 | |
Paul Turner | d6b5591 | 2010-11-15 15:47:09 -0800 | [diff] [blame] | 2504 | update_cfs_load(cfs_rq, 0); |
Paul Turner | 6d5ab29 | 2011-01-21 20:45:01 -0800 | [diff] [blame] | 2505 | update_cfs_shares(cfs_rq); |
Paul Turner | 9ee474f | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 2506 | update_entity_load_avg(se, 1); |
Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 2507 | } |
| 2508 | |
Ben Segall | 18bf280 | 2012-10-04 12:51:20 +0200 | [diff] [blame] | 2509 | if (!se) { |
| 2510 | update_rq_runnable_avg(rq, rq->nr_running); |
Paul Turner | 85dac90 | 2011-07-21 09:43:33 -0700 | [diff] [blame] | 2511 | inc_nr_running(rq); |
Ben Segall | 18bf280 | 2012-10-04 12:51:20 +0200 | [diff] [blame] | 2512 | } |
Peter Zijlstra | a4c2f00 | 2008-10-17 19:27:03 +0200 | [diff] [blame] | 2513 | hrtick_update(rq); |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2514 | } |
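/*
 * Illustrative note (added commentary): with a task group A containing a
 * child group A/B and a task waking up in A/B, the first loop above enqueues
 * the A/B and A entities bottom-up until it meets one that is already on_rq
 * or a throttled cfs_rq; the second loop finishes the h_nr_running and
 * load/shares bookkeeping for the ancestors above that point.  rq->nr_running
 * is only incremented when the walk reaches the root (se == NULL), which is
 * why a throttled cfs_rq simply breaks out early.
 */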
| 2515 | |
Venkatesh Pallipadi | 2f36825 | 2011-04-14 10:30:53 -0700 | [diff] [blame] | 2516 | static void set_next_buddy(struct sched_entity *se); |
| 2517 | |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2518 | /* |
| 2519 | * The dequeue_task method is called before nr_running is |
| 2520 | * decreased. We remove the task from the rbtree and |
| 2521 | * update the fair scheduling stats: |
| 2522 | */ |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 2523 | static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2524 | { |
| 2525 | struct cfs_rq *cfs_rq; |
Peter Zijlstra | 62fb185 | 2008-02-25 17:34:02 +0100 | [diff] [blame] | 2526 | struct sched_entity *se = &p->se; |
Venkatesh Pallipadi | 2f36825 | 2011-04-14 10:30:53 -0700 | [diff] [blame] | 2527 | int task_sleep = flags & DEQUEUE_SLEEP; |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2528 | |
| 2529 | for_each_sched_entity(se) { |
| 2530 | cfs_rq = cfs_rq_of(se); |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 2531 | dequeue_entity(cfs_rq, se, flags); |
Paul Turner | 85dac90 | 2011-07-21 09:43:33 -0700 | [diff] [blame] | 2532 | |
| 2533 | /* |
| 2534 | * end evaluation on encountering a throttled cfs_rq |
| 2535 | * |
| 2536 | * note: in the case of encountering a throttled cfs_rq we will |
| 2537 | * post the final h_nr_running decrement below. |
| 2538 | */ |
| 2539 | if (cfs_rq_throttled(cfs_rq)) |
| 2540 | break; |
Paul Turner | 953bfcd | 2011-07-21 09:43:27 -0700 | [diff] [blame] | 2541 | cfs_rq->h_nr_running--; |
Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 2542 | |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2543 | /* Don't dequeue parent if it has other entities besides us */ |
Venkatesh Pallipadi | 2f36825 | 2011-04-14 10:30:53 -0700 | [diff] [blame] | 2544 | if (cfs_rq->load.weight) { |
| 2545 | /* |
| 2546 | * Bias pick_next to pick a task from this cfs_rq, as |
| 2547 | * p is sleeping when it is within its sched_slice. |
| 2548 | */ |
| 2549 | if (task_sleep && parent_entity(se)) |
| 2550 | set_next_buddy(parent_entity(se)); |
Paul Turner | 9598c82 | 2011-07-06 22:30:37 -0700 | [diff] [blame] | 2551 | |
| 2552 | /* avoid re-evaluating load for this entity */ |
| 2553 | se = parent_entity(se); |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2554 | break; |
Venkatesh Pallipadi | 2f36825 | 2011-04-14 10:30:53 -0700 | [diff] [blame] | 2555 | } |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 2556 | flags |= DEQUEUE_SLEEP; |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2557 | } |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 2558 | |
Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 2559 | for_each_sched_entity(se) { |
Lin Ming | 0f31714 | 2011-07-22 09:14:31 +0800 | [diff] [blame] | 2560 | cfs_rq = cfs_rq_of(se); |
Paul Turner | 953bfcd | 2011-07-21 09:43:27 -0700 | [diff] [blame] | 2561 | cfs_rq->h_nr_running--; |
Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 2562 | |
Paul Turner | 85dac90 | 2011-07-21 09:43:33 -0700 | [diff] [blame] | 2563 | if (cfs_rq_throttled(cfs_rq)) |
| 2564 | break; |
| 2565 | |
Paul Turner | d6b5591 | 2010-11-15 15:47:09 -0800 | [diff] [blame] | 2566 | update_cfs_load(cfs_rq, 0); |
Paul Turner | 6d5ab29 | 2011-01-21 20:45:01 -0800 | [diff] [blame] | 2567 | update_cfs_shares(cfs_rq); |
Paul Turner | 9ee474f | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 2568 | update_entity_load_avg(se, 1); |
Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 2569 | } |
| 2570 | |
Ben Segall | 18bf280 | 2012-10-04 12:51:20 +0200 | [diff] [blame] | 2571 | if (!se) { |
Paul Turner | 85dac90 | 2011-07-21 09:43:33 -0700 | [diff] [blame] | 2572 | dec_nr_running(rq); |
Ben Segall | 18bf280 | 2012-10-04 12:51:20 +0200 | [diff] [blame] | 2573 | update_rq_runnable_avg(rq, 1); |
| 2574 | } |
Peter Zijlstra | a4c2f00 | 2008-10-17 19:27:03 +0200 | [diff] [blame] | 2575 | hrtick_update(rq); |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2576 | } |
| 2577 | |
Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 2578 | #ifdef CONFIG_SMP |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 2579 | /* Used instead of source_load when we know the type == 0 */ |
| 2580 | static unsigned long weighted_cpuload(const int cpu) |
| 2581 | { |
| 2582 | return cpu_rq(cpu)->load.weight; |
| 2583 | } |
| 2584 | |
| 2585 | /* |
| 2586 | * Return a low guess at the load of a migration-source cpu weighted |
| 2587 | * according to the scheduling class and "nice" value. |
| 2588 | * |
| 2589 | * We want to under-estimate the load of migration sources, to |
| 2590 | * balance conservatively. |
| 2591 | */ |
| 2592 | static unsigned long source_load(int cpu, int type) |
| 2593 | { |
| 2594 | struct rq *rq = cpu_rq(cpu); |
| 2595 | unsigned long total = weighted_cpuload(cpu); |
| 2596 | |
| 2597 | if (type == 0 || !sched_feat(LB_BIAS)) |
| 2598 | return total; |
| 2599 | |
| 2600 | return min(rq->cpu_load[type-1], total); |
| 2601 | } |
| 2602 | |
| 2603 | /* |
| 2604 | * Return a high guess at the load of a migration-target cpu weighted |
| 2605 | * according to the scheduling class and "nice" value. |
| 2606 | */ |
| 2607 | static unsigned long target_load(int cpu, int type) |
| 2608 | { |
| 2609 | struct rq *rq = cpu_rq(cpu); |
| 2610 | unsigned long total = weighted_cpuload(cpu); |
| 2611 | |
| 2612 | if (type == 0 || !sched_feat(LB_BIAS)) |
| 2613 | return total; |
| 2614 | |
| 2615 | return max(rq->cpu_load[type-1], total); |
| 2616 | } |
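/*
 * Illustrative example (not from the original source): cpu_load[] holds
 * increasingly slow-moving averages of the runqueue weight.  If a cpu's
 * instantaneous weighted_cpuload() is 2048 while cpu_load[type-1] has only
 * decayed to 1024, source_load() reports 1024 and target_load() reports
 * 2048; under-estimating sources and over-estimating targets shrinks the
 * apparent imbalance, so tasks only migrate when the gap is unambiguous.
 */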
| 2617 | |
| 2618 | static unsigned long power_of(int cpu) |
| 2619 | { |
| 2620 | return cpu_rq(cpu)->cpu_power; |
| 2621 | } |
| 2622 | |
| 2623 | static unsigned long cpu_avg_load_per_task(int cpu) |
| 2624 | { |
| 2625 | struct rq *rq = cpu_rq(cpu); |
| 2626 | unsigned long nr_running = ACCESS_ONCE(rq->nr_running); |
| 2627 | |
| 2628 | if (nr_running) |
| 2629 | return rq->load.weight / nr_running; |
| 2630 | |
| 2631 | return 0; |
| 2632 | } |
| 2633 | |
Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 2634 | |
Peter Zijlstra | 74f8e4b | 2011-04-05 17:23:47 +0200 | [diff] [blame] | 2635 | static void task_waking_fair(struct task_struct *p) |
Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 2636 | { |
| 2637 | struct sched_entity *se = &p->se; |
| 2638 | struct cfs_rq *cfs_rq = cfs_rq_of(se); |
Peter Zijlstra | 3fe1698 | 2011-04-05 17:23:48 +0200 | [diff] [blame] | 2639 | u64 min_vruntime; |
Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 2640 | |
Peter Zijlstra | 3fe1698 | 2011-04-05 17:23:48 +0200 | [diff] [blame] | 2641 | #ifndef CONFIG_64BIT |
| 2642 | u64 min_vruntime_copy; |
Peter Zijlstra | 74f8e4b | 2011-04-05 17:23:47 +0200 | [diff] [blame] | 2643 | |
Peter Zijlstra | 3fe1698 | 2011-04-05 17:23:48 +0200 | [diff] [blame] | 2644 | do { |
| 2645 | min_vruntime_copy = cfs_rq->min_vruntime_copy; |
| 2646 | smp_rmb(); |
| 2647 | min_vruntime = cfs_rq->min_vruntime; |
| 2648 | } while (min_vruntime != min_vruntime_copy); |
| 2649 | #else |
| 2650 | min_vruntime = cfs_rq->min_vruntime; |
| 2651 | #endif |
| 2652 | |
| 2653 | se->vruntime -= min_vruntime; |
Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 2654 | } |
| 2655 | |
Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 2656 | #ifdef CONFIG_FAIR_GROUP_SCHED |
Peter Zijlstra | f5bfb7d | 2008-06-27 13:41:39 +0200 | [diff] [blame] | 2657 | /* |
| 2658 | * effective_load() calculates the load change as seen from the root_task_group |
| 2659 | * |
| 2660 | * Adding load to a group doesn't make a group heavier, but can cause movement |
| 2661 | * of group shares between cpus. Assuming the shares were perfectly aligned one |
| 2662 | * can calculate the shift in shares. |
Peter Zijlstra | cf5f0ac | 2011-10-13 16:52:28 +0200 | [diff] [blame] | 2663 | * |
| 2664 | * Calculate the effective load difference if @wl is added (subtracted) to @tg |
| 2665 | * on this @cpu and results in a total addition (subtraction) of @wg to the |
| 2666 | * total group weight. |
| 2667 | * |
| 2668 | * Given a runqueue weight distribution (rw_i) we can compute a shares |
| 2669 | * distribution (s_i) using: |
| 2670 | * |
| 2671 | * s_i = rw_i / \Sum rw_j (1) |
| 2672 | * |
| 2673 | * Suppose we have 4 CPUs and our @tg is a direct child of the root group and |
| 2674 | * has 7 equal weight tasks, distributed as below (rw_i), with the resulting |
| 2675 | * shares distribution (s_i): |
| 2676 | * |
| 2677 | * rw_i = { 2, 4, 1, 0 } |
| 2678 | * s_i = { 2/7, 4/7, 1/7, 0 } |
| 2679 | * |
| 2680 | * As per wake_affine() we're interested in the load of two CPUs (the CPU the |
| 2681 | * task used to run on and the CPU the waker is running on), we need to |
| 2682 | * compute the effect of waking a task on either CPU and, in case of a sync |
| 2683 | * wakeup, compute the effect of the current task going to sleep. |
| 2684 | * |
| 2685 | * So for a change of @wl to the local @cpu with an overall group weight change |
| 2686 | * of @wl we can compute the new shares distribution (s'_i) using: |
| 2687 | * |
| 2688 | * s'_i = (rw_i + @wl) / (@wg + \Sum rw_j) (2) |
| 2689 | * |
| 2690 | * Suppose we're interested in CPUs 0 and 1, and want to compute the load |
| 2691 | * differences in waking a task to CPU 0. The additional task changes the |
| 2692 | * weight and shares distributions like: |
| 2693 | * |
| 2694 | * rw'_i = { 3, 4, 1, 0 } |
| 2695 | * s'_i = { 3/8, 4/8, 1/8, 0 } |
| 2696 | * |
| 2697 | * We can then compute the difference in effective weight by using: |
| 2698 | * |
| 2699 | * dw_i = S * (s'_i - s_i) (3) |
| 2700 | * |
| 2701 | * Where 'S' is the group weight as seen by its parent. |
| 2702 | * |
| 2703 | * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7) |
| 2704 | * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 - |
| 2705 | * 4/7) times the weight of the group. |
Peter Zijlstra | f5bfb7d | 2008-06-27 13:41:39 +0200 | [diff] [blame] | 2706 | */ |
Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 2707 | static long effective_load(struct task_group *tg, int cpu, long wl, long wg) |
Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 2708 | { |
Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 2709 | struct sched_entity *se = tg->se[cpu]; |
Peter Zijlstra | f1d239f | 2008-06-27 13:41:38 +0200 | [diff] [blame] | 2710 | |
Peter Zijlstra | cf5f0ac | 2011-10-13 16:52:28 +0200 | [diff] [blame] | 2711 | if (!tg->parent) /* the trivial, non-cgroup case */ |
Peter Zijlstra | f1d239f | 2008-06-27 13:41:38 +0200 | [diff] [blame] | 2712 | return wl; |
| 2713 | |
Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 2714 | for_each_sched_entity(se) { |
Peter Zijlstra | cf5f0ac | 2011-10-13 16:52:28 +0200 | [diff] [blame] | 2715 | long w, W; |
Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 2716 | |
Paul Turner | 977dda7 | 2011-01-14 17:57:50 -0800 | [diff] [blame] | 2717 | tg = se->my_q->tg; |
Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 2718 | |
Peter Zijlstra | cf5f0ac | 2011-10-13 16:52:28 +0200 | [diff] [blame] | 2719 | /* |
| 2720 | * W = @wg + \Sum rw_j |
| 2721 | */ |
| 2722 | W = wg + calc_tg_weight(tg, se->my_q); |
Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 2723 | |
Peter Zijlstra | cf5f0ac | 2011-10-13 16:52:28 +0200 | [diff] [blame] | 2724 | /* |
| 2725 | * w = rw_i + @wl |
| 2726 | */ |
| 2727 | w = se->my_q->load.weight + wl; |
Peter Zijlstra | 940959e | 2008-09-23 15:33:42 +0200 | [diff] [blame] | 2728 | |
Peter Zijlstra | cf5f0ac | 2011-10-13 16:52:28 +0200 | [diff] [blame] | 2729 | /* |
| 2730 | * wl = S * s'_i; see (2) |
| 2731 | */ |
| 2732 | if (W > 0 && w < W) |
| 2733 | wl = (w * tg->shares) / W; |
Paul Turner | 977dda7 | 2011-01-14 17:57:50 -0800 | [diff] [blame] | 2734 | else |
| 2735 | wl = tg->shares; |
Peter Zijlstra | 940959e | 2008-09-23 15:33:42 +0200 | [diff] [blame] | 2736 | |
Peter Zijlstra | cf5f0ac | 2011-10-13 16:52:28 +0200 | [diff] [blame] | 2737 | /* |
| 2738 | * Per the above, wl is the new se->load.weight value; since |
| 2739 | * those are clipped to [MIN_SHARES, ...) do so now. See |
| 2740 | * calc_cfs_shares(). |
| 2741 | */ |
Paul Turner | 977dda7 | 2011-01-14 17:57:50 -0800 | [diff] [blame] | 2742 | if (wl < MIN_SHARES) |
| 2743 | wl = MIN_SHARES; |
Peter Zijlstra | cf5f0ac | 2011-10-13 16:52:28 +0200 | [diff] [blame] | 2744 | |
| 2745 | /* |
| 2746 | * wl = dw_i = S * (s'_i - s_i); see (3) |
| 2747 | */ |
Paul Turner | 977dda7 | 2011-01-14 17:57:50 -0800 | [diff] [blame] | 2748 | wl -= se->load.weight; |
Peter Zijlstra | cf5f0ac | 2011-10-13 16:52:28 +0200 | [diff] [blame] | 2749 | |
| 2750 | /* |
| 2751 | * Recursively apply this logic to all parent groups to compute |
| 2752 | * the final effective load change on the root group. Since |
| 2753 | * only the @tg group gets extra weight, all parent groups can |
| 2754 | * only redistribute existing shares. @wl is the shift in shares |
| 2755 | * resulting from this level per the above. |
| 2756 | */ |
Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 2757 | wg = 0; |
Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 2758 | } |
| 2759 | |
| 2760 | return wl; |
Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 2761 | } |
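/*
 * Numeric trace of the loop above for the example in the comment block
 * (illustrative, assuming shares are perfectly distributed as stated there):
 * one group level, rw_i = { 2, 4, 1, 0 }, a task of weight 1 waking on
 * cpu 0, so wl = wg = 1 and S = tg->shares:
 *
 *	W   = 1 + 7        = 8		(new total group weight)
 *	w   = 2 + 1        = 3		(new weight on this cpu)
 *	wl  = 3 * S / 8    = 3S/8	(new share, s'_0 * S)
 *	wl -= 2S/7			(old se->load.weight, s_0 * S)
 *	    = 5S/56			(matches dw_0 = S * (3/8 - 2/7))
 *
 * Deeper hierarchies repeat the same step upward with wg = 0, since only
 * the bottom group actually gains weight.
 */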
| 2762 | #else |
Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 2763 | |
Peter Zijlstra | 8337826 | 2008-06-27 13:41:37 +0200 | [diff] [blame] | 2764 | static inline unsigned long effective_load(struct task_group *tg, int cpu, |
| 2765 | unsigned long wl, unsigned long wg) |
Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 2766 | { |
Peter Zijlstra | 8337826 | 2008-06-27 13:41:37 +0200 | [diff] [blame] | 2767 | return wl; |
Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 2768 | } |
Peter Zijlstra | 4be9daa | 2008-06-27 13:41:30 +0200 | [diff] [blame] | 2769 | |
Peter Zijlstra | bb3469a | 2008-06-27 13:41:27 +0200 | [diff] [blame] | 2770 | #endif |
| 2771 | |
Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 2772 | static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync) |
Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 2773 | { |
Paul Turner | e37b6a7 | 2011-01-21 20:44:59 -0800 | [diff] [blame] | 2774 | s64 this_load, load; |
Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 2775 | int idx, this_cpu, prev_cpu; |
Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 2776 | unsigned long tl_per_task; |
Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 2777 | struct task_group *tg; |
Peter Zijlstra | 8337826 | 2008-06-27 13:41:37 +0200 | [diff] [blame] | 2778 | unsigned long weight; |
Mike Galbraith | b3137bc | 2008-05-29 11:11:41 +0200 | [diff] [blame] | 2779 | int balanced; |
Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 2780 | |
Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 2781 | idx = sd->wake_idx; |
| 2782 | this_cpu = smp_processor_id(); |
| 2783 | prev_cpu = task_cpu(p); |
| 2784 | load = source_load(prev_cpu, idx); |
| 2785 | this_load = target_load(this_cpu, idx); |
Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 2786 | |
| 2787 | /* |
Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 2788 | * If sync wakeup then subtract the (maximum possible) |
| 2789 | * effect of the currently running task from the load |
| 2790 | * of the current CPU: |
| 2791 | */ |
Peter Zijlstra | 8337826 | 2008-06-27 13:41:37 +0200 | [diff] [blame] | 2792 | if (sync) { |
| 2793 | tg = task_group(current); |
| 2794 | weight = current->se.load.weight; |
Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 2795 | |
Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 2796 | this_load += effective_load(tg, this_cpu, -weight, -weight); |
Peter Zijlstra | 8337826 | 2008-06-27 13:41:37 +0200 | [diff] [blame] | 2797 | load += effective_load(tg, prev_cpu, 0, -weight); |
| 2798 | } |
| 2799 | |
| 2800 | tg = task_group(p); |
| 2801 | weight = p->se.load.weight; |
| 2802 | |
Peter Zijlstra | 71a29aa | 2009-09-07 18:28:05 +0200 | [diff] [blame] | 2803 | /* |
| 2804 | * In low-load situations, where prev_cpu is idle and this_cpu is idle |
Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 2805 | * due to the sync cause above having dropped this_load to 0, we'll |
| 2806 | * always have an imbalance, but there's really nothing you can do |
| 2807 | * about that, so that's good too. |
Peter Zijlstra | 71a29aa | 2009-09-07 18:28:05 +0200 | [diff] [blame] | 2808 | * |
| 2809 | * Otherwise check if either cpus are near enough in load to allow this |
| 2810 | * task to be woken on this_cpu. |
| 2811 | */ |
Paul Turner | e37b6a7 | 2011-01-21 20:44:59 -0800 | [diff] [blame] | 2812 | if (this_load > 0) { |
| 2813 | s64 this_eff_load, prev_eff_load; |
Peter Zijlstra | e51fd5e | 2010-05-31 12:37:30 +0200 | [diff] [blame] | 2814 | |
| 2815 | this_eff_load = 100; |
| 2816 | this_eff_load *= power_of(prev_cpu); |
| 2817 | this_eff_load *= this_load + |
| 2818 | effective_load(tg, this_cpu, weight, weight); |
| 2819 | |
| 2820 | prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2; |
| 2821 | prev_eff_load *= power_of(this_cpu); |
| 2822 | prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight); |
| 2823 | |
| 2824 | balanced = this_eff_load <= prev_eff_load; |
| 2825 | } else |
| 2826 | balanced = true; |
Mike Galbraith | b3137bc | 2008-05-29 11:11:41 +0200 | [diff] [blame] | 2827 | |
| 2828 | /* |
| 2829 | * If the currently running task will sleep within |
| 2830 | * a reasonable amount of time then attract this newly |
| 2831 | * woken task: |
| 2832 | */ |
Peter Zijlstra | 2fb7635 | 2008-10-08 09:16:04 +0200 | [diff] [blame] | 2833 | if (sync && balanced) |
| 2834 | return 1; |
Mike Galbraith | b3137bc | 2008-05-29 11:11:41 +0200 | [diff] [blame] | 2835 | |
Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 2836 | schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts); |
Mike Galbraith | b3137bc | 2008-05-29 11:11:41 +0200 | [diff] [blame] | 2837 | tl_per_task = cpu_avg_load_per_task(this_cpu); |
| 2838 | |
Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 2839 | if (balanced || |
| 2840 | (this_load <= load && |
| 2841 | this_load + target_load(prev_cpu, idx) <= tl_per_task)) { |
Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 2842 | /* |
| 2843 | * This domain has SD_WAKE_AFFINE and |
| 2844 | * p is cache cold in this domain, and |
| 2845 | * there is no bad imbalance. |
| 2846 | */ |
Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 2847 | schedstat_inc(sd, ttwu_move_affine); |
Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 2848 | schedstat_inc(p, se.statistics.nr_wakeups_affine); |
Ingo Molnar | 098fb9d | 2008-03-16 20:36:10 +0100 | [diff] [blame] | 2849 | |
| 2850 | return 1; |
| 2851 | } |
| 2852 | return 0; |
| 2853 | } |
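/*
 * Illustrative numbers (not from the original source): with equal cpu_power
 * on both cpus and the common imbalance_pct of 125, the balanced test above
 * reduces to
 *
 *	100 * (this_load + this adjustment)  <=  112 * (load + prev adjustment)
 *
 * where the adjustments are the effective_load() terms; the waking cpu may
 * be roughly 12% busier than the task's previous cpu and the affine wakeup
 * still wins, keeping cache-hot wakeups local without letting one cpu
 * become arbitrarily overloaded.
 */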
| 2854 | |
Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2855 | /* |
| 2856 | * find_idlest_group finds and returns the least busy CPU group within the |
| 2857 | * domain. |
| 2858 | */ |
| 2859 | static struct sched_group * |
Peter Zijlstra | 78e7ed5 | 2009-09-03 13:16:51 +0200 | [diff] [blame] | 2860 | find_idlest_group(struct sched_domain *sd, struct task_struct *p, |
Peter Zijlstra | 5158f4e | 2009-09-16 13:46:59 +0200 | [diff] [blame] | 2861 | int this_cpu, int load_idx) |
Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 2862 | { |
Andi Kleen | b3bd3de | 2010-08-10 14:17:51 -0700 | [diff] [blame] | 2863 | struct sched_group *idlest = NULL, *group = sd->groups; |
Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2864 | unsigned long min_load = ULONG_MAX, this_load = 0; |
Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2865 | int imbalance = 100 + (sd->imbalance_pct-100)/2; |
Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 2866 | |
Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2867 | do { |
| 2868 | unsigned long load, avg_load; |
| 2869 | int local_group; |
| 2870 | int i; |
Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 2871 | |
Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2872 | /* Skip over this group if it has no CPUs allowed */ |
| 2873 | if (!cpumask_intersects(sched_group_cpus(group), |
Peter Zijlstra | fa17b50 | 2011-06-16 12:23:22 +0200 | [diff] [blame] | 2874 | tsk_cpus_allowed(p))) |
Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2875 | continue; |
| 2876 | |
| 2877 | local_group = cpumask_test_cpu(this_cpu, |
| 2878 | sched_group_cpus(group)); |
| 2879 | |
| 2880 | /* Tally up the load of all CPUs in the group */ |
| 2881 | avg_load = 0; |
| 2882 | |
| 2883 | for_each_cpu(i, sched_group_cpus(group)) { |
| 2884 | /* Bias balancing toward cpus of our domain */ |
| 2885 | if (local_group) |
| 2886 | load = source_load(i, load_idx); |
| 2887 | else |
| 2888 | load = target_load(i, load_idx); |
| 2889 | |
| 2890 | avg_load += load; |
| 2891 | } |
| 2892 | |
| 2893 | /* Adjust by relative CPU power of the group */ |
Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 2894 | avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power; |
Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2895 | |
| 2896 | if (local_group) { |
| 2897 | this_load = avg_load; |
Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2898 | } else if (avg_load < min_load) { |
| 2899 | min_load = avg_load; |
| 2900 | idlest = group; |
| 2901 | } |
| 2902 | } while (group = group->next, group != sd->groups); |
| 2903 | |
| 2904 | if (!idlest || 100*this_load < imbalance*min_load) |
| 2905 | return NULL; |
| 2906 | return idlest; |
| 2907 | } |
| 2908 | |
| 2909 | /* |
| 2910 | * find_idlest_cpu - find the idlest cpu among the cpus in group. |
| 2911 | */ |
| 2912 | static int |
| 2913 | find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) |
| 2914 | { |
| 2915 | unsigned long load, min_load = ULONG_MAX; |
| 2916 | int idlest = -1; |
| 2917 | int i; |
| 2918 | |
| 2919 | /* Traverse only the allowed CPUs */ |
Peter Zijlstra | fa17b50 | 2011-06-16 12:23:22 +0200 | [diff] [blame] | 2920 | for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) { |
Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2921 | load = weighted_cpuload(i); |
| 2922 | |
| 2923 | if (load < min_load || (load == min_load && i == this_cpu)) { |
| 2924 | min_load = load; |
| 2925 | idlest = i; |
Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 2926 | } |
| 2927 | } |
| 2928 | |
Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2929 | return idlest; |
| 2930 | } |
Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 2931 | |
Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2932 | /* |
Peter Zijlstra | a50bde5 | 2009-11-12 15:55:28 +0100 | [diff] [blame] | 2933 | * Try and locate an idle CPU in the sched_domain. |
| 2934 | */ |
Suresh Siddha | 99bd5e2 | 2010-03-31 16:47:45 -0700 | [diff] [blame] | 2935 | static int select_idle_sibling(struct task_struct *p, int target) |
Peter Zijlstra | a50bde5 | 2009-11-12 15:55:28 +0100 | [diff] [blame] | 2936 | { |
| 2937 | int cpu = smp_processor_id(); |
| 2938 | int prev_cpu = task_cpu(p); |
Suresh Siddha | 99bd5e2 | 2010-03-31 16:47:45 -0700 | [diff] [blame] | 2939 | struct sched_domain *sd; |
Linus Torvalds | 37407ea | 2012-09-16 12:29:43 -0700 | [diff] [blame] | 2940 | struct sched_group *sg; |
| 2941 | int i; |
Peter Zijlstra | a50bde5 | 2009-11-12 15:55:28 +0100 | [diff] [blame] | 2942 | |
| 2943 | /* |
Suresh Siddha | 99bd5e2 | 2010-03-31 16:47:45 -0700 | [diff] [blame] | 2944 | * If the task is going to be woken-up on this cpu and if it is |
| 2945 | * already idle, then it is the right target. |
Peter Zijlstra | a50bde5 | 2009-11-12 15:55:28 +0100 | [diff] [blame] | 2946 | */ |
Suresh Siddha | 99bd5e2 | 2010-03-31 16:47:45 -0700 | [diff] [blame] | 2947 | if (target == cpu && idle_cpu(cpu)) |
| 2948 | return cpu; |
| 2949 | |
| 2950 | /* |
| 2951 | 	 * If the task is going to be woken up on the cpu where it previously |
| 2952 | 	 * ran and if it is currently idle, then it is the right target. |
| 2953 | */ |
| 2954 | if (target == prev_cpu && idle_cpu(prev_cpu)) |
Peter Zijlstra | fe3bcfe | 2009-11-12 15:55:29 +0100 | [diff] [blame] | 2955 | return prev_cpu; |
Peter Zijlstra | a50bde5 | 2009-11-12 15:55:28 +0100 | [diff] [blame] | 2956 | |
| 2957 | /* |
Linus Torvalds | 37407ea | 2012-09-16 12:29:43 -0700 | [diff] [blame] | 2958 | 	 * Otherwise, iterate the domains and find an eligible idle cpu. |
Peter Zijlstra | a50bde5 | 2009-11-12 15:55:28 +0100 | [diff] [blame] | 2959 | */ |
Peter Zijlstra | 518cd62 | 2011-12-07 15:07:31 +0100 | [diff] [blame] | 2960 | sd = rcu_dereference(per_cpu(sd_llc, target)); |
Suresh Siddha | 77e8136 | 2011-11-17 11:08:23 -0800 | [diff] [blame] | 2961 | for_each_lower_domain(sd) { |
Linus Torvalds | 37407ea | 2012-09-16 12:29:43 -0700 | [diff] [blame] | 2962 | sg = sd->groups; |
| 2963 | do { |
| 2964 | if (!cpumask_intersects(sched_group_cpus(sg), |
| 2965 | tsk_cpus_allowed(p))) |
| 2966 | goto next; |
Mike Galbraith | 970e178 | 2012-06-12 05:18:32 +0200 | [diff] [blame] | 2967 | |
Linus Torvalds | 37407ea | 2012-09-16 12:29:43 -0700 | [diff] [blame] | 2968 | for_each_cpu(i, sched_group_cpus(sg)) { |
| 2969 | if (!idle_cpu(i)) |
| 2970 | goto next; |
| 2971 | } |
| 2972 | |
| 2973 | target = cpumask_first_and(sched_group_cpus(sg), |
| 2974 | tsk_cpus_allowed(p)); |
| 2975 | goto done; |
| 2976 | next: |
| 2977 | sg = sg->next; |
| 2978 | } while (sg != sd->groups); |
| 2979 | } |
| 2980 | done: |
Peter Zijlstra | a50bde5 | 2009-11-12 15:55:28 +0100 | [diff] [blame] | 2981 | return target; |
| 2982 | } |
| 2983 | |
| 2984 | /* |
Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2985 |  * select_task_rq_fair: balance the current task (running on cpu) in domains |
| 2986 |  * that have the relevant sd_flag set. In practice, this is SD_BALANCE_WAKE, |
| 2987 |  * SD_BALANCE_FORK and SD_BALANCE_EXEC. |
| 2988 | * |
| 2989 | * Balance, ie. select the least loaded group. |
| 2990 | * |
| 2991 | * Returns the target CPU number, or the same CPU if no balancing is needed. |
| 2992 | * |
| 2993 | * preempt must be disabled. |
| 2994 | */ |
Peter Zijlstra | 0017d73 | 2010-03-24 18:34:10 +0100 | [diff] [blame] | 2995 | static int |
Peter Zijlstra | 7608dec | 2011-04-05 17:23:46 +0200 | [diff] [blame] | 2996 | select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags) |
Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 2997 | { |
Peter Zijlstra | 29cd8ba | 2009-09-17 09:01:14 +0200 | [diff] [blame] | 2998 | struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL; |
Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 2999 | int cpu = smp_processor_id(); |
| 3000 | int prev_cpu = task_cpu(p); |
| 3001 | int new_cpu = cpu; |
Suresh Siddha | 99bd5e2 | 2010-03-31 16:47:45 -0700 | [diff] [blame] | 3002 | int want_affine = 0; |
Peter Zijlstra | 5158f4e | 2009-09-16 13:46:59 +0200 | [diff] [blame] | 3003 | int sync = wake_flags & WF_SYNC; |
Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 3004 | |
Peter Zijlstra | 29baa74 | 2012-04-23 12:11:21 +0200 | [diff] [blame] | 3005 | if (p->nr_cpus_allowed == 1) |
Mike Galbraith | 76854c7 | 2011-11-22 15:18:24 +0100 | [diff] [blame] | 3006 | return prev_cpu; |
| 3007 | |
Peter Zijlstra | 0763a66 | 2009-09-14 19:37:39 +0200 | [diff] [blame] | 3008 | if (sd_flag & SD_BALANCE_WAKE) { |
Peter Zijlstra | fa17b50 | 2011-06-16 12:23:22 +0200 | [diff] [blame] | 3009 | if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) |
Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 3010 | want_affine = 1; |
| 3011 | new_cpu = prev_cpu; |
| 3012 | } |
Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 3013 | |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 3014 | rcu_read_lock(); |
Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 3015 | for_each_domain(cpu, tmp) { |
Peter Zijlstra | e4f42888 | 2009-12-16 18:04:34 +0100 | [diff] [blame] | 3016 | if (!(tmp->flags & SD_LOAD_BALANCE)) |
| 3017 | continue; |
| 3018 | |
Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 3019 | /* |
Suresh Siddha | 99bd5e2 | 2010-03-31 16:47:45 -0700 | [diff] [blame] | 3020 | * If both cpu and prev_cpu are part of this domain, |
| 3021 | * cpu is a valid SD_WAKE_AFFINE target. |
Peter Zijlstra | fe3bcfe | 2009-11-12 15:55:29 +0100 | [diff] [blame] | 3022 | */ |
Suresh Siddha | 99bd5e2 | 2010-03-31 16:47:45 -0700 | [diff] [blame] | 3023 | if (want_affine && (tmp->flags & SD_WAKE_AFFINE) && |
| 3024 | cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) { |
| 3025 | affine_sd = tmp; |
Alex Shi | f03542a | 2012-07-26 08:55:34 +0800 | [diff] [blame] | 3026 | break; |
Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 3027 | } |
| 3028 | |
Alex Shi | f03542a | 2012-07-26 08:55:34 +0800 | [diff] [blame] | 3029 | if (tmp->flags & sd_flag) |
Peter Zijlstra | 29cd8ba | 2009-09-17 09:01:14 +0200 | [diff] [blame] | 3030 | sd = tmp; |
Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 3031 | } |
Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 3032 | |
Mike Galbraith | 8b911ac | 2010-03-11 17:17:16 +0100 | [diff] [blame] | 3033 | if (affine_sd) { |
Alex Shi | f03542a | 2012-07-26 08:55:34 +0800 | [diff] [blame] | 3034 | if (cpu != prev_cpu && wake_affine(affine_sd, p, sync)) |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 3035 | prev_cpu = cpu; |
| 3036 | |
| 3037 | new_cpu = select_idle_sibling(p, prev_cpu); |
| 3038 | goto unlock; |
Mike Galbraith | 8b911ac | 2010-03-11 17:17:16 +0100 | [diff] [blame] | 3039 | } |
Peter Zijlstra | 3b64089 | 2009-09-16 13:44:33 +0200 | [diff] [blame] | 3040 | |
Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 3041 | while (sd) { |
Peter Zijlstra | 5158f4e | 2009-09-16 13:46:59 +0200 | [diff] [blame] | 3042 | int load_idx = sd->forkexec_idx; |
Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 3043 | struct sched_group *group; |
Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 3044 | int weight; |
Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 3045 | |
Peter Zijlstra | 0763a66 | 2009-09-14 19:37:39 +0200 | [diff] [blame] | 3046 | if (!(sd->flags & sd_flag)) { |
Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 3047 | sd = sd->child; |
| 3048 | continue; |
| 3049 | } |
| 3050 | |
Peter Zijlstra | 5158f4e | 2009-09-16 13:46:59 +0200 | [diff] [blame] | 3051 | if (sd_flag & SD_BALANCE_WAKE) |
| 3052 | load_idx = sd->wake_idx; |
| 3053 | |
| 3054 | group = find_idlest_group(sd, p, cpu, load_idx); |
Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 3055 | if (!group) { |
| 3056 | sd = sd->child; |
| 3057 | continue; |
| 3058 | } |
| 3059 | |
Peter Zijlstra | d7c33c4 | 2009-09-11 12:45:38 +0200 | [diff] [blame] | 3060 | new_cpu = find_idlest_cpu(group, p, cpu); |
Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 3061 | if (new_cpu == -1 || new_cpu == cpu) { |
| 3062 | /* Now try balancing at a lower domain level of cpu */ |
| 3063 | sd = sd->child; |
| 3064 | continue; |
| 3065 | } |
| 3066 | |
| 3067 | /* Now try balancing at a lower domain level of new_cpu */ |
| 3068 | cpu = new_cpu; |
Peter Zijlstra | 669c55e | 2010-04-16 14:59:29 +0200 | [diff] [blame] | 3069 | weight = sd->span_weight; |
Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 3070 | sd = NULL; |
| 3071 | for_each_domain(cpu, tmp) { |
Peter Zijlstra | 669c55e | 2010-04-16 14:59:29 +0200 | [diff] [blame] | 3072 | if (weight <= tmp->span_weight) |
Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 3073 | break; |
Peter Zijlstra | 0763a66 | 2009-09-14 19:37:39 +0200 | [diff] [blame] | 3074 | if (tmp->flags & sd_flag) |
Peter Zijlstra | aaee120 | 2009-09-10 13:36:25 +0200 | [diff] [blame] | 3075 | sd = tmp; |
| 3076 | } |
| 3077 | /* while loop will break here if sd == NULL */ |
Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 3078 | } |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 3079 | unlock: |
| 3080 | rcu_read_unlock(); |
Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 3081 | |
Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 3082 | return new_cpu; |
Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 3083 | } |
Paul Turner | 0a74bef | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 3084 | |
| 3085 | /* |
| 3086 | * Called immediately before a task is migrated to a new cpu; task_cpu(p) and |
| 3087 | * cfs_rq_of(p) references at time of call are still valid and identify the |
| 3088 | * previous cpu. However, the caller only guarantees p->pi_lock is held; no |
| 3089 | * other assumptions, including the state of rq->lock, should be made. |
| 3090 | */ |
| 3091 | static void |
| 3092 | migrate_task_rq_fair(struct task_struct *p, int next_cpu) |
| 3093 | { |
Paul Turner | aff3e49 | 2012-10-04 13:18:30 +0200 | [diff] [blame^] | 3094 | struct sched_entity *se = &p->se; |
| 3095 | struct cfs_rq *cfs_rq = cfs_rq_of(se); |
| 3096 | |
| 3097 | /* |
| 3098 | * Load tracking: accumulate removed load so that it can be processed |
| 3099 | * when we next update owning cfs_rq under rq->lock. Tasks contribute |
| 3100 | * to blocked load iff they have a positive decay-count. It can never |
| 3101 | * be negative here since on-rq tasks have decay-count == 0. |
| 3102 | */ |
| 3103 | if (se->avg.decay_count) { |
| 3104 | se->avg.decay_count = -__synchronize_entity_decay(se); |
| 3105 | atomic64_add(se->avg.load_avg_contrib, &cfs_rq->removed_load); |
| 3106 | } |
Paul Turner | 0a74bef | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 3107 | } |
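/*
 * Editor's note: an illustrative sketch, not part of the kernel source.
 * The hunk above only parks the departing task's average load
 * contribution in a per-runqueue accumulator (atomically, since only
 * p->pi_lock is guaranteed to be held); the old runqueue folds it out
 * of its blocked load the next time it updates under rq->lock. The
 * struct and helper names below are invented stand-ins for that idea.
 */
struct example_blocked_load {
        long long blocked_load_avg;     /* decayed load of sleeping tasks */
        long long removed_load;         /* contributions of departed tasks */
};

static void example_task_migrated_away(struct example_blocked_load *old_rq,
                                       long long load_avg_contrib)
{
        old_rq->removed_load += load_avg_contrib;       /* no rq->lock held */
}

static void example_old_rq_update(struct example_blocked_load *old_rq)
{
        /* later, on the source runqueue, with rq->lock held */
        old_rq->blocked_load_avg -= old_rq->removed_load;
        old_rq->removed_load = 0;
}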
Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 3108 | #endif /* CONFIG_SMP */ |
| 3109 | |
Peter Zijlstra | e52fb7c | 2009-01-14 12:39:19 +0100 | [diff] [blame] | 3110 | static unsigned long |
| 3111 | wakeup_gran(struct sched_entity *curr, struct sched_entity *se) |
Peter Zijlstra | 0bbd333 | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 3112 | { |
| 3113 | unsigned long gran = sysctl_sched_wakeup_granularity; |
| 3114 | |
| 3115 | /* |
Peter Zijlstra | e52fb7c | 2009-01-14 12:39:19 +0100 | [diff] [blame] | 3116 | * Since it's curr that is running now, convert the gran from |
| 3117 | * real-time to virtual-time in its units. |
Mike Galbraith | 13814d4 | 2010-03-11 17:17:04 +0100 | [diff] [blame] | 3118 | * |
| 3119 | * By using 'se' instead of 'curr' we penalize light tasks, so |
| 3120 | * they get preempted more easily. That is, if 'se' < 'curr' then |
| 3121 | * the resulting gran will be larger, therefore penalizing the |
| 3122 | * lighter task; if, on the other hand, 'se' > 'curr' then the |
| 3123 | * resulting gran will be smaller, again penalizing the lighter task. |
| 3124 | * |
| 3125 | * This is especially important for buddies when the leftmost |
| 3126 | * task is higher priority than the buddy. |
Peter Zijlstra | 0bbd333 | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 3127 | */ |
Shaohua Li | f4ad9bd | 2011-04-08 12:53:09 +0800 | [diff] [blame] | 3128 | return calc_delta_fair(gran, se); |
Peter Zijlstra | 0bbd333 | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 3129 | } |
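/*
 * Editor's note: an illustrative sketch, not part of the kernel source.
 * It mimics the weight scaling calc_delta_fair() performs above,
 * assuming the default nice-0 weight of 1024 and a 1ms wakeup
 * granularity; the helper name and the sample weights are made up.
 */
static unsigned long example_virtual_wakeup_gran(unsigned long gran_ns,
                                                 unsigned long se_weight)
{
        /* heavier entities accrue vruntime slower, so their gran shrinks */
        return gran_ns * 1024 / se_weight;
}
/*
 * example_virtual_wakeup_gran(1000000, 1024) == 1000000  nice-0 wakee
 * example_virtual_wakeup_gran(1000000,  512) == 2000000  light wakee,
 *                                                        harder to preempt with
 * example_virtual_wakeup_gran(1000000, 2048) ==  500000  heavy wakee,
 *                                                        easier to preempt with
 */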
| 3130 | |
| 3131 | /* |
Peter Zijlstra | 464b752 | 2008-10-24 11:06:15 +0200 | [diff] [blame] | 3132 | * Should 'se' preempt 'curr'. |
| 3133 | * |
| 3134 | * |s1 |
| 3135 | * |s2 |
| 3136 | * |s3 |
| 3137 | * g |
| 3138 | * |<--->|c |
| 3139 | * |
| 3140 | * w(c, s1) = -1 |
| 3141 | * w(c, s2) = 0 |
| 3142 | * w(c, s3) = 1 |
| 3143 | * |
| 3144 | */ |
| 3145 | static int |
| 3146 | wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se) |
| 3147 | { |
| 3148 | s64 gran, vdiff = curr->vruntime - se->vruntime; |
| 3149 | |
| 3150 | if (vdiff <= 0) |
| 3151 | return -1; |
| 3152 | |
Peter Zijlstra | e52fb7c | 2009-01-14 12:39:19 +0100 | [diff] [blame] | 3153 | gran = wakeup_gran(curr, se); |
Peter Zijlstra | 464b752 | 2008-10-24 11:06:15 +0200 | [diff] [blame] | 3154 | if (vdiff > gran) |
| 3155 | return 1; |
| 3156 | |
| 3157 | return 0; |
| 3158 | } |
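/*
 * Editor's note: an illustrative sketch, not part of the kernel source.
 * A worked instance of the three-way decision above with invented
 * numbers: curr->vruntime is 10,000,000ns and the (virtual) gran is
 * 1,000,000ns.
 */
static int example_wakeup_preempt_decision(long long se_vruntime)
{
        long long vdiff = 10000000LL - se_vruntime;     /* curr - se */
        long long gran = 1000000LL;

        if (vdiff <= 0)         /* se right of curr (case s1): don't preempt */
                return -1;
        if (vdiff > gran)       /* se far left of curr (case s3): preempt */
                return 1;
        return 0;               /* se within gran of curr (case s2) */
}
/*
 * example_wakeup_preempt_decision(11000000) == -1
 * example_wakeup_preempt_decision(9500000)  ==  0
 * example_wakeup_preempt_decision(8000000)  ==  1
 */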
| 3159 | |
Peter Zijlstra | 0247909 | 2008-11-04 21:25:10 +0100 | [diff] [blame] | 3160 | static void set_last_buddy(struct sched_entity *se) |
| 3161 | { |
Venkatesh Pallipadi | 69c80f3 | 2011-04-13 18:21:09 -0700 | [diff] [blame] | 3162 | if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE)) |
| 3163 | return; |
| 3164 | |
| 3165 | for_each_sched_entity(se) |
| 3166 | cfs_rq_of(se)->last = se; |
Peter Zijlstra | 0247909 | 2008-11-04 21:25:10 +0100 | [diff] [blame] | 3167 | } |
| 3168 | |
| 3169 | static void set_next_buddy(struct sched_entity *se) |
| 3170 | { |
Venkatesh Pallipadi | 69c80f3 | 2011-04-13 18:21:09 -0700 | [diff] [blame] | 3171 | if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE)) |
| 3172 | return; |
| 3173 | |
| 3174 | for_each_sched_entity(se) |
| 3175 | cfs_rq_of(se)->next = se; |
Peter Zijlstra | 0247909 | 2008-11-04 21:25:10 +0100 | [diff] [blame] | 3176 | } |
| 3177 | |
Rik van Riel | ac53db5 | 2011-02-01 09:51:03 -0500 | [diff] [blame] | 3178 | static void set_skip_buddy(struct sched_entity *se) |
| 3179 | { |
Venkatesh Pallipadi | 69c80f3 | 2011-04-13 18:21:09 -0700 | [diff] [blame] | 3180 | for_each_sched_entity(se) |
| 3181 | cfs_rq_of(se)->skip = se; |
Rik van Riel | ac53db5 | 2011-02-01 09:51:03 -0500 | [diff] [blame] | 3182 | } |
| 3183 | |
Peter Zijlstra | 464b752 | 2008-10-24 11:06:15 +0200 | [diff] [blame] | 3184 | /* |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 3185 | * Preempt the current task with a newly woken task if needed: |
| 3186 | */ |
Peter Zijlstra | 5a9b86f | 2009-09-16 13:47:58 +0200 | [diff] [blame] | 3187 | static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 3188 | { |
| 3189 | struct task_struct *curr = rq->curr; |
Srivatsa Vaddagiri | 8651a86 | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 3190 | struct sched_entity *se = &curr->se, *pse = &p->se; |
Mike Galbraith | 03e89e4 | 2008-12-16 08:45:30 +0100 | [diff] [blame] | 3191 | struct cfs_rq *cfs_rq = task_cfs_rq(curr); |
Mike Galbraith | f685cea | 2009-10-23 23:09:22 +0200 | [diff] [blame] | 3192 | int scale = cfs_rq->nr_running >= sched_nr_latency; |
Venkatesh Pallipadi | 2f36825 | 2011-04-14 10:30:53 -0700 | [diff] [blame] | 3193 | int next_buddy_marked = 0; |
Mike Galbraith | 03e89e4 | 2008-12-16 08:45:30 +0100 | [diff] [blame] | 3194 | |
Ingo Molnar | 4ae7d5c | 2008-03-19 01:42:00 +0100 | [diff] [blame] | 3195 | if (unlikely(se == pse)) |
| 3196 | return; |
| 3197 | |
Paul Turner | 5238cdd | 2011-07-21 09:43:37 -0700 | [diff] [blame] | 3198 | /* |
Peter Zijlstra | ddcdf6e | 2012-02-22 19:27:40 +0100 | [diff] [blame] | 3199 | * This is possible from callers such as move_task(), where we |
Paul Turner | 5238cdd | 2011-07-21 09:43:37 -0700 | [diff] [blame] | 3200 | * unconditionally check_preempt_curr() after an enqueue (which may have |
| 3201 | * led to a throttle). This both saves work and prevents false |
| 3202 | * next-buddy nomination below. |
| 3203 | */ |
| 3204 | if (unlikely(throttled_hierarchy(cfs_rq_of(pse)))) |
| 3205 | return; |
| 3206 | |
Venkatesh Pallipadi | 2f36825 | 2011-04-14 10:30:53 -0700 | [diff] [blame] | 3207 | if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) { |
Mike Galbraith | 3cb63d5 | 2009-09-11 12:01:17 +0200 | [diff] [blame] | 3208 | set_next_buddy(pse); |
Venkatesh Pallipadi | 2f36825 | 2011-04-14 10:30:53 -0700 | [diff] [blame] | 3209 | next_buddy_marked = 1; |
| 3210 | } |
Peter Zijlstra | 57fdc26 | 2008-09-23 15:33:45 +0200 | [diff] [blame] | 3211 | |
Bharata B Rao | aec0a51 | 2008-08-28 14:42:49 +0530 | [diff] [blame] | 3212 | /* |
| 3213 | * We can come here with TIF_NEED_RESCHED already set from the new |
| 3214 | * task wakeup path. |
Paul Turner | 5238cdd | 2011-07-21 09:43:37 -0700 | [diff] [blame] | 3215 | * |
| 3216 | * Note: this also catches the edge-case of curr being in a throttled |
| 3217 | * group (e.g. via set_curr_task), since update_curr() (in the |
| 3218 | * enqueue of curr) will have resulted in resched being set. This |
| 3219 | * prevents us from potentially nominating it as a false LAST_BUDDY |
| 3220 | * below. |
Bharata B Rao | aec0a51 | 2008-08-28 14:42:49 +0530 | [diff] [blame] | 3221 | */ |
| 3222 | if (test_tsk_need_resched(curr)) |
| 3223 | return; |
| 3224 | |
Darren Hart | a2f5c9a | 2011-02-22 13:04:33 -0800 | [diff] [blame] | 3225 | /* Idle tasks are by definition preempted by non-idle tasks. */ |
| 3226 | if (unlikely(curr->policy == SCHED_IDLE) && |
| 3227 | likely(p->policy != SCHED_IDLE)) |
| 3228 | goto preempt; |
| 3229 | |
Ingo Molnar | 91c234b | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 3230 | /* |
Darren Hart | a2f5c9a | 2011-02-22 13:04:33 -0800 | [diff] [blame] | 3231 | * Batch and idle tasks do not preempt non-idle tasks (their preemption |
| 3232 | * is driven by the tick): |
Ingo Molnar | 91c234b | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 3233 | */ |
Peter Zijlstra | 6bc912b | 2009-01-15 14:53:38 +0100 | [diff] [blame] | 3234 | if (unlikely(p->policy != SCHED_NORMAL)) |
Ingo Molnar | 91c234b | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 3235 | return; |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 3236 | |
Peter Zijlstra | 3a7e73a | 2009-11-28 18:51:02 +0100 | [diff] [blame] | 3237 | find_matching_se(&se, &pse); |
Paul Turner | 9bbd737 | 2011-07-05 19:07:21 -0700 | [diff] [blame] | 3238 | update_curr(cfs_rq_of(se)); |
Peter Zijlstra | 3a7e73a | 2009-11-28 18:51:02 +0100 | [diff] [blame] | 3239 | BUG_ON(!pse); |
Venkatesh Pallipadi | 2f36825 | 2011-04-14 10:30:53 -0700 | [diff] [blame] | 3240 | if (wakeup_preempt_entity(se, pse) == 1) { |
| 3241 | /* |
| 3242 | * Bias pick_next to pick the sched entity that is |
| 3243 | * triggering this preemption. |
| 3244 | */ |
| 3245 | if (!next_buddy_marked) |
| 3246 | set_next_buddy(pse); |
Peter Zijlstra | 3a7e73a | 2009-11-28 18:51:02 +0100 | [diff] [blame] | 3247 | goto preempt; |
Venkatesh Pallipadi | 2f36825 | 2011-04-14 10:30:53 -0700 | [diff] [blame] | 3248 | } |
Jupyung Lee | a65ac74 | 2009-11-17 18:51:40 +0900 | [diff] [blame] | 3249 | |
Peter Zijlstra | 3a7e73a | 2009-11-28 18:51:02 +0100 | [diff] [blame] | 3250 | return; |
| 3251 | |
| 3252 | preempt: |
| 3253 | resched_task(curr); |
| 3254 | /* |
| 3255 | * Only set the backward buddy when the current task is still |
| 3256 | * on the rq. This can happen when a wakeup gets interleaved |
| 3257 | * with schedule on the ->pre_schedule() or idle_balance() |
| 3258 | * point, either of which can drop the rq lock. |
| 3259 | * |
| 3260 | * Also, during early boot the idle thread is in the fair class, |
| 3261 | * for obvious reasons it's a bad idea to schedule back to it. |
| 3262 | */ |
| 3263 | if (unlikely(!se->on_rq || curr == rq->idle)) |
| 3264 | return; |
| 3265 | |
| 3266 | if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se)) |
| 3267 | set_last_buddy(se); |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 3268 | } |
| 3269 | |
Ingo Molnar | fb8d472 | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 3270 | static struct task_struct *pick_next_task_fair(struct rq *rq) |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 3271 | { |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 3272 | struct task_struct *p; |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 3273 | struct cfs_rq *cfs_rq = &rq->cfs; |
| 3274 | struct sched_entity *se; |
| 3275 | |
Tim Blechmann | 36ace27 | 2009-11-24 11:55:45 +0100 | [diff] [blame] | 3276 | if (!cfs_rq->nr_running) |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 3277 | return NULL; |
| 3278 | |
| 3279 | do { |
Ingo Molnar | 9948f4b | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 3280 | se = pick_next_entity(cfs_rq); |
Peter Zijlstra | f4b6755 | 2008-11-04 21:25:07 +0100 | [diff] [blame] | 3281 | set_next_entity(cfs_rq, se); |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 3282 | cfs_rq = group_cfs_rq(se); |
| 3283 | } while (cfs_rq); |
| 3284 | |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 3285 | p = task_of(se); |
Mike Galbraith | b39e66e | 2011-11-22 15:20:07 +0100 | [diff] [blame] | 3286 | if (hrtick_enabled(rq)) |
| 3287 | hrtick_start_fair(rq, p); |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 3288 | |
| 3289 | return p; |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 3290 | } |
| 3291 | |
| 3292 | /* |
| 3293 | * Account for a descheduled task: |
| 3294 | */ |
Ingo Molnar | 31ee529 | 2007-08-09 11:16:49 +0200 | [diff] [blame] | 3295 | static void put_prev_task_fair(struct rq *rq, struct task_struct *prev) |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 3296 | { |
| 3297 | struct sched_entity *se = &prev->se; |
| 3298 | struct cfs_rq *cfs_rq; |
| 3299 | |
| 3300 | for_each_sched_entity(se) { |
| 3301 | cfs_rq = cfs_rq_of(se); |
Ingo Molnar | ab6cde2 | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 3302 | put_prev_entity(cfs_rq, se); |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 3303 | } |
| 3304 | } |
| 3305 | |
Rik van Riel | ac53db5 | 2011-02-01 09:51:03 -0500 | [diff] [blame] | 3306 | /* |
| 3307 | * sched_yield() is very simple |
| 3308 | * |
| 3309 | * The magic of dealing with the ->skip buddy is in pick_next_entity. |
| 3310 | */ |
| 3311 | static void yield_task_fair(struct rq *rq) |
| 3312 | { |
| 3313 | struct task_struct *curr = rq->curr; |
| 3314 | struct cfs_rq *cfs_rq = task_cfs_rq(curr); |
| 3315 | struct sched_entity *se = &curr->se; |
| 3316 | |
| 3317 | /* |
| 3318 | * Are we the only task in the tree? |
| 3319 | */ |
| 3320 | if (unlikely(rq->nr_running == 1)) |
| 3321 | return; |
| 3322 | |
| 3323 | clear_buddies(cfs_rq, se); |
| 3324 | |
| 3325 | if (curr->policy != SCHED_BATCH) { |
| 3326 | update_rq_clock(rq); |
| 3327 | /* |
| 3328 | * Update run-time statistics of the 'current'. |
| 3329 | */ |
| 3330 | update_curr(cfs_rq); |
Mike Galbraith | 916671c | 2011-11-22 15:21:26 +0100 | [diff] [blame] | 3331 | /* |
| 3332 | * Tell update_rq_clock() that we've just updated, |
| 3333 | * so we don't do microscopic update in schedule() |
| 3334 | * and double the fastpath cost. |
| 3335 | */ |
| 3336 | rq->skip_clock_update = 1; |
Rik van Riel | ac53db5 | 2011-02-01 09:51:03 -0500 | [diff] [blame] | 3337 | } |
| 3338 | |
| 3339 | set_skip_buddy(se); |
| 3340 | } |
| 3341 | |
Mike Galbraith | d95f412 | 2011-02-01 09:50:51 -0500 | [diff] [blame] | 3342 | static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt) |
| 3343 | { |
| 3344 | struct sched_entity *se = &p->se; |
| 3345 | |
Paul Turner | 5238cdd | 2011-07-21 09:43:37 -0700 | [diff] [blame] | 3346 | /* throttled hierarchies are not runnable */ |
| 3347 | if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se))) |
Mike Galbraith | d95f412 | 2011-02-01 09:50:51 -0500 | [diff] [blame] | 3348 | return false; |
| 3349 | |
| 3350 | /* Tell the scheduler that we'd really like pse to run next. */ |
| 3351 | set_next_buddy(se); |
| 3352 | |
Mike Galbraith | d95f412 | 2011-02-01 09:50:51 -0500 | [diff] [blame] | 3353 | yield_task_fair(rq); |
| 3354 | |
| 3355 | return true; |
| 3356 | } |
| 3357 | |
Peter Williams | 681f3e6 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 3358 | #ifdef CONFIG_SMP |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 3359 | /************************************************** |
| 3360 | * Fair scheduling class load-balancing methods: |
| 3361 | */ |
| 3362 | |
Hiroshi Shimamoto | ed387b7 | 2012-01-31 11:40:32 +0900 | [diff] [blame] | 3363 | static unsigned long __read_mostly max_load_balance_interval = HZ/10; |
| 3364 | |
Peter Zijlstra | ddcdf6e | 2012-02-22 19:27:40 +0100 | [diff] [blame] | 3365 | #define LBF_ALL_PINNED 0x01 |
Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3366 | #define LBF_NEED_BREAK 0x02 |
Srivatsa Vaddagiri | 88b8dac | 2012-06-19 17:43:15 +0530 | [diff] [blame] | 3367 | #define LBF_SOME_PINNED 0x04 |
Peter Zijlstra | ddcdf6e | 2012-02-22 19:27:40 +0100 | [diff] [blame] | 3368 | |
| 3369 | struct lb_env { |
| 3370 | struct sched_domain *sd; |
| 3371 | |
Peter Zijlstra | ddcdf6e | 2012-02-22 19:27:40 +0100 | [diff] [blame] | 3372 | struct rq *src_rq; |
Prashanth Nageshappa | 85c1e7d | 2012-06-19 17:47:34 +0530 | [diff] [blame] | 3373 | int src_cpu; |
Peter Zijlstra | ddcdf6e | 2012-02-22 19:27:40 +0100 | [diff] [blame] | 3374 | |
| 3375 | int dst_cpu; |
| 3376 | struct rq *dst_rq; |
| 3377 | |
Srivatsa Vaddagiri | 88b8dac | 2012-06-19 17:43:15 +0530 | [diff] [blame] | 3378 | struct cpumask *dst_grpmask; |
| 3379 | int new_dst_cpu; |
Peter Zijlstra | ddcdf6e | 2012-02-22 19:27:40 +0100 | [diff] [blame] | 3380 | enum cpu_idle_type idle; |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 3381 | long imbalance; |
Michael Wang | b9403130 | 2012-07-12 16:10:13 +0800 | [diff] [blame] | 3382 | /* The set of CPUs under consideration for load-balancing */ |
| 3383 | struct cpumask *cpus; |
| 3384 | |
Peter Zijlstra | ddcdf6e | 2012-02-22 19:27:40 +0100 | [diff] [blame] | 3385 | unsigned int flags; |
Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3386 | |
| 3387 | unsigned int loop; |
| 3388 | unsigned int loop_break; |
| 3389 | unsigned int loop_max; |
Peter Zijlstra | ddcdf6e | 2012-02-22 19:27:40 +0100 | [diff] [blame] | 3390 | }; |
| 3391 | |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3392 | /* |
Peter Zijlstra | ddcdf6e | 2012-02-22 19:27:40 +0100 | [diff] [blame] | 3393 | * move_task - move a task from one runqueue to another runqueue. |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3394 | * Both runqueues must be locked. |
| 3395 | */ |
Peter Zijlstra | ddcdf6e | 2012-02-22 19:27:40 +0100 | [diff] [blame] | 3396 | static void move_task(struct task_struct *p, struct lb_env *env) |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3397 | { |
Peter Zijlstra | ddcdf6e | 2012-02-22 19:27:40 +0100 | [diff] [blame] | 3398 | deactivate_task(env->src_rq, p, 0); |
| 3399 | set_task_cpu(p, env->dst_cpu); |
| 3400 | activate_task(env->dst_rq, p, 0); |
| 3401 | check_preempt_curr(env->dst_rq, p, 0); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3402 | } |
| 3403 | |
| 3404 | /* |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 3405 | * Is this task likely cache-hot: |
| 3406 | */ |
| 3407 | static int |
| 3408 | task_hot(struct task_struct *p, u64 now, struct sched_domain *sd) |
| 3409 | { |
| 3410 | s64 delta; |
| 3411 | |
| 3412 | if (p->sched_class != &fair_sched_class) |
| 3413 | return 0; |
| 3414 | |
| 3415 | if (unlikely(p->policy == SCHED_IDLE)) |
| 3416 | return 0; |
| 3417 | |
| 3418 | /* |
| 3419 | * Buddy candidates are cache hot: |
| 3420 | */ |
| 3421 | if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running && |
| 3422 | (&p->se == cfs_rq_of(&p->se)->next || |
| 3423 | &p->se == cfs_rq_of(&p->se)->last)) |
| 3424 | return 1; |
| 3425 | |
| 3426 | if (sysctl_sched_migration_cost == -1) |
| 3427 | return 1; |
| 3428 | if (sysctl_sched_migration_cost == 0) |
| 3429 | return 0; |
| 3430 | |
| 3431 | delta = now - p->se.exec_start; |
| 3432 | |
| 3433 | return delta < (s64)sysctl_sched_migration_cost; |
| 3434 | } |
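/*
 * Editor's note: an illustrative sketch, not part of the kernel source.
 * After the buddy and sysctl special cases, task_hot() reduces to the
 * comparison below against sysctl_sched_migration_cost (0.5ms by
 * default). The timestamps in the usage comment are invented.
 */
static int example_task_is_cache_hot(unsigned long long now_ns,
                                     unsigned long long exec_start_ns)
{
        const long long migration_cost_ns = 500000;     /* default 0.5ms */

        return (long long)(now_ns - exec_start_ns) < migration_cost_ns;
}
/*
 * example_task_is_cache_hot(1000500000ULL, 1000100000ULL) == 1: last ran
 * 0.4ms ago, still considered hot.
 * example_task_is_cache_hot(1001000000ULL, 1000100000ULL) == 0: 0.9ms is
 * past the migration cost, so the task is fair game for migration.
 */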
| 3435 | |
| 3436 | /* |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3437 | * can_migrate_task - may task p from runqueue rq be migrated to this_cpu? |
| 3438 | */ |
| 3439 | static |
Peter Zijlstra | 8e45cb5 | 2012-02-22 12:47:19 +0100 | [diff] [blame] | 3440 | int can_migrate_task(struct task_struct *p, struct lb_env *env) |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3441 | { |
| 3442 | int tsk_cache_hot = 0; |
| 3443 | /* |
| 3444 | * We do not migrate tasks that: |
| 3445 | * 1) are running (obviously), or |
| 3446 | * 2) cannot be migrated to this CPU due to cpus_allowed, or |
| 3447 | * 3) are cache-hot on their current CPU. |
| 3448 | */ |
Peter Zijlstra | ddcdf6e | 2012-02-22 19:27:40 +0100 | [diff] [blame] | 3449 | if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) { |
Srivatsa Vaddagiri | 88b8dac | 2012-06-19 17:43:15 +0530 | [diff] [blame] | 3450 | int new_dst_cpu; |
| 3451 | |
Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 3452 | schedstat_inc(p, se.statistics.nr_failed_migrations_affine); |
Srivatsa Vaddagiri | 88b8dac | 2012-06-19 17:43:15 +0530 | [diff] [blame] | 3453 | |
| 3454 | /* |
| 3455 | * Remember if this task can be migrated to any other cpu in |
| 3456 | * our sched_group. We may want to revisit it if we couldn't |
| 3457 | * meet load balance goals by pulling other tasks on src_cpu. |
| 3458 | * |
| 3459 | * Also avoid computing new_dst_cpu if we have already computed |
| 3460 | * one in current iteration. |
| 3461 | */ |
| 3462 | if (!env->dst_grpmask || (env->flags & LBF_SOME_PINNED)) |
| 3463 | return 0; |
| 3464 | |
| 3465 | new_dst_cpu = cpumask_first_and(env->dst_grpmask, |
| 3466 | tsk_cpus_allowed(p)); |
| 3467 | if (new_dst_cpu < nr_cpu_ids) { |
| 3468 | env->flags |= LBF_SOME_PINNED; |
| 3469 | env->new_dst_cpu = new_dst_cpu; |
| 3470 | } |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3471 | return 0; |
| 3472 | } |
Srivatsa Vaddagiri | 88b8dac | 2012-06-19 17:43:15 +0530 | [diff] [blame] | 3473 | |
| 3474 | /* Record that we found at least one task that could run on dst_cpu */ |
Peter Zijlstra | 8e45cb5 | 2012-02-22 12:47:19 +0100 | [diff] [blame] | 3475 | env->flags &= ~LBF_ALL_PINNED; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3476 | |
Peter Zijlstra | ddcdf6e | 2012-02-22 19:27:40 +0100 | [diff] [blame] | 3477 | if (task_running(env->src_rq, p)) { |
Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 3478 | schedstat_inc(p, se.statistics.nr_failed_migrations_running); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3479 | return 0; |
| 3480 | } |
| 3481 | |
| 3482 | /* |
| 3483 | * Aggressive migration if: |
| 3484 | * 1) task is cache cold, or |
| 3485 | * 2) too many balance attempts have failed. |
| 3486 | */ |
| 3487 | |
Peter Zijlstra | ddcdf6e | 2012-02-22 19:27:40 +0100 | [diff] [blame] | 3488 | tsk_cache_hot = task_hot(p, env->src_rq->clock_task, env->sd); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3489 | if (!tsk_cache_hot || |
Peter Zijlstra | 8e45cb5 | 2012-02-22 12:47:19 +0100 | [diff] [blame] | 3490 | env->sd->nr_balance_failed > env->sd->cache_nice_tries) { |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3491 | #ifdef CONFIG_SCHEDSTATS |
| 3492 | if (tsk_cache_hot) { |
Peter Zijlstra | 8e45cb5 | 2012-02-22 12:47:19 +0100 | [diff] [blame] | 3493 | schedstat_inc(env->sd, lb_hot_gained[env->idle]); |
Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 3494 | schedstat_inc(p, se.statistics.nr_forced_migrations); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3495 | } |
| 3496 | #endif |
| 3497 | return 1; |
| 3498 | } |
| 3499 | |
| 3500 | if (tsk_cache_hot) { |
Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 3501 | schedstat_inc(p, se.statistics.nr_failed_migrations_hot); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3502 | return 0; |
| 3503 | } |
| 3504 | return 1; |
| 3505 | } |
| 3506 | |
Peter Zijlstra | 897c395 | 2009-12-17 17:45:42 +0100 | [diff] [blame] | 3507 | /* |
| 3508 | * move_one_task tries to move exactly one task from busiest to this_rq, as |
| 3509 | * part of active balancing operations within "domain". |
| 3510 | * Returns 1 if successful and 0 otherwise. |
| 3511 | * |
| 3512 | * Called with both runqueues locked. |
| 3513 | */ |
Peter Zijlstra | 8e45cb5 | 2012-02-22 12:47:19 +0100 | [diff] [blame] | 3514 | static int move_one_task(struct lb_env *env) |
Peter Zijlstra | 897c395 | 2009-12-17 17:45:42 +0100 | [diff] [blame] | 3515 | { |
| 3516 | struct task_struct *p, *n; |
Peter Zijlstra | 897c395 | 2009-12-17 17:45:42 +0100 | [diff] [blame] | 3517 | |
Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3518 | list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) { |
| 3519 | if (throttled_lb_pair(task_group(p), env->src_rq->cpu, env->dst_cpu)) |
| 3520 | continue; |
Peter Zijlstra | 897c395 | 2009-12-17 17:45:42 +0100 | [diff] [blame] | 3521 | |
Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3522 | if (!can_migrate_task(p, env)) |
| 3523 | continue; |
Peter Zijlstra | 897c395 | 2009-12-17 17:45:42 +0100 | [diff] [blame] | 3524 | |
Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3525 | move_task(p, env); |
| 3526 | /* |
| 3527 | * Right now, this is only the second place move_task() |
| 3528 | * is called, so we can safely collect move_task() |
| 3529 | * stats here rather than inside move_task(). |
| 3530 | */ |
| 3531 | schedstat_inc(env->sd, lb_gained[env->idle]); |
| 3532 | return 1; |
Peter Zijlstra | 897c395 | 2009-12-17 17:45:42 +0100 | [diff] [blame] | 3533 | } |
Peter Zijlstra | 897c395 | 2009-12-17 17:45:42 +0100 | [diff] [blame] | 3534 | return 0; |
| 3535 | } |
| 3536 | |
Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3537 | static unsigned long task_h_load(struct task_struct *p); |
| 3538 | |
Peter Zijlstra | eb95308 | 2012-04-17 13:38:40 +0200 | [diff] [blame] | 3539 | static const unsigned int sched_nr_migrate_break = 32; |
| 3540 | |
Peter Zijlstra | 5d6523e | 2012-03-10 00:07:36 +0100 | [diff] [blame] | 3541 | /* |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 3542 | * move_tasks tries to move up to imbalance weighted load from busiest to |
Peter Zijlstra | 5d6523e | 2012-03-10 00:07:36 +0100 | [diff] [blame] | 3543 | * this_rq, as part of a balancing operation within domain "sd". |
| 3544 | * Returns 1 if successful and 0 otherwise. |
| 3545 | * |
| 3546 | * Called with both runqueues locked. |
| 3547 | */ |
| 3548 | static int move_tasks(struct lb_env *env) |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3549 | { |
Peter Zijlstra | 5d6523e | 2012-03-10 00:07:36 +0100 | [diff] [blame] | 3550 | struct list_head *tasks = &env->src_rq->cfs_tasks; |
| 3551 | struct task_struct *p; |
Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3552 | unsigned long load; |
| 3553 | int pulled = 0; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3554 | |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 3555 | if (env->imbalance <= 0) |
Peter Zijlstra | 5d6523e | 2012-03-10 00:07:36 +0100 | [diff] [blame] | 3556 | return 0; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3557 | |
Peter Zijlstra | 5d6523e | 2012-03-10 00:07:36 +0100 | [diff] [blame] | 3558 | while (!list_empty(tasks)) { |
| 3559 | p = list_first_entry(tasks, struct task_struct, se.group_node); |
| 3560 | |
Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3561 | env->loop++; |
| 3562 | /* We've more or less seen every task there is, call it quits */ |
Peter Zijlstra | 5d6523e | 2012-03-10 00:07:36 +0100 | [diff] [blame] | 3563 | if (env->loop > env->loop_max) |
Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3564 | break; |
Peter Zijlstra | 5d6523e | 2012-03-10 00:07:36 +0100 | [diff] [blame] | 3565 | |
| 3566 | /* take a breather every nr_migrate tasks */ |
Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3567 | if (env->loop > env->loop_break) { |
Peter Zijlstra | eb95308 | 2012-04-17 13:38:40 +0200 | [diff] [blame] | 3568 | env->loop_break += sched_nr_migrate_break; |
Peter Zijlstra | 8e45cb5 | 2012-02-22 12:47:19 +0100 | [diff] [blame] | 3569 | env->flags |= LBF_NEED_BREAK; |
Peter Zijlstra | ee00e66 | 2009-12-17 17:25:20 +0100 | [diff] [blame] | 3570 | break; |
Peter Zijlstra | a195f00 | 2011-09-22 15:30:18 +0200 | [diff] [blame] | 3571 | } |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3572 | |
Peter Zijlstra | 5d6523e | 2012-03-10 00:07:36 +0100 | [diff] [blame] | 3573 | if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu)) |
Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3574 | goto next; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3575 | |
Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3576 | load = task_h_load(p); |
Peter Zijlstra | 5d6523e | 2012-03-10 00:07:36 +0100 | [diff] [blame] | 3577 | |
Peter Zijlstra | eb95308 | 2012-04-17 13:38:40 +0200 | [diff] [blame] | 3578 | if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed) |
Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3579 | goto next; |
| 3580 | |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 3581 | if ((load / 2) > env->imbalance) |
Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3582 | goto next; |
| 3583 | |
| 3584 | if (!can_migrate_task(p, env)) |
| 3585 | goto next; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3586 | |
Peter Zijlstra | ddcdf6e | 2012-02-22 19:27:40 +0100 | [diff] [blame] | 3587 | move_task(p, env); |
Peter Zijlstra | ee00e66 | 2009-12-17 17:25:20 +0100 | [diff] [blame] | 3588 | pulled++; |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 3589 | env->imbalance -= load; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3590 | |
| 3591 | #ifdef CONFIG_PREEMPT |
Peter Zijlstra | ee00e66 | 2009-12-17 17:25:20 +0100 | [diff] [blame] | 3592 | /* |
| 3593 | * NEWIDLE balancing is a source of latency, so preemptible |
| 3594 | * kernels will stop after the first task is pulled to minimize |
| 3595 | * the critical section. |
| 3596 | */ |
Peter Zijlstra | 5d6523e | 2012-03-10 00:07:36 +0100 | [diff] [blame] | 3597 | if (env->idle == CPU_NEWLY_IDLE) |
Peter Zijlstra | ee00e66 | 2009-12-17 17:25:20 +0100 | [diff] [blame] | 3598 | break; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3599 | #endif |
| 3600 | |
Peter Zijlstra | ee00e66 | 2009-12-17 17:25:20 +0100 | [diff] [blame] | 3601 | /* |
| 3602 | * We only want to steal up to the prescribed amount of |
| 3603 | * weighted load. |
| 3604 | */ |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 3605 | if (env->imbalance <= 0) |
Peter Zijlstra | ee00e66 | 2009-12-17 17:25:20 +0100 | [diff] [blame] | 3606 | break; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3607 | |
Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3608 | continue; |
| 3609 | next: |
Peter Zijlstra | 5d6523e | 2012-03-10 00:07:36 +0100 | [diff] [blame] | 3610 | list_move_tail(&p->se.group_node, tasks); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3611 | } |
Peter Zijlstra | 5d6523e | 2012-03-10 00:07:36 +0100 | [diff] [blame] | 3612 | |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3613 | /* |
Peter Zijlstra | ddcdf6e | 2012-02-22 19:27:40 +0100 | [diff] [blame] | 3614 | * Right now, this is one of only two places move_task() is called, |
| 3615 | * so we can safely collect move_task() stats here rather than |
| 3616 | * inside move_task(). |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3617 | */ |
Peter Zijlstra | 8e45cb5 | 2012-02-22 12:47:19 +0100 | [diff] [blame] | 3618 | schedstat_add(env->sd, lb_gained[env->idle], pulled); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3619 | |
Peter Zijlstra | 5d6523e | 2012-03-10 00:07:36 +0100 | [diff] [blame] | 3620 | return pulled; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3621 | } |
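/*
 * Editor's note: an illustrative sketch, not part of the kernel source.
 * A toy version of the pull loop above: walk a list of task loads and
 * stop once the requested imbalance has been covered, skipping tasks
 * whose load alone would overshoot it by more than 2x. The loads are
 * invented and the throttling/affinity/breather checks are left out.
 */
static int example_pull_tasks(const unsigned long *loads, int nr,
                              long imbalance)
{
        int i, pulled = 0;

        for (i = 0; i < nr && imbalance > 0; i++) {
                if ((long)(loads[i] / 2) > imbalance)   /* too big a bite */
                        continue;
                pulled++;
                imbalance -= loads[i];
        }
        return pulled;
}
/*
 * With loads {3072, 512, 512, 256} and an imbalance of 1024, the 3072
 * task is skipped (1536 > 1024) and the two 512 tasks are pulled,
 * bringing the imbalance to 0, so the 256 task stays put.
 */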
| 3622 | |
Peter Zijlstra | 230059de | 2009-12-17 17:47:12 +0100 | [diff] [blame] | 3623 | #ifdef CONFIG_FAIR_GROUP_SCHED |
Peter Zijlstra | 9e3081c | 2010-11-15 15:47:02 -0800 | [diff] [blame] | 3624 | /* |
| 3625 | * update tg->load_weight by folding this cpu's load_avg |
| 3626 | */ |
Paul Turner | 67e8625 | 2010-11-15 15:47:05 -0800 | [diff] [blame] | 3627 | static int update_shares_cpu(struct task_group *tg, int cpu) |
Peter Zijlstra | 9e3081c | 2010-11-15 15:47:02 -0800 | [diff] [blame] | 3628 | { |
| 3629 | struct cfs_rq *cfs_rq; |
| 3630 | unsigned long flags; |
| 3631 | struct rq *rq; |
Peter Zijlstra | 9e3081c | 2010-11-15 15:47:02 -0800 | [diff] [blame] | 3632 | |
| 3633 | if (!tg->se[cpu]) |
| 3634 | return 0; |
| 3635 | |
| 3636 | rq = cpu_rq(cpu); |
| 3637 | cfs_rq = tg->cfs_rq[cpu]; |
| 3638 | |
| 3639 | raw_spin_lock_irqsave(&rq->lock, flags); |
| 3640 | |
| 3641 | update_rq_clock(rq); |
Paul Turner | d6b5591 | 2010-11-15 15:47:09 -0800 | [diff] [blame] | 3642 | update_cfs_load(cfs_rq, 1); |
Paul Turner | aff3e49 | 2012-10-04 13:18:30 +0200 | [diff] [blame^] | 3643 | update_cfs_rq_blocked_load(cfs_rq, 1); |
Peter Zijlstra | 9e3081c | 2010-11-15 15:47:02 -0800 | [diff] [blame] | 3644 | |
| 3645 | /* |
| 3646 | * We need to update shares after updating tg->load_weight in |
| 3647 | * order to adjust the weight of groups with long running tasks. |
| 3648 | */ |
Paul Turner | 6d5ab29 | 2011-01-21 20:45:01 -0800 | [diff] [blame] | 3649 | update_cfs_shares(cfs_rq); |
Peter Zijlstra | 9e3081c | 2010-11-15 15:47:02 -0800 | [diff] [blame] | 3650 | |
| 3651 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
| 3652 | |
| 3653 | return 0; |
| 3654 | } |
| 3655 | |
| 3656 | static void update_shares(int cpu) |
| 3657 | { |
| 3658 | struct cfs_rq *cfs_rq; |
| 3659 | struct rq *rq = cpu_rq(cpu); |
| 3660 | |
| 3661 | rcu_read_lock(); |
Peter Zijlstra | 9763b67 | 2011-07-13 13:09:25 +0200 | [diff] [blame] | 3662 | /* |
| 3663 | * Iterates the task_group tree in a bottom-up fashion; see |
| 3664 | * list_add_leaf_cfs_rq() for details. |
| 3665 | */ |
Paul Turner | 64660c8 | 2011-07-21 09:43:36 -0700 | [diff] [blame] | 3666 | for_each_leaf_cfs_rq(rq, cfs_rq) { |
| 3667 | /* throttled entities do not contribute to load */ |
| 3668 | if (throttled_hierarchy(cfs_rq)) |
| 3669 | continue; |
| 3670 | |
Paul Turner | 67e8625 | 2010-11-15 15:47:05 -0800 | [diff] [blame] | 3671 | update_shares_cpu(cfs_rq->tg, cpu); |
Paul Turner | 64660c8 | 2011-07-21 09:43:36 -0700 | [diff] [blame] | 3672 | } |
Peter Zijlstra | 9e3081c | 2010-11-15 15:47:02 -0800 | [diff] [blame] | 3673 | rcu_read_unlock(); |
| 3674 | } |
| 3675 | |
Peter Zijlstra | 9763b67 | 2011-07-13 13:09:25 +0200 | [diff] [blame] | 3676 | /* |
| 3677 | * Compute the cpu's hierarchical load factor for each task group. |
| 3678 | * This needs to be done in a top-down fashion because the load of a child |
| 3679 | * group is a fraction of its parent's load. |
| 3680 | */ |
| 3681 | static int tg_load_down(struct task_group *tg, void *data) |
| 3682 | { |
| 3683 | unsigned long load; |
| 3684 | long cpu = (long)data; |
| 3685 | |
| 3686 | if (!tg->parent) { |
| 3687 | load = cpu_rq(cpu)->load.weight; |
| 3688 | } else { |
| 3689 | load = tg->parent->cfs_rq[cpu]->h_load; |
| 3690 | load *= tg->se[cpu]->load.weight; |
| 3691 | load /= tg->parent->cfs_rq[cpu]->load.weight + 1; |
| 3692 | } |
| 3693 | |
| 3694 | tg->cfs_rq[cpu]->h_load = load; |
| 3695 | |
| 3696 | return 0; |
| 3697 | } |
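/*
 * Editor's note: an illustrative sketch, not part of the kernel source.
 * tg_load_down() above scales the parent's h_load by this group's share
 * of the parent runqueue's weight. All of the numbers here are invented.
 */
static unsigned long example_child_h_load(unsigned long parent_h_load,
                                          unsigned long group_se_weight,
                                          unsigned long parent_cfs_rq_weight)
{
        /* the +1 mirrors the guard against dividing by zero above */
        return parent_h_load * group_se_weight / (parent_cfs_rq_weight + 1);
}
/*
 * example_child_h_load(2048, 512, 1023) == 1024: a group holding half of
 * its parent's weight inherits half of the parent's h_load.
 */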
| 3698 | |
| 3699 | static void update_h_load(long cpu) |
| 3700 | { |
Peter Zijlstra | a35b646 | 2012-08-08 21:46:40 +0200 | [diff] [blame] | 3701 | struct rq *rq = cpu_rq(cpu); |
| 3702 | unsigned long now = jiffies; |
| 3703 | |
| 3704 | if (rq->h_load_throttle == now) |
| 3705 | return; |
| 3706 | |
| 3707 | rq->h_load_throttle = now; |
| 3708 | |
Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3709 | rcu_read_lock(); |
Peter Zijlstra | 9763b67 | 2011-07-13 13:09:25 +0200 | [diff] [blame] | 3710 | walk_tg_tree(tg_load_down, tg_nop, (void *)cpu); |
Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3711 | rcu_read_unlock(); |
Peter Zijlstra | 9763b67 | 2011-07-13 13:09:25 +0200 | [diff] [blame] | 3712 | } |
| 3713 | |
Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3714 | static unsigned long task_h_load(struct task_struct *p) |
Peter Zijlstra | 230059de | 2009-12-17 17:47:12 +0100 | [diff] [blame] | 3715 | { |
Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3716 | struct cfs_rq *cfs_rq = task_cfs_rq(p); |
| 3717 | unsigned long load; |
Peter Zijlstra | 230059de | 2009-12-17 17:47:12 +0100 | [diff] [blame] | 3718 | |
Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3719 | load = p->se.load.weight; |
| 3720 | load = div_u64(load * cfs_rq->h_load, cfs_rq->load.weight + 1); |
Peter Zijlstra | 230059de | 2009-12-17 17:47:12 +0100 | [diff] [blame] | 3721 | |
Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3722 | return load; |
Peter Zijlstra | 230059de | 2009-12-17 17:47:12 +0100 | [diff] [blame] | 3723 | } |
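/*
 * Editor's note: an illustrative sketch, not part of the kernel source.
 * task_h_load() above gives the task the same fraction of its group's
 * h_load as its weight represents of the group runqueue. The numbers in
 * the usage comment are invented.
 */
static unsigned long example_task_h_load(unsigned long task_weight,
                                         unsigned long cfs_rq_h_load,
                                         unsigned long cfs_rq_weight)
{
        return task_weight * cfs_rq_h_load / (cfs_rq_weight + 1);
}
/*
 * example_task_h_load(1024, 1024, 2047) == 512: one of two equal nice-0
 * tasks in a group whose hierarchical load is one full CPU's worth.
 */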
| 3724 | #else |
Peter Zijlstra | 9e3081c | 2010-11-15 15:47:02 -0800 | [diff] [blame] | 3725 | static inline void update_shares(int cpu) |
| 3726 | { |
| 3727 | } |
| 3728 | |
Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3729 | static inline void update_h_load(long cpu) |
Peter Zijlstra | 230059de | 2009-12-17 17:47:12 +0100 | [diff] [blame] | 3730 | { |
Peter Zijlstra | 367456c | 2012-02-20 21:49:09 +0100 | [diff] [blame] | 3731 | } |
| 3732 | |
| 3733 | static unsigned long task_h_load(struct task_struct *p) |
| 3734 | { |
| 3735 | return p->se.load.weight; |
Peter Zijlstra | 230059de | 2009-12-17 17:47:12 +0100 | [diff] [blame] | 3736 | } |
| 3737 | #endif |
| 3738 | |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3739 | /********** Helpers for find_busiest_group ************************/ |
| 3740 | /* |
| 3741 | * sd_lb_stats - Structure to store the statistics of a sched_domain |
| 3742 | * during load balancing. |
| 3743 | */ |
| 3744 | struct sd_lb_stats { |
| 3745 | struct sched_group *busiest; /* Busiest group in this sd */ |
| 3746 | struct sched_group *this; /* Local group in this sd */ |
| 3747 | unsigned long total_load; /* Total load of all groups in sd */ |
| 3748 | unsigned long total_pwr; /* Total power of all groups in sd */ |
| 3749 | unsigned long avg_load; /* Average load across all groups in sd */ |
| 3750 | |
| 3751 | /** Statistics of this group */ |
| 3752 | unsigned long this_load; |
| 3753 | unsigned long this_load_per_task; |
| 3754 | unsigned long this_nr_running; |
Nikhil Rao | fab4762 | 2010-10-15 13:12:29 -0700 | [diff] [blame] | 3755 | unsigned long this_has_capacity; |
Suresh Siddha | aae6d3d | 2010-09-17 15:02:32 -0700 | [diff] [blame] | 3756 | unsigned int this_idle_cpus; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3757 | |
| 3758 | /* Statistics of the busiest group */ |
Suresh Siddha | aae6d3d | 2010-09-17 15:02:32 -0700 | [diff] [blame] | 3759 | unsigned int busiest_idle_cpus; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3760 | unsigned long max_load; |
| 3761 | unsigned long busiest_load_per_task; |
| 3762 | unsigned long busiest_nr_running; |
Suresh Siddha | dd5feea | 2010-02-23 16:13:52 -0800 | [diff] [blame] | 3763 | unsigned long busiest_group_capacity; |
Nikhil Rao | fab4762 | 2010-10-15 13:12:29 -0700 | [diff] [blame] | 3764 | unsigned long busiest_has_capacity; |
Suresh Siddha | aae6d3d | 2010-09-17 15:02:32 -0700 | [diff] [blame] | 3765 | unsigned int busiest_group_weight; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3766 | |
| 3767 | int group_imb; /* Is there imbalance in this sd */ |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3768 | }; |
| 3769 | |
| 3770 | /* |
| 3771 | * sg_lb_stats - stats of a sched_group required for load_balancing |
| 3772 | */ |
| 3773 | struct sg_lb_stats { |
| 3774 | unsigned long avg_load; /* Avg load across the CPUs of the group */ |
| 3775 | unsigned long group_load; /* Total load over the CPUs of the group */ |
| 3776 | unsigned long sum_nr_running; /* Nr tasks running in the group */ |
| 3777 | unsigned long sum_weighted_load; /* Weighted load of group's tasks */ |
| 3778 | unsigned long group_capacity; |
Suresh Siddha | aae6d3d | 2010-09-17 15:02:32 -0700 | [diff] [blame] | 3779 | unsigned long idle_cpus; |
| 3780 | unsigned long group_weight; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3781 | int group_imb; /* Is there an imbalance in the group ? */ |
Nikhil Rao | fab4762 | 2010-10-15 13:12:29 -0700 | [diff] [blame] | 3782 | int group_has_capacity; /* Is there extra capacity in the group? */ |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3783 | }; |
| 3784 | |
| 3785 | /** |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3786 | * get_sd_load_idx - Obtain the load index for a given sched domain. |
| 3787 | * @sd: The sched_domain whose load_idx is to be obtained. |
| 3788 | * @idle: The idle status of the CPU for whose sd the load_idx is obtained. |
| 3789 | */ |
| 3790 | static inline int get_sd_load_idx(struct sched_domain *sd, |
| 3791 | enum cpu_idle_type idle) |
| 3792 | { |
| 3793 | int load_idx; |
| 3794 | |
| 3795 | switch (idle) { |
| 3796 | case CPU_NOT_IDLE: |
| 3797 | load_idx = sd->busy_idx; |
| 3798 | break; |
| 3799 | |
| 3800 | case CPU_NEWLY_IDLE: |
| 3801 | load_idx = sd->newidle_idx; |
| 3802 | break; |
| 3803 | default: |
| 3804 | load_idx = sd->idle_idx; |
| 3805 | break; |
| 3806 | } |
| 3807 | |
| 3808 | return load_idx; |
| 3809 | } |
| 3810 | |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3811 | unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu) |
| 3812 | { |
Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 3813 | return SCHED_POWER_SCALE; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3814 | } |
| 3815 | |
| 3816 | unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu) |
| 3817 | { |
| 3818 | return default_scale_freq_power(sd, cpu); |
| 3819 | } |
| 3820 | |
| 3821 | unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu) |
| 3822 | { |
Peter Zijlstra | 669c55e | 2010-04-16 14:59:29 +0200 | [diff] [blame] | 3823 | unsigned long weight = sd->span_weight; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3824 | unsigned long smt_gain = sd->smt_gain; |
| 3825 | |
| 3826 | smt_gain /= weight; |
| 3827 | |
| 3828 | return smt_gain; |
| 3829 | } |
| 3830 | |
| 3831 | unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu) |
| 3832 | { |
| 3833 | return default_scale_smt_power(sd, cpu); |
| 3834 | } |
| 3835 | |
| 3836 | unsigned long scale_rt_power(int cpu) |
| 3837 | { |
| 3838 | struct rq *rq = cpu_rq(cpu); |
Peter Zijlstra | b654f7d | 2012-05-22 14:04:28 +0200 | [diff] [blame] | 3839 | u64 total, available, age_stamp, avg; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3840 | |
Peter Zijlstra | b654f7d | 2012-05-22 14:04:28 +0200 | [diff] [blame] | 3841 | /* |
| 3842 | * Since we're reading these variables without serialization, make sure |
| 3843 | * we read them once before doing sanity checks on them. |
| 3844 | */ |
| 3845 | age_stamp = ACCESS_ONCE(rq->age_stamp); |
| 3846 | avg = ACCESS_ONCE(rq->rt_avg); |
Venkatesh Pallipadi | aa48380 | 2010-10-04 17:03:22 -0700 | [diff] [blame] | 3847 | |
Peter Zijlstra | b654f7d | 2012-05-22 14:04:28 +0200 | [diff] [blame] | 3848 | total = sched_avg_period() + (rq->clock - age_stamp); |
| 3849 | |
| 3850 | if (unlikely(total < avg)) { |
Venkatesh Pallipadi | aa48380 | 2010-10-04 17:03:22 -0700 | [diff] [blame] | 3851 | /* Ensures that power won't end up being negative */ |
| 3852 | available = 0; |
| 3853 | } else { |
Peter Zijlstra | b654f7d | 2012-05-22 14:04:28 +0200 | [diff] [blame] | 3854 | available = total - avg; |
Venkatesh Pallipadi | aa48380 | 2010-10-04 17:03:22 -0700 | [diff] [blame] | 3855 | } |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3856 | |
Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 3857 | if (unlikely((s64)total < SCHED_POWER_SCALE)) |
| 3858 | total = SCHED_POWER_SCALE; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3859 | |
Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 3860 | total >>= SCHED_POWER_SHIFT; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3861 | |
| 3862 | return div_u64(available, total); |
| 3863 | } |
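/*
 * Editor's note: an illustrative sketch, not part of the kernel source.
 * scale_rt_power() above effectively reports, on the 1024-unit
 * SCHED_POWER_SCALE, what fraction of recent time was NOT consumed by
 * RT/irq activity. The sample figures are invented, and the real code
 * additionally guards against an rt average that exceeds the period.
 */
static unsigned long long example_scale_rt_power(unsigned long long period_ns,
                                                 unsigned long long rt_avg_ns)
{
        unsigned long long available = period_ns > rt_avg_ns ?
                                                period_ns - rt_avg_ns : 0;

        return available / (period_ns >> 10);   /* >> SCHED_POWER_SHIFT */
}
/*
 * example_scale_rt_power(1000000000, 250000000) == 768: RT used ~25% of
 * the window, so ~75% of 1024 is left for CFS.
 */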
| 3864 | |
| 3865 | static void update_cpu_power(struct sched_domain *sd, int cpu) |
| 3866 | { |
Peter Zijlstra | 669c55e | 2010-04-16 14:59:29 +0200 | [diff] [blame] | 3867 | unsigned long weight = sd->span_weight; |
Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 3868 | unsigned long power = SCHED_POWER_SCALE; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3869 | struct sched_group *sdg = sd->groups; |
| 3870 | |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3871 | if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) { |
| 3872 | if (sched_feat(ARCH_POWER)) |
| 3873 | power *= arch_scale_smt_power(sd, cpu); |
| 3874 | else |
| 3875 | power *= default_scale_smt_power(sd, cpu); |
| 3876 | |
Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 3877 | power >>= SCHED_POWER_SHIFT; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3878 | } |
| 3879 | |
Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 3880 | sdg->sgp->power_orig = power; |
Srivatsa Vaddagiri | 9d5efe0 | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 3881 | |
| 3882 | if (sched_feat(ARCH_POWER)) |
| 3883 | power *= arch_scale_freq_power(sd, cpu); |
| 3884 | else |
| 3885 | power *= default_scale_freq_power(sd, cpu); |
| 3886 | |
Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 3887 | power >>= SCHED_POWER_SHIFT; |
Srivatsa Vaddagiri | 9d5efe0 | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 3888 | |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3889 | power *= scale_rt_power(cpu); |
Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 3890 | power >>= SCHED_POWER_SHIFT; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3891 | |
| 3892 | if (!power) |
| 3893 | power = 1; |
| 3894 | |
Peter Zijlstra | e51fd5e | 2010-05-31 12:37:30 +0200 | [diff] [blame] | 3895 | cpu_rq(cpu)->cpu_power = power; |
Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 3896 | sdg->sgp->power = power; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3897 | } |
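/*
 * Editor's note: an illustrative sketch, not part of the kernel source.
 * update_cpu_power() above chains up to three scale factors (SMT, arch
 * frequency, RT time), each expressed on the 1024 scale and folded in
 * with a multiply and a shift. The sample factors below are invented.
 */
static unsigned long example_cpu_power(unsigned long smt_factor,
                                       unsigned long freq_factor,
                                       unsigned long rt_factor)
{
        unsigned long power = 1024;             /* SCHED_POWER_SCALE */

        power = power * smt_factor >> 10;
        power = power * freq_factor >> 10;
        power = power * rt_factor >> 10;

        return power ? power : 1;               /* never report zero power */
}
/*
 * example_cpu_power(589, 1024, 768) == 441: an SMT sibling (589/1024)
 * at full frequency that loses roughly a quarter of its time to RT.
 */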
| 3898 | |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 3899 | void update_group_power(struct sched_domain *sd, int cpu) |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3900 | { |
| 3901 | struct sched_domain *child = sd->child; |
| 3902 | struct sched_group *group, *sdg = sd->groups; |
| 3903 | unsigned long power; |
Vincent Guittot | 4ec4412 | 2011-12-12 20:21:08 +0100 | [diff] [blame] | 3904 | unsigned long interval; |
| 3905 | |
| 3906 | interval = msecs_to_jiffies(sd->balance_interval); |
| 3907 | interval = clamp(interval, 1UL, max_load_balance_interval); |
| 3908 | sdg->sgp->next_update = jiffies + interval; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3909 | |
| 3910 | if (!child) { |
| 3911 | update_cpu_power(sd, cpu); |
| 3912 | return; |
| 3913 | } |
| 3914 | |
| 3915 | power = 0; |
| 3916 | |
Peter Zijlstra | 74a5ce2 | 2012-05-23 18:00:43 +0200 | [diff] [blame] | 3917 | if (child->flags & SD_OVERLAP) { |
| 3918 | /* |
| 3919 | * SD_OVERLAP domains cannot assume that child groups |
| 3920 | * span the current group. |
| 3921 | */ |
| 3922 | |
| 3923 | for_each_cpu(cpu, sched_group_cpus(sdg)) |
| 3924 | power += power_of(cpu); |
| 3925 | } else { |
| 3926 | /* |
| 3927 | * !SD_OVERLAP domains can assume that child groups |
| 3928 | * span the current group. |
| 3929 | */ |
| 3930 | |
| 3931 | group = child->groups; |
| 3932 | do { |
| 3933 | power += group->sgp->power; |
| 3934 | group = group->next; |
| 3935 | } while (group != child->groups); |
| 3936 | } |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3937 | |
Peter Zijlstra | c3decf0 | 2012-05-31 12:05:32 +0200 | [diff] [blame] | 3938 | sdg->sgp->power_orig = sdg->sgp->power = power; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3939 | } |
| 3940 | |
Srivatsa Vaddagiri | 9d5efe0 | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 3941 | /* |
| 3942 | * Try to fix up capacity for tiny siblings; this is needed when |
| 3943 | * things like SD_ASYM_PACKING need f_b_g to select another sibling |
| 3944 | * which on its own isn't powerful enough. |
| 3945 | * |
| 3946 | * See update_sd_pick_busiest() and check_asym_packing(). |
| 3947 | */ |
| 3948 | static inline int |
| 3949 | fix_small_capacity(struct sched_domain *sd, struct sched_group *group) |
| 3950 | { |
| 3951 | /* |
Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 3952 | * Only siblings can have significantly less than SCHED_POWER_SCALE |
Srivatsa Vaddagiri | 9d5efe0 | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 3953 | */ |
Peter Zijlstra | a6c75f2 | 2011-04-07 14:09:52 +0200 | [diff] [blame] | 3954 | if (!(sd->flags & SD_SHARE_CPUPOWER)) |
Srivatsa Vaddagiri | 9d5efe0 | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 3955 | return 0; |
| 3956 | |
| 3957 | /* |
| 3958 | * If ~90% of the cpu_power is still there, we're good. |
| 3959 | */ |
Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 3960 | if (group->sgp->power * 32 > group->sgp->power_orig * 29) |
Srivatsa Vaddagiri | 9d5efe0 | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 3961 | return 1; |
| 3962 | |
| 3963 | return 0; |
| 3964 | } |
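/*
 * Editor's note: an illustrative sketch, not part of the kernel source.
 * The "~90%" test above is the integer comparison power * 32 >
 * power_orig * 29, i.e. power must exceed 29/32 (about 90.6%) of the
 * original power for the sibling to keep a capacity of 1. The sample
 * values are invented.
 */
static int example_capacity_still_ok(unsigned long power,
                                     unsigned long power_orig)
{
        return power * 32 > power_orig * 29;
}
/* example_capacity_still_ok(940, 1024) == 1: 940*32 = 30080 > 1024*29 = 29696 */
/* example_capacity_still_ok(900, 1024) == 0: 900*32 = 28800 < 29696          */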
| 3965 | |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3966 | /** |
| 3967 | * update_sg_lb_stats - Update sched_group's statistics for load balancing. |
Randy Dunlap | cd96891 | 2012-06-08 13:18:33 -0700 | [diff] [blame] | 3968 | * @env: The load balancing environment. |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3969 | * @group: sched_group whose statistics are to be updated. |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3970 | * @load_idx: Load index of sched_domain of this_cpu for load calc. |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3971 | * @local_group: Does group contain this_cpu. |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3972 | * @balance: Should we balance. |
| 3973 | * @sgs: variable to hold the statistics for this group. |
| 3974 | */ |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 3975 | static inline void update_sg_lb_stats(struct lb_env *env, |
| 3976 | struct sched_group *group, int load_idx, |
Michael Wang | b9403130 | 2012-07-12 16:10:13 +0800 | [diff] [blame] | 3977 | int local_group, int *balance, struct sg_lb_stats *sgs) |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3978 | { |
Peter Zijlstra | e44bc5c | 2012-05-11 00:22:12 +0200 | [diff] [blame] | 3979 | unsigned long nr_running, max_nr_running, min_nr_running; |
| 3980 | unsigned long load, max_cpu_load, min_cpu_load; |
Peter Zijlstra | 04f733b | 2012-05-11 00:12:02 +0200 | [diff] [blame] | 3981 | unsigned int balance_cpu = -1, first_idle_cpu = 0; |
Suresh Siddha | dd5feea | 2010-02-23 16:13:52 -0800 | [diff] [blame] | 3982 | unsigned long avg_load_per_task = 0; |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 3983 | int i; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3984 | |
Gautham R Shenoy | 871e35b | 2010-01-20 14:02:44 -0600 | [diff] [blame] | 3985 | if (local_group) |
Peter Zijlstra | c117487 | 2012-05-31 14:47:33 +0200 | [diff] [blame] | 3986 | balance_cpu = group_balance_cpu(group); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3987 | |
| 3988 | /* Tally up the load of all CPUs in the group */ |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3989 | max_cpu_load = 0; |
| 3990 | min_cpu_load = ~0UL; |
Nikhil Rao | 2582f0e | 2010-10-13 12:09:36 -0700 | [diff] [blame] | 3991 | max_nr_running = 0; |
Peter Zijlstra | e44bc5c | 2012-05-11 00:22:12 +0200 | [diff] [blame] | 3992 | min_nr_running = ~0UL; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3993 | |
Michael Wang | b9403130 | 2012-07-12 16:10:13 +0800 | [diff] [blame] | 3994 | for_each_cpu_and(i, sched_group_cpus(group), env->cpus) { |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3995 | struct rq *rq = cpu_rq(i); |
| 3996 | |
Peter Zijlstra | e44bc5c | 2012-05-11 00:22:12 +0200 | [diff] [blame] | 3997 | nr_running = rq->nr_running; |
| 3998 | |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 3999 | /* Bias balancing toward cpus of our domain */ |
| 4000 | if (local_group) { |
Peter Zijlstra | c117487 | 2012-05-31 14:47:33 +0200 | [diff] [blame] | 4001 | if (idle_cpu(i) && !first_idle_cpu && |
| 4002 | cpumask_test_cpu(i, sched_group_mask(group))) { |
Peter Zijlstra | 04f733b | 2012-05-11 00:12:02 +0200 | [diff] [blame] | 4003 | first_idle_cpu = 1; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4004 | balance_cpu = i; |
| 4005 | } |
Peter Zijlstra | 04f733b | 2012-05-11 00:12:02 +0200 | [diff] [blame] | 4006 | |
| 4007 | load = target_load(i, load_idx); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4008 | } else { |
| 4009 | load = source_load(i, load_idx); |
Peter Zijlstra | e44bc5c | 2012-05-11 00:22:12 +0200 | [diff] [blame] | 4010 | if (load > max_cpu_load) |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4011 | max_cpu_load = load; |
| 4012 | if (min_cpu_load > load) |
| 4013 | min_cpu_load = load; |
Peter Zijlstra | e44bc5c | 2012-05-11 00:22:12 +0200 | [diff] [blame] | 4014 | |
| 4015 | if (nr_running > max_nr_running) |
| 4016 | max_nr_running = nr_running; |
| 4017 | if (min_nr_running > nr_running) |
| 4018 | min_nr_running = nr_running; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4019 | } |
| 4020 | |
| 4021 | sgs->group_load += load; |
Peter Zijlstra | e44bc5c | 2012-05-11 00:22:12 +0200 | [diff] [blame] | 4022 | sgs->sum_nr_running += nr_running; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4023 | sgs->sum_weighted_load += weighted_cpuload(i); |
Suresh Siddha | aae6d3d | 2010-09-17 15:02:32 -0700 | [diff] [blame] | 4024 | if (idle_cpu(i)) |
| 4025 | sgs->idle_cpus++; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4026 | } |
| 4027 | |
| 4028 | /* |
| 4029 | * The first idle cpu or the first cpu (busiest) in this sched group |
| 4030 | * is eligible for doing load balancing at this and above |
| 4031 | * domains. In the newly idle case, we will allow all the cpus |
| 4032 | * to do the newly idle load balance. |
| 4033 | */ |
Vincent Guittot | 4ec4412 | 2011-12-12 20:21:08 +0100 | [diff] [blame] | 4034 | if (local_group) { |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4035 | if (env->idle != CPU_NEWLY_IDLE) { |
Peter Zijlstra | 04f733b | 2012-05-11 00:12:02 +0200 | [diff] [blame] | 4036 | if (balance_cpu != env->dst_cpu) { |
Vincent Guittot | 4ec4412 | 2011-12-12 20:21:08 +0100 | [diff] [blame] | 4037 | *balance = 0; |
| 4038 | return; |
| 4039 | } |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4040 | update_group_power(env->sd, env->dst_cpu); |
Vincent Guittot | 4ec4412 | 2011-12-12 20:21:08 +0100 | [diff] [blame] | 4041 | } else if (time_after_eq(jiffies, group->sgp->next_update)) |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4042 | update_group_power(env->sd, env->dst_cpu); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4043 | } |
| 4044 | |
| 4045 | /* Adjust by relative CPU power of the group */ |
Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 4046 | sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4047 | |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4048 | /* |
| 4049 | * Consider the group unbalanced when the imbalance is larger |
Peter Zijlstra | 866ab43 | 2011-02-21 18:56:47 +0100 | [diff] [blame] | 4050 | * than the average weight of a task. |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4051 | * |
| 4052 | * APZ: with cgroup the avg task weight can vary wildly and |
| 4053 | * might not be a suitable number - should we keep a |
| 4054 | * normalized nr_running number somewhere that negates |
| 4055 | * the hierarchy? |
| 4056 | */ |
Suresh Siddha | dd5feea | 2010-02-23 16:13:52 -0800 | [diff] [blame] | 4057 | if (sgs->sum_nr_running) |
| 4058 | avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4059 | |
Peter Zijlstra | e44bc5c | 2012-05-11 00:22:12 +0200 | [diff] [blame] | 4060 | if ((max_cpu_load - min_cpu_load) >= avg_load_per_task && |
| 4061 | (max_nr_running - min_nr_running) > 1) |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4062 | sgs->group_imb = 1; |
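	/*
	 * As an illustration: one cpu in the group running three nice-0
	 * tasks (roughly 3072 of weighted load) while a sibling runs none
	 * trips both tests above; the per-cpu load spread exceeds one
	 * average task weight and the nr_running spread exceeds 1, so the
	 * group is flagged imbalanced even if its average load looks modest.
	 */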
| 4063 | |
Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 4064 | sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power, |
Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 4065 | SCHED_POWER_SCALE); |
Srivatsa Vaddagiri | 9d5efe0 | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 4066 | if (!sgs->group_capacity) |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4067 | sgs->group_capacity = fix_small_capacity(env->sd, group); |
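	/*
	 * Illustrative capacities: power 2048 rounds to 2 tasks, power 589
	 * (say, an SMT thread whose power was scaled down) still rounds to
	 * 1, and power 400 rounds to 0, in which case fix_small_capacity()
	 * above decides whether the group may still be treated as able to
	 * hold one task.
	 */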
Suresh Siddha | aae6d3d | 2010-09-17 15:02:32 -0700 | [diff] [blame] | 4068 | sgs->group_weight = group->group_weight; |
Nikhil Rao | fab4762 | 2010-10-15 13:12:29 -0700 | [diff] [blame] | 4069 | |
| 4070 | if (sgs->group_capacity > sgs->sum_nr_running) |
| 4071 | sgs->group_has_capacity = 1; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4072 | } |
| 4073 | |
| 4074 | /** |
Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 4075 |  * update_sd_pick_busiest - return true if @sg is the busiest group |
Randy Dunlap | cd96891 | 2012-06-08 13:18:33 -0700 | [diff] [blame] | 4076 | * @env: The load balancing environment. |
Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 4077 | * @sds: sched_domain statistics |
| 4078 | * @sg: sched_group candidate to be checked for being the busiest |
Michael Neuling | b6b1229 | 2010-06-10 12:06:21 +1000 | [diff] [blame] | 4079 | * @sgs: sched_group statistics |
Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 4080 | * |
| 4081 | * Determine if @sg is a busier group than the previously selected |
| 4082 | * busiest group. |
| 4083 | */ |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4084 | static bool update_sd_pick_busiest(struct lb_env *env, |
Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 4085 | struct sd_lb_stats *sds, |
| 4086 | struct sched_group *sg, |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4087 | struct sg_lb_stats *sgs) |
Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 4088 | { |
| 4089 | if (sgs->avg_load <= sds->max_load) |
| 4090 | return false; |
| 4091 | |
| 4092 | if (sgs->sum_nr_running > sgs->group_capacity) |
| 4093 | return true; |
| 4094 | |
| 4095 | if (sgs->group_imb) |
| 4096 | return true; |
| 4097 | |
| 4098 | /* |
| 4099 | * ASYM_PACKING needs to move all the work to the lowest |
| 4100 | * numbered CPUs in the group, therefore mark all groups |
| 4101 | * higher than ourself as busy. |
| 4102 | */ |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4103 | if ((env->sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running && |
| 4104 | env->dst_cpu < group_first_cpu(sg)) { |
Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 4105 | if (!sds->busiest) |
| 4106 | return true; |
| 4107 | |
| 4108 | if (group_first_cpu(sds->busiest) > group_first_cpu(sg)) |
| 4109 | return true; |
| 4110 | } |
| 4111 | |
| 4112 | return false; |
| 4113 | } |
| 4114 | |
| 4115 | /** |
Hui Kang | 461819a | 2011-10-11 23:00:59 -0400 | [diff] [blame] | 4116 | * update_sd_lb_stats - Update sched_domain's statistics for load balancing. |
Randy Dunlap | cd96891 | 2012-06-08 13:18:33 -0700 | [diff] [blame] | 4117 | * @env: The load balancing environment. |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4118 | * @balance: Should we balance. |
| 4119 | * @sds: variable to hold the statistics for this sched_domain. |
| 4120 | */ |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4121 | static inline void update_sd_lb_stats(struct lb_env *env, |
Michael Wang | b9403130 | 2012-07-12 16:10:13 +0800 | [diff] [blame] | 4122 | int *balance, struct sd_lb_stats *sds) |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4123 | { |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4124 | struct sched_domain *child = env->sd->child; |
| 4125 | struct sched_group *sg = env->sd->groups; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4126 | struct sg_lb_stats sgs; |
| 4127 | int load_idx, prefer_sibling = 0; |
| 4128 | |
| 4129 | if (child && child->flags & SD_PREFER_SIBLING) |
| 4130 | prefer_sibling = 1; |
| 4131 | |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4132 | load_idx = get_sd_load_idx(env->sd, env->idle); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4133 | |
| 4134 | do { |
| 4135 | int local_group; |
| 4136 | |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4137 | local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg)); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4138 | memset(&sgs, 0, sizeof(sgs)); |
Michael Wang | b9403130 | 2012-07-12 16:10:13 +0800 | [diff] [blame] | 4139 | update_sg_lb_stats(env, sg, load_idx, local_group, balance, &sgs); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4140 | |
Peter Zijlstra | 8f190fb | 2009-12-24 14:18:21 +0100 | [diff] [blame] | 4141 | if (local_group && !(*balance)) |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4142 | return; |
| 4143 | |
| 4144 | sds->total_load += sgs.group_load; |
Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 4145 | sds->total_pwr += sg->sgp->power; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4146 | |
| 4147 | /* |
| 4148 | 		 * In case the child domain prefers that tasks go to siblings |
Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 4149 | 		 * first, lower the sg capacity to one so that we'll try |
Nikhil Rao | 75dd321 | 2010-10-15 13:12:30 -0700 | [diff] [blame] | 4150 | 		 * to move all the excess tasks away. We lower the capacity |
| 4151 | 		 * of a group only if the local group has the capacity to fit |
| 4152 | 		 * these excess tasks, i.e. nr_running < group_capacity. The |
| 4153 | 		 * extra check prevents the case where you always pull from the |
| 4154 | 		 * heaviest group when it is already under-utilized (possible when |
| 4155 | 		 * a single large-weight task outweighs the rest of the tasks on the system). |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4156 | */ |
Nikhil Rao | 75dd321 | 2010-10-15 13:12:30 -0700 | [diff] [blame] | 4157 | if (prefer_sibling && !local_group && sds->this_has_capacity) |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4158 | sgs.group_capacity = min(sgs.group_capacity, 1UL); |
| 4159 | |
| 4160 | if (local_group) { |
| 4161 | sds->this_load = sgs.avg_load; |
Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 4162 | sds->this = sg; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4163 | sds->this_nr_running = sgs.sum_nr_running; |
| 4164 | sds->this_load_per_task = sgs.sum_weighted_load; |
Nikhil Rao | fab4762 | 2010-10-15 13:12:29 -0700 | [diff] [blame] | 4165 | sds->this_has_capacity = sgs.group_has_capacity; |
Suresh Siddha | aae6d3d | 2010-09-17 15:02:32 -0700 | [diff] [blame] | 4166 | sds->this_idle_cpus = sgs.idle_cpus; |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4167 | } else if (update_sd_pick_busiest(env, sds, sg, &sgs)) { |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4168 | sds->max_load = sgs.avg_load; |
Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 4169 | sds->busiest = sg; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4170 | sds->busiest_nr_running = sgs.sum_nr_running; |
Suresh Siddha | aae6d3d | 2010-09-17 15:02:32 -0700 | [diff] [blame] | 4171 | sds->busiest_idle_cpus = sgs.idle_cpus; |
Suresh Siddha | dd5feea | 2010-02-23 16:13:52 -0800 | [diff] [blame] | 4172 | sds->busiest_group_capacity = sgs.group_capacity; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4173 | sds->busiest_load_per_task = sgs.sum_weighted_load; |
Nikhil Rao | fab4762 | 2010-10-15 13:12:29 -0700 | [diff] [blame] | 4174 | sds->busiest_has_capacity = sgs.group_has_capacity; |
Suresh Siddha | aae6d3d | 2010-09-17 15:02:32 -0700 | [diff] [blame] | 4175 | sds->busiest_group_weight = sgs.group_weight; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4176 | sds->group_imb = sgs.group_imb; |
| 4177 | } |
| 4178 | |
Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 4179 | sg = sg->next; |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4180 | } while (sg != env->sd->groups); |
Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 4181 | } |
| 4182 | |
Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 4183 | /** |
| 4184 | * check_asym_packing - Check to see if the group is packed into the |
| 4185 |  * sched domain. |
| 4186 |  * |
| 4187 |  * This is primarily intended to be used at the sibling level. Some |
| 4188 | * cores like POWER7 prefer to use lower numbered SMT threads. In the |
| 4189 | * case of POWER7, it can move to lower SMT modes only when higher |
| 4190 | * threads are idle. When in lower SMT modes, the threads will |
| 4191 | * perform better since they share less core resources. Hence when we |
| 4192 | * have idle threads, we want them to be the higher ones. |
| 4193 | * |
| 4194 | * This packing function is run on idle threads. It checks to see if |
| 4195 | * the busiest CPU in this domain (core in the P7 case) has a higher |
| 4196 | * CPU number than the packing function is being run on. Here we are |
| 4197 |  * assuming a lower CPU number is equivalent to a lower SMT thread |
| 4198 |  * number. |
| 4199 | * |
Michael Neuling | b6b1229 | 2010-06-10 12:06:21 +1000 | [diff] [blame] | 4200 | * Returns 1 when packing is required and a task should be moved to |
| 4201 |  * this CPU. The amount of the imbalance is returned in env->imbalance. |
| 4202 | * |
Randy Dunlap | cd96891 | 2012-06-08 13:18:33 -0700 | [diff] [blame] | 4203 | * @env: The load balancing environment. |
Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 4204 | * @sds: Statistics of the sched_domain which is to be packed |
Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 4205 | */ |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4206 | static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds) |
Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 4207 | { |
| 4208 | int busiest_cpu; |
| 4209 | |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4210 | if (!(env->sd->flags & SD_ASYM_PACKING)) |
Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 4211 | return 0; |
| 4212 | |
| 4213 | if (!sds->busiest) |
| 4214 | return 0; |
| 4215 | |
| 4216 | busiest_cpu = group_first_cpu(sds->busiest); |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4217 | if (env->dst_cpu > busiest_cpu) |
Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 4218 | return 0; |
| 4219 | |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4220 | env->imbalance = DIV_ROUND_CLOSEST( |
| 4221 | sds->max_load * sds->busiest->sgp->power, SCHED_POWER_SCALE); |
| 4222 | |
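	/*
	 * The division above converts the power-scaled group load back into
	 * plain weighted-load units for the mover. For example (illustrative
	 * numbers): a max_load of 1024 on a busiest group with
	 * sgp->power == 2048 yields an imbalance of 2048, roughly the whole
	 * group's weighted load, which is what ASYM_PACKING wants pulled
	 * towards the lower-numbered cpu.
	 */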
Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 4223 | return 1; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4224 | } |
| 4225 | |
| 4226 | /** |
| 4227 | * fix_small_imbalance - Calculate the minor imbalance that exists |
| 4228 | * amongst the groups of a sched_domain, during |
| 4229 | * load balancing. |
Randy Dunlap | cd96891 | 2012-06-08 13:18:33 -0700 | [diff] [blame] | 4230 | * @env: The load balancing environment. |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4231 | * @sds: Statistics of the sched_domain whose imbalance is to be calculated. |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4232 | */ |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4233 | static inline |
| 4234 | void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds) |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4235 | { |
| 4236 | unsigned long tmp, pwr_now = 0, pwr_move = 0; |
| 4237 | unsigned int imbn = 2; |
Suresh Siddha | dd5feea | 2010-02-23 16:13:52 -0800 | [diff] [blame] | 4238 | unsigned long scaled_busy_load_per_task; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4239 | |
| 4240 | if (sds->this_nr_running) { |
| 4241 | sds->this_load_per_task /= sds->this_nr_running; |
| 4242 | if (sds->busiest_load_per_task > |
| 4243 | sds->this_load_per_task) |
| 4244 | imbn = 1; |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4245 | } else { |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4246 | sds->this_load_per_task = |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4247 | cpu_avg_load_per_task(env->dst_cpu); |
| 4248 | } |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4249 | |
Suresh Siddha | dd5feea | 2010-02-23 16:13:52 -0800 | [diff] [blame] | 4250 | scaled_busy_load_per_task = sds->busiest_load_per_task |
Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 4251 | * SCHED_POWER_SCALE; |
Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 4252 | scaled_busy_load_per_task /= sds->busiest->sgp->power; |
Suresh Siddha | dd5feea | 2010-02-23 16:13:52 -0800 | [diff] [blame] | 4253 | |
| 4254 | if (sds->max_load - sds->this_load + scaled_busy_load_per_task >= |
| 4255 | (scaled_busy_load_per_task * imbn)) { |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4256 | env->imbalance = sds->busiest_load_per_task; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4257 | return; |
| 4258 | } |
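	/*
	 * Illustrative case with imbn == 2: if the busiest group's per-task
	 * load scales to 1024 and max_load exceeds this_load by at least
	 * 1024 (e.g. max_load 2048 vs. this_load 512 gives
	 * 2048 - 512 + 1024 = 2560 >= 2048), moving one whole task's worth
	 * of load is already justified, so exactly that is reported above.
	 */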
| 4259 | |
| 4260 | /* |
| 4261 | * OK, we don't have enough imbalance to justify moving tasks, |
| 4262 | * however we may be able to increase total CPU power used by |
| 4263 | * moving them. |
| 4264 | */ |
| 4265 | |
Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 4266 | pwr_now += sds->busiest->sgp->power * |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4267 | min(sds->busiest_load_per_task, sds->max_load); |
Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 4268 | pwr_now += sds->this->sgp->power * |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4269 | min(sds->this_load_per_task, sds->this_load); |
Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 4270 | pwr_now /= SCHED_POWER_SCALE; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4271 | |
| 4272 | /* Amount of load we'd subtract */ |
Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 4273 | tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) / |
Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 4274 | sds->busiest->sgp->power; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4275 | if (sds->max_load > tmp) |
Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 4276 | pwr_move += sds->busiest->sgp->power * |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4277 | min(sds->busiest_load_per_task, sds->max_load - tmp); |
| 4278 | |
| 4279 | /* Amount of load we'd add */ |
Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 4280 | if (sds->max_load * sds->busiest->sgp->power < |
Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 4281 | sds->busiest_load_per_task * SCHED_POWER_SCALE) |
Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 4282 | tmp = (sds->max_load * sds->busiest->sgp->power) / |
| 4283 | sds->this->sgp->power; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4284 | else |
Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 4285 | tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) / |
Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 4286 | sds->this->sgp->power; |
| 4287 | pwr_move += sds->this->sgp->power * |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4288 | min(sds->this_load_per_task, sds->this_load + tmp); |
Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 4289 | pwr_move /= SCHED_POWER_SCALE; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4290 | |
| 4291 | /* Move if we gain throughput */ |
| 4292 | if (pwr_move > pwr_now) |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4293 | env->imbalance = sds->busiest_load_per_task; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4294 | } |
| 4295 | |
| 4296 | /** |
| 4297 | * calculate_imbalance - Calculate the amount of imbalance present within the |
| 4298 | * groups of a given sched_domain during load balance. |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4299 | * @env: load balance environment |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4300 | * @sds: statistics of the sched_domain whose imbalance is to be calculated. |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4301 | */ |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4302 | static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds) |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4303 | { |
Suresh Siddha | dd5feea | 2010-02-23 16:13:52 -0800 | [diff] [blame] | 4304 | unsigned long max_pull, load_above_capacity = ~0UL; |
| 4305 | |
| 4306 | sds->busiest_load_per_task /= sds->busiest_nr_running; |
| 4307 | if (sds->group_imb) { |
| 4308 | sds->busiest_load_per_task = |
| 4309 | min(sds->busiest_load_per_task, sds->avg_load); |
| 4310 | } |
| 4311 | |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4312 | /* |
| 4313 | * In the presence of smp nice balancing, certain scenarios can have |
| 4314 | 	 * max load less than avg load (as we skip the groups at or below |
| 4315 | 	 * their cpu_power while calculating max_load). |
| 4316 | */ |
| 4317 | if (sds->max_load < sds->avg_load) { |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4318 | env->imbalance = 0; |
| 4319 | return fix_small_imbalance(env, sds); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4320 | } |
| 4321 | |
Suresh Siddha | dd5feea | 2010-02-23 16:13:52 -0800 | [diff] [blame] | 4322 | if (!sds->group_imb) { |
| 4323 | /* |
| 4324 | * Don't want to pull so many tasks that a group would go idle. |
| 4325 | */ |
| 4326 | load_above_capacity = (sds->busiest_nr_running - |
| 4327 | sds->busiest_group_capacity); |
| 4328 | |
Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 4329 | load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE); |
Suresh Siddha | dd5feea | 2010-02-23 16:13:52 -0800 | [diff] [blame] | 4330 | |
Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 4331 | load_above_capacity /= sds->busiest->sgp->power; |
Suresh Siddha | dd5feea | 2010-02-23 16:13:52 -0800 | [diff] [blame] | 4332 | } |
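	/*
	 * For example: 5 runnable tasks in a group whose capacity is 3
	 * leaves 2 tasks above capacity; the scaling above turns that count
	 * into the same power-scaled load units as avg_load (roughly,
	 * assuming nice-0 tasks), so the min() that produces max_pull below
	 * compares like with like.
	 */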
| 4333 | |
| 4334 | /* |
| 4335 | * We're trying to get all the cpus to the average_load, so we don't |
| 4336 | * want to push ourselves above the average load, nor do we wish to |
| 4337 | * reduce the max loaded cpu below the average load. At the same time, |
| 4338 | * we also don't want to reduce the group load below the group capacity |
| 4339 | * (so that we can implement power-savings policies etc). Thus we look |
| 4340 | * for the minimum possible imbalance. |
| 4341 | * Be careful of negative numbers as they'll appear as very large values |
| 4342 | * with unsigned longs. |
| 4343 | */ |
| 4344 | max_pull = min(sds->max_load - sds->avg_load, load_above_capacity); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4345 | |
| 4346 | /* How much load to actually move to equalise the imbalance */ |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4347 | env->imbalance = min(max_pull * sds->busiest->sgp->power, |
Peter Zijlstra | 9c3f75c | 2011-07-14 13:00:06 +0200 | [diff] [blame] | 4348 | (sds->avg_load - sds->this_load) * sds->this->sgp->power) |
Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 4349 | / SCHED_POWER_SCALE; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4350 | |
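	/*
	 * Illustrative numbers: with avg_load 1200, max_load 1500,
	 * this_load 900 and both groups at full power, max_pull is 300 and
	 * the local group can also absorb 300 before reaching the average,
	 * so env->imbalance ends up as 300 worth of weighted load to move.
	 */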
| 4351 | /* |
| 4352 | 	 * If env->imbalance is less than the average load per runnable task, |
Lucas De Marchi | 25985ed | 2011-03-30 22:57:33 -0300 | [diff] [blame] | 4353 | 	 * there is no guarantee that any tasks will be moved, so consider |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4354 | 	 * bumping its value to force at least one task to be |
| 4355 | 	 * moved. |
| 4356 | */ |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4357 | if (env->imbalance < sds->busiest_load_per_task) |
| 4358 | return fix_small_imbalance(env, sds); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4359 | |
| 4360 | } |
Nikhil Rao | fab4762 | 2010-10-15 13:12:29 -0700 | [diff] [blame] | 4361 | |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4362 | /******* find_busiest_group() helpers end here *********************/ |
| 4363 | |
| 4364 | /** |
| 4365 | * find_busiest_group - Returns the busiest group within the sched_domain |
| 4366 | * if there is an imbalance. If there isn't an imbalance, and |
| 4367 | * the user has opted for power-savings, it returns a group whose |
| 4368 | * CPUs can be put to idle by rebalancing those tasks elsewhere, if |
| 4369 | * such a group exists. |
| 4370 | * |
| 4371 | * Also calculates the amount of weighted load which should be moved |
| 4372 | * to restore balance. |
| 4373 | * |
Randy Dunlap | cd96891 | 2012-06-08 13:18:33 -0700 | [diff] [blame] | 4374 | * @env: The load balancing environment. |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4375 | * @balance: Pointer to a variable indicating if this_cpu |
| 4376 | * is the appropriate cpu to perform load balancing at this_level. |
| 4377 | * |
| 4378 | * Returns: - the busiest group if imbalance exists. |
| 4379 | * - If no imbalance and user has opted for power-savings balance, |
| 4380 | * return the least loaded group whose CPUs can be |
| 4381 | * put to idle by rebalancing its tasks onto our group. |
| 4382 | */ |
| 4383 | static struct sched_group * |
Michael Wang | b9403130 | 2012-07-12 16:10:13 +0800 | [diff] [blame] | 4384 | find_busiest_group(struct lb_env *env, int *balance) |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4385 | { |
| 4386 | struct sd_lb_stats sds; |
| 4387 | |
| 4388 | memset(&sds, 0, sizeof(sds)); |
| 4389 | |
| 4390 | /* |
| 4391 | 	 * Compute the various statistics relevant for load balancing at |
| 4392 | * this level. |
| 4393 | */ |
Michael Wang | b9403130 | 2012-07-12 16:10:13 +0800 | [diff] [blame] | 4394 | update_sd_lb_stats(env, balance, &sds); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4395 | |
Peter Zijlstra | cc57aa8 | 2011-02-21 18:55:32 +0100 | [diff] [blame] | 4396 | /* |
| 4397 | * this_cpu is not the appropriate cpu to perform load balancing at |
| 4398 | * this level. |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4399 | */ |
Peter Zijlstra | 8f190fb | 2009-12-24 14:18:21 +0100 | [diff] [blame] | 4400 | if (!(*balance)) |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4401 | goto ret; |
| 4402 | |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4403 | if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) && |
| 4404 | check_asym_packing(env, &sds)) |
Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 4405 | return sds.busiest; |
| 4406 | |
Peter Zijlstra | cc57aa8 | 2011-02-21 18:55:32 +0100 | [diff] [blame] | 4407 | /* There is no busy sibling group to pull tasks from */ |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4408 | if (!sds.busiest || sds.busiest_nr_running == 0) |
| 4409 | goto out_balanced; |
| 4410 | |
Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 4411 | sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr; |
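	/*
	 * For example (illustrative): total_load 3000 spread over
	 * total_pwr 3072 (three full-power cpus) gives a domain-wide
	 * avg_load of 1000, the per-power-unit load that each group is
	 * measured against below.
	 */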
Ken Chen | b0432d8 | 2011-04-07 17:23:22 -0700 | [diff] [blame] | 4412 | |
Peter Zijlstra | 866ab43 | 2011-02-21 18:56:47 +0100 | [diff] [blame] | 4413 | /* |
| 4414 | * If the busiest group is imbalanced the below checks don't |
| 4415 | 	 * work because they assume all things are equal, which typically |
| 4416 | * isn't true due to cpus_allowed constraints and the like. |
| 4417 | */ |
| 4418 | if (sds.group_imb) |
| 4419 | goto force_balance; |
| 4420 | |
Peter Zijlstra | cc57aa8 | 2011-02-21 18:55:32 +0100 | [diff] [blame] | 4421 | /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */ |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4422 | if (env->idle == CPU_NEWLY_IDLE && sds.this_has_capacity && |
Nikhil Rao | fab4762 | 2010-10-15 13:12:29 -0700 | [diff] [blame] | 4423 | !sds.busiest_has_capacity) |
| 4424 | goto force_balance; |
| 4425 | |
Peter Zijlstra | cc57aa8 | 2011-02-21 18:55:32 +0100 | [diff] [blame] | 4426 | /* |
| 4427 | * If the local group is more busy than the selected busiest group |
| 4428 | * don't try and pull any tasks. |
| 4429 | */ |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4430 | if (sds.this_load >= sds.max_load) |
| 4431 | goto out_balanced; |
| 4432 | |
Peter Zijlstra | cc57aa8 | 2011-02-21 18:55:32 +0100 | [diff] [blame] | 4433 | /* |
| 4434 | * Don't pull any tasks if this group is already above the domain |
| 4435 | * average load. |
| 4436 | */ |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4437 | if (sds.this_load >= sds.avg_load) |
| 4438 | goto out_balanced; |
| 4439 | |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4440 | if (env->idle == CPU_IDLE) { |
Suresh Siddha | aae6d3d | 2010-09-17 15:02:32 -0700 | [diff] [blame] | 4441 | /* |
| 4442 | 		 * This cpu is idle. If the busiest group doesn't |
| 4443 | 		 * have more tasks than the number of available cpus and |
| 4444 | 		 * there is no imbalance between this and the busiest group |
| 4445 | 		 * wrt idle cpus, it is balanced. |
| 4446 | */ |
Peter Zijlstra | c186faf | 2011-02-21 18:52:53 +0100 | [diff] [blame] | 4447 | if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) && |
Suresh Siddha | aae6d3d | 2010-09-17 15:02:32 -0700 | [diff] [blame] | 4448 | sds.busiest_nr_running <= sds.busiest_group_weight) |
| 4449 | goto out_balanced; |
Peter Zijlstra | c186faf | 2011-02-21 18:52:53 +0100 | [diff] [blame] | 4450 | } else { |
| 4451 | /* |
| 4452 | * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use |
| 4453 | * imbalance_pct to be conservative. |
| 4454 | */ |
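		/*
		 * For example, with an imbalance_pct of 125 (a typical value
		 * for cpu-level domains), a busy cpu only pulls once the
		 * busiest group's load exceeds the local group's by more
		 * than 25%.
		 */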
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4455 | if (100 * sds.max_load <= env->sd->imbalance_pct * sds.this_load) |
Peter Zijlstra | c186faf | 2011-02-21 18:52:53 +0100 | [diff] [blame] | 4456 | goto out_balanced; |
Suresh Siddha | aae6d3d | 2010-09-17 15:02:32 -0700 | [diff] [blame] | 4457 | } |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4458 | |
Nikhil Rao | fab4762 | 2010-10-15 13:12:29 -0700 | [diff] [blame] | 4459 | force_balance: |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4460 | /* Looks like there is an imbalance. Compute it */ |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4461 | calculate_imbalance(env, &sds); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4462 | return sds.busiest; |
| 4463 | |
| 4464 | out_balanced: |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4465 | ret: |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4466 | env->imbalance = 0; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4467 | return NULL; |
| 4468 | } |
| 4469 | |
| 4470 | /* |
| 4471 | * find_busiest_queue - find the busiest runqueue among the cpus in group. |
| 4472 | */ |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4473 | static struct rq *find_busiest_queue(struct lb_env *env, |
Michael Wang | b9403130 | 2012-07-12 16:10:13 +0800 | [diff] [blame] | 4474 | struct sched_group *group) |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4475 | { |
| 4476 | struct rq *busiest = NULL, *rq; |
| 4477 | unsigned long max_load = 0; |
| 4478 | int i; |
| 4479 | |
| 4480 | for_each_cpu(i, sched_group_cpus(group)) { |
| 4481 | unsigned long power = power_of(i); |
Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 4482 | unsigned long capacity = DIV_ROUND_CLOSEST(power, |
| 4483 | SCHED_POWER_SCALE); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4484 | unsigned long wl; |
| 4485 | |
Srivatsa Vaddagiri | 9d5efe0 | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 4486 | if (!capacity) |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4487 | capacity = fix_small_capacity(env->sd, group); |
Srivatsa Vaddagiri | 9d5efe0 | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 4488 | |
Michael Wang | b9403130 | 2012-07-12 16:10:13 +0800 | [diff] [blame] | 4489 | if (!cpumask_test_cpu(i, env->cpus)) |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4490 | continue; |
| 4491 | |
| 4492 | rq = cpu_rq(i); |
Thomas Gleixner | 6e40f5b | 2010-02-16 16:48:56 +0100 | [diff] [blame] | 4493 | wl = weighted_cpuload(i); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4494 | |
Thomas Gleixner | 6e40f5b | 2010-02-16 16:48:56 +0100 | [diff] [blame] | 4495 | /* |
| 4496 | * When comparing with imbalance, use weighted_cpuload() |
| 4497 | * which is not scaled with the cpu power. |
| 4498 | */ |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4499 | if (capacity && rq->nr_running == 1 && wl > env->imbalance) |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4500 | continue; |
| 4501 | |
Thomas Gleixner | 6e40f5b | 2010-02-16 16:48:56 +0100 | [diff] [blame] | 4502 | /* |
| 4503 | 		 * For the load comparisons with the other cpus, consider |
| 4504 | * the weighted_cpuload() scaled with the cpu power, so that |
| 4505 | * the load can be moved away from the cpu that is potentially |
| 4506 | * running at a lower capacity. |
| 4507 | */ |
Nikhil Rao | 1399fa7 | 2011-05-18 10:09:39 -0700 | [diff] [blame] | 4508 | wl = (wl * SCHED_POWER_SCALE) / power; |
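		/*
		 * For example (illustrative): a weighted load of 1024 on a
		 * cpu whose power has been scaled down to 512 (e.g. by RT or
		 * IRQ time) becomes 2048 here, so a weaker cpu carrying the
		 * same raw load is preferred as the busiest queue.
		 */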
Thomas Gleixner | 6e40f5b | 2010-02-16 16:48:56 +0100 | [diff] [blame] | 4509 | |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4510 | if (wl > max_load) { |
| 4511 | max_load = wl; |
| 4512 | busiest = rq; |
| 4513 | } |
| 4514 | } |
| 4515 | |
| 4516 | return busiest; |
| 4517 | } |
| 4518 | |
| 4519 | /* |
| 4520 | * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but |
| 4521 | * so long as it is large enough. |
| 4522 | */ |
| 4523 | #define MAX_PINNED_INTERVAL 512 |
| 4524 | |
| 4525 | /* Working cpumask for load_balance and load_balance_newidle. */ |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 4526 | DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4527 | |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4528 | static int need_active_balance(struct lb_env *env) |
Peter Zijlstra | 1af3ed3 | 2009-12-23 15:10:31 +0100 | [diff] [blame] | 4529 | { |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4530 | struct sched_domain *sd = env->sd; |
| 4531 | |
| 4532 | if (env->idle == CPU_NEWLY_IDLE) { |
Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 4533 | |
| 4534 | /* |
| 4535 | * ASYM_PACKING needs to force migrate tasks from busy but |
| 4536 | * higher numbered CPUs in order to pack all tasks in the |
| 4537 | * lowest numbered CPUs. |
| 4538 | */ |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4539 | if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu) |
Michael Neuling | 532cb4c | 2010-06-08 14:57:02 +1000 | [diff] [blame] | 4540 | return 1; |
Peter Zijlstra | 1af3ed3 | 2009-12-23 15:10:31 +0100 | [diff] [blame] | 4541 | } |
| 4542 | |
| 4543 | return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2); |
| 4544 | } |
| 4545 | |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 4546 | static int active_load_balance_cpu_stop(void *data); |
| 4547 | |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4548 | /* |
| 4549 | * Check this_cpu to ensure it is balanced within domain. Attempt to move |
| 4550 | * tasks if there is an imbalance. |
| 4551 | */ |
| 4552 | static int load_balance(int this_cpu, struct rq *this_rq, |
| 4553 | struct sched_domain *sd, enum cpu_idle_type idle, |
| 4554 | int *balance) |
| 4555 | { |
Srivatsa Vaddagiri | 88b8dac | 2012-06-19 17:43:15 +0530 | [diff] [blame] | 4556 | int ld_moved, cur_ld_moved, active_balance = 0; |
| 4557 | int lb_iterations, max_lb_iterations; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4558 | struct sched_group *group; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4559 | struct rq *busiest; |
| 4560 | unsigned long flags; |
| 4561 | struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask); |
| 4562 | |
Peter Zijlstra | 8e45cb5 | 2012-02-22 12:47:19 +0100 | [diff] [blame] | 4563 | struct lb_env env = { |
| 4564 | .sd = sd, |
Peter Zijlstra | ddcdf6e | 2012-02-22 19:27:40 +0100 | [diff] [blame] | 4565 | .dst_cpu = this_cpu, |
| 4566 | .dst_rq = this_rq, |
Srivatsa Vaddagiri | 88b8dac | 2012-06-19 17:43:15 +0530 | [diff] [blame] | 4567 | .dst_grpmask = sched_group_cpus(sd->groups), |
Peter Zijlstra | 8e45cb5 | 2012-02-22 12:47:19 +0100 | [diff] [blame] | 4568 | .idle = idle, |
Peter Zijlstra | eb95308 | 2012-04-17 13:38:40 +0200 | [diff] [blame] | 4569 | .loop_break = sched_nr_migrate_break, |
Michael Wang | b9403130 | 2012-07-12 16:10:13 +0800 | [diff] [blame] | 4570 | .cpus = cpus, |
Peter Zijlstra | 8e45cb5 | 2012-02-22 12:47:19 +0100 | [diff] [blame] | 4571 | }; |
| 4572 | |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4573 | cpumask_copy(cpus, cpu_active_mask); |
Srivatsa Vaddagiri | 88b8dac | 2012-06-19 17:43:15 +0530 | [diff] [blame] | 4574 | max_lb_iterations = cpumask_weight(env.dst_grpmask); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4575 | |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4576 | schedstat_inc(sd, lb_count[idle]); |
| 4577 | |
| 4578 | redo: |
Michael Wang | b9403130 | 2012-07-12 16:10:13 +0800 | [diff] [blame] | 4579 | group = find_busiest_group(&env, balance); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4580 | |
| 4581 | if (*balance == 0) |
| 4582 | goto out_balanced; |
| 4583 | |
| 4584 | if (!group) { |
| 4585 | schedstat_inc(sd, lb_nobusyg[idle]); |
| 4586 | goto out_balanced; |
| 4587 | } |
| 4588 | |
Michael Wang | b9403130 | 2012-07-12 16:10:13 +0800 | [diff] [blame] | 4589 | busiest = find_busiest_queue(&env, group); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4590 | if (!busiest) { |
| 4591 | schedstat_inc(sd, lb_nobusyq[idle]); |
| 4592 | goto out_balanced; |
| 4593 | } |
| 4594 | |
Michael Wang | 78feefc | 2012-08-06 16:41:59 +0800 | [diff] [blame] | 4595 | BUG_ON(busiest == env.dst_rq); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4596 | |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4597 | schedstat_add(sd, lb_imbalance[idle], env.imbalance); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4598 | |
| 4599 | ld_moved = 0; |
Srivatsa Vaddagiri | 88b8dac | 2012-06-19 17:43:15 +0530 | [diff] [blame] | 4600 | lb_iterations = 1; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4601 | if (busiest->nr_running > 1) { |
| 4602 | /* |
| 4603 | * Attempt to move tasks. If find_busiest_group has found |
| 4604 | * an imbalance but busiest->nr_running <= 1, the group is |
| 4605 | * still unbalanced. ld_moved simply stays zero, so it is |
| 4606 | * correctly treated as an imbalance. |
| 4607 | */ |
Peter Zijlstra | 8e45cb5 | 2012-02-22 12:47:19 +0100 | [diff] [blame] | 4608 | env.flags |= LBF_ALL_PINNED; |
Peter Zijlstra | c82513e | 2012-04-26 13:12:27 +0200 | [diff] [blame] | 4609 | env.src_cpu = busiest->cpu; |
| 4610 | env.src_rq = busiest; |
| 4611 | env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running); |
Peter Zijlstra | 8e45cb5 | 2012-02-22 12:47:19 +0100 | [diff] [blame] | 4612 | |
Peter Zijlstra | a35b646 | 2012-08-08 21:46:40 +0200 | [diff] [blame] | 4613 | update_h_load(env.src_cpu); |
Peter Zijlstra | 5d6523e | 2012-03-10 00:07:36 +0100 | [diff] [blame] | 4614 | more_balance: |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4615 | local_irq_save(flags); |
Michael Wang | 78feefc | 2012-08-06 16:41:59 +0800 | [diff] [blame] | 4616 | double_rq_lock(env.dst_rq, busiest); |
Srivatsa Vaddagiri | 88b8dac | 2012-06-19 17:43:15 +0530 | [diff] [blame] | 4617 | |
| 4618 | /* |
| 4619 | * cur_ld_moved - load moved in current iteration |
| 4620 | * ld_moved - cumulative load moved across iterations |
| 4621 | */ |
| 4622 | cur_ld_moved = move_tasks(&env); |
| 4623 | ld_moved += cur_ld_moved; |
Michael Wang | 78feefc | 2012-08-06 16:41:59 +0800 | [diff] [blame] | 4624 | double_rq_unlock(env.dst_rq, busiest); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4625 | local_irq_restore(flags); |
| 4626 | |
Peter Zijlstra | 5d6523e | 2012-03-10 00:07:36 +0100 | [diff] [blame] | 4627 | if (env.flags & LBF_NEED_BREAK) { |
| 4628 | env.flags &= ~LBF_NEED_BREAK; |
| 4629 | goto more_balance; |
| 4630 | } |
| 4631 | |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4632 | /* |
| 4633 | * some other cpu did the load balance for us. |
| 4634 | */ |
Srivatsa Vaddagiri | 88b8dac | 2012-06-19 17:43:15 +0530 | [diff] [blame] | 4635 | if (cur_ld_moved && env.dst_cpu != smp_processor_id()) |
| 4636 | resched_cpu(env.dst_cpu); |
| 4637 | |
| 4638 | /* |
| 4639 | * Revisit (affine) tasks on src_cpu that couldn't be moved to |
| 4640 | * us and move them to an alternate dst_cpu in our sched_group |
| 4641 | * where they can run. The upper limit on how many times we |
| 4642 | 		 * iterate on the same src_cpu depends on the number of cpus in our |
| 4643 | * sched_group. |
| 4644 | * |
| 4645 | * This changes load balance semantics a bit on who can move |
| 4646 | * load to a given_cpu. In addition to the given_cpu itself |
| 4647 | 		 * (or an ilb_cpu acting on its behalf where given_cpu is |
| 4648 | 		 * nohz-idle), we now have balance_cpu in a position to move |
| 4649 | 		 * load to given_cpu. In rare situations, this may cause |
| 4650 | 		 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding |
| 4651 | 		 * _independently_ and at the _same_ time to move some load to |
| 4652 | 		 * given_cpu), causing excess load to be moved to given_cpu. |
| 4653 | 		 * This, however, should not happen often in practice, and |
| 4654 | * moreover subsequent load balance cycles should correct the |
| 4655 | * excess load moved. |
| 4656 | */ |
| 4657 | if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0 && |
| 4658 | lb_iterations++ < max_lb_iterations) { |
| 4659 | |
Michael Wang | 78feefc | 2012-08-06 16:41:59 +0800 | [diff] [blame] | 4660 | env.dst_rq = cpu_rq(env.new_dst_cpu); |
Srivatsa Vaddagiri | 88b8dac | 2012-06-19 17:43:15 +0530 | [diff] [blame] | 4661 | env.dst_cpu = env.new_dst_cpu; |
| 4662 | env.flags &= ~LBF_SOME_PINNED; |
| 4663 | env.loop = 0; |
| 4664 | env.loop_break = sched_nr_migrate_break; |
| 4665 | /* |
| 4666 | * Go back to "more_balance" rather than "redo" since we |
| 4667 | * need to continue with same src_cpu. |
| 4668 | */ |
| 4669 | goto more_balance; |
| 4670 | } |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4671 | |
| 4672 | /* All tasks on this runqueue were pinned by CPU affinity */ |
Peter Zijlstra | 8e45cb5 | 2012-02-22 12:47:19 +0100 | [diff] [blame] | 4673 | if (unlikely(env.flags & LBF_ALL_PINNED)) { |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4674 | cpumask_clear_cpu(cpu_of(busiest), cpus); |
Prashanth Nageshappa | bbf18b1 | 2012-06-19 17:52:07 +0530 | [diff] [blame] | 4675 | if (!cpumask_empty(cpus)) { |
| 4676 | env.loop = 0; |
| 4677 | env.loop_break = sched_nr_migrate_break; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4678 | goto redo; |
Prashanth Nageshappa | bbf18b1 | 2012-06-19 17:52:07 +0530 | [diff] [blame] | 4679 | } |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4680 | goto out_balanced; |
| 4681 | } |
| 4682 | } |
| 4683 | |
| 4684 | if (!ld_moved) { |
| 4685 | schedstat_inc(sd, lb_failed[idle]); |
Venkatesh Pallipadi | 58b26c4 | 2010-09-10 18:19:17 -0700 | [diff] [blame] | 4686 | /* |
| 4687 | * Increment the failure counter only on periodic balance. |
| 4688 | * We do not want newidle balance, which can be very |
| 4689 | * frequent, pollute the failure counter causing |
| 4690 | * excessive cache_hot migrations and active balances. |
| 4691 | */ |
| 4692 | if (idle != CPU_NEWLY_IDLE) |
| 4693 | sd->nr_balance_failed++; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4694 | |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4695 | if (need_active_balance(&env)) { |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4696 | raw_spin_lock_irqsave(&busiest->lock, flags); |
| 4697 | |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 4698 | /* don't kick the active_load_balance_cpu_stop, |
| 4699 | * if the curr task on busiest cpu can't be |
| 4700 | * moved to this_cpu |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4701 | */ |
| 4702 | if (!cpumask_test_cpu(this_cpu, |
Peter Zijlstra | fa17b50 | 2011-06-16 12:23:22 +0200 | [diff] [blame] | 4703 | tsk_cpus_allowed(busiest->curr))) { |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4704 | raw_spin_unlock_irqrestore(&busiest->lock, |
| 4705 | flags); |
Peter Zijlstra | 8e45cb5 | 2012-02-22 12:47:19 +0100 | [diff] [blame] | 4706 | env.flags |= LBF_ALL_PINNED; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4707 | goto out_one_pinned; |
| 4708 | } |
| 4709 | |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 4710 | /* |
| 4711 | * ->active_balance synchronizes accesses to |
| 4712 | * ->active_balance_work. Once set, it's cleared |
| 4713 | * only after active load balance is finished. |
| 4714 | */ |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4715 | if (!busiest->active_balance) { |
| 4716 | busiest->active_balance = 1; |
| 4717 | busiest->push_cpu = this_cpu; |
| 4718 | active_balance = 1; |
| 4719 | } |
| 4720 | raw_spin_unlock_irqrestore(&busiest->lock, flags); |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 4721 | |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4722 | if (active_balance) { |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 4723 | stop_one_cpu_nowait(cpu_of(busiest), |
| 4724 | active_load_balance_cpu_stop, busiest, |
| 4725 | &busiest->active_balance_work); |
Peter Zijlstra | bd939f4 | 2012-05-02 14:20:37 +0200 | [diff] [blame] | 4726 | } |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4727 | |
| 4728 | /* |
| 4729 | * We've kicked active balancing, reset the failure |
| 4730 | * counter. |
| 4731 | */ |
| 4732 | sd->nr_balance_failed = sd->cache_nice_tries+1; |
| 4733 | } |
| 4734 | } else |
| 4735 | sd->nr_balance_failed = 0; |
| 4736 | |
| 4737 | if (likely(!active_balance)) { |
| 4738 | /* We were unbalanced, so reset the balancing interval */ |
| 4739 | sd->balance_interval = sd->min_interval; |
| 4740 | } else { |
| 4741 | /* |
| 4742 | * If we've begun active balancing, start to back off. This |
| 4743 | * case may not be covered by the all_pinned logic if there |
| 4744 | * is only 1 task on the busy runqueue (because we don't call |
| 4745 | * move_tasks). |
| 4746 | */ |
| 4747 | if (sd->balance_interval < sd->max_interval) |
| 4748 | sd->balance_interval *= 2; |
| 4749 | } |
| 4750 | |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4751 | goto out; |
| 4752 | |
| 4753 | out_balanced: |
| 4754 | schedstat_inc(sd, lb_balanced[idle]); |
| 4755 | |
| 4756 | sd->nr_balance_failed = 0; |
| 4757 | |
| 4758 | out_one_pinned: |
| 4759 | /* tune up the balancing interval */ |
Peter Zijlstra | 8e45cb5 | 2012-02-22 12:47:19 +0100 | [diff] [blame] | 4760 | if (((env.flags & LBF_ALL_PINNED) && |
Peter Zijlstra | 5b54b56 | 2011-09-22 15:23:13 +0200 | [diff] [blame] | 4761 | sd->balance_interval < MAX_PINNED_INTERVAL) || |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4762 | (sd->balance_interval < sd->max_interval)) |
| 4763 | sd->balance_interval *= 2; |
| 4764 | |
Venkatesh Pallipadi | 46e49b3 | 2011-02-14 14:38:50 -0800 | [diff] [blame] | 4765 | ld_moved = 0; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4766 | out: |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4767 | return ld_moved; |
| 4768 | } |
| 4769 | |
| 4770 | /* |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4771 | * idle_balance is called by schedule() if this_cpu is about to become |
| 4772 | * idle. Attempts to pull tasks from other CPUs. |
| 4773 | */ |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 4774 | void idle_balance(int this_cpu, struct rq *this_rq) |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4775 | { |
| 4776 | struct sched_domain *sd; |
| 4777 | int pulled_task = 0; |
| 4778 | unsigned long next_balance = jiffies + HZ; |
| 4779 | |
| 4780 | this_rq->idle_stamp = this_rq->clock; |
| 4781 | |
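	/*
	 * If this cpu's average idle period is shorter than
	 * sysctl_sched_migration_cost (0.5 msec by default), a pulled task
	 * would likely not get to run before the cpu becomes busy again, so
	 * the locking and cache traffic of a newidle balance isn't worth it.
	 */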
| 4782 | if (this_rq->avg_idle < sysctl_sched_migration_cost) |
| 4783 | return; |
| 4784 | |
Ben Segall | 18bf280 | 2012-10-04 12:51:20 +0200 | [diff] [blame] | 4785 | update_rq_runnable_avg(this_rq, 1); |
| 4786 | |
Peter Zijlstra | f492e12 | 2009-12-23 15:29:42 +0100 | [diff] [blame] | 4787 | /* |
| 4788 | * Drop the rq->lock, but keep IRQ/preempt disabled. |
| 4789 | */ |
| 4790 | raw_spin_unlock(&this_rq->lock); |
| 4791 | |
Paul Turner | c66eaf6 | 2010-11-15 15:47:07 -0800 | [diff] [blame] | 4792 | update_shares(this_cpu); |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 4793 | rcu_read_lock(); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4794 | for_each_domain(this_cpu, sd) { |
| 4795 | unsigned long interval; |
Peter Zijlstra | f492e12 | 2009-12-23 15:29:42 +0100 | [diff] [blame] | 4796 | int balance = 1; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4797 | |
| 4798 | if (!(sd->flags & SD_LOAD_BALANCE)) |
| 4799 | continue; |
| 4800 | |
Peter Zijlstra | f492e12 | 2009-12-23 15:29:42 +0100 | [diff] [blame] | 4801 | if (sd->flags & SD_BALANCE_NEWIDLE) { |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4802 | /* If we've pulled tasks over stop searching: */ |
Peter Zijlstra | f492e12 | 2009-12-23 15:29:42 +0100 | [diff] [blame] | 4803 | pulled_task = load_balance(this_cpu, this_rq, |
| 4804 | sd, CPU_NEWLY_IDLE, &balance); |
| 4805 | } |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4806 | |
| 4807 | interval = msecs_to_jiffies(sd->balance_interval); |
| 4808 | if (time_after(next_balance, sd->last_balance + interval)) |
| 4809 | next_balance = sd->last_balance + interval; |
Nikhil Rao | d5ad140 | 2010-11-17 11:42:04 -0800 | [diff] [blame] | 4810 | if (pulled_task) { |
| 4811 | this_rq->idle_stamp = 0; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4812 | break; |
Nikhil Rao | d5ad140 | 2010-11-17 11:42:04 -0800 | [diff] [blame] | 4813 | } |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4814 | } |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 4815 | rcu_read_unlock(); |
Peter Zijlstra | f492e12 | 2009-12-23 15:29:42 +0100 | [diff] [blame] | 4816 | |
| 4817 | raw_spin_lock(&this_rq->lock); |
| 4818 | |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4819 | if (pulled_task || time_after(jiffies, this_rq->next_balance)) { |
| 4820 | /* |
| 4821 | * We are going idle. next_balance may be set based on |
| 4822 | * a busy processor. So reset next_balance. |
| 4823 | */ |
| 4824 | this_rq->next_balance = next_balance; |
| 4825 | } |
| 4826 | } |
| 4827 | |
| 4828 | /* |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 4829 |  * active_load_balance_cpu_stop is run by the cpu stopper. It pushes |
| 4830 | * running tasks off the busiest CPU onto idle CPUs. It requires at |
| 4831 | * least 1 task to be running on each physical CPU where possible, and |
| 4832 | * avoids physical / logical imbalances. |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4833 | */ |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 4834 | static int active_load_balance_cpu_stop(void *data) |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4835 | { |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 4836 | struct rq *busiest_rq = data; |
| 4837 | int busiest_cpu = cpu_of(busiest_rq); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4838 | int target_cpu = busiest_rq->push_cpu; |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 4839 | struct rq *target_rq = cpu_rq(target_cpu); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4840 | struct sched_domain *sd; |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 4841 | |
| 4842 | raw_spin_lock_irq(&busiest_rq->lock); |
| 4843 | |
| 4844 | /* make sure the requested cpu hasn't gone down in the meantime */ |
| 4845 | if (unlikely(busiest_cpu != smp_processor_id() || |
| 4846 | !busiest_rq->active_balance)) |
| 4847 | goto out_unlock; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4848 | |
| 4849 | /* Is there any task to move? */ |
| 4850 | if (busiest_rq->nr_running <= 1) |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 4851 | goto out_unlock; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4852 | |
| 4853 | /* |
| 4854 | 	 * This condition is "impossible"; if it occurs |
| 4855 | * we need to fix it. Originally reported by |
| 4856 | * Bjorn Helgaas on a 128-cpu setup. |
| 4857 | */ |
| 4858 | BUG_ON(busiest_rq == target_rq); |
| 4859 | |
| 4860 | /* move a task from busiest_rq to target_rq */ |
| 4861 | double_lock_balance(busiest_rq, target_rq); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4862 | |
| 4863 | /* Search for an sd spanning us and the target CPU. */ |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 4864 | rcu_read_lock(); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4865 | for_each_domain(target_cpu, sd) { |
| 4866 | if ((sd->flags & SD_LOAD_BALANCE) && |
| 4867 | cpumask_test_cpu(busiest_cpu, sched_domain_span(sd))) |
| 4868 | break; |
| 4869 | } |
| 4870 | |
| 4871 | if (likely(sd)) { |
Peter Zijlstra | 8e45cb5 | 2012-02-22 12:47:19 +0100 | [diff] [blame] | 4872 | struct lb_env env = { |
| 4873 | .sd = sd, |
Peter Zijlstra | ddcdf6e | 2012-02-22 19:27:40 +0100 | [diff] [blame] | 4874 | .dst_cpu = target_cpu, |
| 4875 | .dst_rq = target_rq, |
| 4876 | .src_cpu = busiest_rq->cpu, |
| 4877 | .src_rq = busiest_rq, |
Peter Zijlstra | 8e45cb5 | 2012-02-22 12:47:19 +0100 | [diff] [blame] | 4878 | .idle = CPU_IDLE, |
| 4879 | }; |
| 4880 | |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4881 | schedstat_inc(sd, alb_count); |
| 4882 | |
Peter Zijlstra | 8e45cb5 | 2012-02-22 12:47:19 +0100 | [diff] [blame] | 4883 | if (move_one_task(&env)) |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4884 | schedstat_inc(sd, alb_pushed); |
| 4885 | else |
| 4886 | schedstat_inc(sd, alb_failed); |
| 4887 | } |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 4888 | rcu_read_unlock(); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4889 | double_unlock_balance(busiest_rq, target_rq); |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 4890 | out_unlock: |
| 4891 | busiest_rq->active_balance = 0; |
| 4892 | raw_spin_unlock_irq(&busiest_rq->lock); |
| 4893 | return 0; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4894 | } |
| 4895 | |
| 4896 | #ifdef CONFIG_NO_HZ |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 4897 | /* |
| 4898 | * idle load balancing details |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 4899 |  * - When one of the busy CPUs notices that idle rebalancing may be |
| 4900 |  *   needed, it kicks the idle load balancer, which then does idle |
| 4901 | * load balancing for all the idle CPUs. |
| 4902 | */ |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4903 | static struct { |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 4904 | cpumask_var_t idle_cpus_mask; |
Suresh Siddha | 0b005cf | 2011-12-01 17:07:34 -0800 | [diff] [blame] | 4905 | atomic_t nr_cpus; |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 4906 | unsigned long next_balance; /* in jiffy units */ |
| 4907 | } nohz ____cacheline_aligned; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4908 | |
Peter Zijlstra | 8e7fbcb | 2012-01-09 11:28:35 +0100 | [diff] [blame] | 4909 | static inline int find_new_ilb(int call_cpu) |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4910 | { |
Suresh Siddha | 0b005cf | 2011-12-01 17:07:34 -0800 | [diff] [blame] | 4911 | int ilb = cpumask_first(nohz.idle_cpus_mask); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4912 | |
Suresh Siddha | 786d6dc7 | 2011-12-01 17:07:35 -0800 | [diff] [blame] | 4913 | if (ilb < nr_cpu_ids && idle_cpu(ilb)) |
| 4914 | return ilb; |
| 4915 | |
| 4916 | return nr_cpu_ids; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4917 | } |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4918 | |
| 4919 | /* |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 4920 |  * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
| 4921 |  * first CPU in nohz.idle_cpus_mask (if it is still idle) and send it a
| 4922 |  * reschedule IPI.
| 4923 | */ |
| 4924 | static void nohz_balancer_kick(int cpu) |
| 4925 | { |
| 4926 | int ilb_cpu; |
| 4927 | |
| 4928 | nohz.next_balance++; |
| 4929 | |
Suresh Siddha | 0b005cf | 2011-12-01 17:07:34 -0800 | [diff] [blame] | 4930 | ilb_cpu = find_new_ilb(cpu); |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 4931 | |
Suresh Siddha | 0b005cf | 2011-12-01 17:07:34 -0800 | [diff] [blame] | 4932 | if (ilb_cpu >= nr_cpu_ids) |
| 4933 | return; |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 4934 | |
Suresh Siddha | cd490c5 | 2011-12-06 11:26:34 -0800 | [diff] [blame] | 4935 | if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu))) |
Suresh Siddha | 1c792db | 2011-12-01 17:07:32 -0800 | [diff] [blame] | 4936 | return; |
| 4937 | /* |
| 4938 | * Use smp_send_reschedule() instead of resched_cpu(). |
| 4939 | 	 * This way we generate a sched IPI on the target cpu, which
| 4940 | 	 * is idle, and the softirq performing nohz idle load balancing
| 4941 | 	 * runs before returning from the IPI.
| 4942 | */ |
| 4943 | smp_send_reschedule(ilb_cpu); |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 4944 | return; |
| 4945 | } |
| 4946 | |
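/*
 * Undo the tickless-idle bookkeeping for a cpu: if NOHZ_TICK_STOPPED was
 * set, drop the cpu from nohz.idle_cpus_mask, decrement the idle-cpu
 * count and clear the flag, so it is no longer a candidate for nohz
 * idle load balancing.
 */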
Alex Shi | c1cc017 | 2012-09-10 15:10:58 +0800 | [diff] [blame] | 4947 | static inline void nohz_balance_exit_idle(int cpu) |
Suresh Siddha | 7132596 | 2012-01-19 18:28:57 -0800 | [diff] [blame] | 4948 | { |
| 4949 | if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) { |
| 4950 | cpumask_clear_cpu(cpu, nohz.idle_cpus_mask); |
| 4951 | atomic_dec(&nohz.nr_cpus); |
| 4952 | clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)); |
| 4953 | } |
| 4954 | } |
| 4955 | |
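/*
 * Keep each sched group's nr_busy_cpus in sync with this cpu's state:
 * on an idle<->busy transition (tracked via the NOHZ_IDLE flag), walk
 * the cpu's sched domains and adjust the count on every level's group,
 * so nohz_kick_needed() can spot groups with more than one busy cpu.
 */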
Suresh Siddha | 69e1e81 | 2011-12-01 17:07:33 -0800 | [diff] [blame] | 4956 | static inline void set_cpu_sd_state_busy(void) |
| 4957 | { |
| 4958 | struct sched_domain *sd; |
| 4959 | int cpu = smp_processor_id(); |
| 4960 | |
| 4961 | if (!test_bit(NOHZ_IDLE, nohz_flags(cpu))) |
| 4962 | return; |
| 4963 | clear_bit(NOHZ_IDLE, nohz_flags(cpu)); |
| 4964 | |
| 4965 | rcu_read_lock(); |
| 4966 | for_each_domain(cpu, sd) |
| 4967 | atomic_inc(&sd->groups->sgp->nr_busy_cpus); |
| 4968 | rcu_read_unlock(); |
| 4969 | } |
| 4970 | |
| 4971 | void set_cpu_sd_state_idle(void) |
| 4972 | { |
| 4973 | struct sched_domain *sd; |
| 4974 | int cpu = smp_processor_id(); |
| 4975 | |
| 4976 | if (test_bit(NOHZ_IDLE, nohz_flags(cpu))) |
| 4977 | return; |
| 4978 | set_bit(NOHZ_IDLE, nohz_flags(cpu)); |
| 4979 | |
| 4980 | rcu_read_lock(); |
| 4981 | for_each_domain(cpu, sd) |
| 4982 | atomic_dec(&sd->groups->sgp->nr_busy_cpus); |
| 4983 | rcu_read_unlock(); |
| 4984 | } |
| 4985 | |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 4986 | /* |
Alex Shi | c1cc017 | 2012-09-10 15:10:58 +0800 | [diff] [blame] | 4987 | * This routine will record that the cpu is going idle with tick stopped. |
Suresh Siddha | 0b005cf | 2011-12-01 17:07:34 -0800 | [diff] [blame] | 4988 | * This info will be used in performing idle load balancing in the future. |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4989 | */ |
Alex Shi | c1cc017 | 2012-09-10 15:10:58 +0800 | [diff] [blame] | 4990 | void nohz_balance_enter_idle(int cpu) |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 4991 | { |
Suresh Siddha | 7132596 | 2012-01-19 18:28:57 -0800 | [diff] [blame] | 4992 | /* |
| 4993 | * If this cpu is going down, then nothing needs to be done. |
| 4994 | */ |
| 4995 | if (!cpu_active(cpu)) |
| 4996 | return; |
| 4997 | |
Alex Shi | c1cc017 | 2012-09-10 15:10:58 +0800 | [diff] [blame] | 4998 | if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu))) |
| 4999 | return; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 5000 | |
Alex Shi | c1cc017 | 2012-09-10 15:10:58 +0800 | [diff] [blame] | 5001 | cpumask_set_cpu(cpu, nohz.idle_cpus_mask); |
| 5002 | atomic_inc(&nohz.nr_cpus); |
| 5003 | set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 5004 | } |
Suresh Siddha | 7132596 | 2012-01-19 18:28:57 -0800 | [diff] [blame] | 5005 | |
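/*
 * Hotplug notifier: a cpu that is dying must be taken out of the nohz
 * idle tracking, so it cannot be picked as the idle load balancer after
 * it has gone away.
 */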
| 5006 | static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb, |
| 5007 | unsigned long action, void *hcpu) |
| 5008 | { |
| 5009 | switch (action & ~CPU_TASKS_FROZEN) { |
| 5010 | case CPU_DYING: |
Alex Shi | c1cc017 | 2012-09-10 15:10:58 +0800 | [diff] [blame] | 5011 | nohz_balance_exit_idle(smp_processor_id()); |
Suresh Siddha | 7132596 | 2012-01-19 18:28:57 -0800 | [diff] [blame] | 5012 | return NOTIFY_OK; |
| 5013 | default: |
| 5014 | return NOTIFY_DONE; |
| 5015 | } |
| 5016 | } |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 5017 | #endif |
| 5018 | |
| 5019 | static DEFINE_SPINLOCK(balancing); |
| 5020 | |
Peter Zijlstra | 49c022e | 2011-04-05 10:14:25 +0200 | [diff] [blame] | 5021 | /* |
| 5022 | * Scale the max load_balance interval with the number of CPUs in the system. |
| 5023 | * This trades load-balance latency on larger machines for less cross talk. |
| 5024 | */ |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 5025 | void update_max_interval(void) |
Peter Zijlstra | 49c022e | 2011-04-05 10:14:25 +0200 | [diff] [blame] | 5026 | { |
| 5027 | max_load_balance_interval = HZ*num_online_cpus()/10; |
| 5028 | } |
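/*
 * Worked example (assuming HZ=1000): with 4 online cpus the cap is
 * 1000 * 4 / 10 = 400 jiffies (~400ms); with 64 cpus it grows to
 * 6400 jiffies (~6.4s), so larger machines allow longer balance
 * intervals and therefore less cross-cpu traffic.
 */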
| 5029 | |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 5030 | /* |
| 5031 |  * rebalance_domains() checks each scheduling domain to see if it is due to
| 5032 |  * be balanced, and initiates a balancing operation if so.
| 5033 | * |
| 5034 | * Balancing parameters are set up in arch_init_sched_domains. |
| 5035 | */ |
| 5036 | static void rebalance_domains(int cpu, enum cpu_idle_type idle) |
| 5037 | { |
| 5038 | int balance = 1; |
| 5039 | struct rq *rq = cpu_rq(cpu); |
| 5040 | unsigned long interval; |
Peter Zijlstra | 04f733b | 2012-05-11 00:12:02 +0200 | [diff] [blame] | 5041 | struct sched_domain *sd; |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 5042 | /* Earliest time when we have to do rebalance again */ |
| 5043 | unsigned long next_balance = jiffies + 60*HZ; |
| 5044 | int update_next_balance = 0; |
| 5045 | int need_serialize; |
| 5046 | |
Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 5047 | update_shares(cpu); |
| 5048 | |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 5049 | rcu_read_lock(); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 5050 | for_each_domain(cpu, sd) { |
| 5051 | if (!(sd->flags & SD_LOAD_BALANCE)) |
| 5052 | continue; |
| 5053 | |
| 5054 | interval = sd->balance_interval; |
| 5055 | if (idle != CPU_IDLE) |
| 5056 | interval *= sd->busy_factor; |
| 5057 | |
| 5058 | /* scale ms to jiffies */ |
| 5059 | interval = msecs_to_jiffies(interval); |
Peter Zijlstra | 49c022e | 2011-04-05 10:14:25 +0200 | [diff] [blame] | 5060 | interval = clamp(interval, 1UL, max_load_balance_interval); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 5061 | |
| 5062 | need_serialize = sd->flags & SD_SERIALIZE; |
| 5063 | |
| 5064 | if (need_serialize) { |
| 5065 | if (!spin_trylock(&balancing)) |
| 5066 | goto out; |
| 5067 | } |
| 5068 | |
| 5069 | if (time_after_eq(jiffies, sd->last_balance + interval)) { |
| 5070 | if (load_balance(cpu, rq, sd, idle, &balance)) { |
| 5071 | /* |
| 5072 | 				 * We've pulled tasks over, so we're no
Peter Zijlstra | c186faf | 2011-02-21 18:52:53 +0100 | [diff] [blame] | 5073 | 				 * longer idle.
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 5074 | */ |
| 5075 | idle = CPU_NOT_IDLE; |
| 5076 | } |
| 5077 | sd->last_balance = jiffies; |
| 5078 | } |
| 5079 | if (need_serialize) |
| 5080 | spin_unlock(&balancing); |
| 5081 | out: |
| 5082 | if (time_after(next_balance, sd->last_balance + interval)) { |
| 5083 | next_balance = sd->last_balance + interval; |
| 5084 | update_next_balance = 1; |
| 5085 | } |
| 5086 | |
| 5087 | /* |
| 5088 | * Stop the load balance at this level. There is another |
| 5089 | * CPU in our sched group which is doing load balancing more |
| 5090 | * actively. |
| 5091 | */ |
| 5092 | if (!balance) |
| 5093 | break; |
| 5094 | } |
Peter Zijlstra | dce840a | 2011-04-07 14:09:50 +0200 | [diff] [blame] | 5095 | rcu_read_unlock(); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 5096 | |
| 5097 | /* |
| 5098 | * next_balance will be updated only when there is a need. |
| 5099 |  * When the cpu is attached to a null domain, for example, it will not be
| 5100 |  * updated.
| 5101 | */ |
| 5102 | if (likely(update_next_balance)) |
| 5103 | rq->next_balance = next_balance; |
| 5104 | } |
| 5105 | |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5106 | #ifdef CONFIG_NO_HZ |
| 5107 | /* |
| 5108 |  * In the CONFIG_NO_HZ case, the cpu kicked for idle balancing will do the
| 5109 |  * rebalancing on behalf of all the cpus whose scheduler ticks are stopped.
| 5110 | */ |
| 5111 | static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) |
| 5112 | { |
| 5113 | struct rq *this_rq = cpu_rq(this_cpu); |
| 5114 | struct rq *rq; |
| 5115 | int balance_cpu; |
| 5116 | |
Suresh Siddha | 1c792db | 2011-12-01 17:07:32 -0800 | [diff] [blame] | 5117 | if (idle != CPU_IDLE || |
| 5118 | !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu))) |
| 5119 | goto end; |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5120 | |
| 5121 | for_each_cpu(balance_cpu, nohz.idle_cpus_mask) { |
Suresh Siddha | 8a6d42d | 2011-12-06 11:19:37 -0800 | [diff] [blame] | 5122 | if (balance_cpu == this_cpu || !idle_cpu(balance_cpu)) |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5123 | continue; |
| 5124 | |
| 5125 | /* |
| 5126 | * If this cpu gets work to do, stop the load balancing |
| 5127 | 		 * work being done for other cpus. The next load
| 5128 | 		 * balancing owner will pick it up.
| 5129 | */ |
Suresh Siddha | 1c792db | 2011-12-01 17:07:32 -0800 | [diff] [blame] | 5130 | if (need_resched()) |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5131 | break; |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5132 | |
Vincent Guittot | 5ed4f1d | 2012-09-13 06:11:26 +0200 | [diff] [blame] | 5133 | rq = cpu_rq(balance_cpu); |
| 5134 | |
| 5135 | raw_spin_lock_irq(&rq->lock); |
| 5136 | update_rq_clock(rq); |
| 5137 | update_idle_cpu_load(rq); |
| 5138 | raw_spin_unlock_irq(&rq->lock); |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5139 | |
| 5140 | rebalance_domains(balance_cpu, CPU_IDLE); |
| 5141 | |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5142 | if (time_after(this_rq->next_balance, rq->next_balance)) |
| 5143 | this_rq->next_balance = rq->next_balance; |
| 5144 | } |
| 5145 | nohz.next_balance = this_rq->next_balance; |
Suresh Siddha | 1c792db | 2011-12-01 17:07:32 -0800 | [diff] [blame] | 5146 | end: |
| 5147 | clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)); |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5148 | } |
| 5149 | |
| 5150 | /* |
Suresh Siddha | 0b005cf | 2011-12-01 17:07:34 -0800 | [diff] [blame] | 5151 |  * Current heuristic for kicking the idle load balancer in the presence
| 5152 |  * of an idle cpu in the system:
| 5153 | * - This rq has more than one task. |
| 5154 | * - At any scheduler domain level, this cpu's scheduler group has multiple |
| 5155 |  *   busy cpus exceeding the group's power.
| 5156 |  * - For SD_ASYM_PACKING, if the lower numbered cpus in the scheduler
| 5157 | * domain span are idle. |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5158 | */ |
| 5159 | static inline int nohz_kick_needed(struct rq *rq, int cpu) |
| 5160 | { |
| 5161 | unsigned long now = jiffies; |
Suresh Siddha | 0b005cf | 2011-12-01 17:07:34 -0800 | [diff] [blame] | 5162 | struct sched_domain *sd; |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5163 | |
Suresh Siddha | 1c792db | 2011-12-01 17:07:32 -0800 | [diff] [blame] | 5164 | if (unlikely(idle_cpu(cpu))) |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5165 | return 0; |
| 5166 | |
Suresh Siddha | 1c792db | 2011-12-01 17:07:32 -0800 | [diff] [blame] | 5167 | /* |
| 5168 | 	 * We may recently have been in ticked or tickless idle mode. At the
| 5169 | 	 * first busy tick after returning from idle, we update the busy stats.
| 5170 | */ |
Suresh Siddha | 69e1e81 | 2011-12-01 17:07:33 -0800 | [diff] [blame] | 5171 | set_cpu_sd_state_busy(); |
Alex Shi | c1cc017 | 2012-09-10 15:10:58 +0800 | [diff] [blame] | 5172 | nohz_balance_exit_idle(cpu); |
Suresh Siddha | 0b005cf | 2011-12-01 17:07:34 -0800 | [diff] [blame] | 5173 | |
| 5174 | /* |
| 5175 | * None are in tickless mode and hence no need for NOHZ idle load |
| 5176 | * balancing. |
| 5177 | */ |
| 5178 | if (likely(!atomic_read(&nohz.nr_cpus))) |
| 5179 | return 0; |
Suresh Siddha | 1c792db | 2011-12-01 17:07:32 -0800 | [diff] [blame] | 5180 | |
| 5181 | if (time_before(now, nohz.next_balance)) |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5182 | return 0; |
| 5183 | |
Suresh Siddha | 0b005cf | 2011-12-01 17:07:34 -0800 | [diff] [blame] | 5184 | if (rq->nr_running >= 2) |
| 5185 | goto need_kick; |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5186 | |
Peter Zijlstra | 067491b | 2011-12-07 14:32:08 +0100 | [diff] [blame] | 5187 | rcu_read_lock(); |
Suresh Siddha | 0b005cf | 2011-12-01 17:07:34 -0800 | [diff] [blame] | 5188 | for_each_domain(cpu, sd) { |
| 5189 | struct sched_group *sg = sd->groups; |
| 5190 | struct sched_group_power *sgp = sg->sgp; |
| 5191 | int nr_busy = atomic_read(&sgp->nr_busy_cpus); |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5192 | |
Suresh Siddha | 0b005cf | 2011-12-01 17:07:34 -0800 | [diff] [blame] | 5193 | if (sd->flags & SD_SHARE_PKG_RESOURCES && nr_busy > 1) |
Peter Zijlstra | 067491b | 2011-12-07 14:32:08 +0100 | [diff] [blame] | 5194 | goto need_kick_unlock; |
Suresh Siddha | 0b005cf | 2011-12-01 17:07:34 -0800 | [diff] [blame] | 5195 | |
| 5196 | if (sd->flags & SD_ASYM_PACKING && nr_busy != sg->group_weight |
| 5197 | && (cpumask_first_and(nohz.idle_cpus_mask, |
| 5198 | sched_domain_span(sd)) < cpu)) |
Peter Zijlstra | 067491b | 2011-12-07 14:32:08 +0100 | [diff] [blame] | 5199 | goto need_kick_unlock; |
Suresh Siddha | 0b005cf | 2011-12-01 17:07:34 -0800 | [diff] [blame] | 5200 | |
| 5201 | if (!(sd->flags & (SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING))) |
| 5202 | break; |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5203 | } |
Peter Zijlstra | 067491b | 2011-12-07 14:32:08 +0100 | [diff] [blame] | 5204 | rcu_read_unlock(); |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5205 | return 0; |
Peter Zijlstra | 067491b | 2011-12-07 14:32:08 +0100 | [diff] [blame] | 5206 | |
| 5207 | need_kick_unlock: |
| 5208 | rcu_read_unlock(); |
Suresh Siddha | 0b005cf | 2011-12-01 17:07:34 -0800 | [diff] [blame] | 5209 | need_kick: |
| 5210 | return 1; |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5211 | } |
| 5212 | #else |
| 5213 | static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { } |
| 5214 | #endif |
| 5215 | |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 5216 | /* |
| 5217 | * run_rebalance_domains is triggered when needed from the scheduler tick. |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5218 |  * Also triggered for nohz idle balancing (with NOHZ_BALANCE_KICK set).
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 5219 | */ |
| 5220 | static void run_rebalance_domains(struct softirq_action *h) |
| 5221 | { |
| 5222 | int this_cpu = smp_processor_id(); |
| 5223 | struct rq *this_rq = cpu_rq(this_cpu); |
Suresh Siddha | 6eb57e0 | 2011-10-03 15:09:01 -0700 | [diff] [blame] | 5224 | enum cpu_idle_type idle = this_rq->idle_balance ? |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 5225 | CPU_IDLE : CPU_NOT_IDLE; |
| 5226 | |
| 5227 | rebalance_domains(this_cpu, idle); |
| 5228 | |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 5229 | /* |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5230 | * If this cpu has a pending nohz_balance_kick, then do the |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 5231 | * balancing on behalf of the other idle cpus whose ticks are |
| 5232 | * stopped. |
| 5233 | */ |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5234 | nohz_idle_balance(this_cpu, idle); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 5235 | } |
| 5236 | |
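/* A cpu with a NULL sched domain attached takes no part in load balancing. */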
| 5237 | static inline int on_null_domain(int cpu) |
| 5238 | { |
Paul E. McKenney | 90a6501 | 2010-02-28 08:32:18 -0800 | [diff] [blame] | 5239 | return !rcu_dereference_sched(cpu_rq(cpu)->sd); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 5240 | } |
| 5241 | |
| 5242 | /* |
| 5243 | * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing. |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 5244 | */ |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 5245 | void trigger_load_balance(struct rq *rq, int cpu) |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 5246 | { |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 5247 | /* Don't need to rebalance while attached to NULL domain */ |
| 5248 | if (time_after_eq(jiffies, rq->next_balance) && |
| 5249 | likely(!on_null_domain(cpu))) |
| 5250 | raise_softirq(SCHED_SOFTIRQ); |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5251 | #ifdef CONFIG_NO_HZ |
Suresh Siddha | 1c792db | 2011-12-01 17:07:32 -0800 | [diff] [blame] | 5252 | if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu))) |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 5253 | nohz_balancer_kick(cpu); |
| 5254 | #endif |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 5255 | } |
| 5256 | |
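/*
 * Runqueue online/offline hooks for the fair class: re-run
 * update_sysctl() so the cpu-count-scaled latency/granularity tunables
 * match the new number of online cpus; on offline, also make sure no
 * throttled groups are left stranded on the dead runqueue.
 */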
Christian Ehrhardt | 0bcdcf2 | 2009-11-30 12:16:46 +0100 | [diff] [blame] | 5257 | static void rq_online_fair(struct rq *rq) |
| 5258 | { |
| 5259 | update_sysctl(); |
| 5260 | } |
| 5261 | |
| 5262 | static void rq_offline_fair(struct rq *rq) |
| 5263 | { |
| 5264 | update_sysctl(); |
Peter Boonstoppel | a4c96ae | 2012-08-09 15:34:47 -0700 | [diff] [blame] | 5265 | |
| 5266 | /* Ensure any throttled groups are reachable by pick_next_task */ |
| 5267 | unthrottle_offline_cfs_rqs(rq); |
Christian Ehrhardt | 0bcdcf2 | 2009-11-30 12:16:46 +0100 | [diff] [blame] | 5268 | } |
| 5269 | |
Dhaval Giani | 55e12e5 | 2008-06-24 23:39:43 +0530 | [diff] [blame] | 5270 | #endif /* CONFIG_SMP */ |
Peter Williams | e1d1484 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 5271 | |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5272 | /* |
| 5273 | * scheduler tick hitting a task of our scheduling class: |
| 5274 | */ |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 5275 | static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5276 | { |
| 5277 | struct cfs_rq *cfs_rq; |
| 5278 | struct sched_entity *se = &curr->se; |
| 5279 | |
| 5280 | for_each_sched_entity(se) { |
| 5281 | cfs_rq = cfs_rq_of(se); |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 5282 | entity_tick(cfs_rq, se, queued); |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5283 | } |
Ben Segall | 18bf280 | 2012-10-04 12:51:20 +0200 | [diff] [blame] | 5284 | |
| 5285 | update_rq_runnable_avg(rq, 1); |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5286 | } |
| 5287 | |
| 5288 | /* |
Peter Zijlstra | cd29fe6 | 2009-11-27 17:32:46 +0100 | [diff] [blame] | 5289 | * called on fork with the child task as argument from the parent's context |
| 5290 | * - child not yet on the tasklist |
| 5291 | * - preemption disabled |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5292 | */ |
Peter Zijlstra | cd29fe6 | 2009-11-27 17:32:46 +0100 | [diff] [blame] | 5293 | static void task_fork_fair(struct task_struct *p) |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5294 | { |
Daisuke Nishimura | 4fc420c | 2011-12-15 14:36:55 +0900 | [diff] [blame] | 5295 | struct cfs_rq *cfs_rq; |
| 5296 | struct sched_entity *se = &p->se, *curr; |
Ingo Molnar | 00bf7bf | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 5297 | int this_cpu = smp_processor_id(); |
Peter Zijlstra | cd29fe6 | 2009-11-27 17:32:46 +0100 | [diff] [blame] | 5298 | struct rq *rq = this_rq(); |
| 5299 | unsigned long flags; |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5300 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 5301 | raw_spin_lock_irqsave(&rq->lock, flags); |
Peter Zijlstra | cd29fe6 | 2009-11-27 17:32:46 +0100 | [diff] [blame] | 5302 | |
Peter Zijlstra | 861d034 | 2010-08-19 13:31:43 +0200 | [diff] [blame] | 5303 | update_rq_clock(rq); |
| 5304 | |
Daisuke Nishimura | 4fc420c | 2011-12-15 14:36:55 +0900 | [diff] [blame] | 5305 | cfs_rq = task_cfs_rq(current); |
| 5306 | curr = cfs_rq->curr; |
| 5307 | |
Paul E. McKenney | b0a0f66 | 2010-10-06 17:32:51 -0700 | [diff] [blame] | 5308 | if (unlikely(task_cpu(p) != this_cpu)) { |
| 5309 | rcu_read_lock(); |
Peter Zijlstra | cd29fe6 | 2009-11-27 17:32:46 +0100 | [diff] [blame] | 5310 | __set_task_cpu(p, this_cpu); |
Paul E. McKenney | b0a0f66 | 2010-10-06 17:32:51 -0700 | [diff] [blame] | 5311 | rcu_read_unlock(); |
| 5312 | } |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5313 | |
Ting Yang | 7109c44 | 2007-08-28 12:53:24 +0200 | [diff] [blame] | 5314 | update_curr(cfs_rq); |
Peter Zijlstra | cd29fe6 | 2009-11-27 17:32:46 +0100 | [diff] [blame] | 5315 | |
Mike Galbraith | b5d9d73 | 2009-09-08 11:12:28 +0200 | [diff] [blame] | 5316 | if (curr) |
| 5317 | se->vruntime = curr->vruntime; |
Peter Zijlstra | aeb73b0 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 5318 | place_entity(cfs_rq, se, 1); |
Peter Zijlstra | 4d78e7b | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 5319 | |
Peter Zijlstra | cd29fe6 | 2009-11-27 17:32:46 +0100 | [diff] [blame] | 5320 | if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) { |
Dmitry Adamushko | 87fefa3 | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 5321 | /* |
Ingo Molnar | edcb60a | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 5322 | * Upon rescheduling, sched_class::put_prev_task() will place |
| 5323 | * 'current' within the tree based on its new key value. |
| 5324 | */ |
Peter Zijlstra | 4d78e7b | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 5325 | swap(curr->vruntime, se->vruntime); |
Bharata B Rao | aec0a51 | 2008-08-28 14:42:49 +0530 | [diff] [blame] | 5326 | resched_task(rq->curr); |
Peter Zijlstra | 4d78e7b | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 5327 | } |
| 5328 | |
Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 5329 | se->vruntime -= cfs_rq->min_vruntime; |
| 5330 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 5331 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5332 | } |
| 5333 | |
Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 5334 | /* |
| 5335 | * Priority of the task has changed. Check to see if we preempt |
| 5336 | * the current task. |
| 5337 | */ |
Peter Zijlstra | da7a735 | 2011-01-17 17:03:27 +0100 | [diff] [blame] | 5338 | static void |
| 5339 | prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) |
Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 5340 | { |
Peter Zijlstra | da7a735 | 2011-01-17 17:03:27 +0100 | [diff] [blame] | 5341 | if (!p->se.on_rq) |
| 5342 | return; |
| 5343 | |
Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 5344 | /* |
| 5345 | * Reschedule if we are currently running on this runqueue and |
| 5346 | * our priority decreased, or if we are not currently running on |
| 5347 | * this runqueue and our priority is higher than the current's |
| 5348 | */ |
Peter Zijlstra | da7a735 | 2011-01-17 17:03:27 +0100 | [diff] [blame] | 5349 | if (rq->curr == p) { |
Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 5350 | if (p->prio > oldprio) |
| 5351 | resched_task(rq->curr); |
| 5352 | } else |
Peter Zijlstra | 15afe09 | 2008-09-20 23:38:02 +0200 | [diff] [blame] | 5353 | check_preempt_curr(rq, p, 0); |
Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 5354 | } |
| 5355 | |
Peter Zijlstra | da7a735 | 2011-01-17 17:03:27 +0100 | [diff] [blame] | 5356 | static void switched_from_fair(struct rq *rq, struct task_struct *p) |
| 5357 | { |
| 5358 | struct sched_entity *se = &p->se; |
| 5359 | struct cfs_rq *cfs_rq = cfs_rq_of(se); |
| 5360 | |
| 5361 | /* |
| 5362 | 	 * Ensure the task's vruntime is normalized, so that when it's
| 5363 | * switched back to the fair class the enqueue_entity(.flags=0) will |
| 5364 | * do the right thing. |
| 5365 | * |
| 5366 | * If it was on_rq, then the dequeue_entity(.flags=0) will already |
| 5367 | 	 * have normalized the vruntime; if it was !on_rq, then only when
| 5368 | * the task is sleeping will it still have non-normalized vruntime. |
| 5369 | */ |
| 5370 | if (!se->on_rq && p->state != TASK_RUNNING) { |
| 5371 | /* |
| 5372 | * Fix up our vruntime so that the current sleep doesn't |
| 5373 | * cause 'unlimited' sleep bonus. |
| 5374 | */ |
| 5375 | place_entity(cfs_rq, se, 0); |
| 5376 | se->vruntime -= cfs_rq->min_vruntime; |
| 5377 | } |
Paul Turner | 9ee474f | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 5378 | |
| 5379 | #if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP) |
| 5380 | /* |
| 5381 | * Remove our load from contribution when we leave sched_fair |
| 5382 | * and ensure we don't carry in an old decay_count if we |
| 5383 | * switch back. |
| 5384 | */ |
| 5385 | if (p->se.avg.decay_count) { |
| 5386 | struct cfs_rq *cfs_rq = cfs_rq_of(&p->se); |
| 5387 | __synchronize_entity_decay(&p->se); |
| 5388 | subtract_blocked_load_contrib(cfs_rq, |
| 5389 | p->se.avg.load_avg_contrib); |
| 5390 | } |
| 5391 | #endif |
Peter Zijlstra | da7a735 | 2011-01-17 17:03:27 +0100 | [diff] [blame] | 5392 | } |
| 5393 | |
Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 5394 | /* |
| 5395 | * We switched to the sched_fair class. |
| 5396 | */ |
Peter Zijlstra | da7a735 | 2011-01-17 17:03:27 +0100 | [diff] [blame] | 5397 | static void switched_to_fair(struct rq *rq, struct task_struct *p) |
Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 5398 | { |
Peter Zijlstra | da7a735 | 2011-01-17 17:03:27 +0100 | [diff] [blame] | 5399 | if (!p->se.on_rq) |
| 5400 | return; |
| 5401 | |
Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 5402 | /* |
| 5403 | * We were most likely switched from sched_rt, so |
| 5404 | * kick off the schedule if running, otherwise just see |
| 5405 | * if we can still preempt the current task. |
| 5406 | */ |
Peter Zijlstra | da7a735 | 2011-01-17 17:03:27 +0100 | [diff] [blame] | 5407 | if (rq->curr == p) |
Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 5408 | resched_task(rq->curr); |
| 5409 | else |
Peter Zijlstra | 15afe09 | 2008-09-20 23:38:02 +0200 | [diff] [blame] | 5410 | check_preempt_curr(rq, p, 0); |
Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 5411 | } |
| 5412 | |
Srivatsa Vaddagiri | 83b699e | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 5413 | /* Account for a task changing its policy or group. |
| 5414 | * |
| 5415 | * This routine is mostly called to set cfs_rq->curr field when a task |
| 5416 | * migrates between groups/classes. |
| 5417 | */ |
| 5418 | static void set_curr_task_fair(struct rq *rq) |
| 5419 | { |
| 5420 | struct sched_entity *se = &rq->curr->se; |
| 5421 | |
Paul Turner | ec12cb7 | 2011-07-21 09:43:30 -0700 | [diff] [blame] | 5422 | for_each_sched_entity(se) { |
| 5423 | struct cfs_rq *cfs_rq = cfs_rq_of(se); |
| 5424 | |
| 5425 | set_next_entity(cfs_rq, se); |
| 5426 | /* ensure bandwidth has been allocated on our new cfs_rq */ |
| 5427 | account_cfs_rq_runtime(cfs_rq, 0); |
| 5428 | } |
Srivatsa Vaddagiri | 83b699e | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 5429 | } |
| 5430 | |
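/*
 * Initialize a freshly allocated cfs_rq: an empty rbtree, an initial
 * min_vruntime offset so the 64-bit value wraps soon after boot
 * (exercising the wrap-safe comparisons), a min_vruntime_copy for
 * lockless readers on 32-bit, and the per-cfs_rq load-tracking atomics
 * when group scheduling and SMP are enabled.
 */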
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 5431 | void init_cfs_rq(struct cfs_rq *cfs_rq) |
| 5432 | { |
| 5433 | cfs_rq->tasks_timeline = RB_ROOT; |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 5434 | cfs_rq->min_vruntime = (u64)(-(1LL << 20)); |
| 5435 | #ifndef CONFIG_64BIT |
| 5436 | cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime; |
| 5437 | #endif |
Paul Turner | 9ee474f | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 5438 | #if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP) |
| 5439 | atomic64_set(&cfs_rq->decay_counter, 1); |
Paul Turner | aff3e49 | 2012-10-04 13:18:30 +0200 | [diff] [blame^] | 5440 | atomic64_set(&cfs_rq->removed_load, 0); |
Paul Turner | 9ee474f | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 5441 | #endif |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 5442 | } |
| 5443 | |
Peter Zijlstra | 810b381 | 2008-02-29 15:21:01 -0500 | [diff] [blame] | 5444 | #ifdef CONFIG_FAIR_GROUP_SCHED |
Peter Zijlstra | b2b5ce0 | 2010-10-15 15:24:15 +0200 | [diff] [blame] | 5445 | static void task_move_group_fair(struct task_struct *p, int on_rq) |
Peter Zijlstra | 810b381 | 2008-02-29 15:21:01 -0500 | [diff] [blame] | 5446 | { |
Paul Turner | aff3e49 | 2012-10-04 13:18:30 +0200 | [diff] [blame^] | 5447 | struct cfs_rq *cfs_rq; |
Peter Zijlstra | b2b5ce0 | 2010-10-15 15:24:15 +0200 | [diff] [blame] | 5448 | /* |
| 5449 | * If the task was not on the rq at the time of this cgroup movement |
| 5450 | 	 * it must have been asleep; sleeping tasks keep their ->vruntime
| 5451 | * absolute on their old rq until wakeup (needed for the fair sleeper |
| 5452 | * bonus in place_entity()). |
| 5453 | * |
| 5454 | * If it was on the rq, we've just 'preempted' it, which does convert |
| 5455 | * ->vruntime to a relative base. |
| 5456 | * |
| 5457 | * Make sure both cases convert their relative position when migrating |
| 5458 | * to another cgroup's rq. This does somewhat interfere with the |
| 5459 | * fair sleeper stuff for the first placement, but who cares. |
| 5460 | */ |
Daisuke Nishimura | 7ceff01 | 2011-12-15 14:36:07 +0900 | [diff] [blame] | 5461 | /* |
| 5462 | * When !on_rq, vruntime of the task has usually NOT been normalized. |
| 5463 | * But there are some cases where it has already been normalized: |
| 5464 | * |
| 5465 | * - Moving a forked child which is waiting for being woken up by |
| 5466 | * wake_up_new_task(). |
Daisuke Nishimura | 62af378 | 2011-12-15 14:37:41 +0900 | [diff] [blame] | 5467 | * - Moving a task which has been woken up by try_to_wake_up() and |
| 5468 | * waiting for actually being woken up by sched_ttwu_pending(). |
Daisuke Nishimura | 7ceff01 | 2011-12-15 14:36:07 +0900 | [diff] [blame] | 5469 | * |
| 5470 | * To prevent boost or penalty in the new cfs_rq caused by delta |
| 5471 | * min_vruntime between the two cfs_rqs, we skip vruntime adjustment. |
| 5472 | */ |
Daisuke Nishimura | 62af378 | 2011-12-15 14:37:41 +0900 | [diff] [blame] | 5473 | if (!on_rq && (!p->se.sum_exec_runtime || p->state == TASK_WAKING)) |
Daisuke Nishimura | 7ceff01 | 2011-12-15 14:36:07 +0900 | [diff] [blame] | 5474 | on_rq = 1; |
| 5475 | |
Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 5476 | if (!on_rq) |
Peter Zijlstra | b2b5ce0 | 2010-10-15 15:24:15 +0200 | [diff] [blame] | 5477 | p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime; |
| 5478 | set_task_rq(p, task_cpu(p)); |
Paul Turner | aff3e49 | 2012-10-04 13:18:30 +0200 | [diff] [blame^] | 5479 | if (!on_rq) { |
| 5480 | cfs_rq = cfs_rq_of(&p->se); |
| 5481 | p->se.vruntime += cfs_rq->min_vruntime; |
| 5482 | #ifdef CONFIG_SMP |
| 5483 | /* |
| 5484 | * migrate_task_rq_fair() will have removed our previous |
| 5485 | * contribution, but we must synchronize for ongoing future |
| 5486 | * decay. |
| 5487 | */ |
| 5488 | p->se.avg.decay_count = atomic64_read(&cfs_rq->decay_counter); |
| 5489 | cfs_rq->blocked_load_avg += p->se.avg.load_avg_contrib; |
| 5490 | #endif |
| 5491 | } |
Peter Zijlstra | 810b381 | 2008-02-29 15:21:01 -0500 | [diff] [blame] | 5492 | } |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 5493 | |
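/*
 * Release a task group's fair-sched state: destroy its CFS bandwidth
 * control and free the per-cpu cfs_rq and sched_entity arrays.
 */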
| 5494 | void free_fair_sched_group(struct task_group *tg) |
| 5495 | { |
| 5496 | int i; |
| 5497 | |
| 5498 | destroy_cfs_bandwidth(tg_cfs_bandwidth(tg)); |
| 5499 | |
| 5500 | for_each_possible_cpu(i) { |
| 5501 | if (tg->cfs_rq) |
| 5502 | kfree(tg->cfs_rq[i]); |
| 5503 | if (tg->se) |
| 5504 | kfree(tg->se[i]); |
| 5505 | } |
| 5506 | |
| 5507 | kfree(tg->cfs_rq); |
| 5508 | kfree(tg->se); |
| 5509 | } |
| 5510 | |
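/*
 * Allocate and initialize a cfs_rq and sched_entity for each possible
 * cpu and link them into the parent group's hierarchy; returns 1 on
 * success, 0 if any allocation fails.
 */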
| 5511 | int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) |
| 5512 | { |
| 5513 | struct cfs_rq *cfs_rq; |
| 5514 | struct sched_entity *se; |
| 5515 | int i; |
| 5516 | |
| 5517 | tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL); |
| 5518 | if (!tg->cfs_rq) |
| 5519 | goto err; |
| 5520 | tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL); |
| 5521 | if (!tg->se) |
| 5522 | goto err; |
| 5523 | |
| 5524 | tg->shares = NICE_0_LOAD; |
| 5525 | |
| 5526 | init_cfs_bandwidth(tg_cfs_bandwidth(tg)); |
| 5527 | |
| 5528 | for_each_possible_cpu(i) { |
| 5529 | cfs_rq = kzalloc_node(sizeof(struct cfs_rq), |
| 5530 | GFP_KERNEL, cpu_to_node(i)); |
| 5531 | if (!cfs_rq) |
| 5532 | goto err; |
| 5533 | |
| 5534 | se = kzalloc_node(sizeof(struct sched_entity), |
| 5535 | GFP_KERNEL, cpu_to_node(i)); |
| 5536 | if (!se) |
| 5537 | goto err_free_rq; |
| 5538 | |
| 5539 | init_cfs_rq(cfs_rq); |
| 5540 | init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); |
| 5541 | } |
| 5542 | |
| 5543 | return 1; |
| 5544 | |
| 5545 | err_free_rq: |
| 5546 | kfree(cfs_rq); |
| 5547 | err: |
| 5548 | return 0; |
| 5549 | } |
| 5550 | |
| 5551 | void unregister_fair_sched_group(struct task_group *tg, int cpu) |
| 5552 | { |
| 5553 | struct rq *rq = cpu_rq(cpu); |
| 5554 | unsigned long flags; |
| 5555 | |
| 5556 | /* |
| 5557 | 	 * Only empty task groups can be destroyed, so we can speculatively
| 5558 | * check on_list without danger of it being re-added. |
| 5559 | */ |
| 5560 | if (!tg->cfs_rq[cpu]->on_list) |
| 5561 | return; |
| 5562 | |
| 5563 | raw_spin_lock_irqsave(&rq->lock, flags); |
| 5564 | list_del_leaf_cfs_rq(tg->cfs_rq[cpu]); |
| 5565 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
| 5566 | } |
| 5567 | |
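/*
 * Link one cpu's cfs_rq and sched_entity into the group hierarchy: the
 * entity is queued on its parent's cfs_rq (the root cfs_rq if there is
 * no parent) and its my_q points at the group-owned cfs_rq it represents.
 */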
| 5568 | void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, |
| 5569 | struct sched_entity *se, int cpu, |
| 5570 | struct sched_entity *parent) |
| 5571 | { |
| 5572 | struct rq *rq = cpu_rq(cpu); |
| 5573 | |
| 5574 | cfs_rq->tg = tg; |
| 5575 | cfs_rq->rq = rq; |
| 5576 | #ifdef CONFIG_SMP |
| 5577 | /* allow initial update_cfs_load() to truncate */ |
| 5578 | cfs_rq->load_stamp = 1; |
Peter Zijlstra | 810b381 | 2008-02-29 15:21:01 -0500 | [diff] [blame] | 5579 | #endif |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 5580 | init_cfs_rq_runtime(cfs_rq); |
| 5581 | |
| 5582 | tg->cfs_rq[cpu] = cfs_rq; |
| 5583 | tg->se[cpu] = se; |
| 5584 | |
| 5585 | /* se could be NULL for root_task_group */ |
| 5586 | if (!se) |
| 5587 | return; |
| 5588 | |
| 5589 | if (!parent) |
| 5590 | se->cfs_rq = &rq->cfs; |
| 5591 | else |
| 5592 | se->cfs_rq = parent->my_q; |
| 5593 | |
| 5594 | se->my_q = cfs_rq; |
| 5595 | update_load_set(&se->load, 0); |
| 5596 | se->parent = parent; |
| 5597 | } |
| 5598 | |
| 5599 | static DEFINE_MUTEX(shares_mutex); |
| 5600 | |
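/*
 * Set a task group's weight (shares). The value is clamped to
 * [MIN_SHARES, MAX_SHARES] and, under each cpu's runqueue lock,
 * propagated up that cpu's entity hierarchy.
 */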
| 5601 | int sched_group_set_shares(struct task_group *tg, unsigned long shares) |
| 5602 | { |
| 5603 | int i; |
| 5604 | unsigned long flags; |
| 5605 | |
| 5606 | /* |
| 5607 | * We can't change the weight of the root cgroup. |
| 5608 | */ |
| 5609 | if (!tg->se[0]) |
| 5610 | return -EINVAL; |
| 5611 | |
| 5612 | shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES)); |
| 5613 | |
| 5614 | mutex_lock(&shares_mutex); |
| 5615 | if (tg->shares == shares) |
| 5616 | goto done; |
| 5617 | |
| 5618 | tg->shares = shares; |
| 5619 | for_each_possible_cpu(i) { |
| 5620 | struct rq *rq = cpu_rq(i); |
| 5621 | struct sched_entity *se; |
| 5622 | |
| 5623 | se = tg->se[i]; |
| 5624 | /* Propagate contribution to hierarchy */ |
| 5625 | raw_spin_lock_irqsave(&rq->lock, flags); |
| 5626 | for_each_sched_entity(se) |
| 5627 | update_cfs_shares(group_cfs_rq(se)); |
| 5628 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
| 5629 | } |
| 5630 | |
| 5631 | done: |
| 5632 | mutex_unlock(&shares_mutex); |
| 5633 | return 0; |
| 5634 | } |
| 5635 | #else /* CONFIG_FAIR_GROUP_SCHED */ |
| 5636 | |
| 5637 | void free_fair_sched_group(struct task_group *tg) { } |
| 5638 | |
| 5639 | int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) |
| 5640 | { |
| 5641 | return 1; |
| 5642 | } |
| 5643 | |
| 5644 | void unregister_fair_sched_group(struct task_group *tg, int cpu) { } |
| 5645 | |
| 5646 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
| 5647 | |
Peter Zijlstra | 810b381 | 2008-02-29 15:21:01 -0500 | [diff] [blame] | 5648 | |
H Hartley Sweeten | 6d686f4 | 2010-01-13 20:21:52 -0700 | [diff] [blame] | 5649 | static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task) |
Peter Williams | 0d721ce | 2009-09-21 01:31:53 +0000 | [diff] [blame] | 5650 | { |
| 5651 | struct sched_entity *se = &task->se; |
Peter Williams | 0d721ce | 2009-09-21 01:31:53 +0000 | [diff] [blame] | 5652 | unsigned int rr_interval = 0; |
| 5653 | |
| 5654 | /* |
| 5655 | * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise |
| 5656 | * idle runqueue: |
| 5657 | */ |
Peter Williams | 0d721ce | 2009-09-21 01:31:53 +0000 | [diff] [blame] | 5658 | if (rq->cfs.load.weight) |
| 5659 | rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se)); |
Peter Williams | 0d721ce | 2009-09-21 01:31:53 +0000 | [diff] [blame] | 5660 | |
| 5661 | return rr_interval; |
| 5662 | } |
| 5663 | |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5664 | /* |
| 5665 | * All the scheduling class methods: |
| 5666 | */ |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 5667 | const struct sched_class fair_sched_class = { |
Ingo Molnar | 5522d5d | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 5668 | .next = &idle_sched_class, |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5669 | .enqueue_task = enqueue_task_fair, |
| 5670 | .dequeue_task = dequeue_task_fair, |
| 5671 | .yield_task = yield_task_fair, |
Mike Galbraith | d95f412 | 2011-02-01 09:50:51 -0500 | [diff] [blame] | 5672 | .yield_to_task = yield_to_task_fair, |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5673 | |
Ingo Molnar | 2e09bf5 | 2007-10-15 17:00:05 +0200 | [diff] [blame] | 5674 | .check_preempt_curr = check_preempt_wakeup, |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5675 | |
| 5676 | .pick_next_task = pick_next_task_fair, |
| 5677 | .put_prev_task = put_prev_task_fair, |
| 5678 | |
Peter Williams | 681f3e6 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 5679 | #ifdef CONFIG_SMP |
Li Zefan | 4ce72a2 | 2008-10-22 15:25:26 +0800 | [diff] [blame] | 5680 | .select_task_rq = select_task_rq_fair, |
Paul Turner | 0a74bef | 2012-10-04 13:18:30 +0200 | [diff] [blame] | 5681 | .migrate_task_rq = migrate_task_rq_fair, |
Li Zefan | 4ce72a2 | 2008-10-22 15:25:26 +0800 | [diff] [blame] | 5682 | |
Christian Ehrhardt | 0bcdcf2 | 2009-11-30 12:16:46 +0100 | [diff] [blame] | 5683 | .rq_online = rq_online_fair, |
| 5684 | .rq_offline = rq_offline_fair, |
Peter Zijlstra | 88ec22d | 2009-12-16 18:04:41 +0100 | [diff] [blame] | 5685 | |
| 5686 | .task_waking = task_waking_fair, |
Peter Williams | 681f3e6 | 2007-10-24 18:23:51 +0200 | [diff] [blame] | 5687 | #endif |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5688 | |
Srivatsa Vaddagiri | 83b699e | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 5689 | .set_curr_task = set_curr_task_fair, |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5690 | .task_tick = task_tick_fair, |
Peter Zijlstra | cd29fe6 | 2009-11-27 17:32:46 +0100 | [diff] [blame] | 5691 | .task_fork = task_fork_fair, |
Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 5692 | |
| 5693 | .prio_changed = prio_changed_fair, |
Peter Zijlstra | da7a735 | 2011-01-17 17:03:27 +0100 | [diff] [blame] | 5694 | .switched_from = switched_from_fair, |
Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 5695 | .switched_to = switched_to_fair, |
Peter Zijlstra | 810b381 | 2008-02-29 15:21:01 -0500 | [diff] [blame] | 5696 | |
Peter Williams | 0d721ce | 2009-09-21 01:31:53 +0000 | [diff] [blame] | 5697 | .get_rr_interval = get_rr_interval_fair, |
| 5698 | |
Peter Zijlstra | 810b381 | 2008-02-29 15:21:01 -0500 | [diff] [blame] | 5699 | #ifdef CONFIG_FAIR_GROUP_SCHED |
Peter Zijlstra | b2b5ce0 | 2010-10-15 15:24:15 +0200 | [diff] [blame] | 5700 | .task_move_group = task_move_group_fair, |
Peter Zijlstra | 810b381 | 2008-02-29 15:21:01 -0500 | [diff] [blame] | 5701 | #endif |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5702 | }; |
| 5703 | |
| 5704 | #ifdef CONFIG_SCHED_DEBUG |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 5705 | void print_cfs_stats(struct seq_file *m, int cpu) |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5706 | { |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5707 | struct cfs_rq *cfs_rq; |
| 5708 | |
Peter Zijlstra | 5973e5b | 2008-01-25 21:08:34 +0100 | [diff] [blame] | 5709 | rcu_read_lock(); |
Ingo Molnar | c3b64f1 | 2007-08-09 11:16:51 +0200 | [diff] [blame] | 5710 | for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq) |
Ingo Molnar | 5cef9ec | 2007-08-09 11:16:47 +0200 | [diff] [blame] | 5711 | print_cfs_rq(m, cpu, cfs_rq); |
Peter Zijlstra | 5973e5b | 2008-01-25 21:08:34 +0100 | [diff] [blame] | 5712 | rcu_read_unlock(); |
Ingo Molnar | bf0f6f2 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5713 | } |
| 5714 | #endif |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 5715 | |
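/*
 * Boot-time setup for the fair class: register the SCHED_SOFTIRQ
 * handler for periodic rebalancing and, with CONFIG_NO_HZ, the idle
 * cpumask and hotplug notifier used for nohz idle balancing.
 */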
| 5716 | __init void init_sched_fair_class(void) |
| 5717 | { |
| 5718 | #ifdef CONFIG_SMP |
| 5719 | open_softirq(SCHED_SOFTIRQ, run_rebalance_domains); |
| 5720 | |
| 5721 | #ifdef CONFIG_NO_HZ |
Diwakar Tundlam | 554ceca | 2012-03-07 14:44:26 -0800 | [diff] [blame] | 5722 | nohz.next_balance = jiffies; |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 5723 | zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT); |
Suresh Siddha | 7132596 | 2012-01-19 18:28:57 -0800 | [diff] [blame] | 5724 | cpu_notifier(sched_ilb_notifier, 0); |
Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 5725 | #endif |
| 5726 | #endif /* SMP */ |
| 5727 | |
| 5728 | } |