/*
 * kernel/sched/proc.c
 *
 * Kernel load calculations, forked from sched/core.c
 */

#include <linux/export.h>

#include "sched.h"

/*
 * Global load-average calculations
 *
 * We take a distributed and async approach to calculating the global load-avg
 * in order to minimize overhead.
 *
 * The global load average is an exponentially decaying average of nr_running +
 * nr_uninterruptible.
 *
 * Once every LOAD_FREQ:
 *
 *	nr_active = 0;
 *	for_each_possible_cpu(cpu)
 *		nr_active += cpu_of(cpu)->nr_running + cpu_of(cpu)->nr_uninterruptible;
 *
 *	avenrun[n] = avenrun[n] * exp_n + nr_active * (1 - exp_n)
 *
 * For a number of reasons the above turns into the mess below:
 *
 *  - for_each_possible_cpu() is prohibitively expensive on machines with a
 *    serious number of cpus, therefore we need to take a distributed approach
 *    to calculating nr_active.
 *
 *	\Sum_i x_i(t) = \Sum_i x_i(t) - x_i(t_0) | x_i(t_0) := 0
 *		      = \Sum_i { \Sum_j=1 x_i(t_j) - x_i(t_j-1) }
 *
 *    So assuming nr_active := 0 when we start out -- true per definition, we
 *    can simply take per-cpu deltas and fold those into a global accumulator
 *    to obtain the same result. See calc_load_fold_active().
 *
 *    Furthermore, in order to avoid synchronizing all per-cpu delta folding
 *    across the machine, we assume 10 ticks is sufficient time for every
 *    cpu to have completed this task.
 *
 *    This places an upper-bound on the IRQ-off latency of the machine. Then
 *    again, being late doesn't lose the delta, it just wrecks the sample.
 *
 *  - cpu_rq()->nr_uninterruptible isn't accurately tracked per-cpu because
 *    this would add another cross-cpu cacheline miss and atomic operation
 *    to the wakeup path. Instead we increment on whatever cpu the task ran
 *    when it went into uninterruptible state and decrement on whatever cpu
 *    did the wakeup. This means that only the sum of nr_uninterruptible over
 *    all cpus yields the correct result.
 *
 *  This covers the NO_HZ=n code, for extra headaches, see the comment below.
 */

/* Variables and functions for calc_load */
atomic_long_t calc_load_tasks;
unsigned long calc_load_update;
unsigned long avenrun[3];
EXPORT_SYMBOL(avenrun); /* should be removed */

/**
 * get_avenrun - get the load average array
 * @loads:	pointer to dest load array
 * @offset:	offset to add
 * @shift:	shift count to shift the result left
 *
 * These values are estimates at best, so no need for locking.
 */
void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
{
	loads[0] = (avenrun[0] + offset) << shift;
	loads[1] = (avenrun[1] + offset) << shift;
	loads[2] = (avenrun[2] + offset) << shift;
}
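
/*
 * Illustrative sketch (not part of the scheduler): consumers such as
 * /proc/loadavg turn these fixed-point values into the familiar "0.08"
 * style numbers. Assuming the usual FSHIFT/FIXED_1 definitions, that
 * conversion looks roughly like:
 *
 *	unsigned long avn[3];
 *	get_avenrun(avn, FIXED_1/200, 0);
 *	whole = avn[0] >> FSHIFT;
 *	frac  = ((avn[0] & (FIXED_1 - 1)) * 100) >> FSHIFT;
 *
 * where the FIXED_1/200 offset rounds to the nearest hundredth before the
 * integer/fraction split.
 */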

long calc_load_fold_active(struct rq *this_rq)
{
	long nr_active, delta = 0;

	nr_active = this_rq->nr_running;
	nr_active += (long) this_rq->nr_uninterruptible;

	if (nr_active != this_rq->calc_load_active) {
		delta = nr_active - this_rq->calc_load_active;
		this_rq->calc_load_active = nr_active;
	}

	return delta;
}

/*
 * a1 = a0 * e + a * (1 - e)
 */
static unsigned long
calc_load(unsigned long load, unsigned long exp, unsigned long active)
{
	load *= exp;
	load += active * (FIXED_1 - exp);
	load += 1UL << (FSHIFT - 1);
	return load >> FSHIFT;
}
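
/*
 * Worked example (illustrative only), assuming the usual constants
 * FSHIFT = 11, FIXED_1 = 2048 and EXP_1 = 1884 (~exp(-5s/1min)):
 *
 *	calc_load(2048, EXP_1, 2 * FIXED_1)
 *	  = (2048 * 1884 + 4096 * (2048 - 1884) + 1024) >> 11
 *	  = (3858432 + 671744 + 1024) >> 11
 *	  = 2212				(~1.08 in fixed-point)
 *
 * i.e. a 1-minute average of 1.00 pulled towards two runnable tasks over
 * one LOAD_FREQ interval, matching 1*e + 2*(1 - e) with e = exp(-5/60).
 */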

#ifdef CONFIG_NO_HZ_COMMON
/*
 * Handle NO_HZ for the global load-average.
 *
 * Since the above described distributed algorithm to compute the global
 * load-average relies on per-cpu sampling from the tick, it is affected by
 * NO_HZ.
 *
 * The basic idea is to fold the nr_active delta into a global idle-delta upon
 * entering NO_HZ state such that we can include this as an 'extra' cpu delta
 * when we read the global state.
 *
 * Obviously reality has to ruin such a delightfully simple scheme:
 *
 *  - When we go NO_HZ idle during the window, we can negate our sample
 *    contribution, causing under-accounting.
 *
 *    We avoid this by keeping two idle-delta counters and flipping them
 *    when the window starts, thus separating old and new NO_HZ load.
 *
 *    The only trick is the slight shift in index flip for read vs write.
 *
 *        0s            5s            10s           15s
 *          +10           +10           +10           +10
 *        |-|-----------|-|-----------|-|-----------|-|
 *    r:0 0 1           1 0           0 1           1 0
 *    w:0 1 1           0 0           1 1           0 0
 *
 *    This ensures we'll fold the old idle contribution in this window while
 *    accumulating the new one.
 *
 *  - When we wake up from NO_HZ idle during the window, we push up our
 *    contribution, since we effectively move our sample point to a known
 *    busy state.
 *
 *    This is solved by pushing the window forward, and thus skipping the
 *    sample, for this cpu (effectively using the idle-delta for this cpu which
 *    was in effect at the time the window opened). This also solves the issue
 *    of having to deal with a cpu having been in NOHZ idle for multiple
 *    LOAD_FREQ intervals.
 *
 * When making the ILB scale, we should try to pull this in as well.
 */
static atomic_long_t calc_load_idle[2];
static int calc_load_idx;

static inline int calc_load_write_idx(void)
{
	int idx = calc_load_idx;

	/*
	 * See calc_global_nohz(), if we observe the new index, we also
	 * need to observe the new update time.
	 */
	smp_rmb();

	/*
	 * If the folding window started, make sure we start writing in the
	 * next idle-delta.
	 */
	if (!time_before(jiffies, calc_load_update))
		idx++;

	return idx & 1;
}

static inline int calc_load_read_idx(void)
{
	return calc_load_idx & 1;
}

void calc_load_enter_idle(void)
{
	struct rq *this_rq = this_rq();
	long delta;

	/*
	 * We're going into NOHZ mode, if there's any pending delta, fold it
	 * into the pending idle delta.
	 */
	delta = calc_load_fold_active(this_rq);
	if (delta) {
		int idx = calc_load_write_idx();
		atomic_long_add(delta, &calc_load_idle[idx]);
	}
}

void calc_load_exit_idle(void)
{
	struct rq *this_rq = this_rq();

	/*
	 * If we're still before the sample window, we're done.
	 */
	if (time_before(jiffies, this_rq->calc_load_update))
		return;

	/*
	 * We woke inside or after the sample window, this means we're already
	 * accounted through the nohz accounting, so skip the entire deal and
	 * sync up for the next window.
	 */
	this_rq->calc_load_update = calc_load_update;
	if (time_before(jiffies, this_rq->calc_load_update + 10))
		this_rq->calc_load_update += LOAD_FREQ;
}

static long calc_load_fold_idle(void)
{
	int idx = calc_load_read_idx();
	long delta = 0;

	if (atomic_long_read(&calc_load_idle[idx]))
		delta = atomic_long_xchg(&calc_load_idle[idx], 0);

	return delta;
}

/**
 * fixed_power_int - compute: x^n, in O(log n) time
 *
 * @x:         base of the power
 * @frac_bits: fractional bits of @x
 * @n:         power to raise @x to.
 *
 * By exploiting the relation between the definition of the natural power
 * function: x^n := x*x*...*x (x multiplied by itself n times), and
 * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
 * (where: n_i \elem {0, 1}, the binary vector representing n),
 * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
 * of course trivially computable in O(log_2 n), the length of our binary
 * vector.
 */
static unsigned long
fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
{
	unsigned long result = 1UL << frac_bits;

	if (n) for (;;) {
		if (n & 1) {
			result *= x;
			result += 1UL << (frac_bits - 1);
			result >>= frac_bits;
		}
		n >>= 1;
		if (!n)
			break;
		x *= x;
		x += 1UL << (frac_bits - 1);
		x >>= frac_bits;
	}

	return result;
}
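
/*
 * Worked example (illustrative only): for n = 5 (binary 101) the loop
 * multiplies the running result by x only on the set bits, squaring x
 * between iterations, so result = x^1 * x^4 = x^5 in three iterations
 * rather than five multiplies. With frac_bits = FSHIFT = 11 and
 * x = EXP_1 = 1884, a single squaring step yields
 * (1884 * 1884 + 1024) >> 11 = 1733, i.e. ~exp(-10s/1min) in fixed-point.
 */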

/*
 * a1 = a0 * e + a * (1 - e)
 *
 * a2 = a1 * e + a * (1 - e)
 *    = (a0 * e + a * (1 - e)) * e + a * (1 - e)
 *    = a0 * e^2 + a * (1 - e) * (1 + e)
 *
 * a3 = a2 * e + a * (1 - e)
 *    = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
 *    = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
 *
 *  ...
 *
 * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1]
 *    = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
 *    = a0 * e^n + a * (1 - e^n)
 *
 * [1] application of the geometric series:
 *
 *              n         1 - x^(n+1)
 *     S_n := \Sum x^i = -------------
 *             i=0            1 - x
 */
static unsigned long
calc_load_n(unsigned long load, unsigned long exp,
	    unsigned long active, unsigned int n)
{
	return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
}
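
/*
 * In other words (illustrative): applying calc_load() n times with the
 * same 'active' value is equivalent to a single calc_load() call with the
 * decay factor raised to the n-th power. E.g. two LOAD_FREQ windows
 * missed while fully idle fold as
 *
 *	calc_load_n(load, EXP_1, 0, 2)
 *	  == calc_load(load, fixed_power_int(EXP_1, FSHIFT, 2), 0)
 *	  ~= load * (1733 / 2048)
 *
 * rather than looping over the missed windows one by one.
 */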

/*
 * NO_HZ can leave us missing all per-cpu ticks calling
 * calc_load_account_active(), but since an idle CPU folds its delta into
 * calc_load_idle per calc_load_enter_idle(), all we need to do is fold
 * in the pending idle delta if our idle period crossed a load cycle boundary.
 *
 * Once we've updated the global active value, we need to apply the exponential
 * weights adjusted to the number of cycles missed.
 */
static void calc_global_nohz(void)
{
	long delta, active, n;

	if (!time_before(jiffies, calc_load_update + 10)) {
		/*
		 * Catch-up, fold however many we are behind still
		 */
		delta = jiffies - calc_load_update - 10;
		n = 1 + (delta / LOAD_FREQ);

		active = atomic_long_read(&calc_load_tasks);
		active = active > 0 ? active * FIXED_1 : 0;

		avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
		avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
		avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);

		calc_load_update += n * LOAD_FREQ;
	}

	/*
	 * Flip the idle index...
	 *
	 * Make sure we first write the new time then flip the index, so that
	 * calc_load_write_idx() will see the new time when it reads the new
	 * index, this avoids a double flip messing things up.
	 */
	smp_wmb();
	calc_load_idx++;
}
#else /* !CONFIG_NO_HZ_COMMON */

static inline long calc_load_fold_idle(void) { return 0; }
static inline void calc_global_nohz(void) { }

#endif /* CONFIG_NO_HZ_COMMON */

/*
 * calc_global_load - update the avenrun load estimates 10 ticks after the
 * CPUs have updated calc_load_tasks.
 */
void calc_global_load(unsigned long ticks)
{
	long active, delta;

	if (time_before(jiffies, calc_load_update + 10))
		return;

	/*
	 * Fold the 'old' idle-delta to include all NO_HZ cpus.
	 */
	delta = calc_load_fold_idle();
	if (delta)
		atomic_long_add(delta, &calc_load_tasks);

	active = atomic_long_read(&calc_load_tasks);
	active = active > 0 ? active * FIXED_1 : 0;

	avenrun[0] = calc_load(avenrun[0], EXP_1, active);
	avenrun[1] = calc_load(avenrun[1], EXP_5, active);
	avenrun[2] = calc_load(avenrun[2], EXP_15, active);

	calc_load_update += LOAD_FREQ;

	/*
	 * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk.
	 */
	calc_global_nohz();
}
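
/*
 * Illustrative numbers: starting from avenrun[0] == 0 with a single task
 * runnable for the whole window, the first sample gives
 *
 *	calc_load(0, EXP_1, FIXED_1) = (2048 * (2048 - 1884) + 1024) >> 11
 *				     = 164
 *
 * which /proc/loadavg would render as roughly "0.08" -- the slow
 * exponential ramp-up of the 1-minute average observed after boot.
 */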

/*
 * Called from update_cpu_load_active() to periodically update this CPU's
 * active count.
 */
static void calc_load_account_active(struct rq *this_rq)
{
	long delta;

	if (time_before(jiffies, this_rq->calc_load_update))
		return;

	delta = calc_load_fold_active(this_rq);
	if (delta)
		atomic_long_add(delta, &calc_load_tasks);

	this_rq->calc_load_update += LOAD_FREQ;
}

/*
 * End of global load-average stuff
 */

/*
 * The exact cpuload at various idx values, calculated at every tick would be
 * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
 *
 * If a cpu misses updates for n-1 ticks (as it was idle) and update gets
 * called on the n-th tick when the cpu may be busy, then we have:
 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
 * load = ((2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load
 *
 * decay_load_missed() below does an efficient calculation of
 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
 * avoiding the 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load
 *
 * The calculation is approximated on a 128 point scale.
 * degrade_zero_ticks is the number of ticks after which load at any
 * particular idx is approximated to be zero.
 * degrade_factor is a precomputed table, a row for each load idx.
 * Each column corresponds to the degradation factor for a power of two ticks,
 * based on 128 point scale.
 * Example:
 * row 2, col 3 (=12) says that the degradation at load idx 2 after
 * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8).
 *
 * With these power-of-2 load factors, we can degrade the load n times
 * by looking at 1 bits in n and doing as many mult/shift instead of
 * n mult/shifts needed by the exact degradation.
 */
#define DEGRADE_SHIFT		7
static const unsigned char
		degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
static const unsigned char
		degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
					{0, 0, 0, 0, 0, 0, 0, 0},
					{64, 32, 8, 0, 0, 0, 0, 0},
					{96, 72, 40, 12, 1, 0, 0},
					{112, 98, 75, 43, 15, 1, 0},
					{120, 112, 98, 76, 45, 16, 2} };

/*
 * Update cpu_load for any missed ticks, due to tickless idle. The backlog
 * would be when CPU is idle and so we just decay the old load without
 * adding any new load.
 */
static unsigned long
decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
{
	int j = 0;

	if (!missed_updates)
		return load;

	if (missed_updates >= degrade_zero_ticks[idx])
		return 0;

	if (idx == 1)
		return load >> missed_updates;

	while (missed_updates) {
		if (missed_updates % 2)
			load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;

		missed_updates >>= 1;
		j++;
	}
	return load;
}
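
/*
 * Worked example (illustrative only): idx = 2, missed_updates = 10
 * (binary 1010, bits 1 and 3 set, still below degrade_zero_ticks[2] = 32):
 *
 *	load = (load * degrade_factor[2][1]) >> 7;	-> * 72/128
 *	load = (load * degrade_factor[2][3]) >> 7;	-> * 12/128
 *
 * giving ~0.053 * load in two multiplies, close to the exact
 * (3/4)^10 ~= 0.056 that ten per-tick decays would have produced.
 */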

/*
 * Update rq->cpu_load[] statistics. This function is usually called every
 * scheduler tick (TICK_NSEC). With tickless idle this will not be called
 * every tick. We fix it up based on jiffies.
 */
static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
			      unsigned long pending_updates)
{
	int i, scale;

	this_rq->nr_load_updates++;

	/* Update our load: */
	this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
	for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
		unsigned long old_load, new_load;

		/* scale is effectively 1 << i now, and >> i divides by scale */

		old_load = this_rq->cpu_load[i];
		old_load = decay_load_missed(old_load, pending_updates - 1, i);
		new_load = this_load;
		/*
		 * Round up the averaging division if load is increasing. This
		 * prevents us from getting stuck on 9 if the load is 10, for
		 * example.
		 */
		if (new_load > old_load)
			new_load += scale - 1;

		this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
	}

	sched_avg_update(this_rq);
}
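
/*
 * Worked example (illustrative only): for idx i = 2 (scale = 4) the update
 * is cpu_load[2] = (old * 3 + new) / 4. With old = 9 and this_load = 10
 * the plain average stays at (27 + 10) >> 2 = 9 forever; the round-up
 * turns new into 10 + 3 = 13, so (27 + 13) >> 2 = 10 and the average can
 * actually reach the target -- the "stuck on 9" case the comment above
 * refers to.
 */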

#ifdef CONFIG_SMP
static inline unsigned long get_rq_runnable_load(struct rq *rq)
{
	return rq->cfs.runnable_load_avg;
}
#else
static inline unsigned long get_rq_runnable_load(struct rq *rq)
{
	return rq->load.weight;
}
#endif

#ifdef CONFIG_NO_HZ_COMMON
/*
 * There is no sane way to deal with nohz on smp when using jiffies because the
 * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
 * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
 *
 * Therefore we cannot use the delta approach from the regular tick since that
 * would seriously skew the load calculation. However we'll make do for those
 * updates happening while idle (nohz_idle_balance) or coming out of idle
 * (tick_nohz_idle_exit).
 *
 * This means we might still be one tick off for nohz periods.
 */

/*
 * Called from nohz_idle_balance() to update the load ratings before doing the
 * idle balance.
 */
void update_idle_cpu_load(struct rq *this_rq)
{
	unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
	unsigned long load = get_rq_runnable_load(this_rq);
	unsigned long pending_updates;

	/*
	 * bail if there's load or we're actually up-to-date.
	 */
	if (load || curr_jiffies == this_rq->last_load_update_tick)
		return;

	pending_updates = curr_jiffies - this_rq->last_load_update_tick;
	this_rq->last_load_update_tick = curr_jiffies;

	__update_cpu_load(this_rq, load, pending_updates);
}

/*
 * Called from tick_nohz_idle_exit() -- try and fix up the ticks we missed.
 */
void update_cpu_load_nohz(void)
{
	struct rq *this_rq = this_rq();
	unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
	unsigned long pending_updates;

	if (curr_jiffies == this_rq->last_load_update_tick)
		return;

	raw_spin_lock(&this_rq->lock);
	pending_updates = curr_jiffies - this_rq->last_load_update_tick;
	if (pending_updates) {
		this_rq->last_load_update_tick = curr_jiffies;
		/*
		 * We were idle, this means load 0, the current load might be
		 * !0 due to remote wakeups and the sort.
		 */
		__update_cpu_load(this_rq, 0, pending_updates);
	}
	raw_spin_unlock(&this_rq->lock);
}
#endif /* CONFIG_NO_HZ_COMMON */

/*
 * Called from scheduler_tick()
 */
void update_cpu_load_active(struct rq *this_rq)
{
	unsigned long load = get_rq_runnable_load(this_rq);
	/*
	 * See the mess around update_idle_cpu_load() / update_cpu_load_nohz().
	 */
	this_rq->last_load_update_tick = jiffies;
	__update_cpu_load(this_rq, load, 1);

	calc_load_account_active(this_rq);
}