#include <linux/export.h>
#include <linux/sched.h>
#include <linux/tsacct_kern.h>
#include <linux/kernel_stat.h>
#include <linux/static_key.h>
#include <linux/context_tracking.h>
#include "sched.h"
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif


#ifdef CONFIG_IRQ_TIME_ACCOUNTING

/*
 * There are no locks covering percpu hardirq/softirq time.
 * They are only modified in vtime_account, on the corresponding CPU
 * with interrupts disabled, so writes are safe.
 * They are read and saved off onto struct rq in update_rq_clock().
 * This may result in another CPU reading this CPU's irq time and
 * racing with irq/vtime_account on this CPU. We would then get either
 * the old or the new value, with the side effect of accounting a slice
 * of irq time to the wrong task when an irq is in progress while we
 * read rq->clock. That is a worthy compromise in place of having locks
 * on each irq in account_system_time.
 */
DEFINE_PER_CPU(u64, cpu_hardirq_time);
DEFINE_PER_CPU(u64, cpu_softirq_time);

static DEFINE_PER_CPU(u64, irq_start_time);
static int sched_clock_irqtime;

void enable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 1;
}

void disable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 0;
}

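/*
 * On 32-bit kernels the u64 per-cpu counters above cannot be read in a
 * single atomic load, so irq_time_write_begin()/irq_time_write_end()
 * bump a seqcount around writes and the matching read side (see
 * sched.h) retries if it observes a concurrent update. On 64-bit the
 * loads are atomic and the seqcount is not needed.
 */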
#ifndef CONFIG_64BIT
DEFINE_PER_CPU(seqcount_t, irq_time_seq);
#endif /* CONFIG_64BIT */

/*
 * Called before incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void irqtime_account_irq(struct task_struct *curr)
{
	s64 delta;
	int cpu;

	if (!sched_clock_irqtime)
		return;

	cpu = smp_processor_id();
	delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
	__this_cpu_add(irq_start_time, delta);

	irq_time_write_begin();
	/*
	 * We do not account for softirq time from ksoftirqd here.
	 * We want to continue accounting softirq time to the ksoftirqd thread
	 * in that case, so as not to confuse the scheduler with a special task
	 * that does not consume any time, but still wants to run.
	 */
	if (hardirq_count())
		__this_cpu_add(cpu_hardirq_time, delta);
	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
		__this_cpu_add(cpu_softirq_time, delta);

	irq_time_write_end();
}
EXPORT_SYMBOL_GPL(irqtime_account_irq);

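/*
 * The two helpers below move the per-cpu irq time accumulated by
 * irqtime_account_irq() into the cpustat counters. The transfer is
 * capped at @maxtime and the amount actually accounted is returned, so
 * account_other_time() never hands out more than its @max budget in
 * total.
 */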
static cputime_t irqtime_account_hi_update(cputime_t maxtime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	unsigned long flags;
	cputime_t irq_cputime;

	local_irq_save(flags);
	irq_cputime = nsecs_to_cputime64(this_cpu_read(cpu_hardirq_time)) -
		      cpustat[CPUTIME_IRQ];
	irq_cputime = min(irq_cputime, maxtime);
	cpustat[CPUTIME_IRQ] += irq_cputime;
	local_irq_restore(flags);
	return irq_cputime;
}

static cputime_t irqtime_account_si_update(cputime_t maxtime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	unsigned long flags;
	cputime_t softirq_cputime;

	local_irq_save(flags);
	softirq_cputime = nsecs_to_cputime64(this_cpu_read(cpu_softirq_time)) -
			  cpustat[CPUTIME_SOFTIRQ];
	softirq_cputime = min(softirq_cputime, maxtime);
	cpustat[CPUTIME_SOFTIRQ] += softirq_cputime;
	local_irq_restore(flags);
	return softirq_cputime;
}

#else /* CONFIG_IRQ_TIME_ACCOUNTING */

#define sched_clock_irqtime	(0)

static cputime_t irqtime_account_hi_update(cputime_t dummy)
{
	return 0;
}

static cputime_t irqtime_account_si_update(cputime_t dummy)
{
	return 0;
}

#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */

static inline void task_group_account_field(struct task_struct *p, int index,
					    u64 tmp)
{
	/*
	 * Since all updates are sure to touch the root cgroup, we
	 * get ourselves ahead and touch it first. If the root cgroup
	 * is the only cgroup, then nothing else should be necessary.
	 */
	__this_cpu_add(kernel_cpustat.cpustat[index], tmp);

	cpuacct_account_field(p, index, tmp);
}

/*
 * Account user cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in user space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_user_time(struct task_struct *p, cputime_t cputime,
		       cputime_t cputime_scaled)
{
	int index;

	/* Add user time to process. */
	p->utime += cputime;
	p->utimescaled += cputime_scaled;
	account_group_user_time(p, cputime);

	index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;

	/* Add user time to cpustat. */
	task_group_account_field(p, index, (__force u64) cputime);

	/* Account for user time used */
	acct_account_cputime(p);
}

/*
 * Account guest cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in virtual machine since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
static void account_guest_time(struct task_struct *p, cputime_t cputime,
			       cputime_t cputime_scaled)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	/* Add guest time to process. */
	p->utime += cputime;
	p->utimescaled += cputime_scaled;
	account_group_user_time(p, cputime);
	p->gtime += cputime;

	/* Add guest time to cpustat. */
	if (task_nice(p) > 0) {
		cpustat[CPUTIME_NICE] += (__force u64) cputime;
		cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime;
	} else {
		cpustat[CPUTIME_USER] += (__force u64) cputime;
		cpustat[CPUTIME_GUEST] += (__force u64) cputime;
	}
}

/*
 * Account system cpu time to a process and desired cpustat field
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 * @index: index of the cpustat field that has to be updated
 */
static inline
void __account_system_time(struct task_struct *p, cputime_t cputime,
			   cputime_t cputime_scaled, int index)
{
	/* Add system time to process. */
	p->stime += cputime;
	p->stimescaled += cputime_scaled;
	account_group_system_time(p, cputime);

	/* Add system time to cpustat. */
	task_group_account_field(p, index, (__force u64) cputime);

	/* Account for system time used */
	acct_account_cputime(p);
}

/*
 * Account system cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @hardirq_offset: the offset to subtract from hardirq_count()
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_system_time(struct task_struct *p, int hardirq_offset,
			 cputime_t cputime, cputime_t cputime_scaled)
{
	int index;

	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
		account_guest_time(p, cputime, cputime_scaled);
		return;
	}

	if (hardirq_count() - hardirq_offset)
		index = CPUTIME_IRQ;
	else if (in_serving_softirq())
		index = CPUTIME_SOFTIRQ;
	else
		index = CPUTIME_SYSTEM;

	__account_system_time(p, cputime, cputime_scaled, index);
}

/*
 * Account for involuntary wait time.
 * @cputime: the cpu time spent in involuntary wait
 */
void account_steal_time(cputime_t cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	cpustat[CPUTIME_STEAL] += (__force u64) cputime;
}

/*
 * Account for idle time.
 * @cputime: the cpu time spent in idle wait
 */
void account_idle_time(cputime_t cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	struct rq *rq = this_rq();

	if (atomic_read(&rq->nr_iowait) > 0)
		cpustat[CPUTIME_IOWAIT] += (__force u64) cputime;
	else
		cpustat[CPUTIME_IDLE] += (__force u64) cputime;
}

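/*
 * Steal time is time during which the hypervisor ran something else
 * while this vCPU wanted to run. paravirt_steal_clock() reports the
 * cumulative value; the helper below accounts the delta against the
 * prev_steal_time snapshot in the runqueue, capped at @maxtime, and
 * advances the snapshot by the amount actually accounted.
 */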
static __always_inline cputime_t steal_account_process_time(cputime_t maxtime)
{
#ifdef CONFIG_PARAVIRT
	if (static_key_false(&paravirt_steal_enabled)) {
		cputime_t steal_cputime;
		u64 steal;

		steal = paravirt_steal_clock(smp_processor_id());
		steal -= this_rq()->prev_steal_time;

		steal_cputime = min(nsecs_to_cputime(steal), maxtime);
		account_steal_time(steal_cputime);
		this_rq()->prev_steal_time += cputime_to_nsecs(steal_cputime);

		return steal_cputime;
	}
#endif
	return 0;
}

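/*
 * Whatever the helper below manages to account (up to @max) is
 * subtracted from the tick or vtime delta by its callers, so steal,
 * hardirq and softirq time are not double-counted as user, system or
 * idle time.
 */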
/*
 * Account how much elapsed time was spent in steal, irq, or softirq time.
 */
static inline cputime_t account_other_time(cputime_t max)
{
	cputime_t accounted;

	accounted = steal_account_process_time(max);

	if (accounted < max)
		accounted += irqtime_account_hi_update(max - accounted);

	if (accounted < max)
		accounted += irqtime_account_si_update(max - accounted);

	return accounted;
}

/*
 * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
 * tasks (sum on group iteration) belonging to @tsk's group.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
	struct signal_struct *sig = tsk->signal;
	cputime_t utime, stime;
	struct task_struct *t;
	unsigned int seq, nextseq;
	unsigned long flags;

	rcu_read_lock();
	/* Attempt a lockless read on the first round. */
	nextseq = 0;
	do {
		seq = nextseq;
		flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
		times->utime = sig->utime;
		times->stime = sig->stime;
		times->sum_exec_runtime = sig->sum_sched_runtime;

		for_each_thread(tsk, t) {
			task_cputime(t, &utime, &stime);
			times->utime += utime;
			times->stime += stime;
			times->sum_exec_runtime += task_sched_runtime(t);
		}
		/* If lockless access failed, take the lock. */
		nextseq = 1;
	} while (need_seqretry(&sig->stats_lock, seq));
	done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
	rcu_read_unlock();
}

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * Account a tick to a process and cpustat
 * @p: the process that the cpu time gets accounted to
 * @user_tick: is the tick from userspace
 * @rq: the pointer to rq
 *
 * Tick demultiplexing follows the order
 * - pending hardirq update
 * - pending softirq update
 * - user_time
 * - idle_time
 * - system time
 *   - check for guest_time
 *   - else account as system_time
 *
 * The check for hardirq is done for both system and user time as there is
 * no timer going off while we are on hardirq and hence we may never get an
 * opportunity to update it solely in system time.
 * p->stime and friends are only updated on system time and not on irq or
 * softirq as those do not count in task exec_runtime any more.
 */
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
					 struct rq *rq, int ticks)
{
	u64 cputime = (__force u64) cputime_one_jiffy * ticks;
	cputime_t scaled, other;

	/*
	 * When returning from idle, many ticks can get accounted at
	 * once, including some ticks of steal, irq, and softirq time.
	 * Subtract those ticks from the amount of time accounted to
	 * idle, or potentially user or system time. Due to rounding,
	 * other time can exceed ticks occasionally.
	 */
	other = account_other_time(cputime);
	if (other >= cputime)
		return;
	cputime -= other;
	scaled = cputime_to_scaled(cputime);

	if (this_cpu_ksoftirqd() == p) {
		/*
		 * ksoftirqd time does not get accounted in cpu_softirq_time.
		 * So, we have to handle it separately here.
		 * Also, p->stime needs to be updated for ksoftirqd.
		 */
		__account_system_time(p, cputime, scaled, CPUTIME_SOFTIRQ);
	} else if (user_tick) {
		account_user_time(p, cputime, scaled);
	} else if (p == rq->idle) {
		account_idle_time(cputime);
	} else if (p->flags & PF_VCPU) { /* System time or guest time */
		account_guest_time(p, cputime, scaled);
	} else {
		__account_system_time(p, cputime, scaled, CPUTIME_SYSTEM);
	}
}

static void irqtime_account_idle_ticks(int ticks)
{
	struct rq *rq = this_rq();

	irqtime_account_process_tick(current, 0, rq, ticks);
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
static inline void irqtime_account_idle_ticks(int ticks) {}
static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
						struct rq *rq, int nr_ticks) {}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

/*
 * Use precise platform statistics if available:
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING

#ifndef __ARCH_HAS_VTIME_TASK_SWITCH
void vtime_common_task_switch(struct task_struct *prev)
{
	if (is_idle_task(prev))
		vtime_account_idle(prev);
	else
		vtime_account_system(prev);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	vtime_account_user(prev);
#endif
	arch_vtime_task_switch(prev);
}
#endif

#endif /* CONFIG_VIRT_CPU_ACCOUNTING */


#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * Archs that account the whole time spent in the idle task
 * (outside irq) as idle time can rely on this and just implement
 * vtime_account_system() and vtime_account_idle(). Archs that
 * have another meaning of the idle time (s390 only includes the
 * time spent by the CPU when it's in low power mode) must override
 * vtime_account().
 */
#ifndef __ARCH_HAS_VTIME_ACCOUNT
void vtime_account_irq_enter(struct task_struct *tsk)
{
	if (!in_interrupt() && is_idle_task(tsk))
		vtime_account_idle(tsk);
	else
		vtime_account_system(tsk);
}
EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
#endif /* __ARCH_HAS_VTIME_ACCOUNT */

void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	*ut = p->utime;
	*st = p->stime;
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);

void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);

	*ut = cputime.utime;
	*st = cputime.stime;
}
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
/*
 * Account a single tick of cpu time.
 * @p: the process that the cpu time gets accounted to
 * @user_tick: indicates if the tick is a user or a system tick
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
	cputime_t cputime, scaled, steal;
	struct rq *rq = this_rq();

	if (vtime_accounting_cpu_enabled())
		return;

	if (sched_clock_irqtime) {
		irqtime_account_process_tick(p, user_tick, rq, 1);
		return;
	}

	cputime = cputime_one_jiffy;
	steal = steal_account_process_time(cputime);

	if (steal >= cputime)
		return;

	cputime -= steal;
	scaled = cputime_to_scaled(cputime);

	if (user_tick)
		account_user_time(p, cputime, scaled);
	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
		account_system_time(p, HARDIRQ_OFFSET, cputime, scaled);
	else
		account_idle_time(cputime);
}

/*
 * Account multiple ticks of idle time.
 * @ticks: number of ticks to account as idle
 */
void account_idle_ticks(unsigned long ticks)
{
	cputime_t cputime, steal;

	if (sched_clock_irqtime) {
		irqtime_account_idle_ticks(ticks);
		return;
	}

	cputime = jiffies_to_cputime(ticks);
	steal = steal_account_process_time(cputime);

	if (steal >= cputime)
		return;

	cputime -= steal;
	account_idle_time(cputime);
}

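/*
 * A worked example of the scaling done below: with stime = 4 and
 * utime = 2 sampled ticks but only rtime = 3 ticks of actual runtime,
 * scale_stime(4, 3, 6) returns 4 * 3 / 6 = 2, and cputime_adjust()
 * then reports stime = 2, utime = rtime - stime = 1, preserving the
 * sampled 2:1 ratio.
 */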
/*
 * Perform (stime * rtime) / total, but avoid multiplication overflow by
 * losing precision when the numbers are big.
 */
static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
{
	u64 scaled;

	for (;;) {
		/* Make sure "rtime" is the bigger of stime/rtime */
		if (stime > rtime)
			swap(rtime, stime);

		/* Make sure 'total' fits in 32 bits */
		if (total >> 32)
			goto drop_precision;

		/* Does rtime (and thus stime) fit in 32 bits? */
		if (!(rtime >> 32))
			break;

		/* Can we just balance rtime/stime rather than dropping bits? */
		if (stime >> 31)
			goto drop_precision;

		/* We can grow stime and shrink rtime and try to make them both fit */
		stime <<= 1;
		rtime >>= 1;
		continue;

drop_precision:
		/* We drop from rtime, it has more bits than stime */
		rtime >>= 1;
		total >>= 1;
	}

	/*
	 * Make sure gcc understands that this is a 32x32->64 multiply,
	 * followed by a 64/32->64 divide.
	 */
	scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total);
	return (__force cputime_t) scaled;
}

/*
 * Adjust tick based cputime random precision against scheduler runtime
 * accounting.
 *
 * Tick based cputime accounting depends on random scheduling timeslices of a
 * task to be interrupted or not by the timer. Depending on these
 * circumstances, the number of these interrupts may be over or
 * under-optimistic, matching the real user and system cputime with a variable
 * precision.
 *
 * Fix this by scaling these tick based values against the total runtime
 * accounted by the CFS scheduler.
 *
 * This code provides the following guarantees:
 *
 *   stime + utime == rtime
 *   stime_i+1 >= stime_i, utime_i+1 >= utime_i
 *
 * Assuming that rtime_i+1 >= rtime_i.
 */
static void cputime_adjust(struct task_cputime *curr,
			   struct prev_cputime *prev,
			   cputime_t *ut, cputime_t *st)
{
	cputime_t rtime, stime, utime;
	unsigned long flags;

	/* Serialize concurrent callers such that we can honour our guarantees */
	raw_spin_lock_irqsave(&prev->lock, flags);
	rtime = nsecs_to_cputime(curr->sum_exec_runtime);

	/*
	 * This is possible under two circumstances:
	 *  - rtime isn't monotonic after all (a bug);
	 *  - we got reordered by the lock.
	 *
	 * In both cases this acts as a filter such that the rest of the code
	 * can assume it is monotonic regardless of anything else.
	 */
	if (prev->stime + prev->utime >= rtime)
		goto out;

	stime = curr->stime;
	utime = curr->utime;

	if (utime == 0) {
		stime = rtime;
		goto update;
	}

	if (stime == 0) {
		utime = rtime;
		goto update;
	}

	stime = scale_stime((__force u64)stime, (__force u64)rtime,
			    (__force u64)(stime + utime));

	/*
	 * Make sure stime doesn't go backwards; this preserves monotonicity
	 * for utime because rtime is monotonic.
	 *
	 *  utime_i+1 = rtime_i+1 - stime_i
	 *            = rtime_i+1 - (rtime_i - utime_i)
	 *            = (rtime_i+1 - rtime_i) + utime_i
	 *            >= utime_i
	 */
	if (stime < prev->stime)
		stime = prev->stime;
	utime = rtime - stime;

	/*
	 * Make sure utime doesn't go backwards; this still preserves
	 * monotonicity for stime, analogous argument to above.
	 */
	if (utime < prev->utime) {
		utime = prev->utime;
		stime = rtime - utime;
	}

update:
	prev->stime = stime;
	prev->utime = utime;
out:
	*ut = prev->utime;
	*st = prev->stime;
	raw_spin_unlock_irqrestore(&prev->lock, flags);
}

void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime = {
		.sum_exec_runtime = p->se.sum_exec_runtime,
	};

	task_cputime(p, &cputime.utime, &cputime.stime);
	cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);

void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);
	cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
}
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
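/*
 * With CONFIG_VIRT_CPU_ACCOUNTING_GEN, cputime is derived from jiffies
 * deltas against tsk->vtime_snap instead of from every periodic tick:
 * vtime_delta() reports how much has elapsed since the last snapshot,
 * and get_vtime_delta() consumes it, minus whatever account_other_time()
 * attributes to steal/irq/softirq, while advancing the snapshot.
 */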
static cputime_t vtime_delta(struct task_struct *tsk)
{
	unsigned long now = READ_ONCE(jiffies);

	if (time_before(now, (unsigned long)tsk->vtime_snap))
		return 0;

	return jiffies_to_cputime(now - tsk->vtime_snap);
}

static cputime_t get_vtime_delta(struct task_struct *tsk)
{
	unsigned long now = READ_ONCE(jiffies);
	cputime_t delta, other;

	delta = jiffies_to_cputime(now - tsk->vtime_snap);
	other = account_other_time(delta);
	WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE);
	tsk->vtime_snap = now;

	return delta - other;
}

static void __vtime_account_system(struct task_struct *tsk)
{
	cputime_t delta_cpu = get_vtime_delta(tsk);

	account_system_time(tsk, irq_count(), delta_cpu, cputime_to_scaled(delta_cpu));
}

void vtime_account_system(struct task_struct *tsk)
{
	if (!vtime_delta(tsk))
		return;

	write_seqcount_begin(&tsk->vtime_seqcount);
	__vtime_account_system(tsk);
	write_seqcount_end(&tsk->vtime_seqcount);
}

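/*
 * tsk->vtime_snap_whence records what the pending delta should be
 * charged to: VTIME_SYS while in the kernel, VTIME_USER after
 * vtime_user_enter(), and VTIME_INACTIVE while the task is scheduled
 * out. The transitions below flush the accumulated delta before
 * switching state, under vtime_seqcount, so that readers such as
 * task_gtime() and fetch_task_cputime() see a consistent pair.
 */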
void vtime_account_user(struct task_struct *tsk)
{
	cputime_t delta_cpu;

	write_seqcount_begin(&tsk->vtime_seqcount);
	tsk->vtime_snap_whence = VTIME_SYS;
	if (vtime_delta(tsk)) {
		delta_cpu = get_vtime_delta(tsk);
		account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
	}
	write_seqcount_end(&tsk->vtime_seqcount);
}

void vtime_user_enter(struct task_struct *tsk)
{
	write_seqcount_begin(&tsk->vtime_seqcount);
	if (vtime_delta(tsk))
		__vtime_account_system(tsk);
	tsk->vtime_snap_whence = VTIME_USER;
	write_seqcount_end(&tsk->vtime_seqcount);
}

void vtime_guest_enter(struct task_struct *tsk)
{
	/*
	 * The flags must be updated under the lock with
	 * the vtime_snap flush and update.
	 * That enforces the right ordering and update sequence
	 * synchronization against the reader (task_gtime())
	 * that can thus safely catch up with a tickless delta.
	 */
	write_seqcount_begin(&tsk->vtime_seqcount);
	if (vtime_delta(tsk))
		__vtime_account_system(tsk);
	current->flags |= PF_VCPU;
	write_seqcount_end(&tsk->vtime_seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_enter);

void vtime_guest_exit(struct task_struct *tsk)
{
	write_seqcount_begin(&tsk->vtime_seqcount);
	__vtime_account_system(tsk);
	current->flags &= ~PF_VCPU;
	write_seqcount_end(&tsk->vtime_seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_exit);

void vtime_account_idle(struct task_struct *tsk)
{
	cputime_t delta_cpu = get_vtime_delta(tsk);

	account_idle_time(delta_cpu);
}

void arch_vtime_task_switch(struct task_struct *prev)
{
	write_seqcount_begin(&prev->vtime_seqcount);
	prev->vtime_snap_whence = VTIME_INACTIVE;
	write_seqcount_end(&prev->vtime_seqcount);

	write_seqcount_begin(&current->vtime_seqcount);
	current->vtime_snap_whence = VTIME_SYS;
	current->vtime_snap = jiffies;
	write_seqcount_end(&current->vtime_seqcount);
}

void vtime_init_idle(struct task_struct *t, int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	write_seqcount_begin(&t->vtime_seqcount);
	t->vtime_snap_whence = VTIME_SYS;
	t->vtime_snap = jiffies;
	write_seqcount_end(&t->vtime_seqcount);
	local_irq_restore(flags);
}

cputime_t task_gtime(struct task_struct *t)
{
	unsigned int seq;
	cputime_t gtime;

	if (!vtime_accounting_enabled())
		return t->gtime;

	do {
		seq = read_seqcount_begin(&t->vtime_seqcount);

		gtime = t->gtime;
		if (t->vtime_snap_whence == VTIME_SYS && t->flags & PF_VCPU)
			gtime += vtime_delta(t);

	} while (read_seqcount_retry(&t->vtime_seqcount, seq));

	return gtime;
}

/*
 * Fetch cputime raw values from fields of task_struct and
 * add up the pending nohz execution time since the last
 * cputime snapshot.
 */
static void
fetch_task_cputime(struct task_struct *t,
		   cputime_t *u_dst, cputime_t *s_dst,
		   cputime_t *u_src, cputime_t *s_src,
		   cputime_t *udelta, cputime_t *sdelta)
{
	unsigned int seq;
	unsigned long long delta;

	do {
		*udelta = 0;
		*sdelta = 0;

		seq = read_seqcount_begin(&t->vtime_seqcount);

		if (u_dst)
			*u_dst = *u_src;
		if (s_dst)
			*s_dst = *s_src;

		/* Task is sleeping, nothing to add */
		if (t->vtime_snap_whence == VTIME_INACTIVE ||
		    is_idle_task(t))
			continue;

		delta = vtime_delta(t);

		/*
		 * Task runs either in user or kernel space, add pending nohz time to
		 * the right place.
		 */
		if (t->vtime_snap_whence == VTIME_USER || t->flags & PF_VCPU) {
			*udelta = delta;
		} else {
			if (t->vtime_snap_whence == VTIME_SYS)
				*sdelta = delta;
		}
	} while (read_seqcount_retry(&t->vtime_seqcount, seq));
}

void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime)
{
	cputime_t udelta, sdelta;

	if (!vtime_accounting_enabled()) {
		if (utime)
			*utime = t->utime;
		if (stime)
			*stime = t->stime;
		return;
	}

	fetch_task_cputime(t, utime, stime, &t->utime,
			   &t->stime, &udelta, &sdelta);
	if (utime)
		*utime += udelta;
	if (stime)
		*stime += sdelta;
}

void task_cputime_scaled(struct task_struct *t,
			 cputime_t *utimescaled, cputime_t *stimescaled)
{
	cputime_t udelta, sdelta;

	if (!vtime_accounting_enabled()) {
		if (utimescaled)
			*utimescaled = t->utimescaled;
		if (stimescaled)
			*stimescaled = t->stimescaled;
		return;
	}

	fetch_task_cputime(t, utimescaled, stimescaled,
			   &t->utimescaled, &t->stimescaled, &udelta, &sdelta);
	if (utimescaled)
		*utimescaled += cputime_to_scaled(udelta);
	if (stimescaled)
		*stimescaled += cputime_to_scaled(sdelta);
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */