// SPDX-License-Identifier: GPL-2.0-only
#include "cgroup-internal.h"

#include <linux/sched/cputime.h>

static DEFINE_SPINLOCK(cgroup_rstat_lock);
static DEFINE_PER_CPU(raw_spinlock_t, cgroup_rstat_cpu_lock);

static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu);

static struct cgroup_rstat_cpu *cgroup_rstat_cpu(struct cgroup *cgrp, int cpu)
{
	return per_cpu_ptr(cgrp->rstat_cpu, cpu);
}

/**
 * cgroup_rstat_updated - keep track of updated rstat_cpu
 * @cgrp: target cgroup
 * @cpu: cpu on which rstat_cpu was updated
 *
 * @cgrp's rstat_cpu on @cpu was updated.  Put it on the parent's matching
 * rstat_cpu->updated_children list.  See the comment on top of
 * cgroup_rstat_cpu definition for details.
 */
void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)
{
	raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
	unsigned long flags;

	/*
	 * Speculative already-on-list test.  This may race leading to
	 * temporary inaccuracies, which is fine.
	 *
	 * Because @parent's updated_children is terminated with @parent
	 * instead of NULL, we can tell whether @cgrp is on the list by
	 * testing the next pointer for NULL.
	 */
	if (data_race(cgroup_rstat_cpu(cgrp, cpu)->updated_next))
		return;

	raw_spin_lock_irqsave(cpu_lock, flags);

	/* put @cgrp and all ancestors on the corresponding updated lists */
	while (true) {
		struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
		struct cgroup *parent = cgroup_parent(cgrp);
		struct cgroup_rstat_cpu *prstatc;

		/*
		 * Both additions and removals are bottom-up.  If a cgroup
		 * is already in the tree, all ancestors are.
		 */
		if (rstatc->updated_next)
			break;

		/* Root has no parent to link it to, but mark it busy */
		if (!parent) {
			rstatc->updated_next = cgrp;
			break;
		}

		prstatc = cgroup_rstat_cpu(parent, cpu);
		rstatc->updated_next = prstatc->updated_children;
		prstatc->updated_children = cgrp;

		cgrp = parent;
	}

	raw_spin_unlock_irqrestore(cpu_lock, flags);
}
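
/*
 * Illustrative usage sketch (not part of this file): a controller that
 * bumps a per-cpu counter records the value and then marks the cgroup
 * dirty on that cpu.  my_ss_charge() and my_ss_counter are hypothetical
 * names used only for this example:
 *
 *	static void my_ss_charge(struct cgroup *cgrp, u64 bytes)
 *	{
 *		int cpu = get_cpu();
 *
 *		this_cpu_add(my_ss_counter, bytes);
 *		cgroup_rstat_updated(cgrp, cpu);
 *		put_cpu();
 *	}
 *
 * The speculative updated_next test above keeps repeated calls cheap when
 * the cgroup is already on the per-cpu updated list, so the function is
 * suitable for hot, IRQ-disabled paths.
 */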

/**
 * cgroup_rstat_cpu_pop_updated - iterate and dismantle rstat_cpu updated tree
 * @pos: current position
 * @root: root of the tree to traverse
 * @cpu: target cpu
 *
 * Walks the updated rstat_cpu tree on @cpu from @root.  %NULL @pos starts
 * the traversal and %NULL return indicates the end.  During traversal,
 * each returned cgroup is unlinked from the tree.  Must be called with the
 * matching cgroup_rstat_cpu_lock held.
 *
 * The only ordering guarantee is that, for a parent and a child pair
 * covered by a given traversal, if a child is visited, its parent is
 * guaranteed to be visited afterwards.
 */
static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
						   struct cgroup *root, int cpu)
{
	struct cgroup_rstat_cpu *rstatc;
	struct cgroup *parent;

	if (pos == root)
		return NULL;

	/*
	 * We're going to walk down to the first leaf and visit/remove it.
	 * Any unvisited node can serve as the starting point.
	 */
	if (!pos) {
		pos = root;
		/* return NULL if this subtree is not on-list */
		if (!cgroup_rstat_cpu(pos, cpu)->updated_next)
			return NULL;
	} else {
		pos = cgroup_parent(pos);
	}

	/* walk down to the first leaf */
	while (true) {
		rstatc = cgroup_rstat_cpu(pos, cpu);
		if (rstatc->updated_children == pos)
			break;
		pos = rstatc->updated_children;
	}

	/*
	 * Unlink @pos from the tree.  As the updated_children list is
	 * singly linked, we have to walk it to find the removal point.
	 * However, due to the way we traverse, @pos will be the first
	 * child in most cases.  The only exception is @root.
	 */
	parent = cgroup_parent(pos);
	if (parent) {
		struct cgroup_rstat_cpu *prstatc;
		struct cgroup **nextp;

		prstatc = cgroup_rstat_cpu(parent, cpu);
		nextp = &prstatc->updated_children;
		while (*nextp != pos) {
			struct cgroup_rstat_cpu *nrstatc;

			nrstatc = cgroup_rstat_cpu(*nextp, cpu);
			WARN_ON_ONCE(*nextp == parent);
			nextp = &nrstatc->updated_next;
		}
		*nextp = rstatc->updated_next;
	}

	rstatc->updated_next = NULL;
	return pos;
}
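
/*
 * Worked example (illustrative): with a root -> A -> B chain fully on the
 * updated tree for @cpu, successive pops dismantle it child-first:
 *
 *	pos = NULL;
 *	pos = cgroup_rstat_cpu_pop_updated(pos, root, cpu);	// returns B
 *	pos = cgroup_rstat_cpu_pop_updated(pos, root, cpu);	// returns A
 *	pos = cgroup_rstat_cpu_pop_updated(pos, root, cpu);	// returns root
 *	pos = cgroup_rstat_cpu_pop_updated(pos, root, cpu);	// returns NULL
 *
 * This is the child-before-parent ordering that cgroup_rstat_flush_locked()
 * below relies on when propagating per-cpu deltas towards the root.
 */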

/* see cgroup_rstat_flush() */
static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
	__releases(&cgroup_rstat_lock) __acquires(&cgroup_rstat_lock)
{
	int cpu;

	lockdep_assert_held(&cgroup_rstat_lock);

	for_each_possible_cpu(cpu) {
		raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock,
						       cpu);
		struct cgroup *pos = NULL;

		raw_spin_lock(cpu_lock);
		while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu))) {
			struct cgroup_subsys_state *css;

			cgroup_base_stat_flush(pos, cpu);

			rcu_read_lock();
			list_for_each_entry_rcu(css, &pos->rstat_css_list,
						rstat_css_node)
				css->ss->css_rstat_flush(css, cpu);
			rcu_read_unlock();
		}
		raw_spin_unlock(cpu_lock);

		/* if @may_sleep, play nice and yield if necessary */
		if (may_sleep && (need_resched() ||
				  spin_needbreak(&cgroup_rstat_lock))) {
			spin_unlock_irq(&cgroup_rstat_lock);
			if (!cond_resched())
				cpu_relax();
			spin_lock_irq(&cgroup_rstat_lock);
		}
	}
}
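
/*
 * Note on the lock split used above: cgroup_rstat_lock serializes entire
 * flushes and, in the may_sleep case, is dropped and re-acquired so that
 * long flushes can yield, while each per-cpu raw cgroup_rstat_cpu_lock
 * only protects that cpu's updated tree and is shared with the hot update
 * path in cgroup_rstat_updated().
 */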

/**
 * cgroup_rstat_flush - flush stats in @cgrp's subtree
 * @cgrp: target cgroup
 *
 * Collect all per-cpu stats in @cgrp's subtree into the global counters
 * and propagate them upwards.  After this function returns, all cgroups in
 * the subtree have up-to-date ->stat.
 *
 * This also gets all cgroups in the subtree including @cgrp off the
 * ->updated_children lists.
 *
 * This function may block.
 */
void cgroup_rstat_flush(struct cgroup *cgrp)
{
	might_sleep();

	spin_lock_irq(&cgroup_rstat_lock);
	cgroup_rstat_flush_locked(cgrp, true);
	spin_unlock_irq(&cgroup_rstat_lock);
}

/**
 * cgroup_rstat_flush_irqsafe - irqsafe version of cgroup_rstat_flush()
 * @cgrp: target cgroup
 *
 * This function can be called from any context.
 */
void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp)
{
	unsigned long flags;

	spin_lock_irqsave(&cgroup_rstat_lock, flags);
	cgroup_rstat_flush_locked(cgrp, false);
	spin_unlock_irqrestore(&cgroup_rstat_lock, flags);
}

/**
 * cgroup_rstat_flush_hold - flush stats in @cgrp's subtree and hold
 * @cgrp: target cgroup
 *
 * Flush stats in @cgrp's subtree and prevent further flushes.  Must be
 * paired with cgroup_rstat_flush_release().
 *
 * This function may block.
 */
void cgroup_rstat_flush_hold(struct cgroup *cgrp)
	__acquires(&cgroup_rstat_lock)
{
	might_sleep();
	spin_lock_irq(&cgroup_rstat_lock);
	cgroup_rstat_flush_locked(cgrp, true);
}

/**
 * cgroup_rstat_flush_release - release cgroup_rstat_flush_hold()
 */
void cgroup_rstat_flush_release(void)
	__releases(&cgroup_rstat_lock)
{
	spin_unlock_irq(&cgroup_rstat_lock);
}
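
/*
 * Typical hold/release pattern (see cgroup_base_stat_cputime_show() below
 * for the in-file user): flush once, then read several ->bstat fields
 * knowing no other flusher can slip in between the reads:
 *
 *	cgroup_rstat_flush_hold(cgrp);
 *	usage = cgrp->bstat.cputime.sum_exec_runtime;
 *	// ... read other freshly flushed fields ...
 *	cgroup_rstat_flush_release();
 */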

int cgroup_rstat_init(struct cgroup *cgrp)
{
	int cpu;

	/* the root cgrp has rstat_cpu preallocated */
	if (!cgrp->rstat_cpu) {
		cgrp->rstat_cpu = alloc_percpu(struct cgroup_rstat_cpu);
		if (!cgrp->rstat_cpu)
			return -ENOMEM;
	}

	/* ->updated_children list is self terminated */
	for_each_possible_cpu(cpu) {
		struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);

		rstatc->updated_children = cgrp;
		u64_stats_init(&rstatc->bsync);
	}

	return 0;
}

void cgroup_rstat_exit(struct cgroup *cgrp)
{
	int cpu;

	cgroup_rstat_flush(cgrp);

	/* sanity check */
	for_each_possible_cpu(cpu) {
		struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);

		if (WARN_ON_ONCE(rstatc->updated_children != cgrp) ||
		    WARN_ON_ONCE(rstatc->updated_next))
			return;
	}

	free_percpu(cgrp->rstat_cpu);
	cgrp->rstat_cpu = NULL;
}

void __init cgroup_rstat_boot(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		raw_spin_lock_init(per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu));
}

/*
 * Functions for cgroup basic resource statistics implemented on top of
 * rstat.
 */
static void cgroup_base_stat_add(struct cgroup_base_stat *dst_bstat,
				 struct cgroup_base_stat *src_bstat)
{
	dst_bstat->cputime.utime += src_bstat->cputime.utime;
	dst_bstat->cputime.stime += src_bstat->cputime.stime;
	dst_bstat->cputime.sum_exec_runtime += src_bstat->cputime.sum_exec_runtime;
}

static void cgroup_base_stat_sub(struct cgroup_base_stat *dst_bstat,
				 struct cgroup_base_stat *src_bstat)
{
	dst_bstat->cputime.utime -= src_bstat->cputime.utime;
	dst_bstat->cputime.stime -= src_bstat->cputime.stime;
	dst_bstat->cputime.sum_exec_runtime -= src_bstat->cputime.sum_exec_runtime;
}

static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu)
{
	struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
	struct cgroup *parent = cgroup_parent(cgrp);
	struct cgroup_base_stat cur, delta;
	unsigned seq;

	/* Root-level stats are sourced from system-wide CPU stats */
	if (!parent)
		return;

	/* fetch the current per-cpu values */
	do {
		seq = __u64_stats_fetch_begin(&rstatc->bsync);
		cur.cputime = rstatc->bstat.cputime;
	} while (__u64_stats_fetch_retry(&rstatc->bsync, seq));

	/* propagate percpu delta to global */
	delta = cur;
	cgroup_base_stat_sub(&delta, &rstatc->last_bstat);
	cgroup_base_stat_add(&cgrp->bstat, &delta);
	cgroup_base_stat_add(&rstatc->last_bstat, &delta);

	/* propagate global delta to parent (unless that's root) */
	if (cgroup_parent(parent)) {
		delta = cgrp->bstat;
		cgroup_base_stat_sub(&delta, &cgrp->last_bstat);
		cgroup_base_stat_add(&parent->bstat, &delta);
		cgroup_base_stat_add(&cgrp->last_bstat, &delta);
	}
}
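
/*
 * Worked example with made-up numbers: if this cpu's bstat.cputime.utime
 * reads 150us while last_bstat holds 100us, the percpu delta is 50us.  It
 * is added both to cgrp->bstat (the flushed total) and to last_bstat, so
 * the same 50us is never accounted twice on a later flush.  The second
 * block repeats the trick one level up, using cgrp->bstat/last_bstat to
 * push only the newly accumulated 50us into parent->bstat.
 */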

static struct cgroup_rstat_cpu *
cgroup_base_stat_cputime_account_begin(struct cgroup *cgrp, unsigned long *flags)
{
	struct cgroup_rstat_cpu *rstatc;

	rstatc = get_cpu_ptr(cgrp->rstat_cpu);
	*flags = u64_stats_update_begin_irqsave(&rstatc->bsync);
	return rstatc;
}

static void cgroup_base_stat_cputime_account_end(struct cgroup *cgrp,
						 struct cgroup_rstat_cpu *rstatc,
						 unsigned long flags)
{
	u64_stats_update_end_irqrestore(&rstatc->bsync, flags);
	cgroup_rstat_updated(cgrp, smp_processor_id());
	put_cpu_ptr(rstatc);
}

void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec)
{
	struct cgroup_rstat_cpu *rstatc;
	unsigned long flags;

	rstatc = cgroup_base_stat_cputime_account_begin(cgrp, &flags);
	rstatc->bstat.cputime.sum_exec_runtime += delta_exec;
	cgroup_base_stat_cputime_account_end(cgrp, rstatc, flags);
}

void __cgroup_account_cputime_field(struct cgroup *cgrp,
				    enum cpu_usage_stat index, u64 delta_exec)
{
	struct cgroup_rstat_cpu *rstatc;
	unsigned long flags;

	rstatc = cgroup_base_stat_cputime_account_begin(cgrp, &flags);

	switch (index) {
	case CPUTIME_USER:
	case CPUTIME_NICE:
		rstatc->bstat.cputime.utime += delta_exec;
		break;
	case CPUTIME_SYSTEM:
	case CPUTIME_IRQ:
	case CPUTIME_SOFTIRQ:
		rstatc->bstat.cputime.stime += delta_exec;
		break;
	default:
		break;
	}

	cgroup_base_stat_cputime_account_end(cgrp, rstatc, flags);
}
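
/*
 * Illustrative direct call (made-up numbers): charging 1ms of user time
 * spent on the current cpu would look like
 *
 *	__cgroup_account_cputime_field(cgrp, CPUTIME_USER, 1000 * NSEC_PER_USEC);
 *
 * In practice these two helpers are normally reached from the scheduler's
 * cputime accounting paths via the cgroup_account_cputime*() wrappers
 * rather than called directly.
 */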
| 399 | |
Boris Burkov | 936f2a7 | 2020-05-27 14:43:19 -0700 | [diff] [blame] | 400 | /* |
| 401 | * compute the cputime for the root cgroup by getting the per cpu data |
| 402 | * at a global level, then categorizing the fields in a manner consistent |
| 403 | * with how it is done by __cgroup_account_cputime_field for each bit of |
| 404 | * cpu time attributed to a cgroup. |
| 405 | */ |
| 406 | static void root_cgroup_cputime(struct task_cputime *cputime) |
| 407 | { |
| 408 | int i; |
| 409 | |
| 410 | cputime->stime = 0; |
| 411 | cputime->utime = 0; |
| 412 | cputime->sum_exec_runtime = 0; |
| 413 | for_each_possible_cpu(i) { |
| 414 | struct kernel_cpustat kcpustat; |
| 415 | u64 *cpustat = kcpustat.cpustat; |
| 416 | u64 user = 0; |
| 417 | u64 sys = 0; |
| 418 | |
| 419 | kcpustat_cpu_fetch(&kcpustat, i); |
| 420 | |
| 421 | user += cpustat[CPUTIME_USER]; |
| 422 | user += cpustat[CPUTIME_NICE]; |
| 423 | cputime->utime += user; |
| 424 | |
| 425 | sys += cpustat[CPUTIME_SYSTEM]; |
| 426 | sys += cpustat[CPUTIME_IRQ]; |
| 427 | sys += cpustat[CPUTIME_SOFTIRQ]; |
| 428 | cputime->stime += sys; |
| 429 | |
| 430 | cputime->sum_exec_runtime += user; |
| 431 | cputime->sum_exec_runtime += sys; |
| 432 | cputime->sum_exec_runtime += cpustat[CPUTIME_STEAL]; |
Boris Burkov | 936f2a7 | 2020-05-27 14:43:19 -0700 | [diff] [blame] | 433 | } |
| 434 | } |
| 435 | |
Tejun Heo | d4ff749 | 2018-04-26 14:29:04 -0700 | [diff] [blame] | 436 | void cgroup_base_stat_cputime_show(struct seq_file *seq) |
Tejun Heo | 041cd64 | 2017-09-25 08:12:05 -0700 | [diff] [blame] | 437 | { |
| 438 | struct cgroup *cgrp = seq_css(seq)->cgroup; |
| 439 | u64 usage, utime, stime; |
Boris Burkov | 936f2a7 | 2020-05-27 14:43:19 -0700 | [diff] [blame] | 440 | struct task_cputime cputime; |
Tejun Heo | 041cd64 | 2017-09-25 08:12:05 -0700 | [diff] [blame] | 441 | |
Boris Burkov | 936f2a7 | 2020-05-27 14:43:19 -0700 | [diff] [blame] | 442 | if (cgroup_parent(cgrp)) { |
| 443 | cgroup_rstat_flush_hold(cgrp); |
| 444 | usage = cgrp->bstat.cputime.sum_exec_runtime; |
| 445 | cputime_adjust(&cgrp->bstat.cputime, &cgrp->prev_cputime, |
| 446 | &utime, &stime); |
| 447 | cgroup_rstat_flush_release(); |
| 448 | } else { |
| 449 | root_cgroup_cputime(&cputime); |
| 450 | usage = cputime.sum_exec_runtime; |
| 451 | utime = cputime.utime; |
| 452 | stime = cputime.stime; |
| 453 | } |
Tejun Heo | 041cd64 | 2017-09-25 08:12:05 -0700 | [diff] [blame] | 454 | |
| 455 | do_div(usage, NSEC_PER_USEC); |
| 456 | do_div(utime, NSEC_PER_USEC); |
| 457 | do_div(stime, NSEC_PER_USEC); |
| 458 | |
Tejun Heo | d41bf8c | 2017-10-23 16:18:27 -0700 | [diff] [blame] | 459 | seq_printf(seq, "usage_usec %llu\n" |
| 460 | "user_usec %llu\n" |
| 461 | "system_usec %llu\n", |
| 462 | usage, utime, stime); |
Tejun Heo | 041cd64 | 2017-09-25 08:12:05 -0700 | [diff] [blame] | 463 | } |
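
/*
 * The three lines printed above form the base cpu section of a cgroup v2
 * "cpu.stat" file, e.g. (values illustrative):
 *
 *	usage_usec 10530
 *	user_usec 6892
 *	system_usec 3638
 */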