/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CGROUP_H
#define _LINUX_CGROUP_H
/*
 * cgroup interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kernfs.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/ns_common.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/refcount.h>
#include <linux/kernel_stat.h>
#include <linux/android_kabi.h>

#include <linux/cgroup-defs.h>

struct kernel_clone_args;

#ifdef CONFIG_CGROUPS

/*
 * All weight knobs on the default hierarchy should use the following min,
 * default and max values. The default value is the logarithmic center of
 * MIN and MAX and allows 100x to be expressed in both directions.
 */
#define CGROUP_WEIGHT_MIN		1
#define CGROUP_WEIGHT_DFL		100
#define CGROUP_WEIGHT_MAX		10000

/* walk only threadgroup leaders */
#define CSS_TASK_ITER_PROCS		(1U << 0)
/* walk all threaded css_sets in the domain */
#define CSS_TASK_ITER_THREADED		(1U << 1)

/* internal flags */
#define CSS_TASK_ITER_SKIPPED		(1U << 16)

/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
	struct cgroup_subsys		*ss;
	unsigned int			flags;

	struct list_head		*cset_pos;
	struct list_head		*cset_head;

	struct list_head		*tcset_pos;
	struct list_head		*tcset_head;

	struct list_head		*task_pos;

	struct list_head		*cur_tasks_head;
	struct css_set			*cur_cset;
	struct css_set			*cur_dcset;
	struct task_struct		*cur_task;
	struct list_head		iters_node;	/* css_set->task_iters */

	ANDROID_KABI_RESERVE(1);
};

extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;

#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

#define SUBSYS(_x)							\
	extern struct static_key_true _x ## _cgrp_subsys_enabled_key;	\
	extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

/**
 * cgroup_subsys_enabled - fast test on whether a subsys is enabled
 * @ss: subsystem in question
 */
#define cgroup_subsys_enabled(ss)					\
	static_branch_likely(&ss ## _enabled_key)

/**
 * cgroup_subsys_on_dfl - fast test on whether a subsys is on default hierarchy
 * @ss: subsystem in question
 */
#define cgroup_subsys_on_dfl(ss)					\
	static_branch_likely(&ss ## _on_dfl_key)

bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup,
					 struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
					     struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
						       struct cgroup_subsys *ss);

struct cgroup *cgroup_get_from_path(const char *path);
struct cgroup *cgroup_get_from_fd(int fd);

int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);

int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);

int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
		     struct pid *pid, struct task_struct *tsk);

void cgroup_fork(struct task_struct *p);
extern int cgroup_can_fork(struct task_struct *p,
			   struct kernel_clone_args *kargs);
extern void cgroup_cancel_fork(struct task_struct *p,
			       struct kernel_clone_args *kargs);
extern void cgroup_post_fork(struct task_struct *p,
			     struct kernel_clone_args *kargs);
void cgroup_exit(struct task_struct *p);
void cgroup_release(struct task_struct *p);
void cgroup_free(struct task_struct *p);

int cgroup_init_early(void);
int cgroup_init(void);

int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v);
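
/*
 * Illustrative sketch (not a declaration above): parsing a decimal string
 * with cgroup_parse_float().  Assuming it stores the value scaled to
 * @dec_shift decimal places and returns 0 on success, "2.5" parsed with a
 * dec_shift of 2 would leave 250 in @v.  use_weight() is a hypothetical
 * consumer.
 *
 *	s64 v;
 *
 *	if (cgroup_parse_float(buf, 2, &v))
 *		return -EINVAL;
 *	use_weight(v);
 */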

/*
 * Iteration helpers and macros.
 */

struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
					   struct cgroup_subsys_state *parent);
struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos,
						    struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos);
struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
						     struct cgroup_subsys_state *css);

struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
					 struct cgroup_subsys_state **dst_cssp);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
					struct cgroup_subsys_state **dst_cssp);

void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
			 struct css_task_iter *it);
struct task_struct *css_task_iter_next(struct css_task_iter *it);
void css_task_iter_end(struct css_task_iter *it);
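
/*
 * Illustrative sketch: walking every task attached to @css with the iterator
 * above.  A @flags value of 0 visits all tasks; CSS_TASK_ITER_PROCS would
 * restrict the walk to threadgroup leaders.  process_task() is a hypothetical
 * per-task callback.
 *
 *	struct css_task_iter it;
 *	struct task_struct *task;
 *
 *	css_task_iter_start(css, 0, &it);
 *	while ((task = css_task_iter_next(&it)))
 *		process_task(task);
 *	css_task_iter_end(&it);
 */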

/**
 * css_for_each_child - iterate through children of a css
 * @pos: the css * to use as the loop cursor
 * @parent: css whose children to walk
 *
 * Walk @parent's children. Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * It is allowed to temporarily drop RCU read lock during iteration. The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_child(pos, parent)					\
	for ((pos) = css_next_child(NULL, (parent)); (pos);		\
	     (pos) = css_next_child((pos), (parent)))
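
/*
 * Illustrative sketch: counting @parent's online children.  "nr" and the
 * enclosing function are hypothetical; the point is the rcu_read_lock()
 * requirement around the walk.
 *
 *	struct cgroup_subsys_state *pos;
 *	int nr = 0;
 *
 *	rcu_read_lock();
 *	css_for_each_child(pos, parent)
 *		if (pos->flags & CSS_ONLINE)
 *			nr++;
 *	rcu_read_unlock();
 */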

/**
 * css_for_each_descendant_pre - pre-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @root: css whose descendants to walk
 *
 * Walk @root's descendants. @root is included in the iteration and the
 * first node to be visited. Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * For example, the following guarantees that a descendant can't escape
 * state updates of its ancestors.
 *
 * my_online(@css)
 * {
 *	Lock @css's parent and @css;
 *	Inherit state from the parent;
 *	Unlock both.
 * }
 *
 * my_update_state(@css)
 * {
 *	css_for_each_descendant_pre(@pos, @css) {
 *		Lock @pos;
 *		if (@pos == @css)
 *			Update @css's state;
 *		else
 *			Verify @pos is alive and inherit state from its parent;
 *		Unlock @pos;
 *	}
 * }
 *
 * As long as the inheriting step, including checking the parent state, is
 * enclosed inside @pos locking, double-locking the parent isn't necessary
 * while inheriting. The state update to the parent is guaranteed to be
 * visible by walking order and, as long as inheriting operations to the
 * same @pos are atomic to each other, multiple updates racing each other
 * still result in the correct state. It's guaranteed that at least one
 * inheritance happens for any css after the latest update to its parent.
 *
 * If checking parent's state requires locking the parent, each inheriting
 * iteration should lock and unlock both @pos->parent and @pos.
 *
 * Alternatively, a subsystem may choose to use a single global lock to
 * synchronize ->css_online() and ->css_offline() against tree-walking
 * operations.
 *
 * It is allowed to temporarily drop RCU read lock during iteration. The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_descendant_pre(pos, css)				\
	for ((pos) = css_next_descendant_pre(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_pre((pos), (css)))

/**
 * css_for_each_descendant_post - post-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Similar to css_for_each_descendant_pre() but performs post-order
 * traversal instead. @root is included in the iteration and the last
 * node to be visited.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * Note that the walk visibility guarantee example described in pre-order
 * walk doesn't apply the same to post-order walks.
 */
#define css_for_each_descendant_post(pos, css)				\
	for ((pos) = css_next_descendant_post(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_post((pos), (css)))

/**
 * cgroup_taskset_for_each - iterate cgroup_taskset
 * @task: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * @tset may contain multiple tasks and they may belong to multiple
 * processes.
 *
 * On the v2 hierarchy, there may be tasks from multiple processes and they
 * may not share the source or destination csses.
 *
 * On traditional hierarchies, when there are multiple tasks in @tset, if a
 * task of a process is in @tset, all tasks of the process are in @tset.
 * Also, all are guaranteed to share the same source and destination csses.
 *
 * Iteration is not in any specific order.
 */
#define cgroup_taskset_for_each(task, dst_css, tset)			\
	for ((task) = cgroup_taskset_first((tset), &(dst_css));	\
	     (task);							\
	     (task) = cgroup_taskset_next((tset), &(dst_css)))
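
/*
 * Illustrative sketch: a subsystem's ->attach() callback commonly walks the
 * taskset this way.  my_attach() and my_update_task() are hypothetical.
 *
 *	static void my_attach(struct cgroup_taskset *tset)
 *	{
 *		struct task_struct *task;
 *		struct cgroup_subsys_state *css;
 *
 *		cgroup_taskset_for_each(task, css, tset)
 *			my_update_task(css, task);
 *	}
 */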

/**
 * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
 * @leader: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * Iterate threadgroup leaders of @tset. For single-task migrations, @tset
 * may not contain any.
 */
#define cgroup_taskset_for_each_leader(leader, dst_css, tset)		\
	for ((leader) = cgroup_taskset_first((tset), &(dst_css));	\
	     (leader);							\
	     (leader) = cgroup_taskset_next((tset), &(dst_css)))	\
		if ((leader) != (leader)->group_leader)			\
			;						\
		else

/*
 * Inline functions.
 */

static inline u64 cgroup_id(struct cgroup *cgrp)
{
	return cgrp->kn->id;
}

/**
 * css_get - obtain a reference on the specified css
 * @css: target css
 *
 * The caller must already have a reference.
 */
static inline void css_get(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_get(&css->refcnt);
}

/**
 * css_get_many - obtain references on the specified css
 * @css: target css
 * @n: number of references to get
 *
 * The caller must already have a reference.
 */
static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_get_many(&css->refcnt, n);
}

/**
 * css_tryget - try to obtain a reference on the specified css
 * @css: target css
 *
 * Obtain a reference on @css unless it already has reached zero and is
 * being released. This function doesn't care whether @css is online or
 * offline. The caller naturally needs to ensure that @css is accessible
 * but doesn't have to be holding a reference on it - IOW, RCU protected
 * access is good enough for this function. Returns %true if a reference
 * count was successfully obtained; %false otherwise.
 */
static inline bool css_tryget(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		return percpu_ref_tryget(&css->refcnt);
	return true;
}

/**
 * css_tryget_online - try to obtain a reference on the specified css if online
 * @css: target css
 *
 * Obtain a reference on @css if it's online. The caller naturally needs
 * to ensure that @css is accessible but doesn't have to be holding a
 * reference on it - IOW, RCU protected access is good enough for this
 * function. Returns %true if a reference count was successfully obtained;
 * %false otherwise.
 */
static inline bool css_tryget_online(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		return percpu_ref_tryget_live(&css->refcnt);
	return true;
}
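
/*
 * Illustrative sketch of the usual pin-from-RCU pattern: look the css up
 * under rcu_read_lock(), pin it with css_tryget_online() and drop the
 * reference later with css_put() (defined below).  find_my_css() is a
 * hypothetical RCU-protected lookup.
 *
 *	rcu_read_lock();
 *	css = find_my_css();
 *	if (css && !css_tryget_online(css))
 *		css = NULL;
 *	rcu_read_unlock();
 *
 *	if (css) {
 *		... use css ...
 *		css_put(css);
 *	}
 */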

/**
 * css_is_dying - test whether the specified css is dying
 * @css: target css
 *
 * Test whether @css is in the process of offlining or already offline. In
 * most cases, ->css_online() and ->css_offline() callbacks should be
 * enough; however, the actual offline operations are RCU delayed and this
 * test returns %true also when @css is scheduled to be offlined.
 *
 * This is useful, for example, when the use case requires synchronous
 * behavior with respect to cgroup removal. cgroup removal schedules css
 * offlining but the css can seem alive while the operation is being
 * delayed. If the delay affects user visible semantics, this test can be
 * used to resolve the situation.
 */
static inline bool css_is_dying(struct cgroup_subsys_state *css)
{
	return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
}

/**
 * css_put - put a css reference
 * @css: target css
 *
 * Put a reference obtained via css_get() and css_tryget_online().
 */
static inline void css_put(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_put(&css->refcnt);
}

/**
 * css_put_many - put css references
 * @css: target css
 * @n: number of references to put
 *
 * Put references obtained via css_get() and css_tryget_online().
 */
static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_put_many(&css->refcnt, n);
}

static inline void cgroup_get(struct cgroup *cgrp)
{
	css_get(&cgrp->self);
}

static inline bool cgroup_tryget(struct cgroup *cgrp)
{
	return css_tryget(&cgrp->self);
}

static inline void cgroup_put(struct cgroup *cgrp)
{
	css_put(&cgrp->self);
}

/**
 * task_css_set_check - obtain a task's css_set with extra access conditions
 * @task: the task to obtain css_set for
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * A task's css_set is RCU protected, initialized and exited while holding
 * task_lock(), and can only be modified while holding both cgroup_mutex
 * and task_lock() while the task is alive. This macro verifies that the
 * caller is inside proper critical section and returns @task's css_set.
 *
 * The caller can also specify additional allowed conditions via @__c, such
 * as locks used during the cgroup_subsys::attach() methods.
 */
#ifdef CONFIG_PROVE_RCU
extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c)					\
	rcu_dereference_check((task)->cgroups,				\
			      lockdep_is_held(&cgroup_mutex) ||		\
			      lockdep_is_held(&css_set_lock) ||		\
			      ((task)->flags & PF_EXITING) || (__c))
#else
#define task_css_set_check(task, __c)					\
	rcu_dereference((task)->cgroups)
#endif

/**
 * task_css_check - obtain css for (task, subsys) w/ extra access conds
 * @task: the target task
 * @subsys_id: the target subsystem ID
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * Return the cgroup_subsys_state for the (@task, @subsys_id) pair. The
 * synchronization rules are the same as task_css_set_check().
 */
#define task_css_check(task, subsys_id, __c)				\
	task_css_set_check((task), (__c))->subsys[(subsys_id)]

/**
 * task_css_set - obtain a task's css_set
 * @task: the task to obtain css_set for
 *
 * See task_css_set_check().
 */
static inline struct css_set *task_css_set(struct task_struct *task)
{
	return task_css_set_check(task, false);
}

/**
 * task_css - obtain css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * See task_css_check().
 */
static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
						   int subsys_id)
{
	return task_css_check(task, subsys_id, false);
}
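
/*
 * Illustrative sketch: a read-only peek at a task's css is valid for as long
 * as the RCU read lock is held; no reference is taken.  cpu_cgrp_id is used
 * here as an example subsystem ID (assuming the cpu controller is built in).
 *
 *	rcu_read_lock();
 *	css = task_css(task, cpu_cgrp_id);
 *	... read-only use of css ...
 *	rcu_read_unlock();
 */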

/**
 * task_get_css - find and get the css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Find the css for the (@task, @subsys_id) combination, increment a
 * reference on and return it. This function is guaranteed to return a
 * valid css. The returned css may already have been offlined.
 */
static inline struct cgroup_subsys_state *
task_get_css(struct task_struct *task, int subsys_id)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	while (true) {
		css = task_css(task, subsys_id);
		/*
		 * Can't use css_tryget_online() here. A task which has
		 * PF_EXITING set may stay associated with an offline css.
		 * If such task calls this function, css_tryget_online()
		 * will keep failing.
		 */
		if (likely(css_tryget(css)))
			break;
		cpu_relax();
	}
	rcu_read_unlock();
	return css;
}

/**
 * task_css_is_root - test whether a task belongs to the root css
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Test whether @task belongs to the root css on the specified subsystem.
 * May be invoked in any context.
 */
static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
{
	return task_css_check(task, subsys_id, true) ==
		init_css_set.subsys[subsys_id];
}

static inline struct cgroup *task_cgroup(struct task_struct *task,
					 int subsys_id)
{
	return task_css(task, subsys_id)->cgroup;
}

static inline struct cgroup *task_dfl_cgroup(struct task_struct *task)
{
	return task_css_set(task)->dfl_cgrp;
}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	struct cgroup_subsys_state *parent_css = cgrp->self.parent;

	if (parent_css)
		return container_of(parent_css, struct cgroup, self);
	return NULL;
}

/**
 * cgroup_is_descendant - test ancestry
 * @cgrp: the cgroup to be tested
 * @ancestor: possible ancestor of @cgrp
 *
 * Test whether @cgrp is a descendant of @ancestor. It also returns %true
 * if @cgrp == @ancestor. This function is safe to call as long as @cgrp
 * and @ancestor are accessible.
 */
static inline bool cgroup_is_descendant(struct cgroup *cgrp,
					struct cgroup *ancestor)
{
	if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
		return false;
	return cgrp->ancestor_ids[ancestor->level] == cgroup_id(ancestor);
}

/**
 * cgroup_ancestor - find ancestor of cgroup
 * @cgrp: cgroup to find ancestor of
 * @ancestor_level: level of ancestor to find starting from root
 *
 * Find ancestor of cgroup at specified level starting from root if it exists
 * and return pointer to it. Return NULL if @cgrp doesn't have ancestor at
 * @ancestor_level.
 *
 * This function is safe to call as long as @cgrp is accessible.
 */
static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
					     int ancestor_level)
{
	if (cgrp->level < ancestor_level)
		return NULL;
	while (cgrp && cgrp->level > ancestor_level)
		cgrp = cgroup_parent(cgrp);
	return cgrp;
}

/**
 * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
 * @task: the task to be tested
 * @ancestor: possible ancestor of @task's cgroup
 *
 * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor.
 * It follows all the same rules as cgroup_is_descendant, and only applies
 * to the default hierarchy.
 */
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	struct css_set *cset = task_css_set(task);

	return cgroup_is_descendant(cset->dfl_cgrp, ancestor);
}

/* no synchronization, the result can only be used as a hint */
static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
	return cgrp->nr_populated_csets + cgrp->nr_populated_domain_children +
		cgrp->nr_populated_threaded_children;
}

/* returns ino associated with a cgroup */
static inline ino_t cgroup_ino(struct cgroup *cgrp)
{
	return kernfs_ino(cgrp->kn);
}

/* cft/css accessors for cftype->write() operation */
static inline struct cftype *of_cft(struct kernfs_open_file *of)
{
	return of->kn->priv;
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);

/* cft/css accessors for cftype->seq_*() operations */
static inline struct cftype *seq_cft(struct seq_file *seq)
{
	return of_cft(seq->private);
}

static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
{
	return of_css(seq->private);
}

/*
 * Name / path handling functions. All are thin wrappers around the kernfs
 * counterparts and can be called under any context.
 */

static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_name(cgrp->kn, buf, buflen);
}

static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_path(cgrp->kn, buf, buflen);
}
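
/*
 * Illustrative sketch: formatting a cgroup's path for a log line.  Following
 * the kernfs convention, cgroup_path() is assumed to return the would-be
 * length (negative on error), so the output may be truncated if the buffer
 * is too small; the sketch simply ignores that case.
 *
 *	char buf[PATH_MAX];
 *
 *	if (cgroup_path(cgrp, buf, sizeof(buf)) >= 0)
 *		pr_info("cgroup: %s\n", buf);
 */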

static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
{
	pr_cont_kernfs_name(cgrp->kn);
}

static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
{
	pr_cont_kernfs_path(cgrp->kn);
}

static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
{
	return &cgrp->psi;
}

bool cgroup_psi_enabled(void);

static inline void cgroup_init_kthreadd(void)
{
	/*
	 * kthreadd is inherited by all kthreads, keep it in the root so
	 * that the new kthreads are guaranteed to stay in the root until
	 * initialization is finished.
	 */
	current->no_cgroup_migration = 1;
}

static inline void cgroup_kthread_ready(void)
{
	/*
	 * This kthread finished initialization. The creator should have
	 * set PF_NO_SETAFFINITY if this kthread should stay in the root.
	 */
	current->no_cgroup_migration = 0;
}

void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen);
#else /* !CONFIG_CGROUPS */

struct cgroup_subsys_state;
struct cgroup;

static inline u64 cgroup_id(struct cgroup *cgrp) { return 1; }
static inline void css_get(struct cgroup_subsys_state *css) {}
static inline void css_put(struct cgroup_subsys_state *css) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
					 struct task_struct *t) { return 0; }
static inline int cgroupstats_build(struct cgroupstats *stats,
				    struct dentry *dentry) { return -EINVAL; }

static inline void cgroup_fork(struct task_struct *p) {}
static inline int cgroup_can_fork(struct task_struct *p,
				  struct kernel_clone_args *kargs) { return 0; }
static inline void cgroup_cancel_fork(struct task_struct *p,
				      struct kernel_clone_args *kargs) {}
static inline void cgroup_post_fork(struct task_struct *p,
				    struct kernel_clone_args *kargs) {}
static inline void cgroup_exit(struct task_struct *p) {}
static inline void cgroup_release(struct task_struct *p) {}
static inline void cgroup_free(struct task_struct *p) {}

static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_init_kthreadd(void) {}
static inline void cgroup_kthread_ready(void) {}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	return NULL;
}

static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
{
	return NULL;
}

static inline bool cgroup_psi_enabled(void)
{
	return false;
}

static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	return true;
}

static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
{}
#endif /* !CONFIG_CGROUPS */

#ifdef CONFIG_CGROUPS
/*
 * cgroup scalable recursive statistics.
 */
void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
void cgroup_rstat_flush(struct cgroup *cgrp);
void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp);
void cgroup_rstat_flush_hold(struct cgroup *cgrp);
void cgroup_rstat_flush_release(void);

/*
 * Basic resource stats.
 */
#ifdef CONFIG_CGROUP_CPUACCT
void cpuacct_charge(struct task_struct *tsk, u64 cputime);
void cpuacct_account_field(struct task_struct *tsk, int index, u64 val);
#else
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
static inline void cpuacct_account_field(struct task_struct *tsk, int index,
					 u64 val) {}
#endif

void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec);
void __cgroup_account_cputime_field(struct cgroup *cgrp,
				    enum cpu_usage_stat index, u64 delta_exec);

static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_charge(task, delta_exec);

	rcu_read_lock();
	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime(cgrp, delta_exec);
	rcu_read_unlock();
}

static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_account_field(task, index, delta_exec);

	rcu_read_lock();
	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime_field(cgrp, index, delta_exec);
	rcu_read_unlock();
}

#else /* CONFIG_CGROUPS */

static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec) {}
static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec) {}

#endif /* CONFIG_CGROUPS */

/*
 * sock->sk_cgrp_data handling. For more info, see sock_cgroup_data
 * definition in cgroup-defs.h.
 */
#ifdef CONFIG_SOCK_CGROUP_DATA

#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
extern spinlock_t cgroup_sk_update_lock;
#endif

void cgroup_sk_alloc_disable(void);
void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
void cgroup_sk_clone(struct sock_cgroup_data *skcd);
void cgroup_sk_free(struct sock_cgroup_data *skcd);

static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
{
#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
	unsigned long v;

	/*
	 * @skcd->val is 64bit but the following is safe on 32bit too as we
	 * just need the lower ulong to be written and read atomically.
	 */
	v = READ_ONCE(skcd->val);

	if (v & 3)
		return &cgrp_dfl_root.cgrp;

	return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
#else
	return (struct cgroup *)(unsigned long)skcd->val;
#endif
}

#else	/* CONFIG_SOCK_CGROUP_DATA */

static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_clone(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}

#endif	/* CONFIG_SOCK_CGROUP_DATA */

struct cgroup_namespace {
	refcount_t		count;
	struct ns_common	ns;
	struct user_namespace	*user_ns;
	struct ucounts		*ucounts;
	struct css_set		*root_cset;
};

extern struct cgroup_namespace init_cgroup_ns;

#ifdef CONFIG_CGROUPS

void free_cgroup_ns(struct cgroup_namespace *ns);

struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
					struct user_namespace *user_ns,
					struct cgroup_namespace *old_ns);

int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
		   struct cgroup_namespace *ns);

#else /* !CONFIG_CGROUPS */

static inline void free_cgroup_ns(struct cgroup_namespace *ns) { }
static inline struct cgroup_namespace *
copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
	       struct cgroup_namespace *old_ns)
{
	return old_ns;
}

#endif /* !CONFIG_CGROUPS */

static inline void get_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns)
		refcount_inc(&ns->count);
}

static inline void put_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns && refcount_dec_and_test(&ns->count))
		free_cgroup_ns(ns);
}

#ifdef CONFIG_CGROUPS

void cgroup_enter_frozen(void);
void cgroup_leave_frozen(bool always_leave);
void cgroup_update_frozen(struct cgroup *cgrp);
void cgroup_freeze(struct cgroup *cgrp, bool freeze);
void cgroup_freezer_migrate_task(struct task_struct *task, struct cgroup *src,
				 struct cgroup *dst);

static inline bool cgroup_task_freeze(struct task_struct *task)
{
	bool ret;

	if (task->flags & PF_KTHREAD)
		return false;

	rcu_read_lock();
	ret = test_bit(CGRP_FREEZE, &task_dfl_cgroup(task)->flags);
	rcu_read_unlock();

	return ret;
}

static inline bool cgroup_task_frozen(struct task_struct *task)
{
	return task->frozen;
}

#else /* !CONFIG_CGROUPS */

static inline void cgroup_enter_frozen(void) { }
static inline void cgroup_leave_frozen(bool always_leave) { }
static inline bool cgroup_task_freeze(struct task_struct *task)
{
	return false;
}
static inline bool cgroup_task_frozen(struct task_struct *task)
{
	return false;
}

#endif /* !CONFIG_CGROUPS */

#ifdef CONFIG_CGROUP_BPF
static inline void cgroup_bpf_get(struct cgroup *cgrp)
{
	percpu_ref_get(&cgrp->bpf.refcnt);
}

static inline void cgroup_bpf_put(struct cgroup *cgrp)
{
	percpu_ref_put(&cgrp->bpf.refcnt);
}

#else /* CONFIG_CGROUP_BPF */

static inline void cgroup_bpf_get(struct cgroup *cgrp) {}
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}

#endif /* CONFIG_CGROUP_BPF */

#endif /* _LINUX_CGROUP_H */