/*
 * kernel/cpuset.c
 *
 * Processor and Memory placement constraints for sets of tasks.
 *
 * Copyright (C) 2003 BULL SA.
 * Copyright (C) 2004-2007 Silicon Graphics, Inc.
 * Copyright (C) 2006 Google, Inc
 *
 * Portions derived from Patrick Mochel's sysfs code.
 * sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 * 2003-10-10 Written by Simon Derr.
 * 2003-10-22 Updates by Stephen Hemminger.
 * 2004 May-July Rework by Paul Jackson.
 * 2006 Rework by Paul Menage to use generic cgroups
 * 2008 Rework of the scheduler domains and CPU hotplug handling
 *      by Max Krasnyansky
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/fs_context.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/deadline.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/seq_file.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/time64.h>
#include <linux/backing-dev.h>
#include <linux/sort.h>
#include <linux/oom.h>
#include <linux/sched/isolation.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/cgroup.h>
#include <linux/wait.h>

DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);

/*
 * There could be abnormal cpuset configurations for cpu or memory
 * node binding; this key provides a quick, low-cost check for such
 * a situation.
 */
DEFINE_STATIC_KEY_FALSE(cpusets_insane_config_key);

/* See "Frequency meter" comments, below. */

struct fmeter {
	int cnt;		/* unprocessed events count */
	int val;		/* most recent output value */
	time64_t time;		/* clock (secs) when val computed */
	spinlock_t lock;	/* guards read or write of above */
};

struct cpuset {
	struct cgroup_subsys_state css;

	unsigned long flags;		/* "unsigned long" so bitops work */

	/*
	 * On default hierarchy:
	 *
	 * The user-configured masks can only be changed by writing to
	 * cpuset.cpus and cpuset.mems, and won't be limited by the
	 * parent masks.
	 *
	 * The effective masks are the real masks that apply to the tasks
	 * in the cpuset. They may be changed if the configured masks are
	 * changed or hotplug happens.
	 *
	 * effective_mask == configured_mask & parent's effective_mask,
	 * and if it ends up empty, it will inherit the parent's mask.
	 *
	 * On legacy hierarchy:
	 *
	 * The user-configured masks are always the same as the effective
	 * masks.
	 */
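
	/*
	 * Illustrative example (added for clarity; not part of the
	 * original source): on the default hierarchy, if the parent's
	 * effective_cpus is 0-3 and this cpuset's configured
	 * cpus_allowed is 2-5, its effective_cpus becomes 2-3; if the
	 * intersection were empty, this cpuset would inherit the
	 * parent's effective_cpus instead.
	 */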

	/* user-configured CPUs and Memory Nodes allowed to tasks */
	cpumask_var_t cpus_allowed;
	nodemask_t mems_allowed;

	/* effective CPUs and Memory Nodes allowed to tasks */
	cpumask_var_t effective_cpus;
	nodemask_t effective_mems;

	/*
	 * CPUs allocated to child sub-partitions (default hierarchy only)
	 * - CPUs granted by the parent = effective_cpus U subparts_cpus
	 * - effective_cpus and subparts_cpus are mutually exclusive.
	 *
	 * effective_cpus contains only onlined CPUs, but subparts_cpus
	 * may have offlined ones.
	 */
	cpumask_var_t subparts_cpus;

	/*
	 * This records the old Memory Nodes that tasks took on.
	 *
	 * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
	 * - A new cpuset's old_mems_allowed is initialized when some
	 *   task is moved into it.
	 * - old_mems_allowed is used in cpuset_migrate_mm() when we change
	 *   cpuset.mems_allowed and have tasks' nodemask updated, and
	 *   then old_mems_allowed is updated to mems_allowed.
	 */
	nodemask_t old_mems_allowed;

	struct fmeter fmeter;		/* memory_pressure filter */

	/*
	 * Tasks are being attached to this cpuset. Used to prevent
	 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
	 */
	int attach_in_progress;

	/* partition number for rebuild_sched_domains() */
	int pn;

	/* for custom sched domain */
	int relax_domain_level;

	/* number of CPUs in subparts_cpus */
	int nr_subparts_cpus;

	/* partition root state */
	int partition_root_state;

	/*
	 * Default hierarchy only:
	 * use_parent_ecpus - set if using parent's effective_cpus
	 * child_ecpus_count - # of children with use_parent_ecpus set
	 */
	int use_parent_ecpus;
	int child_ecpus_count;

	/* Handle for cpuset.cpus.partition */
	struct cgroup_file partition_file;
};

/*
 * Partition root states:
 *
 *   0 - not a partition root
 *
 *   1 - partition root
 *
 *  -1 - invalid partition root
 *       None of the cpus in cpus_allowed can be put into the parent's
 *       subparts_cpus. In this case, the cpuset is not a real partition
 *       root anymore. However, the CPU_EXCLUSIVE bit will still be set
 *       and the cpuset can be restored back to a partition root if the
 *       parent cpuset can give more CPUs back to this child cpuset.
 */
#define PRS_DISABLED		0
#define PRS_ENABLED		1
#define PRS_ERROR		-1

/*
 * Temporary cpumasks for working with partitions that are passed among
 * functions to avoid memory allocation in inner functions.
 */
struct tmpmasks {
	cpumask_var_t addmask, delmask;	/* For partition root */
	cpumask_var_t new_cpus;		/* For update_cpumasks_hier() */
};

static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct cpuset, css) : NULL;
}

/* Retrieve the cpuset for a task */
static inline struct cpuset *task_cs(struct task_struct *task)
{
	return css_cs(task_css(task, cpuset_cgrp_id));
}

static inline struct cpuset *parent_cs(struct cpuset *cs)
{
	return css_cs(cs->css.parent);
}

/* bits in struct cpuset flags field */
typedef enum {
	CS_ONLINE,
	CS_CPU_EXCLUSIVE,
	CS_MEM_EXCLUSIVE,
	CS_MEM_HARDWALL,
	CS_MEMORY_MIGRATE,
	CS_SCHED_LOAD_BALANCE,
	CS_SPREAD_PAGE,
	CS_SPREAD_SLAB,
} cpuset_flagbits_t;

/* convenient tests for these bits */
static inline bool is_cpuset_online(struct cpuset *cs)
{
	return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
}

static inline int is_cpu_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_hardwall(const struct cpuset *cs)
{
	return test_bit(CS_MEM_HARDWALL, &cs->flags);
}

static inline int is_sched_load_balance(const struct cpuset *cs)
{
	return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
}

static inline int is_memory_migrate(const struct cpuset *cs)
{
	return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
}

static inline int is_spread_page(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_PAGE, &cs->flags);
}

static inline int is_spread_slab(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_SLAB, &cs->flags);
}

static inline int is_partition_root(const struct cpuset *cs)
{
	return cs->partition_root_state > 0;
}

/*
 * Send a notification event whenever partition_root_state changes.
 */
static inline void notify_partition_change(struct cpuset *cs,
					   int old_prs, int new_prs)
{
	if (old_prs != new_prs)
		cgroup_file_notify(&cs->partition_file);
}

static struct cpuset top_cpuset = {
	.flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
		  (1 << CS_MEM_EXCLUSIVE)),
	.partition_root_state = PRS_ENABLED,
};

/**
 * cpuset_for_each_child - traverse online children of a cpuset
 * @child_cs: loop cursor pointing to the current child
 * @pos_css: used for iteration
 * @parent_cs: target cpuset to walk children of
 *
 * Walk @child_cs through the online children of @parent_cs. Must be used
 * with RCU read locked.
 */
#define cpuset_for_each_child(child_cs, pos_css, parent_cs)		\
	css_for_each_child((pos_css), &(parent_cs)->css)		\
		if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))
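
/*
 * Hedged usage sketch (illustrative, not part of the original source;
 * do_something() and parent are hypothetical): iterating the online
 * children of a cpuset under the RCU read lock, as the contract above
 * requires:
 *
 *	struct cpuset *child;
 *	struct cgroup_subsys_state *pos;
 *
 *	rcu_read_lock();
 *	cpuset_for_each_child(child, pos, parent)
 *		do_something(child);
 *	rcu_read_unlock();
 */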

/**
 * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
 * @des_cs: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @root_cs: target cpuset to walk descendants of
 *
 * Walk @des_cs through the online descendants of @root_cs. Must be used
 * with RCU read locked. The caller may modify @pos_css by calling
 * css_rightmost_descendant() to skip a subtree. @root_cs is included in
 * the iteration and is the first node to be visited.
 */
#define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs)	\
	css_for_each_descendant_pre((pos_css), &(root_cs)->css)	\
		if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))

/*
 * There are two global locks guarding cpuset structures - cpuset_rwsem and
 * callback_lock. We also require taking task_lock() when dereferencing a
 * task's cpuset pointer. See "The task_lock() exception", at the end of this
 * comment. The cpuset code uses only the cpuset_rwsem write lock. Other
 * kernel subsystems can use cpuset_read_lock()/cpuset_read_unlock() to
 * prevent changes to cpuset structures.
 *
 * A task must hold both locks to modify cpusets. If a task holds
 * cpuset_rwsem, it blocks others wanting that rwsem, ensuring that it
 * is the only task able to also acquire callback_lock and be able to
 * modify cpusets. It can perform various checks on the cpuset structure
 * first, knowing nothing will change. It can also allocate memory while
 * just holding cpuset_rwsem. While it is performing these checks, various
 * callback routines can briefly acquire callback_lock to query cpusets.
 * Once it is ready to make the changes, it takes callback_lock, blocking
 * everyone else.
 *
 * Calls to the kernel memory allocator can not be made while holding
 * callback_lock, as that would risk double tripping on callback_lock
 * from one of the callbacks into the cpuset code from within
 * __alloc_pages().
 *
 * If a task is only holding callback_lock, then it has read-only
 * access to cpusets.
 *
 * Now, the task_struct fields mems_allowed and mempolicy may be changed
 * by another task; we use alloc_lock in the task_struct to protect
 * them.
 *
 * The cpuset_common_file_read() handlers only hold callback_lock across
 * small pieces of code, such as when reading out possibly multi-word
 * cpumasks and nodemasks.
 *
 * Accessing a task's cpuset should be done in accordance with the
 * guidelines for accessing subsystem state in kernel/cgroup.c
 */

DEFINE_STATIC_PERCPU_RWSEM(cpuset_rwsem);

void cpuset_read_lock(void)
{
	percpu_down_read(&cpuset_rwsem);
}

void cpuset_read_unlock(void)
{
	percpu_up_read(&cpuset_rwsem);
}
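
/*
 * Hedged usage sketch (illustrative, not part of the original source):
 * an outside subsystem that must see a stable cpuset configuration can
 * bracket its read-side access with the helpers above:
 *
 *	cpuset_read_lock();
 *	... read cpuset-derived state that must not change underfoot ...
 *	cpuset_read_unlock();
 */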

static DEFINE_SPINLOCK(callback_lock);

static struct workqueue_struct *cpuset_migrate_mm_wq;

/*
 * CPU / memory hotplug is handled asynchronously.
 */
static void cpuset_hotplug_workfn(struct work_struct *work);
static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);

static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);

static inline void check_insane_mems_config(nodemask_t *nodes)
{
	if (!cpusets_insane_config() &&
	    movable_only_nodes(nodes)) {
		static_branch_enable(&cpusets_insane_config_key);
		pr_info("Unsupported (movable nodes only) cpuset configuration detected (nmask=%*pbl)!\n"
			"Cpuset allocations might fail even with a lot of memory available.\n",
			nodemask_pr_args(nodes));
	}
}

/*
 * Cgroup v2 behavior is used on the "cpus" and "mems" control files when
 * on the default hierarchy or when the cpuset_v2_mode flag is set by
 * mounting the v1 cpuset cgroup filesystem with the "cpuset_v2_mode"
 * mount option. With v2 behavior, "cpus" and "mems" are always what the
 * users have requested and won't be changed by hotplug events. Only the
 * effective cpus or mems will be affected.
 */
static inline bool is_in_v2_mode(void)
{
	return cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
	      (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE);
}
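
/*
 * For illustration only (hypothetical mount point; exact command is an
 * assumption, not from the original source), the v1 mount option named
 * above would be enabled with something like:
 *
 *	mount -t cgroup -o cpuset,cpuset_v2_mode cpuset /sys/fs/cgroup/cpuset
 */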

/*
 * Return in pmask the portion of a task's cpuset's cpus_allowed that
 * are online and are capable of running the task. If none are found,
 * walk up the cpuset hierarchy until we find one that does have some
 * appropriate cpus.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of cpu_online_mask.
 *
 * Call with callback_lock or cpuset_rwsem held.
 */
static void guarantee_online_cpus(struct task_struct *tsk,
				  struct cpumask *pmask)
{
	const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
	struct cpuset *cs;

	if (WARN_ON(!cpumask_and(pmask, possible_mask, cpu_online_mask)))
		cpumask_copy(pmask, cpu_online_mask);

	rcu_read_lock();
	cs = task_cs(tsk);

	while (!cpumask_intersects(cs->effective_cpus, pmask)) {
		cs = parent_cs(cs);
		if (unlikely(!cs)) {
			/*
			 * The top cpuset doesn't have any online cpu as a
			 * consequence of a race between cpuset_hotplug_work
			 * and the cpu hotplug notifier. But we know the top
			 * cpuset's effective_cpus is on its way to being
			 * identical to cpu_online_mask.
			 */
			goto out_unlock;
		}
	}
	cpumask_and(pmask, pmask, cs->effective_cpus);

out_unlock:
	rcu_read_unlock();
}

/*
 * Return in *pmask the portion of a cpuset's mems_allowed that
 * are online, with memory. If none are online with memory, walk
 * up the cpuset hierarchy until we find one that does have some
 * online mems. The top cpuset always has some mems online.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of node_states[N_MEMORY].
 *
 * Call with callback_lock or cpuset_rwsem held.
 */
static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
{
	while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))
		cs = parent_cs(cs);
	nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
}

/*
 * Update a task's spread flags if the cpuset's page/slab spread flag
 * is set.
 *
 * Call with callback_lock or cpuset_rwsem held.
 */
static void cpuset_update_task_spread_flag(struct cpuset *cs,
					struct task_struct *tsk)
{
	if (is_spread_page(cs))
		task_set_spread_page(tsk);
	else
		task_clear_spread_page(tsk);

	if (is_spread_slab(cs))
		task_set_spread_slab(tsk);
	else
		task_clear_spread_slab(tsk);
}

/*
 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
 *
 * One cpuset is a subset of another if all its allowed CPUs and
 * Memory Nodes are a subset of the other, and its exclusive flags
 * are only set if the other's are set. Call holding cpuset_rwsem.
 */

static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
{
	return	cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
		nodes_subset(p->mems_allowed, q->mems_allowed) &&
		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
		is_mem_exclusive(p) <= is_mem_exclusive(q);
}

/**
 * alloc_cpumasks - allocate three cpumasks for cpuset
 * @cs: the cpuset that has cpumasks to be allocated.
 * @tmp: the tmpmasks structure pointer
 * Return: 0 if successful, -ENOMEM otherwise.
 *
 * Only one of the two input arguments should be non-NULL.
 */
static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
{
	cpumask_var_t *pmask1, *pmask2, *pmask3;

	if (cs) {
		pmask1 = &cs->cpus_allowed;
		pmask2 = &cs->effective_cpus;
		pmask3 = &cs->subparts_cpus;
	} else {
		pmask1 = &tmp->new_cpus;
		pmask2 = &tmp->addmask;
		pmask3 = &tmp->delmask;
	}

	if (!zalloc_cpumask_var(pmask1, GFP_KERNEL))
		return -ENOMEM;

	if (!zalloc_cpumask_var(pmask2, GFP_KERNEL))
		goto free_one;

	if (!zalloc_cpumask_var(pmask3, GFP_KERNEL))
		goto free_two;

	return 0;

free_two:
	free_cpumask_var(*pmask2);
free_one:
	free_cpumask_var(*pmask1);
	return -ENOMEM;
}

/**
 * free_cpumasks - free cpumasks in a tmpmasks structure
 * @cs: the cpuset that has cpumasks to be freed.
 * @tmp: the tmpmasks structure pointer
 */
static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
{
	if (cs) {
		free_cpumask_var(cs->cpus_allowed);
		free_cpumask_var(cs->effective_cpus);
		free_cpumask_var(cs->subparts_cpus);
	}
	if (tmp) {
		free_cpumask_var(tmp->new_cpus);
		free_cpumask_var(tmp->addmask);
		free_cpumask_var(tmp->delmask);
	}
}

/**
 * alloc_trial_cpuset - allocate a trial cpuset
 * @cs: the cpuset that the trial cpuset duplicates
 */
static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
{
	struct cpuset *trial;

	trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
	if (!trial)
		return NULL;

	if (alloc_cpumasks(trial, NULL)) {
		kfree(trial);
		return NULL;
	}

	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
	cpumask_copy(trial->effective_cpus, cs->effective_cpus);
	return trial;
}

/**
 * free_cpuset - free the cpuset
 * @cs: the cpuset to be freed
 */
static inline void free_cpuset(struct cpuset *cs)
{
	free_cpumasks(cs, NULL);
	kfree(cs);
}

/*
 * validate_change() - Used to validate that any proposed cpuset change
 * follows the structural rules for cpusets.
 *
 * If we replaced the flag and mask values of the current cpuset
 * (cur) with those values in the trial cpuset (trial), would
 * our various subset and exclusive rules still be valid? Presumes
 * cpuset_rwsem held.
 *
 * 'cur' is the address of an actual, in-use cpuset. Operations
 * such as list traversal that depend on the actual address of the
 * cpuset in the list must use cur below, not trial.
 *
 * 'trial' is the address of a bulk structure copy of cur, with
 * perhaps one or more of the fields cpus_allowed, mems_allowed,
 * or flags changed to new, trial values.
 *
 * Return 0 if valid, -errno if not.
 */

static int validate_change(struct cpuset *cur, struct cpuset *trial)
{
	struct cgroup_subsys_state *css;
	struct cpuset *c, *par;
	int ret;

	/* The checks don't apply to the root cpuset */
	if (cur == &top_cpuset)
		return 0;

	rcu_read_lock();
	par = parent_cs(cur);

	/* On legacy hierarchy, we must be a subset of our parent cpuset. */
	ret = -EACCES;
	if (!is_in_v2_mode() && !is_cpuset_subset(trial, par))
		goto out;

	/*
	 * If either I or some sibling (!= me) is exclusive, we can't
	 * overlap.
	 */
	ret = -EINVAL;
	cpuset_for_each_child(c, css, par) {
		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
		    c != cur &&
		    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
			goto out;
		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
		    c != cur &&
		    nodes_intersects(trial->mems_allowed, c->mems_allowed))
			goto out;
	}

	/*
	 * Cpusets with tasks - existing or newly being attached - can't
	 * be changed to have empty cpus_allowed or mems_allowed.
	 */
	ret = -ENOSPC;
	if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) {
		if (!cpumask_empty(cur->cpus_allowed) &&
		    cpumask_empty(trial->cpus_allowed))
			goto out;
		if (!nodes_empty(cur->mems_allowed) &&
		    nodes_empty(trial->mems_allowed))
			goto out;
	}

	/*
	 * We can't shrink if we won't have enough room for SCHED_DEADLINE
	 * tasks.
	 */
	ret = -EBUSY;
	if (is_cpu_exclusive(cur) &&
	    !cpuset_cpumask_can_shrink(cur->cpus_allowed,
				       trial->cpus_allowed))
		goto out;

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

#ifdef CONFIG_SMP
/*
 * Helper routine for generate_sched_domains().
 * Do cpusets a, b have overlapping effective cpus_allowed masks?
 */
static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
{
	return cpumask_intersects(a->effective_cpus, b->effective_cpus);
}

static void
update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
{
	if (dattr->relax_domain_level < c->relax_domain_level)
		dattr->relax_domain_level = c->relax_domain_level;
}

static void update_domain_attr_tree(struct sched_domain_attr *dattr,
				    struct cpuset *root_cs)
{
	struct cpuset *cp;
	struct cgroup_subsys_state *pos_css;

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
		/* skip the whole subtree if @cp doesn't have any CPU */
		if (cpumask_empty(cp->cpus_allowed)) {
			pos_css = css_rightmost_descendant(pos_css);
			continue;
		}

		if (is_sched_load_balance(cp))
			update_domain_attr(dattr, cp);
	}
	rcu_read_unlock();
}

/* Must be called with cpuset_rwsem held. */
static inline int nr_cpusets(void)
{
	/* jump label reference count + the top-level cpuset */
	return static_key_count(&cpusets_enabled_key.key) + 1;
}

/*
 * generate_sched_domains()
 *
 * This function builds a partial partition of the system's CPUs.
 * A 'partial partition' is a set of non-overlapping subsets whose
 * union is a subset of that set.
 * The output of this function needs to be passed to the kernel/sched/core.c
 * partition_sched_domains() routine, which will rebuild the scheduler's
 * load balancing domains (sched domains) as specified by that partial
 * partition.
 *
 * See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst
 * for a background explanation of this.
 *
 * Does not return errors, on the theory that the callers of this
 * routine would rather not worry about failures to rebuild sched
 * domains when operating in the severe memory shortage situations
 * that could cause allocation failures below.
 *
 * Must be called with cpuset_rwsem held.
 *
 * The three key local variables below are:
 *    cp - cpuset pointer, used (together with pos_css) to perform a
 *	   top-down scan of all cpusets. For our purposes, rebuilding
 *	   the scheduler's sched domains, we can ignore !is_sched_load_
 *	   balance cpusets.
 *   csa - (for CpuSet Array) Array of pointers to all the cpusets
 *	   that need to be load balanced, for convenient iterative
 *	   access by the subsequent code that finds the best partition,
 *	   i.e. the set of domains (subsets) of CPUs such that the
 *	   cpus_allowed of every cpuset marked is_sched_load_balance
 *	   is a subset of one of these domains, while there are as
 *	   many such domains as possible, each as small as possible.
 *  doms - Conversion of 'csa' to an array of cpumasks, for passing to
 *	   the kernel/sched/core.c routine partition_sched_domains() in a
 *	   convenient format, that can be easily compared to the prior
 *	   value to determine what partition elements (sched domains)
 *	   were changed (added or removed).
 *
 * Finding the best partition (set of domains):
 *	The triple nested loops below over i, j, k scan over the
 *	load balanced cpusets (using the array of cpuset pointers in
 *	csa[]) looking for pairs of cpusets that have overlapping
 *	cpus_allowed but don't have the same 'pn' partition number,
 *	and puts them in the same partition. It keeps looping on the
 *	'restart' label until it can no longer find any such pairs.
 *
 *	The union of the cpus_allowed masks from the set of all
 *	cpusets having the same 'pn' value then forms the one element
 *	of the partition (one sched domain) to be passed to
 *	partition_sched_domains().
 */
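
/*
 * Worked example (added for clarity; not part of the original source):
 * given three load-balanced cpusets A (cpus 0-1), B (cpus 1-2) and
 * C (cpus 4-5), A and B overlap, so the pn-merging loops below give
 * them the same partition number, yielding two sched domains:
 * {0-2} and {4-5}.
 */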
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 775 | static int generate_sched_domains(cpumask_var_t **domains, |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 776 | struct sched_domain_attr **attributes) |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 777 | { |
Juri Lelli | b6fbbf3 | 2018-12-19 14:34:44 +0100 | [diff] [blame] | 778 | struct cpuset *cp; /* top-down scan of cpusets */ |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 779 | struct cpuset **csa; /* array of all cpuset ptrs */ |
| 780 | int csn; /* how many cpuset ptrs in csa so far */ |
| 781 | int i, j, k; /* indices for partition finding loops */ |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 782 | cpumask_var_t *doms; /* resulting partition; i.e. sched domains */ |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 783 | struct sched_domain_attr *dattr; /* attributes for custom domains */ |
Ingo Molnar | 1583715 | 2008-11-25 10:27:49 +0100 | [diff] [blame] | 784 | int ndoms = 0; /* number of sched domains in result */ |
Li Zefan | 6af866a | 2009-01-07 18:08:45 -0800 | [diff] [blame] | 785 | int nslot; /* next empty doms[] struct cpumask slot */ |
Tejun Heo | 492eb21 | 2013-08-08 20:11:25 -0400 | [diff] [blame] | 786 | struct cgroup_subsys_state *pos_css; |
Waiman Long | 0ccea8f | 2018-11-08 10:08:42 -0500 | [diff] [blame] | 787 | bool root_load_balance = is_sched_load_balance(&top_cpuset); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 788 | |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 789 | doms = NULL; |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 790 | dattr = NULL; |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 791 | csa = NULL; |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 792 | |
| 793 | /* Special case for the 99% of systems with one, full, sched domain */ |
Waiman Long | 0ccea8f | 2018-11-08 10:08:42 -0500 | [diff] [blame] | 794 | if (root_load_balance && !top_cpuset.nr_subparts_cpus) { |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 795 | ndoms = 1; |
| 796 | doms = alloc_sched_domains(ndoms); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 797 | if (!doms) |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 798 | goto done; |
| 799 | |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 800 | dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL); |
| 801 | if (dattr) { |
| 802 | *dattr = SD_ATTR_INIT; |
Li Zefan | 93a6557 | 2008-07-29 22:33:23 -0700 | [diff] [blame] | 803 | update_domain_attr_tree(dattr, &top_cpuset); |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 804 | } |
Rik van Riel | 47b8ea7 | 2015-03-09 12:12:08 -0400 | [diff] [blame] | 805 | cpumask_and(doms[0], top_cpuset.effective_cpus, |
Frederic Weisbecker | edb9382 | 2017-10-27 04:42:37 +0200 | [diff] [blame] | 806 | housekeeping_cpumask(HK_FLAG_DOMAIN)); |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 807 | |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 808 | goto done; |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 809 | } |
| 810 | |
Kees Cook | 6da2ec5 | 2018-06-12 13:55:00 -0700 | [diff] [blame] | 811 | csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 812 | if (!csa) |
| 813 | goto done; |
| 814 | csn = 0; |
| 815 | |
Tejun Heo | fc560a2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 816 | rcu_read_lock(); |
Waiman Long | 0ccea8f | 2018-11-08 10:08:42 -0500 | [diff] [blame] | 817 | if (root_load_balance) |
| 818 | csa[csn++] = &top_cpuset; |
Tejun Heo | 492eb21 | 2013-08-08 20:11:25 -0400 | [diff] [blame] | 819 | cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) { |
Tejun Heo | bd8815a | 2013-08-08 20:11:27 -0400 | [diff] [blame] | 820 | if (cp == &top_cpuset) |
| 821 | continue; |
Lai Jiangshan | f539369 | 2008-07-29 22:33:22 -0700 | [diff] [blame] | 822 | /* |
Tejun Heo | fc560a2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 823 | * Continue traversing beyond @cp iff @cp has some CPUs and |
| 824 | * isn't load balancing. The former is obvious. The |
| 825 | * latter: All child cpusets contain a subset of the |
| 826 | * parent's cpus, so just skip them, and then we call |
| 827 | * update_domain_attr_tree() to calc relax_domain_level of |
| 828 | * the corresponding sched domain. |
Waiman Long | 0ccea8f | 2018-11-08 10:08:42 -0500 | [diff] [blame] | 829 | * |
| 830 | * If root is load-balancing, we can skip @cp if it |
| 831 | * is a subset of the root's effective_cpus. |
Lai Jiangshan | f539369 | 2008-07-29 22:33:22 -0700 | [diff] [blame] | 832 | */ |
Tejun Heo | fc560a2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 833 | if (!cpumask_empty(cp->cpus_allowed) && |
Rik van Riel | 47b8ea7 | 2015-03-09 12:12:08 -0400 | [diff] [blame] | 834 | !(is_sched_load_balance(cp) && |
Frederic Weisbecker | edb9382 | 2017-10-27 04:42:37 +0200 | [diff] [blame] | 835 | cpumask_intersects(cp->cpus_allowed, |
| 836 | housekeeping_cpumask(HK_FLAG_DOMAIN)))) |
Lai Jiangshan | f539369 | 2008-07-29 22:33:22 -0700 | [diff] [blame] | 837 | continue; |
Lai Jiangshan | 489a539 | 2008-07-25 01:47:23 -0700 | [diff] [blame] | 838 | |
Waiman Long | 0ccea8f | 2018-11-08 10:08:42 -0500 | [diff] [blame] | 839 | if (root_load_balance && |
| 840 | cpumask_subset(cp->cpus_allowed, top_cpuset.effective_cpus)) |
| 841 | continue; |
| 842 | |
Valentin Schneider | cd1cb33 | 2019-10-23 16:37:44 +0100 | [diff] [blame] | 843 | if (is_sched_load_balance(cp) && |
| 844 | !cpumask_empty(cp->effective_cpus)) |
Tejun Heo | fc560a2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 845 | csa[csn++] = cp; |
| 846 | |
Waiman Long | 0ccea8f | 2018-11-08 10:08:42 -0500 | [diff] [blame] | 847 | /* skip @cp's subtree if not a partition root */ |
| 848 | if (!is_partition_root(cp)) |
| 849 | pos_css = css_rightmost_descendant(pos_css); |
Tejun Heo | fc560a2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 850 | } |
| 851 | rcu_read_unlock(); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 852 | |
| 853 | for (i = 0; i < csn; i++) |
| 854 | csa[i]->pn = i; |
| 855 | ndoms = csn; |
| 856 | |
| 857 | restart: |
| 858 | /* Find the best partition (set of sched domains) */ |
| 859 | for (i = 0; i < csn; i++) { |
| 860 | struct cpuset *a = csa[i]; |
| 861 | int apn = a->pn; |
| 862 | |
| 863 | for (j = 0; j < csn; j++) { |
| 864 | struct cpuset *b = csa[j]; |
| 865 | int bpn = b->pn; |
| 866 | |
| 867 | if (apn != bpn && cpusets_overlap(a, b)) { |
| 868 | for (k = 0; k < csn; k++) { |
| 869 | struct cpuset *c = csa[k]; |
| 870 | |
| 871 | if (c->pn == bpn) |
| 872 | c->pn = apn; |
| 873 | } |
| 874 | ndoms--; /* one less element */ |
| 875 | goto restart; |
| 876 | } |
| 877 | } |
| 878 | } |
| 879 | |
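| | /* |
| | * Illustrative walk-through (not from the original source): the |
| | * merging loop above repeatedly relabels partition numbers. E.g. |
| | * with csa[] = A{cpus 0-3}, B{2-5}, C{6-7} and initial pn = 0, 1, 2, |
| | * A and B overlap, so every pn equal to B's is rewritten to A's and |
| | * ndoms drops from 3 to 2; C keeps its own sched domain. |
| | */ |
| | |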
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 880 | /* |
| 881 | * Now we know how many domains to create. |
| 882 | * Convert <csn, csa> to <ndoms, doms> and populate cpu masks. |
| 883 | */ |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 884 | doms = alloc_sched_domains(ndoms); |
Li Zefan | 700018e | 2008-11-18 14:02:03 +0800 | [diff] [blame] | 885 | if (!doms) |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 886 | goto done; |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 887 | |
| 888 | /* |
| 889 | * The rest of the code, including the scheduler, can deal with |
| 890 | * the dattr==NULL case. No need to abort if the allocation fails. |
| 891 | */ |
Kees Cook | 6da2ec5 | 2018-06-12 13:55:00 -0700 | [diff] [blame] | 892 | dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr), |
| 893 | GFP_KERNEL); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 894 | |
| 895 | for (nslot = 0, i = 0; i < csn; i++) { |
| 896 | struct cpuset *a = csa[i]; |
Li Zefan | 6af866a | 2009-01-07 18:08:45 -0800 | [diff] [blame] | 897 | struct cpumask *dp; |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 898 | int apn = a->pn; |
| 899 | |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 900 | if (apn < 0) { |
| 901 | /* Skip completed partitions */ |
| 902 | continue; |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 903 | } |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 904 | |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 905 | dp = doms[nslot]; |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 906 | |
| 907 | if (nslot == ndoms) { |
| 908 | static int warnings = 10; |
| 909 | if (warnings) { |
Fabian Frederick | 12d3089 | 2014-05-05 19:49:00 +0200 | [diff] [blame] | 910 | pr_warn("rebuild_sched_domains confused: nslot %d, ndoms %d, csn %d, i %d, apn %d\n", |
| 911 | nslot, ndoms, csn, i, apn); |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 912 | warnings--; |
| 913 | } |
| 914 | continue; |
| 915 | } |
| 916 | |
Li Zefan | 6af866a | 2009-01-07 18:08:45 -0800 | [diff] [blame] | 917 | cpumask_clear(dp); |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 918 | if (dattr) |
| 919 | *(dattr + nslot) = SD_ATTR_INIT; |
| 920 | for (j = i; j < csn; j++) { |
| 921 | struct cpuset *b = csa[j]; |
| 922 | |
| 923 | if (apn == b->pn) { |
Li Zefan | 8b5f1c5 | 2014-07-09 16:47:50 +0800 | [diff] [blame] | 924 | cpumask_or(dp, dp, b->effective_cpus); |
Frederic Weisbecker | edb9382 | 2017-10-27 04:42:37 +0200 | [diff] [blame] | 925 | cpumask_and(dp, dp, housekeeping_cpumask(HK_FLAG_DOMAIN)); |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 926 | if (dattr) |
| 927 | update_domain_attr_tree(dattr + nslot, b); |
| 928 | |
| 929 | /* Done with this partition */ |
| 930 | b->pn = -1; |
| 931 | } |
| 932 | } |
| 933 | nslot++; |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 934 | } |
| 935 | BUG_ON(nslot != ndoms); |
| 936 | |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 937 | done: |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 938 | kfree(csa); |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 939 | |
Li Zefan | 700018e | 2008-11-18 14:02:03 +0800 | [diff] [blame] | 940 | /* |
| 941 | * Fallback to the default domain if kmalloc() failed. |
| 942 | * See comments in partition_sched_domains(). |
| 943 | */ |
| 944 | if (doms == NULL) |
| 945 | ndoms = 1; |
| 946 | |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 947 | *domains = doms; |
| 948 | *attributes = dattr; |
| 949 | return ndoms; |
| 950 | } |
| 951 | |
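| | /* |
| | * Illustrative outcome of generate_sched_domains() (hypothetical |
| | * cpusets, for illustration only): two sibling cpusets with cpus 0-3 |
| | * and 4-7, each load balanced and not overlapping, yield ndoms = 2 |
| | * with doms[0] and doms[1] holding 0-3 and 4-7 (in some order), plus |
| | * one sched_domain_attr slot per domain in *attributes. |
| | */ |
| | |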
Mathieu Poirier | f9a25f7 | 2019-07-19 15:59:55 +0200 | [diff] [blame] | 952 | static void update_tasks_root_domain(struct cpuset *cs) |
| 953 | { |
| 954 | struct css_task_iter it; |
| 955 | struct task_struct *task; |
| 956 | |
| 957 | css_task_iter_start(&cs->css, 0, &it); |
| 958 | |
| 959 | while ((task = css_task_iter_next(&it))) |
| 960 | dl_add_task_root_domain(task); |
| 961 | |
| 962 | css_task_iter_end(&it); |
| 963 | } |
| 964 | |
| 965 | static void rebuild_root_domains(void) |
| 966 | { |
| 967 | struct cpuset *cs = NULL; |
| 968 | struct cgroup_subsys_state *pos_css; |
| 969 | |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 970 | percpu_rwsem_assert_held(&cpuset_rwsem); |
Mathieu Poirier | f9a25f7 | 2019-07-19 15:59:55 +0200 | [diff] [blame] | 971 | lockdep_assert_cpus_held(); |
| 972 | lockdep_assert_held(&sched_domains_mutex); |
| 973 | |
Mathieu Poirier | f9a25f7 | 2019-07-19 15:59:55 +0200 | [diff] [blame] | 974 | rcu_read_lock(); |
| 975 | |
| 976 | /* |
| 977 | * Clear default root domain DL accounting, it will be computed again |
| 978 | * if a task belongs to it. |
| 979 | */ |
| 980 | dl_clear_root_domain(&def_root_domain); |
| 981 | |
| 982 | cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { |
| 983 | |
| 984 | if (cpumask_empty(cs->effective_cpus)) { |
| 985 | pos_css = css_rightmost_descendant(pos_css); |
| 986 | continue; |
| 987 | } |
| 988 | |
| 989 | css_get(&cs->css); |
| 990 | |
| 991 | rcu_read_unlock(); |
| 992 | |
| 993 | update_tasks_root_domain(cs); |
| 994 | |
| 995 | rcu_read_lock(); |
| 996 | css_put(&cs->css); |
| 997 | } |
| 998 | rcu_read_unlock(); |
| 999 | } |
| 1000 | |
| 1001 | static void |
| 1002 | partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[], |
| 1003 | struct sched_domain_attr *dattr_new) |
| 1004 | { |
| 1005 | mutex_lock(&sched_domains_mutex); |
| 1006 | partition_sched_domains_locked(ndoms_new, doms_new, dattr_new); |
| 1007 | rebuild_root_domains(); |
| 1008 | mutex_unlock(&sched_domains_mutex); |
| 1009 | } |
| 1010 | |
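| | /* |
| | * Note (descriptive, not from the original source): per the callers |
| | * and the assertions in rebuild_root_domains(), the nesting order is |
| | * cpus_read_lock() -> cpuset_rwsem -> sched_domains_mutex. |
| | */ |
| | |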
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 1011 | /* |
| 1012 | * Rebuild scheduler domains. |
| 1013 | * |
Tejun Heo | 699140b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1014 | * If the flag 'sched_load_balance' of any cpuset with non-empty |
| 1015 | * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset |
| 1016 | * which has that flag enabled, or if any cpuset with a non-empty |
| 1017 | * 'cpus' is removed, then call this routine to rebuild the |
| 1018 | * scheduler's dynamic sched domains. |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 1019 | * |
Waiman Long | b94f9ac | 2021-09-09 22:42:56 -0400 | [diff] [blame] | 1020 | * Call with cpuset_rwsem and cpus_read_lock() held (see rebuild_sched_domains()). |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 1021 | */ |
Tejun Heo | 699140b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1022 | static void rebuild_sched_domains_locked(void) |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 1023 | { |
Daniel Jordan | 406100f | 2020-11-12 12:17:11 -0500 | [diff] [blame] | 1024 | struct cgroup_subsys_state *pos_css; |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 1025 | struct sched_domain_attr *attr; |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 1026 | cpumask_var_t *doms; |
Daniel Jordan | 406100f | 2020-11-12 12:17:11 -0500 | [diff] [blame] | 1027 | struct cpuset *cs; |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 1028 | int ndoms; |
| 1029 | |
Juri Lelli | d74b27d | 2019-07-19 15:59:58 +0200 | [diff] [blame] | 1030 | lockdep_assert_cpus_held(); |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 1031 | percpu_rwsem_assert_held(&cpuset_rwsem); |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 1032 | |
Li Zefan | 5b16c2a | 2013-04-27 06:52:43 -0700 | [diff] [blame] | 1033 | /* |
Daniel Jordan | 406100f | 2020-11-12 12:17:11 -0500 | [diff] [blame] | 1034 | * If we have raced with CPU hotplug, return early to avoid |
Li Zefan | 5b16c2a | 2013-04-27 06:52:43 -0700 | [diff] [blame] | 1035 | * passing doms with an offlined CPU to partition_sched_domains(). |
Daniel Jordan | 406100f | 2020-11-12 12:17:11 -0500 | [diff] [blame] | 1036 | * Anyway, cpuset_hotplug_workfn() will rebuild the sched domains. |
| 1037 | * |
| 1038 | * With no CPUs in any subpartitions, top_cpuset's effective CPUs |
| 1039 | * should be the same as the active CPUs, so checking only top_cpuset |
| 1040 | * is enough to detect racing CPU offlines. |
Li Zefan | 5b16c2a | 2013-04-27 06:52:43 -0700 | [diff] [blame] | 1041 | */ |
Waiman Long | 0ccea8f | 2018-11-08 10:08:42 -0500 | [diff] [blame] | 1042 | if (!top_cpuset.nr_subparts_cpus && |
| 1043 | !cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask)) |
Juri Lelli | d74b27d | 2019-07-19 15:59:58 +0200 | [diff] [blame] | 1044 | return; |
Waiman Long | 0ccea8f | 2018-11-08 10:08:42 -0500 | [diff] [blame] | 1045 | |
Daniel Jordan | 406100f | 2020-11-12 12:17:11 -0500 | [diff] [blame] | 1046 | /* |
| 1047 | * With subpartition CPUs, however, the effective CPUs of a partition |
| 1048 | * root should be only a subset of the active CPUs. Since a CPU in any |
| 1049 | * partition root could be offlined, all must be checked. |
| 1050 | */ |
| 1051 | if (top_cpuset.nr_subparts_cpus) { |
| 1052 | rcu_read_lock(); |
| 1053 | cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { |
| 1054 | if (!is_partition_root(cs)) { |
| 1055 | pos_css = css_rightmost_descendant(pos_css); |
| 1056 | continue; |
| 1057 | } |
| 1058 | if (!cpumask_subset(cs->effective_cpus, |
| 1059 | cpu_active_mask)) { |
| 1060 | rcu_read_unlock(); |
| 1061 | return; |
| 1062 | } |
| 1063 | } |
| 1064 | rcu_read_unlock(); |
| 1065 | } |
Li Zefan | 5b16c2a | 2013-04-27 06:52:43 -0700 | [diff] [blame] | 1066 | |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 1067 | /* Generate domain masks and attrs */ |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 1068 | ndoms = generate_sched_domains(&doms, &attr); |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 1069 | |
| 1070 | /* Have scheduler rebuild the domains */ |
Mathieu Poirier | f9a25f7 | 2019-07-19 15:59:55 +0200 | [diff] [blame] | 1071 | partition_and_rebuild_sched_domains(ndoms, doms, attr); |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 1072 | } |
Paul Menage | db7f47c | 2009-04-02 16:57:55 -0700 | [diff] [blame] | 1073 | #else /* !CONFIG_SMP */ |
Tejun Heo | 699140b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1074 | static void rebuild_sched_domains_locked(void) |
Paul Menage | db7f47c | 2009-04-02 16:57:55 -0700 | [diff] [blame] | 1075 | { |
| 1076 | } |
Paul Menage | db7f47c | 2009-04-02 16:57:55 -0700 | [diff] [blame] | 1077 | #endif /* CONFIG_SMP */ |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 1078 | |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 1079 | void rebuild_sched_domains(void) |
| 1080 | { |
Sebastian Andrzej Siewior | c5c63b9 | 2021-08-03 16:16:07 +0200 | [diff] [blame] | 1081 | cpus_read_lock(); |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 1082 | percpu_down_write(&cpuset_rwsem); |
Tejun Heo | 699140b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1083 | rebuild_sched_domains_locked(); |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 1084 | percpu_up_write(&cpuset_rwsem); |
Sebastian Andrzej Siewior | c5c63b9 | 2021-08-03 16:16:07 +0200 | [diff] [blame] | 1085 | cpus_read_unlock(); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 1086 | } |
| 1087 | |
Cliff Wickman | 58f4790 | 2008-02-07 00:14:44 -0800 | [diff] [blame] | 1088 | /** |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1089 | * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset. |
| 1090 | * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed |
| 1091 | * |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1092 | * Iterate through each task of @cs updating its cpus_allowed to the |
Waiman Long | b94f9ac | 2021-09-09 22:42:56 -0400 | [diff] [blame] | 1093 | * effective cpuset's. As this function is called with cpuset_rwsem held, |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1094 | * cpuset membership stays stable. |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1095 | */ |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1096 | static void update_tasks_cpumask(struct cpuset *cs) |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1097 | { |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1098 | struct css_task_iter it; |
| 1099 | struct task_struct *task; |
| 1100 | |
Tejun Heo | bc2fb7e | 2017-05-15 09:34:01 -0400 | [diff] [blame] | 1101 | css_task_iter_start(&cs->css, 0, &it); |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1102 | while ((task = css_task_iter_next(&it))) |
Li Zefan | ae1c802 | 2014-07-09 16:48:32 +0800 | [diff] [blame] | 1103 | set_cpus_allowed_ptr(task, cs->effective_cpus); |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1104 | css_task_iter_end(&it); |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1105 | } |
| 1106 | |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1107 | /** |
| 1108 | * compute_effective_cpumask - Compute the effective cpumask of the cpuset |
| 1109 | * @new_cpus: the temp variable for the new effective_cpus mask |
| 1110 | * @cs: the cpuset that needs to recompute the new effective_cpus mask |
| 1111 | * @parent: the parent cpuset |
| 1112 | * |
| 1113 | * If the parent has subpartition CPUs, include them in the list of |
Waiman Long | 4b842da | 2018-11-08 10:08:41 -0500 | [diff] [blame] | 1114 | * allowable CPUs in computing the new effective_cpus mask. Since offlined |
| 1115 | * CPUs are not removed from subparts_cpus, we have to use cpu_active_mask |
| 1116 | * to mask those out. |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1117 | */ |
| 1118 | static void compute_effective_cpumask(struct cpumask *new_cpus, |
| 1119 | struct cpuset *cs, struct cpuset *parent) |
| 1120 | { |
| 1121 | if (parent->nr_subparts_cpus) { |
| 1122 | cpumask_or(new_cpus, parent->effective_cpus, |
| 1123 | parent->subparts_cpus); |
| 1124 | cpumask_and(new_cpus, new_cpus, cs->cpus_allowed); |
Waiman Long | 4b842da | 2018-11-08 10:08:41 -0500 | [diff] [blame] | 1125 | cpumask_and(new_cpus, new_cpus, cpu_active_mask); |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1126 | } else { |
| 1127 | cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus); |
| 1128 | } |
| 1129 | } |
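| | |
| | /* |
| | * Worked example for compute_effective_cpumask() (hypothetical masks, |
| | * for illustration only): with parent->effective_cpus = 0-3, |
| | * parent->subparts_cpus = 4-5, cs->cpus_allowed = 2-5 and CPU 5 |
| | * offline, new_cpus = ((0-3 | 4-5) & 2-5) & cpu_active_mask = 2-4. |
| | * With no subpartition CPUs it is simply 2-5 & 0-3 = 2-3. |
| | */ |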
| 1130 | |
| 1131 | /* |
| 1132 | * Commands for update_parent_subparts_cpumask |
| 1133 | */ |
| 1134 | enum subparts_cmd { |
| 1135 | partcmd_enable, /* Enable partition root */ |
| 1136 | partcmd_disable, /* Disable partition root */ |
| 1137 | partcmd_update, /* Update parent's subparts_cpus */ |
| 1138 | }; |
| 1139 | |
| 1140 | /** |
| 1141 | * update_parent_subparts_cpumask - update subparts_cpus mask of parent cpuset |
| 1142 | * @cpuset: The cpuset that requests change in partition root state |
| 1143 | * @cmd: Partition root state change command |
| 1144 | * @newmask: Optional new cpumask for partcmd_update |
| 1145 | * @tmp: Temporary addmask and delmask |
| 1146 | * Return: 0, 1 or an error code |
| 1147 | * |
| 1148 | * For partcmd_enable, the cpuset is being transformed from a non-partition |
| 1149 | * root to a partition root. The cpus_allowed mask of the given cpuset will |
| 1150 | * be put into parent's subparts_cpus and taken away from parent's |
| 1151 | * effective_cpus. The function will return 0 if all the CPUs listed in |
| 1152 | * cpus_allowed can be granted or an error code will be returned. |
| 1153 | * |
| 1154 | * For partcmd_disable, the cpuset is being transformed from a partition |
Waiman Long | 0f3adb8 | 2021-07-20 10:18:26 -0400 | [diff] [blame] | 1155 | * root back to a non-partition root. Any CPUs in cpus_allowed that are in |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1156 | * parent's subparts_cpus will be taken away from that cpumask and put back |
| 1157 | * into parent's effective_cpus. 0 should always be returned. |
| 1158 | * |
| 1159 | * For partcmd_update, if the optional newmask is specified, the cpu |
| 1160 | * list is to be changed from cpus_allowed to newmask. Otherwise, |
Waiman Long | 3881b86 | 2018-11-08 10:08:39 -0500 | [diff] [blame] | 1161 | * cpus_allowed is assumed to remain the same. The cpuset should either |
| 1162 | * be a partition root or an invalid partition root. The partition root |
| 1163 | * state may change if newmask is NULL and none of the requested CPUs can |
| 1164 | * be granted by the parent. The function will return 1 if changes to |
| 1165 | * parent's subparts_cpus and effective_cpus happen or 0 otherwise. |
| 1166 | * Error code should only be returned when newmask is non-NULL. |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1167 | * |
| 1168 | * The partcmd_enable and partcmd_disable commands are used by |
| 1169 | * update_prstate(). The partcmd_update command is used by |
| 1170 | * update_cpumasks_hier() with newmask NULL and update_cpumask() with |
| 1171 | * newmask set. |
| 1172 | * |
| 1173 | * The checking is more strict when enabling partition root than the |
| 1174 | * other two commands. |
| 1175 | * |
| 1176 | * Because of the implicit cpu exclusive nature of a partition root, |
| 1177 | * cpumask changes that violate the cpu exclusivity rule will not be |
| 1178 | * permitted when checked by validate_change(). The validate_change() |
| 1179 | * function will also prevent any changes to the cpu list if it is not |
| 1180 | * a superset of children's cpu lists. |
| 1181 | */ |
| 1182 | static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd, |
| 1183 | struct cpumask *newmask, |
| 1184 | struct tmpmasks *tmp) |
| 1185 | { |
| 1186 | struct cpuset *parent = parent_cs(cpuset); |
| 1187 | int adding; /* Moving cpus from effective_cpus to subparts_cpus */ |
| 1188 | int deleting; /* Moving cpus from subparts_cpus to effective_cpus */ |
Waiman Long | e7cc988 | 2021-08-10 23:06:02 -0400 | [diff] [blame] | 1189 | int old_prs, new_prs; |
Waiman Long | 3881b86 | 2018-11-08 10:08:39 -0500 | [diff] [blame] | 1190 | bool part_error = false; /* Partition error? */ |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1191 | |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 1192 | percpu_rwsem_assert_held(&cpuset_rwsem); |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1193 | |
| 1194 | /* |
| 1195 | * The parent must be a partition root. |
| 1196 | * The new cpumask, if present, or the current cpus_allowed must |
| 1197 | * not be empty. |
| 1198 | */ |
| 1199 | if (!is_partition_root(parent) || |
| 1200 | (newmask && cpumask_empty(newmask)) || |
| 1201 | (!newmask && cpumask_empty(cpuset->cpus_allowed))) |
| 1202 | return -EINVAL; |
| 1203 | |
| 1204 | /* |
| 1205 | * Enabling/disabling partition root is not allowed if there are |
| 1206 | * online children. |
| 1207 | */ |
| 1208 | if ((cmd != partcmd_update) && css_has_online_children(&cpuset->css)) |
| 1209 | return -EBUSY; |
| 1210 | |
| 1211 | /* |
| 1212 | * Enabling partition root is not allowed if not all the CPUs |
| 1213 | * can be granted from parent's effective_cpus, or if granting |
| 1214 | * them would leave the parent with no effective CPU. |
| 1215 | */ |
| 1216 | if ((cmd == partcmd_enable) && |
| 1217 | (!cpumask_subset(cpuset->cpus_allowed, parent->effective_cpus) || |
| 1218 | cpumask_equal(cpuset->cpus_allowed, parent->effective_cpus))) |
| 1219 | return -EINVAL; |
| 1220 | |
| 1221 | /* |
| 1222 | * A cpumask update cannot make parent's effective_cpus become empty. |
| 1223 | */ |
| 1224 | adding = deleting = false; |
Waiman Long | e7cc988 | 2021-08-10 23:06:02 -0400 | [diff] [blame] | 1225 | old_prs = new_prs = cpuset->partition_root_state; |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1226 | if (cmd == partcmd_enable) { |
| 1227 | cpumask_copy(tmp->addmask, cpuset->cpus_allowed); |
| 1228 | adding = true; |
| 1229 | } else if (cmd == partcmd_disable) { |
| 1230 | deleting = cpumask_and(tmp->delmask, cpuset->cpus_allowed, |
| 1231 | parent->subparts_cpus); |
| 1232 | } else if (newmask) { |
| 1233 | /* |
| 1234 | * partcmd_update with newmask: |
| 1235 | * |
| 1236 | * delmask = cpus_allowed & ~newmask & parent->subparts_cpus |
| 1237 | * addmask = newmask & parent->effective_cpus |
| 1238 | * & ~parent->subparts_cpus |
| 1239 | */ |
| 1240 | cpumask_andnot(tmp->delmask, cpuset->cpus_allowed, newmask); |
| 1241 | deleting = cpumask_and(tmp->delmask, tmp->delmask, |
| 1242 | parent->subparts_cpus); |
| 1243 | |
| 1244 | cpumask_and(tmp->addmask, newmask, parent->effective_cpus); |
| 1245 | adding = cpumask_andnot(tmp->addmask, tmp->addmask, |
| 1246 | parent->subparts_cpus); |
| 1247 | /* |
| 1248 | * Return error if the new effective_cpus could become empty. |
| 1249 | */ |
Waiman Long | 4b842da | 2018-11-08 10:08:41 -0500 | [diff] [blame] | 1250 | if (adding && |
| 1251 | cpumask_equal(parent->effective_cpus, tmp->addmask)) { |
| 1252 | if (!deleting) |
| 1253 | return -EINVAL; |
| 1254 | /* |
| 1255 | * As some of the CPUs in subparts_cpus might have |
| 1256 | * been offlined, we need to compute the real delmask |
| 1257 | * to confirm that. |
| 1258 | */ |
| 1259 | if (!cpumask_and(tmp->addmask, tmp->delmask, |
| 1260 | cpu_active_mask)) |
| 1261 | return -EINVAL; |
| 1262 | cpumask_copy(tmp->addmask, parent->effective_cpus); |
| 1263 | } |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1264 | } else { |
| 1265 | /* |
| 1266 | * partcmd_update w/o newmask: |
| 1267 | * |
Waiman Long | 0f3adb8 | 2021-07-20 10:18:26 -0400 | [diff] [blame] | 1268 | * addmask = cpus_allowed & parent->effective_cpus |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1269 | * |
| 1270 | * Note that parent's subparts_cpus may have been |
Waiman Long | 3881b86 | 2018-11-08 10:08:39 -0500 | [diff] [blame] | 1271 | * pre-shrunk in case there is a change in the cpu list. |
| 1272 | * So no deletion is needed. |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1273 | */ |
| 1274 | adding = cpumask_and(tmp->addmask, cpuset->cpus_allowed, |
| 1275 | parent->effective_cpus); |
Waiman Long | 3881b86 | 2018-11-08 10:08:39 -0500 | [diff] [blame] | 1276 | part_error = cpumask_equal(tmp->addmask, |
| 1277 | parent->effective_cpus); |
| 1278 | } |
| 1279 | |
| 1280 | if (cmd == partcmd_update) { |
| 1281 | int prev_prs = cpuset->partition_root_state; |
| 1282 | |
| 1283 | /* |
| 1284 | * Check for possible transition between PRS_ENABLED |
| 1285 | * and PRS_ERROR. |
| 1286 | */ |
| 1287 | switch (cpuset->partition_root_state) { |
| 1288 | case PRS_ENABLED: |
| 1289 | if (part_error) |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 1290 | new_prs = PRS_ERROR; |
Waiman Long | 3881b86 | 2018-11-08 10:08:39 -0500 | [diff] [blame] | 1291 | break; |
| 1292 | case PRS_ERROR: |
| 1293 | if (!part_error) |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 1294 | new_prs = PRS_ENABLED; |
Waiman Long | 3881b86 | 2018-11-08 10:08:39 -0500 | [diff] [blame] | 1295 | break; |
| 1296 | } |
| 1297 | /* |
| 1298 | * Set part_error if previously in invalid state. |
| 1299 | */ |
| 1300 | part_error = (prev_prs == PRS_ERROR); |
| 1301 | } |
| 1302 | |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 1303 | if (!part_error && (new_prs == PRS_ERROR)) |
Waiman Long | 3881b86 | 2018-11-08 10:08:39 -0500 | [diff] [blame] | 1304 | return 0; /* Nothing needs to be done */ |
| 1305 | |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 1306 | if (new_prs == PRS_ERROR) { |
Waiman Long | 3881b86 | 2018-11-08 10:08:39 -0500 | [diff] [blame] | 1307 | /* |
| 1308 | * Remove all its cpus from parent's subparts_cpus. |
| 1309 | */ |
| 1310 | adding = false; |
| 1311 | deleting = cpumask_and(tmp->delmask, cpuset->cpus_allowed, |
| 1312 | parent->subparts_cpus); |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1313 | } |
| 1314 | |
Waiman Long | e7cc988 | 2021-08-10 23:06:02 -0400 | [diff] [blame] | 1315 | if (!adding && !deleting && (new_prs == old_prs)) |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1316 | return 0; |
| 1317 | |
| 1318 | /* |
| 1319 | * Change the parent's subparts_cpus. |
| 1320 | * Newly added CPUs will be removed from effective_cpus and |
| 1321 | * newly deleted ones will be added back to effective_cpus. |
| 1322 | */ |
| 1323 | spin_lock_irq(&callback_lock); |
| 1324 | if (adding) { |
| 1325 | cpumask_or(parent->subparts_cpus, |
| 1326 | parent->subparts_cpus, tmp->addmask); |
| 1327 | cpumask_andnot(parent->effective_cpus, |
| 1328 | parent->effective_cpus, tmp->addmask); |
| 1329 | } |
| 1330 | if (deleting) { |
| 1331 | cpumask_andnot(parent->subparts_cpus, |
| 1332 | parent->subparts_cpus, tmp->delmask); |
Waiman Long | 4b842da | 2018-11-08 10:08:41 -0500 | [diff] [blame] | 1333 | /* |
| 1334 | * Some of the CPUs in subparts_cpus might have been offlined. |
| 1335 | */ |
| 1336 | cpumask_and(tmp->delmask, tmp->delmask, cpu_active_mask); |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1337 | cpumask_or(parent->effective_cpus, |
| 1338 | parent->effective_cpus, tmp->delmask); |
| 1339 | } |
| 1340 | |
| 1341 | parent->nr_subparts_cpus = cpumask_weight(parent->subparts_cpus); |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 1342 | |
Waiman Long | e7cc988 | 2021-08-10 23:06:02 -0400 | [diff] [blame] | 1343 | if (old_prs != new_prs) |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 1344 | cpuset->partition_root_state = new_prs; |
Waiman Long | e7cc988 | 2021-08-10 23:06:02 -0400 | [diff] [blame] | 1345 | |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1346 | spin_unlock_irq(&callback_lock); |
Waiman Long | e7cc988 | 2021-08-10 23:06:02 -0400 | [diff] [blame] | 1347 | notify_partition_change(cpuset, old_prs, new_prs); |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1348 | |
| 1349 | return cmd == partcmd_update; |
| 1350 | } |
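| | |
| | /* |
| | * Worked example (hypothetical masks, for illustration only): with |
| | * parent->effective_cpus = 0-7 and an empty parent->subparts_cpus, |
| | * partcmd_enable on a child whose cpus_allowed = 4-7 moves CPUs 4-7 |
| | * into parent->subparts_cpus, leaving parent->effective_cpus = 0-3. |
| | * partcmd_disable reverses this, returning 4-7 to the parent's |
| | * effective_cpus. |
| | */ |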
| 1351 | |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1352 | /* |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1353 | * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1354 | * @cs: the cpuset to consider |
| 1355 | * @tmp: temp variables for calculating effective_cpus & partition setup |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1356 | * |
Aubrey Li | 415de5f | 2021-01-13 12:37:41 +0800 | [diff] [blame] | 1357 | * When the configured cpumask is changed, the effective cpumasks of this cpuset |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1358 | * and all its descendants need to be updated. |
| 1359 | * |
Aubrey Li | 415de5f | 2021-01-13 12:37:41 +0800 | [diff] [blame] | 1360 | * On legacy hierarchy, effective_cpus will be the same as cpus_allowed. |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1361 | * |
Waiman Long | b94f9ac | 2021-09-09 22:42:56 -0400 | [diff] [blame] | 1362 | * Called with cpuset_rwsem held |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1363 | */ |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1364 | static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp) |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1365 | { |
| 1366 | struct cpuset *cp; |
Tejun Heo | 492eb21 | 2013-08-08 20:11:25 -0400 | [diff] [blame] | 1367 | struct cgroup_subsys_state *pos_css; |
Li Zefan | 8b5f1c5 | 2014-07-09 16:47:50 +0800 | [diff] [blame] | 1368 | bool need_rebuild_sched_domains = false; |
Waiman Long | e7cc988 | 2021-08-10 23:06:02 -0400 | [diff] [blame] | 1369 | int old_prs, new_prs; |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1370 | |
| 1371 | rcu_read_lock(); |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1372 | cpuset_for_each_descendant_pre(cp, pos_css, cs) { |
| 1373 | struct cpuset *parent = parent_cs(cp); |
| 1374 | |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1375 | compute_effective_cpumask(tmp->new_cpus, cp, parent); |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1376 | |
Li Zefan | 554b0d1 | 2014-07-09 16:47:41 +0800 | [diff] [blame] | 1377 | /* |
| 1378 | * If it becomes empty, inherit the effective mask of the |
| 1379 | * parent, which is guaranteed to have some CPUs. |
| 1380 | */ |
Waiman Long | 4716909 | 2018-11-08 10:08:40 -0500 | [diff] [blame] | 1381 | if (is_in_v2_mode() && cpumask_empty(tmp->new_cpus)) { |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1382 | cpumask_copy(tmp->new_cpus, parent->effective_cpus); |
Waiman Long | 4716909 | 2018-11-08 10:08:40 -0500 | [diff] [blame] | 1383 | if (!cp->use_parent_ecpus) { |
| 1384 | cp->use_parent_ecpus = true; |
| 1385 | parent->child_ecpus_count++; |
| 1386 | } |
| 1387 | } else if (cp->use_parent_ecpus) { |
| 1388 | cp->use_parent_ecpus = false; |
| 1389 | WARN_ON_ONCE(!parent->child_ecpus_count); |
| 1390 | parent->child_ecpus_count--; |
| 1391 | } |
Li Zefan | 554b0d1 | 2014-07-09 16:47:41 +0800 | [diff] [blame] | 1392 | |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1393 | /* |
| 1394 | * Skip the whole subtree if the cpumask remains the same |
| 1395 | * and the cpuset has no partition root state. |
| 1396 | */ |
Waiman Long | 3881b86 | 2018-11-08 10:08:39 -0500 | [diff] [blame] | 1397 | if (!cp->partition_root_state && |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1398 | cpumask_equal(tmp->new_cpus, cp->effective_cpus)) { |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1399 | pos_css = css_rightmost_descendant(pos_css); |
| 1400 | continue; |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1401 | } |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1402 | |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1403 | /* |
| 1404 | * update_parent_subparts_cpumask() should have been called |
| 1405 | * for cs already in update_cpumask(). We should also call |
| 1406 | * update_tasks_cpumask() again for tasks in the parent |
| 1407 | * cpuset if the parent's subparts_cpus changes. |
| 1408 | */ |
Waiman Long | e7cc988 | 2021-08-10 23:06:02 -0400 | [diff] [blame] | 1409 | old_prs = new_prs = cp->partition_root_state; |
| 1410 | if ((cp != cs) && old_prs) { |
Waiman Long | 3881b86 | 2018-11-08 10:08:39 -0500 | [diff] [blame] | 1411 | switch (parent->partition_root_state) { |
| 1412 | case PRS_DISABLED: |
| 1413 | /* |
| 1414 | * If the parent is not a partition root or is an |
Waiman Long | 0f3adb8 | 2021-07-20 10:18:26 -0400 | [diff] [blame] | 1415 | * invalid partition root, clear @cp's partition |
| 1416 | * state and its CS_CPU_EXCLUSIVE flag. |
Waiman Long | 3881b86 | 2018-11-08 10:08:39 -0500 | [diff] [blame] | 1417 | */ |
| 1418 | WARN_ON_ONCE(cp->partition_root_state |
| 1419 | != PRS_ERROR); |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 1420 | new_prs = PRS_DISABLED; |
Waiman Long | 3881b86 | 2018-11-08 10:08:39 -0500 | [diff] [blame] | 1421 | |
| 1422 | /* |
| 1423 | * clear_bit() is an atomic operation and |
| 1424 | * readers aren't interested in the state |
| 1425 | * of CS_CPU_EXCLUSIVE anyway. So we can |
| 1426 | * just update the flag without holding |
| 1427 | * the callback_lock. |
| 1428 | */ |
| 1429 | clear_bit(CS_CPU_EXCLUSIVE, &cp->flags); |
| 1430 | break; |
| 1431 | |
| 1432 | case PRS_ENABLED: |
| 1433 | if (update_parent_subparts_cpumask(cp, partcmd_update, NULL, tmp)) |
| 1434 | update_tasks_cpumask(parent); |
| 1435 | break; |
| 1436 | |
| 1437 | case PRS_ERROR: |
| 1438 | /* |
| 1439 | * When the parent is invalid, the child has to be invalid too. |
| 1440 | */ |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 1441 | new_prs = PRS_ERROR; |
Waiman Long | 3881b86 | 2018-11-08 10:08:39 -0500 | [diff] [blame] | 1442 | break; |
| 1443 | } |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1444 | } |
| 1445 | |
Tejun Heo | ec903c0 | 2014-05-13 12:11:01 -0400 | [diff] [blame] | 1446 | if (!css_tryget_online(&cp->css)) |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1447 | continue; |
| 1448 | rcu_read_unlock(); |
| 1449 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1450 | spin_lock_irq(&callback_lock); |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1451 | |
| 1452 | cpumask_copy(cp->effective_cpus, tmp->new_cpus); |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 1453 | if (cp->nr_subparts_cpus && (new_prs != PRS_ENABLED)) { |
Waiman Long | 3881b86 | 2018-11-08 10:08:39 -0500 | [diff] [blame] | 1454 | cp->nr_subparts_cpus = 0; |
| 1455 | cpumask_clear(cp->subparts_cpus); |
| 1456 | } else if (cp->nr_subparts_cpus) { |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1457 | /* |
| 1458 | * Make sure that effective_cpus & subparts_cpus |
| 1459 | * are mutually exclusive. |
Waiman Long | 3881b86 | 2018-11-08 10:08:39 -0500 | [diff] [blame] | 1460 | * |
| 1461 | * In the unlikely event that effective_cpus |
| 1462 | * becomes empty, we clear cp->nr_subparts_cpus and |
| 1463 | * let its child partition roots compete for the |
| 1464 | * CPUs again. |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1465 | */ |
| 1466 | cpumask_andnot(cp->effective_cpus, cp->effective_cpus, |
| 1467 | cp->subparts_cpus); |
Waiman Long | 3881b86 | 2018-11-08 10:08:39 -0500 | [diff] [blame] | 1468 | if (cpumask_empty(cp->effective_cpus)) { |
| 1469 | cpumask_copy(cp->effective_cpus, tmp->new_cpus); |
| 1470 | cpumask_clear(cp->subparts_cpus); |
| 1471 | cp->nr_subparts_cpus = 0; |
| 1472 | } else if (!cpumask_subset(cp->subparts_cpus, |
| 1473 | tmp->new_cpus)) { |
| 1474 | cpumask_andnot(cp->subparts_cpus, |
| 1475 | cp->subparts_cpus, tmp->new_cpus); |
| 1476 | cp->nr_subparts_cpus |
| 1477 | = cpumask_weight(cp->subparts_cpus); |
| 1478 | } |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1479 | } |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 1480 | |
Waiman Long | e7cc988 | 2021-08-10 23:06:02 -0400 | [diff] [blame] | 1481 | if (new_prs != old_prs) |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 1482 | cp->partition_root_state = new_prs; |
| 1483 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1484 | spin_unlock_irq(&callback_lock); |
Waiman Long | e7cc988 | 2021-08-10 23:06:02 -0400 | [diff] [blame] | 1485 | notify_partition_change(cp, old_prs, new_prs); |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1486 | |
Waiman Long | b8d1b8e | 2017-08-17 15:33:10 -0400 | [diff] [blame] | 1487 | WARN_ON(!is_in_v2_mode() && |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1488 | !cpumask_equal(cp->cpus_allowed, cp->effective_cpus)); |
| 1489 | |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1490 | update_tasks_cpumask(cp); |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1491 | |
Li Zefan | 8b5f1c5 | 2014-07-09 16:47:50 +0800 | [diff] [blame] | 1492 | /* |
Waiman Long | 0ccea8f | 2018-11-08 10:08:42 -0500 | [diff] [blame] | 1493 | * On legacy hierarchy, if the effective cpumask of any non- |
| 1494 | * empty cpuset is changed, we need to rebuild sched domains. |
| 1495 | * On default hierarchy, the cpuset needs to be a partition |
| 1496 | * root as well. |
Li Zefan | 8b5f1c5 | 2014-07-09 16:47:50 +0800 | [diff] [blame] | 1497 | */ |
| 1498 | if (!cpumask_empty(cp->cpus_allowed) && |
Waiman Long | 0ccea8f | 2018-11-08 10:08:42 -0500 | [diff] [blame] | 1499 | is_sched_load_balance(cp) && |
| 1500 | (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) || |
| 1501 | is_partition_root(cp))) |
Li Zefan | 8b5f1c5 | 2014-07-09 16:47:50 +0800 | [diff] [blame] | 1502 | need_rebuild_sched_domains = true; |
| 1503 | |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1504 | rcu_read_lock(); |
| 1505 | css_put(&cp->css); |
| 1506 | } |
| 1507 | rcu_read_unlock(); |
Li Zefan | 8b5f1c5 | 2014-07-09 16:47:50 +0800 | [diff] [blame] | 1508 | |
| 1509 | if (need_rebuild_sched_domains) |
| 1510 | rebuild_sched_domains_locked(); |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1511 | } |
| 1512 | |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1513 | /** |
Waiman Long | 4716909 | 2018-11-08 10:08:40 -0500 | [diff] [blame] | 1514 | * update_sibling_cpumasks - Update siblings cpumasks |
| 1515 | * @parent: Parent cpuset |
| 1516 | * @cs: Current cpuset |
| 1517 | * @tmp: Temp variables |
| 1518 | */ |
| 1519 | static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, |
| 1520 | struct tmpmasks *tmp) |
| 1521 | { |
| 1522 | struct cpuset *sibling; |
| 1523 | struct cgroup_subsys_state *pos_css; |
| 1524 | |
| 1525 | /* |
| 1526 | * Check all its siblings and call update_cpumasks_hier() |
| 1527 | * if their use_parent_ecpus flag is set in order for them |
| 1528 | * to use the right effective_cpus value. |
| 1529 | */ |
| 1530 | rcu_read_lock(); |
| 1531 | cpuset_for_each_child(sibling, pos_css, parent) { |
| 1532 | if (sibling == cs) |
| 1533 | continue; |
| 1534 | if (!sibling->use_parent_ecpus) |
| 1535 | continue; |
| 1536 | |
| 1537 | update_cpumasks_hier(sibling, tmp); |
| 1538 | } |
| 1539 | rcu_read_unlock(); |
| 1540 | } |
| 1541 | |
| 1542 | /** |
Cliff Wickman | 58f4790 | 2008-02-07 00:14:44 -0800 | [diff] [blame] | 1543 | * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it |
| 1544 | * @cs: the cpuset to consider |
Fabian Frederick | fc34ac1 | 2014-05-05 19:46:55 +0200 | [diff] [blame] | 1545 | * @trialcs: trial cpuset |
Cliff Wickman | 58f4790 | 2008-02-07 00:14:44 -0800 | [diff] [blame] | 1546 | * @buf: buffer of cpu numbers written to this cpuset |
| 1547 | */ |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1548 | static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, |
| 1549 | const char *buf) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1550 | { |
Cliff Wickman | 58f4790 | 2008-02-07 00:14:44 -0800 | [diff] [blame] | 1551 | int retval; |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1552 | struct tmpmasks tmp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1553 | |
Rusty Russell | 5f054e3 | 2012-03-29 15:38:31 +1030 | [diff] [blame] | 1554 | /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */ |
Paul Jackson | 4c4d50f | 2006-08-27 01:23:51 -0700 | [diff] [blame] | 1555 | if (cs == &top_cpuset) |
| 1556 | return -EACCES; |
| 1557 | |
David Rientjes | 6f7f02e | 2007-05-08 00:31:43 -0700 | [diff] [blame] | 1558 | /* |
Paul Jackson | c8d9c90 | 2008-02-07 00:14:46 -0800 | [diff] [blame] | 1559 | * An empty cpus_allowed is ok only if the cpuset has no tasks. |
Paul Jackson | 020958b | 2007-10-18 23:40:21 -0700 | [diff] [blame] | 1560 | * Since cpulist_parse() fails on an empty mask, we special case |
| 1561 | * that parsing. The validate_change() call ensures that cpusets |
| 1562 | * with tasks have cpus. |
David Rientjes | 6f7f02e | 2007-05-08 00:31:43 -0700 | [diff] [blame] | 1563 | */ |
Paul Jackson | 020958b | 2007-10-18 23:40:21 -0700 | [diff] [blame] | 1564 | if (!*buf) { |
Li Zefan | 300ed6c | 2009-01-07 18:08:44 -0800 | [diff] [blame] | 1565 | cpumask_clear(trialcs->cpus_allowed); |
David Rientjes | 6f7f02e | 2007-05-08 00:31:43 -0700 | [diff] [blame] | 1566 | } else { |
Li Zefan | 300ed6c | 2009-01-07 18:08:44 -0800 | [diff] [blame] | 1567 | retval = cpulist_parse(buf, trialcs->cpus_allowed); |
David Rientjes | 6f7f02e | 2007-05-08 00:31:43 -0700 | [diff] [blame] | 1568 | if (retval < 0) |
| 1569 | return retval; |
Lai Jiangshan | 3734074 | 2008-06-05 22:46:32 -0700 | [diff] [blame] | 1570 | |
Li Zefan | 5d8ba82 | 2014-07-09 16:49:12 +0800 | [diff] [blame] | 1571 | if (!cpumask_subset(trialcs->cpus_allowed, |
| 1572 | top_cpuset.cpus_allowed)) |
Lai Jiangshan | 3734074 | 2008-06-05 22:46:32 -0700 | [diff] [blame] | 1573 | return -EINVAL; |
David Rientjes | 6f7f02e | 2007-05-08 00:31:43 -0700 | [diff] [blame] | 1574 | } |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 1575 | |
Paul Menage | 8707d8b | 2007-10-18 23:40:22 -0700 | [diff] [blame] | 1576 | /* Nothing to do if the cpus didn't change */ |
Li Zefan | 300ed6c | 2009-01-07 18:08:44 -0800 | [diff] [blame] | 1577 | if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed)) |
Paul Menage | 8707d8b | 2007-10-18 23:40:22 -0700 | [diff] [blame] | 1578 | return 0; |
Cliff Wickman | 58f4790 | 2008-02-07 00:14:44 -0800 | [diff] [blame] | 1579 | |
Li Zefan | a73456f | 2013-06-05 17:15:59 +0800 | [diff] [blame] | 1580 | retval = validate_change(cs, trialcs); |
| 1581 | if (retval < 0) |
| 1582 | return retval; |
| 1583 | |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1584 | #ifdef CONFIG_CPUMASK_OFFSTACK |
| 1585 | /* |
| 1586 | * Use the cpumasks in trialcs for tmpmasks when they are pointers |
| 1587 | * to allocated cpumasks. |
| 1588 | */ |
| 1589 | tmp.addmask = trialcs->subparts_cpus; |
| 1590 | tmp.delmask = trialcs->effective_cpus; |
| 1591 | tmp.new_cpus = trialcs->cpus_allowed; |
| 1592 | #endif |
| 1593 | |
| 1594 | if (cs->partition_root_state) { |
| 1595 | /* Cpumask of a partition root cannot be empty */ |
| 1596 | if (cpumask_empty(trialcs->cpus_allowed)) |
| 1597 | return -EINVAL; |
| 1598 | if (update_parent_subparts_cpumask(cs, partcmd_update, |
| 1599 | trialcs->cpus_allowed, &tmp) < 0) |
| 1600 | return -EINVAL; |
| 1601 | } |
| 1602 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1603 | spin_lock_irq(&callback_lock); |
Li Zefan | 300ed6c | 2009-01-07 18:08:44 -0800 | [diff] [blame] | 1604 | cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1605 | |
| 1606 | /* |
| 1607 | * Make sure that subparts_cpus is a subset of cpus_allowed. |
| 1608 | */ |
| 1609 | if (cs->nr_subparts_cpus) { |
| 1610 | cpumask_andnot(cs->subparts_cpus, cs->subparts_cpus, |
| 1611 | cs->cpus_allowed); |
| 1612 | cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus); |
| 1613 | } |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1614 | spin_unlock_irq(&callback_lock); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 1615 | |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1616 | update_cpumasks_hier(cs, &tmp); |
Waiman Long | 4716909 | 2018-11-08 10:08:40 -0500 | [diff] [blame] | 1617 | |
| 1618 | if (cs->partition_root_state) { |
| 1619 | struct cpuset *parent = parent_cs(cs); |
| 1620 | |
| 1621 | /* |
| 1622 | * For partition root, update the cpumasks of sibling |
| 1623 | * cpusets if they use parent's effective_cpus. |
| 1624 | */ |
| 1625 | if (parent->child_ecpus_count) |
| 1626 | update_sibling_cpumasks(parent, cs, &tmp); |
| 1627 | } |
Dinakar Guniguntala | 85d7b94 | 2005-06-25 14:57:34 -0700 | [diff] [blame] | 1628 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1629 | } |
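| | |
| | /* |
| | * update_cpumask() is reached from a write to the cgroup "cpuset.cpus" |
| | * file, e.g. (illustrative shell usage; the path depends on where the |
| | * cgroup hierarchy is mounted): |
| | * |
| | *     echo 0-3 > /sys/fs/cgroup/<group>/cpuset.cpus |
| | */ |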
| 1630 | |
Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1631 | /* |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 1632 | * Migrate memory region from one set of nodes to another. This is |
| 1633 | * performed asynchronously as it can be called from the process migration |
| 1634 | * path while holding locks involved in process management. All mm migrations are |
| 1635 | * performed in the queued order and can be waited for by flushing |
| 1636 | * cpuset_migrate_mm_wq. |
Paul Jackson | e4e364e | 2006-03-31 02:30:52 -0800 | [diff] [blame] | 1637 | */ |
| 1638 | |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 1639 | struct cpuset_migrate_mm_work { |
| 1640 | struct work_struct work; |
| 1641 | struct mm_struct *mm; |
| 1642 | nodemask_t from; |
| 1643 | nodemask_t to; |
| 1644 | }; |
| 1645 | |
| 1646 | static void cpuset_migrate_mm_workfn(struct work_struct *work) |
| 1647 | { |
| 1648 | struct cpuset_migrate_mm_work *mwork = |
| 1649 | container_of(work, struct cpuset_migrate_mm_work, work); |
| 1650 | |
| 1651 | /* on a wq worker, no need to worry about %current's mems_allowed */ |
| 1652 | do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL); |
| 1653 | mmput(mwork->mm); |
| 1654 | kfree(mwork); |
| 1655 | } |
| 1656 | |
Paul Jackson | e4e364e | 2006-03-31 02:30:52 -0800 | [diff] [blame] | 1657 | static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from, |
| 1658 | const nodemask_t *to) |
| 1659 | { |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 1660 | struct cpuset_migrate_mm_work *mwork; |
Paul Jackson | e4e364e | 2006-03-31 02:30:52 -0800 | [diff] [blame] | 1661 | |
Nicolas Saenz Julienne | 9f72daf | 2021-08-25 12:54:15 +0200 | [diff] [blame] | 1662 | if (nodes_equal(*from, *to)) { |
| 1663 | mmput(mm); |
| 1664 | return; |
| 1665 | } |
| 1666 | |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 1667 | mwork = kzalloc(sizeof(*mwork), GFP_KERNEL); |
| 1668 | if (mwork) { |
| 1669 | mwork->mm = mm; |
| 1670 | mwork->from = *from; |
| 1671 | mwork->to = *to; |
| 1672 | INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn); |
| 1673 | queue_work(cpuset_migrate_mm_wq, &mwork->work); |
| 1674 | } else { |
| 1675 | mmput(mm); |
| 1676 | } |
| 1677 | } |
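| | |
| | /* |
| | * Note (descriptive, not from the original source): cpuset_migrate_mm() |
| | * consumes the caller's mm reference on every path, either immediately |
| | * when from == to or when the work item cannot be allocated, or via |
| | * mmput() in cpuset_migrate_mm_workfn() once the migration has run. |
| | */ |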
Paul Jackson | e4e364e | 2006-03-31 02:30:52 -0800 | [diff] [blame] | 1678 | |
Tejun Heo | 5cf1cac | 2016-04-21 19:06:48 -0400 | [diff] [blame] | 1679 | static void cpuset_post_attach(void) |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 1680 | { |
| 1681 | flush_workqueue(cpuset_migrate_mm_wq); |
Paul Jackson | e4e364e | 2006-03-31 02:30:52 -0800 | [diff] [blame] | 1682 | } |
| 1683 | |
Li Zefan | 3b6766f | 2009-04-02 16:57:51 -0700 | [diff] [blame] | 1684 | /* |
Miao Xie | 58568d2 | 2009-06-16 15:31:49 -0700 | [diff] [blame] | 1685 | * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy |
| 1686 | * @tsk: the task to change |
| 1687 | * @newmems: new nodes that the task will be set |
| 1688 | * |
Vlastimil Babka | 5f155f2 | 2017-07-06 15:40:09 -0700 | [diff] [blame] | 1689 | * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed |
| 1690 | * and rebind the task's mempolicy, if any. If the task is allocating in |
| 1691 | * parallel, it might temporarily see an empty intersection, which results in |
| 1692 | * a seqlock check and retry before OOM or allocation failure. |
Miao Xie | 58568d2 | 2009-06-16 15:31:49 -0700 | [diff] [blame] | 1693 | */ |
| 1694 | static void cpuset_change_task_nodemask(struct task_struct *tsk, |
| 1695 | nodemask_t *newmems) |
| 1696 | { |
Miao Xie | c0ff745 | 2010-05-24 14:32:08 -0700 | [diff] [blame] | 1697 | task_lock(tsk); |
Mel Gorman | cc9a6c8 | 2012-03-21 16:34:11 -0700 | [diff] [blame] | 1698 | |
Vlastimil Babka | 5f155f2 | 2017-07-06 15:40:09 -0700 | [diff] [blame] | 1699 | local_irq_disable(); |
| 1700 | write_seqcount_begin(&tsk->mems_allowed_seq); |
Mel Gorman | cc9a6c8 | 2012-03-21 16:34:11 -0700 | [diff] [blame] | 1701 | |
Miao Xie | 58568d2 | 2009-06-16 15:31:49 -0700 | [diff] [blame] | 1702 | nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems); |
Vlastimil Babka | 213980c | 2017-07-06 15:40:06 -0700 | [diff] [blame] | 1703 | mpol_rebind_task(tsk, newmems); |
Miao Xie | 58568d2 | 2009-06-16 15:31:49 -0700 | [diff] [blame] | 1704 | tsk->mems_allowed = *newmems; |
Mel Gorman | cc9a6c8 | 2012-03-21 16:34:11 -0700 | [diff] [blame] | 1705 | |
Vlastimil Babka | 5f155f2 | 2017-07-06 15:40:09 -0700 | [diff] [blame] | 1706 | write_seqcount_end(&tsk->mems_allowed_seq); |
| 1707 | local_irq_enable(); |
Mel Gorman | cc9a6c8 | 2012-03-21 16:34:11 -0700 | [diff] [blame] | 1708 | |
Miao Xie | c0ff745 | 2010-05-24 14:32:08 -0700 | [diff] [blame] | 1709 | task_unlock(tsk); |
Miao Xie | 58568d2 | 2009-06-16 15:31:49 -0700 | [diff] [blame] | 1710 | } |
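| | |
| | /* |
| | * Sketch of the reader side that pairs with the seqcount above; the |
| | * real helpers, read_mems_allowed_begin()/read_mems_allowed_retry(), |
| | * live in include/linux/cpuset.h (shown here for illustration only): |
| | * |
| | *     unsigned int seq; |
| | *     nodemask_t nodes; |
| | * |
| | *     do { |
| | *             seq = read_mems_allowed_begin(); |
| | *             nodes = current->mems_allowed; |
| | *     } while (read_mems_allowed_retry(seq)); |
| | */ |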
| 1711 | |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 1712 | static void *cpuset_being_rebound; |
| 1713 | |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1714 | /** |
| 1715 | * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset. |
| 1716 | * @cs: the cpuset in which each task's mems_allowed mask needs to be changed |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1717 | * |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1718 | * Iterate through each task of @cs updating its mems_allowed to the |
Waiman Long | b94f9ac | 2021-09-09 22:42:56 -0400 | [diff] [blame] | 1719 | * effective cpuset's. As this function is called with cpuset_rwsem held, |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1720 | * cpuset membership stays stable. |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1721 | */ |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1722 | static void update_tasks_nodemask(struct cpuset *cs) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1723 | { |
Waiman Long | b94f9ac | 2021-09-09 22:42:56 -0400 | [diff] [blame] | 1724 | static nodemask_t newmems; /* protected by cpuset_rwsem */ |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1725 | struct css_task_iter it; |
| 1726 | struct task_struct *task; |
Paul Jackson | 59dac16 | 2006-01-08 01:01:52 -0800 | [diff] [blame] | 1727 | |
Lee Schermerhorn | 846a16b | 2008-04-28 02:13:09 -0700 | [diff] [blame] | 1728 | cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ |
Paul Jackson | 4225399 | 2006-01-08 01:01:59 -0800 | [diff] [blame] | 1729 | |
Li Zefan | ae1c802 | 2014-07-09 16:48:32 +0800 | [diff] [blame] | 1730 | guarantee_online_mems(cs, &newmems); |
Li Zefan | 33ad801 | 2013-06-09 17:15:08 +0800 | [diff] [blame] | 1731 | |
Paul Jackson | 4225399 | 2006-01-08 01:01:59 -0800 | [diff] [blame] | 1732 | /* |
Michel Lespinasse | c1e8d7c | 2020-06-08 21:33:54 -0700 | [diff] [blame] | 1733 | * The mpol_rebind_mm() call takes mmap_lock, which we couldn't |
Li Zefan | 3b6766f | 2009-04-02 16:57:51 -0700 | [diff] [blame] | 1734 | * take while holding tasklist_lock. Forks can happen - the |
| 1735 | * mpol_dup() cpuset_being_rebound check will catch such forks, |
| 1736 | * and rebind their vma mempolicies too. Because we still hold |
Waiman Long | b94f9ac | 2021-09-09 22:42:56 -0400 | [diff] [blame] | 1737 | * the global cpuset_rwsem, we know that no other rebind effort |
Li Zefan | 3b6766f | 2009-04-02 16:57:51 -0700 | [diff] [blame] | 1738 | * will be contending for the global variable cpuset_being_rebound. |
Paul Jackson | 4225399 | 2006-01-08 01:01:59 -0800 | [diff] [blame] | 1739 | * It's ok if we rebind the same mm twice; mpol_rebind_mm() |
Paul Jackson | 04c19fa | 2006-01-08 01:02:00 -0800 | [diff] [blame] | 1740 | * is idempotent. Also migrate pages in each mm to new nodes. |
Paul Jackson | 4225399 | 2006-01-08 01:01:59 -0800 | [diff] [blame] | 1741 | */ |
Tejun Heo | bc2fb7e | 2017-05-15 09:34:01 -0400 | [diff] [blame] | 1742 | css_task_iter_start(&cs->css, 0, &it); |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1743 | while ((task = css_task_iter_next(&it))) { |
| 1744 | struct mm_struct *mm; |
| 1745 | bool migrate; |
| 1746 | |
| 1747 | cpuset_change_task_nodemask(task, &newmems); |
| 1748 | |
| 1749 | mm = get_task_mm(task); |
| 1750 | if (!mm) |
| 1751 | continue; |
| 1752 | |
| 1753 | migrate = is_memory_migrate(cs); |
| 1754 | |
| 1755 | mpol_rebind_mm(mm, &cs->mems_allowed); |
| 1756 | if (migrate) |
| 1757 | cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems); |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 1758 | else |
| 1759 | mmput(mm); |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1760 | } |
| 1761 | css_task_iter_end(&it); |
Paul Jackson | 4225399 | 2006-01-08 01:01:59 -0800 | [diff] [blame] | 1762 | |
Li Zefan | 33ad801 | 2013-06-09 17:15:08 +0800 | [diff] [blame] | 1763 | /* |
| 1764 | * All the tasks' nodemasks have been updated, update |
| 1765 | * cs->old_mems_allowed. |
| 1766 | */ |
| 1767 | cs->old_mems_allowed = newmems; |
| 1768 | |
Paul Menage | 2df167a | 2008-02-07 00:14:45 -0800 | [diff] [blame] | 1769 | /* We're done rebinding vmas to this cpuset's new mems_allowed. */ |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 1770 | cpuset_being_rebound = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1771 | } |
| 1772 | |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1773 | /* |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1774 | * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree |
| 1775 | * @cs: the cpuset to consider |
| 1776 | * @new_mems: a temp variable for calculating new effective_mems |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1777 | * |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1778 | * When the configured nodemask is changed, the effective nodemasks of this cpuset |
| 1779 | * and all its descendants need to be updated. |
| 1780 | * |
Lu Jialin | d95af61 | 2021-04-08 16:03:46 +0800 | [diff] [blame] | 1781 | * On legacy hierarchy, effective_mems will be the same as mems_allowed. |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1782 | * |
Waiman Long | b94f9ac | 2021-09-09 22:42:56 -0400 | [diff] [blame] | 1783 | * Called with cpuset_rwsem held |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1784 | */ |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1785 | static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1786 | { |
| 1787 | struct cpuset *cp; |
Tejun Heo | 492eb21 | 2013-08-08 20:11:25 -0400 | [diff] [blame] | 1788 | struct cgroup_subsys_state *pos_css; |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1789 | |
| 1790 | rcu_read_lock(); |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1791 | cpuset_for_each_descendant_pre(cp, pos_css, cs) { |
| 1792 | struct cpuset *parent = parent_cs(cp); |
| 1793 | |
| 1794 | nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems); |
| 1795 | |
Li Zefan | 554b0d1 | 2014-07-09 16:47:41 +0800 | [diff] [blame] | 1796 | /* |
| 1797 | * If it becomes empty, inherit the effective mask of the |
| 1798 | * parent, which is guaranteed to have some MEMs. |
| 1799 | */ |
Waiman Long | b8d1b8e | 2017-08-17 15:33:10 -0400 | [diff] [blame] | 1800 | if (is_in_v2_mode() && nodes_empty(*new_mems)) |
Li Zefan | 554b0d1 | 2014-07-09 16:47:41 +0800 | [diff] [blame] | 1801 | *new_mems = parent->effective_mems; |
| 1802 | |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1803 | /* Skip the whole subtree if the nodemask remains the same. */ |
| 1804 | if (nodes_equal(*new_mems, cp->effective_mems)) { |
| 1805 | pos_css = css_rightmost_descendant(pos_css); |
| 1806 | continue; |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1807 | } |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1808 | |
Tejun Heo | ec903c0 | 2014-05-13 12:11:01 -0400 | [diff] [blame] | 1809 | if (!css_tryget_online(&cp->css)) |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1810 | continue; |
| 1811 | rcu_read_unlock(); |
| 1812 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1813 | spin_lock_irq(&callback_lock); |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1814 | cp->effective_mems = *new_mems; |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1815 | spin_unlock_irq(&callback_lock); |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1816 | |
Waiman Long | b8d1b8e | 2017-08-17 15:33:10 -0400 | [diff] [blame] | 1817 | WARN_ON(!is_in_v2_mode() && |
Li Zefan | a138126 | 2014-07-30 15:07:13 +0800 | [diff] [blame] | 1818 | !nodes_equal(cp->mems_allowed, cp->effective_mems)); |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1819 | |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1820 | update_tasks_nodemask(cp); |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1821 | |
| 1822 | rcu_read_lock(); |
| 1823 | css_put(&cp->css); |
| 1824 | } |
| 1825 | rcu_read_unlock(); |
| 1826 | } |
| 1827 | |
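/*
 * Worked example of the top-down update above, with hypothetical
 * values: if a parent's effective_mems is 0-3 and a child's
 * mems_allowed is 2-5, nodes_and() leaves the child with an
 * effective_mems of 2-3.  Were the intersection empty on the
 * default hierarchy, the child would instead inherit the parent's
 * effective_mems of 0-3.
 */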
| 1828 | /* |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1829 | * Handle user request to change the 'mems' memory placement |
| 1830 | * of a cpuset. Needs to validate the request, update the |
Miao Xie | 58568d2 | 2009-06-16 15:31:49 -0700 | [diff] [blame] | 1831 | * cpuset's mems_allowed, and for each task in the cpuset, |
| 1832 | * update mems_allowed and rebind the task's mempolicy and any |
| 1833 | * vma mempolicies; if the cpuset is marked 'memory_migrate', |
| 1834 | * migrate the task's pages to the new memory. |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1835 | * |
Waiman Long | b94f9ac | 2021-09-09 22:42:56 -0400 | [diff] [blame] | 1836 | * Call with cpuset_rwsem held. May take callback_lock during call. |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1837 | * Will take tasklist_lock, scan tasklist for tasks in cpuset cs, |
Michel Lespinasse | c1e8d7c | 2020-06-08 21:33:54 -0700 | [diff] [blame] | 1838 | * lock each such task's mm->mmap_lock, scan its vmas and rebind |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1839 | * their mempolicies to the cpuset's new mems_allowed. |
| 1840 | */ |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1841 | static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, |
| 1842 | const char *buf) |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1843 | { |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1844 | int retval; |
| 1845 | |
| 1846 | /* |
Lai Jiangshan | 38d7bee | 2012-12-12 13:51:24 -0800 | [diff] [blame] | 1847 | * top_cpuset.mems_allowed tracks node_states[N_MEMORY]; |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1848 | * it's read-only |
| 1849 | */ |
Miao Xie | 53feb29 | 2010-03-23 13:35:35 -0700 | [diff] [blame] | 1850 | if (cs == &top_cpuset) { |
| 1851 | retval = -EACCES; |
| 1852 | goto done; |
| 1853 | } |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1854 | |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1855 | /* |
| 1856 | * An empty mems_allowed is ok iff there are no tasks in the cpuset. |
| 1857 | * Since nodelist_parse() fails on an empty mask, we special case |
| 1858 | * that parsing. The validate_change() call ensures that cpusets |
| 1859 | * with tasks have memory. |
| 1860 | */ |
| 1861 | if (!*buf) { |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1862 | nodes_clear(trialcs->mems_allowed); |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1863 | } else { |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1864 | retval = nodelist_parse(buf, trialcs->mems_allowed); |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1865 | if (retval < 0) |
| 1866 | goto done; |
| 1867 | |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1868 | if (!nodes_subset(trialcs->mems_allowed, |
Li Zefan | 5d8ba82 | 2014-07-09 16:49:12 +0800 | [diff] [blame] | 1869 | top_cpuset.mems_allowed)) { |
| 1870 | retval = -EINVAL; |
Miao Xie | 53feb29 | 2010-03-23 13:35:35 -0700 | [diff] [blame] | 1871 | goto done; |
| 1872 | } |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1873 | } |
Li Zefan | 33ad801 | 2013-06-09 17:15:08 +0800 | [diff] [blame] | 1874 | |
| 1875 | if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) { |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1876 | retval = 0; /* Too easy - nothing to do */ |
| 1877 | goto done; |
| 1878 | } |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1879 | retval = validate_change(cs, trialcs); |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1880 | if (retval < 0) |
| 1881 | goto done; |
| 1882 | |
Feng Tang | 8ca1b5a | 2021-11-05 13:40:34 -0700 | [diff] [blame] | 1883 | check_insane_mems_config(&trialcs->mems_allowed); |
| 1884 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1885 | spin_lock_irq(&callback_lock); |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1886 | cs->mems_allowed = trialcs->mems_allowed; |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1887 | spin_unlock_irq(&callback_lock); |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1888 | |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1889 | /* use trialcs->mems_allowed as a temp variable */ |
Alban Crequy | 24ee3cf | 2015-08-06 16:21:05 +0200 | [diff] [blame] | 1890 | update_nodemasks_hier(cs, &trialcs->mems_allowed); |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1891 | done: |
| 1892 | return retval; |
| 1893 | } |
| 1894 | |
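/*
 * Illustrative flow; the mount path below is an assumption.  A
 * nodelist written to the "mems" file uses the range syntax that
 * nodelist_parse() accepts:
 *
 *	echo "0,2-3" > /sys/fs/cgroup/cpuset/mycpuset/cpuset.mems
 *
 * An empty write clears mems_allowed, which validate_change()
 * rejects while tasks remain in the cpuset.
 */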
Yaowei Bai | 77ef80c | 2018-02-06 15:41:24 -0800 | [diff] [blame] | 1895 | bool current_cpuset_is_being_rebound(void) |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 1896 | { |
Yaowei Bai | 77ef80c | 2018-02-06 15:41:24 -0800 | [diff] [blame] | 1897 | bool ret; |
Gu Zheng | 391acf9 | 2014-06-25 09:57:18 +0800 | [diff] [blame] | 1898 | |
| 1899 | rcu_read_lock(); |
| 1900 | ret = task_cs(current) == cpuset_being_rebound; |
| 1901 | rcu_read_unlock(); |
| 1902 | |
| 1903 | return ret; |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 1904 | } |
| 1905 | |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 1906 | static int update_relax_domain_level(struct cpuset *cs, s64 val) |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 1907 | { |
Paul Menage | db7f47c | 2009-04-02 16:57:55 -0700 | [diff] [blame] | 1908 | #ifdef CONFIG_SMP |
Peter Zijlstra | 60495e7 | 2011-04-07 14:10:04 +0200 | [diff] [blame] | 1909 | if (val < -1 || val >= sched_domain_level_max) |
Li Zefan | 30e0e17 | 2008-05-13 10:27:17 +0800 | [diff] [blame] | 1910 | return -EINVAL; |
Paul Menage | db7f47c | 2009-04-02 16:57:55 -0700 | [diff] [blame] | 1911 | #endif |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 1912 | |
| 1913 | if (val != cs->relax_domain_level) { |
| 1914 | cs->relax_domain_level = val; |
Li Zefan | 300ed6c | 2009-01-07 18:08:44 -0800 | [diff] [blame] | 1915 | if (!cpumask_empty(cs->cpus_allowed) && |
| 1916 | is_sched_load_balance(cs)) |
Tejun Heo | 699140b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1917 | rebuild_sched_domains_locked(); |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 1918 | } |
| 1919 | |
| 1920 | return 0; |
| 1921 | } |
| 1922 | |
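/*
 * For reference, the accepted levels are described in
 * Documentation/admin-guide/cgroup-v1/cpusets.rst; roughly: -1 means
 * no request, 0 disables the search, 1 searches siblings
 * (hyperthreads in a core), 2 searches cores in a package, and
 * larger values widen the search further, bounded by
 * sched_domain_level_max.
 */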
Tejun Heo | 72ec702 | 2013-08-08 20:11:26 -0400 | [diff] [blame] | 1923 | /** |
Miao Xie | 950592f | 2009-06-16 15:31:47 -0700 | [diff] [blame] | 1924 | * update_tasks_flags - update the spread flags of tasks in the cpuset. |
| 1925 | * @cs: the cpuset in which each task's spread flags need to be changed |
Miao Xie | 950592f | 2009-06-16 15:31:47 -0700 | [diff] [blame] | 1926 | * |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1927 | * Iterate through each task of @cs updating its spread flags. As this |
Waiman Long | b94f9ac | 2021-09-09 22:42:56 -0400 | [diff] [blame] | 1928 | * function is called with cpuset_rwsem held, cpuset membership stays |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1929 | * stable. |
Miao Xie | 950592f | 2009-06-16 15:31:47 -0700 | [diff] [blame] | 1930 | */ |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1931 | static void update_tasks_flags(struct cpuset *cs) |
Miao Xie | 950592f | 2009-06-16 15:31:47 -0700 | [diff] [blame] | 1932 | { |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1933 | struct css_task_iter it; |
| 1934 | struct task_struct *task; |
| 1935 | |
Tejun Heo | bc2fb7e | 2017-05-15 09:34:01 -0400 | [diff] [blame] | 1936 | css_task_iter_start(&cs->css, 0, &it); |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1937 | while ((task = css_task_iter_next(&it))) |
| 1938 | cpuset_update_task_spread_flag(cs, task); |
| 1939 | css_task_iter_end(&it); |
Miao Xie | 950592f | 2009-06-16 15:31:47 -0700 | [diff] [blame] | 1940 | } |
| 1941 | |
| 1942 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1943 | * update_flag - read a 0 or a 1 in a file and update associated flag |
Paul Menage | 7860836 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 1944 | * bit: the bit to update (see cpuset_flagbits_t) |
| 1945 | * cs: the cpuset to update |
| 1946 | * turning_on: whether the flag is being set or cleared |
Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1947 | * |
Waiman Long | b94f9ac | 2021-09-09 22:42:56 -0400 | [diff] [blame] | 1948 | * Call with cpuset_rwsem held. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1949 | */ |
| 1950 | |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 1951 | static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, |
| 1952 | int turning_on) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1953 | { |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1954 | struct cpuset *trialcs; |
Rakib Mullick | 40b6a76 | 2008-10-18 20:28:18 -0700 | [diff] [blame] | 1955 | int balance_flag_changed; |
Miao Xie | 950592f | 2009-06-16 15:31:47 -0700 | [diff] [blame] | 1956 | int spread_flag_changed; |
Miao Xie | 950592f | 2009-06-16 15:31:47 -0700 | [diff] [blame] | 1957 | int err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1958 | |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1959 | trialcs = alloc_trial_cpuset(cs); |
| 1960 | if (!trialcs) |
| 1961 | return -ENOMEM; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1962 | |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1963 | if (turning_on) |
| 1964 | set_bit(bit, &trialcs->flags); |
| 1965 | else |
| 1966 | clear_bit(bit, &trialcs->flags); |
| 1967 | |
| 1968 | err = validate_change(cs, trialcs); |
Dinakar Guniguntala | 85d7b94 | 2005-06-25 14:57:34 -0700 | [diff] [blame] | 1969 | if (err < 0) |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1970 | goto out; |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 1971 | |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 1972 | balance_flag_changed = (is_sched_load_balance(cs) != |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1973 | is_sched_load_balance(trialcs)); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 1974 | |
Miao Xie | 950592f | 2009-06-16 15:31:47 -0700 | [diff] [blame] | 1975 | spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) |
| 1976 | || (is_spread_page(cs) != is_spread_page(trialcs))); |
| 1977 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1978 | spin_lock_irq(&callback_lock); |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1979 | cs->flags = trialcs->flags; |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1980 | spin_unlock_irq(&callback_lock); |
Dinakar Guniguntala | 85d7b94 | 2005-06-25 14:57:34 -0700 | [diff] [blame] | 1981 | |
Li Zefan | 300ed6c | 2009-01-07 18:08:44 -0800 | [diff] [blame] | 1982 | if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) |
Tejun Heo | 699140b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1983 | rebuild_sched_domains_locked(); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 1984 | |
Miao Xie | 950592f | 2009-06-16 15:31:47 -0700 | [diff] [blame] | 1985 | if (spread_flag_changed) |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1986 | update_tasks_flags(cs); |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1987 | out: |
Waiman Long | bf92370 | 2018-11-08 10:08:37 -0500 | [diff] [blame] | 1988 | free_cpuset(trialcs); |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1989 | return err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1990 | } |
| 1991 | |
Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1992 | /* |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1993 | * update_prstate - update partition_root_state |
Waiman Long | 0f3adb8 | 2021-07-20 10:18:26 -0400 | [diff] [blame] | 1994 | * cs: the cpuset to update |
| 1995 | * new_prs: new partition root state |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1996 | * |
Waiman Long | b94f9ac | 2021-09-09 22:42:56 -0400 | [diff] [blame] | 1997 | * Call with cpuset_rwsem held. |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1998 | */ |
Waiman Long | 0f3adb8 | 2021-07-20 10:18:26 -0400 | [diff] [blame] | 1999 | static int update_prstate(struct cpuset *cs, int new_prs) |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 2000 | { |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 2001 | int err, old_prs = cs->partition_root_state; |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 2002 | struct cpuset *parent = parent_cs(cs); |
Waiman Long | 0f3adb8 | 2021-07-20 10:18:26 -0400 | [diff] [blame] | 2003 | struct tmpmasks tmpmask; |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 2004 | |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 2005 | if (old_prs == new_prs) |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 2006 | return 0; |
| 2007 | |
| 2008 | /* |
Waiman Long | 3881b86 | 2018-11-08 10:08:39 -0500 | [diff] [blame] | 2009 | * Cannot force a partial or invalid partition root to a full |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 2010 | * partition root. |
| 2011 | */ |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 2012 | if (new_prs && (old_prs == PRS_ERROR)) |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 2013 | return -EINVAL; |
| 2014 | |
Waiman Long | 0f3adb8 | 2021-07-20 10:18:26 -0400 | [diff] [blame] | 2015 | if (alloc_cpumasks(NULL, &tmpmask)) |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 2016 | return -ENOMEM; |
| 2017 | |
| 2018 | err = -EINVAL; |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 2019 | if (!old_prs) { |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 2020 | /* |
| 2021 | * Turning on partition root requires setting the |
| 2022 | * CS_CPU_EXCLUSIVE bit implicitly as well, and cpus_allowed |
| 2023 | * cannot be empty. |
| 2024 | */ |
| 2025 | if (cpumask_empty(cs->cpus_allowed)) |
| 2026 | goto out; |
| 2027 | |
| 2028 | err = update_flag(CS_CPU_EXCLUSIVE, cs, 1); |
| 2029 | if (err) |
| 2030 | goto out; |
| 2031 | |
| 2032 | err = update_parent_subparts_cpumask(cs, partcmd_enable, |
Waiman Long | 0f3adb8 | 2021-07-20 10:18:26 -0400 | [diff] [blame] | 2033 | NULL, &tmpmask); |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 2034 | if (err) { |
| 2035 | update_flag(CS_CPU_EXCLUSIVE, cs, 0); |
| 2036 | goto out; |
| 2037 | } |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 2038 | } else { |
Waiman Long | 3881b86 | 2018-11-08 10:08:39 -0500 | [diff] [blame] | 2039 | /* |
| 2040 | * Turning off partition root will clear the |
| 2041 | * CS_CPU_EXCLUSIVE bit. |
| 2042 | */ |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 2043 | if (old_prs == PRS_ERROR) { |
Waiman Long | 3881b86 | 2018-11-08 10:08:39 -0500 | [diff] [blame] | 2044 | update_flag(CS_CPU_EXCLUSIVE, cs, 0); |
| 2045 | err = 0; |
| 2046 | goto out; |
| 2047 | } |
| 2048 | |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 2049 | err = update_parent_subparts_cpumask(cs, partcmd_disable, |
Waiman Long | 0f3adb8 | 2021-07-20 10:18:26 -0400 | [diff] [blame] | 2050 | NULL, &tmpmask); |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 2051 | if (err) |
| 2052 | goto out; |
| 2053 | |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 2054 | /* Turning off CS_CPU_EXCLUSIVE will not return an error */ |
| 2055 | update_flag(CS_CPU_EXCLUSIVE, cs, 0); |
| 2056 | } |
| 2057 | |
| 2058 | /* |
| 2059 | * Update cpumask of parent's tasks except when it is the top |
| 2060 | * cpuset, as some system daemons cannot be mapped to other CPUs. |
| 2061 | */ |
| 2062 | if (parent != &top_cpuset) |
| 2063 | update_tasks_cpumask(parent); |
| 2064 | |
Waiman Long | 4716909 | 2018-11-08 10:08:40 -0500 | [diff] [blame] | 2065 | if (parent->child_ecpus_count) |
Waiman Long | 0f3adb8 | 2021-07-20 10:18:26 -0400 | [diff] [blame] | 2066 | update_sibling_cpumasks(parent, cs, &tmpmask); |
Waiman Long | 4716909 | 2018-11-08 10:08:40 -0500 | [diff] [blame] | 2067 | |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 2068 | rebuild_sched_domains_locked(); |
| 2069 | out: |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 2070 | if (!err) { |
| 2071 | spin_lock_irq(&callback_lock); |
| 2072 | cs->partition_root_state = new_prs; |
| 2073 | spin_unlock_irq(&callback_lock); |
Waiman Long | e7cc988 | 2021-08-10 23:06:02 -0400 | [diff] [blame] | 2074 | notify_partition_change(cs, old_prs, new_prs); |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 2075 | } |
| 2076 | |
Waiman Long | 0f3adb8 | 2021-07-20 10:18:26 -0400 | [diff] [blame] | 2077 | free_cpumasks(NULL, &tmpmask); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2078 | return err; |
| 2079 | } |
| 2080 | |
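/*
 * Example transitions: writing "root" to a member cpuset with a
 * non-empty cpus_allowed takes old_prs from PRS_DISABLED to
 * PRS_ENABLED, implicitly setting CS_CPU_EXCLUSIVE along the way.
 * A cpuset stuck in PRS_ERROR can only be turned back into a
 * member; forcing it directly to a full root fails with -EINVAL
 * above.
 */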
Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 2081 | /* |
Adrian Bunk | 80f7228 | 2006-06-30 18:27:16 +0200 | [diff] [blame] | 2082 | * Frequency meter - How fast is some event occurring? |
Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 2083 | * |
| 2084 | * These routines manage a digitally filtered, constant time based, |
| 2085 | * event frequency meter. There are four routines: |
| 2086 | * fmeter_init() - initialize a frequency meter. |
| 2087 | * fmeter_markevent() - called each time the event happens. |
| 2088 | * fmeter_getrate() - returns the recent rate of such events. |
| 2089 | * fmeter_update() - internal routine used to update fmeter. |
| 2090 | * |
| 2091 | * A common data structure is passed to each of these routines, |
| 2092 | * which is used to keep track of the state required to manage the |
| 2093 | * frequency meter and its digital filter. |
| 2094 | * |
| 2095 | * The filter works on the number of events marked per unit time. |
| 2096 | * The filter is single-pole low-pass recursive (IIR). The time unit |
| 2097 | * is 1 second. Arithmetic is done using 32-bit integers scaled to |
| 2098 | * simulate 3 decimal digits of precision (multiplied by 1000). |
| 2099 | * |
| 2100 | * With an FM_COEF of 933, and a time base of 1 second, the filter |
| 2101 | * has a half-life of 10 seconds, meaning that if the events quit |
| 2102 | * happening, then the rate returned from the fmeter_getrate() |
| 2103 | * will be cut in half each 10 seconds, until it converges to zero. |
| 2104 | * |
| 2105 | * It is not worth doing a real infinitely recursive filter. If more |
| 2106 | * than FM_MAXTICKS ticks have elapsed since the last filter event, |
| 2107 | * just compute FM_MAXTICKS ticks worth, by which point the level |
| 2108 | * will be stable. |
| 2109 | * |
| 2110 | * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid |
| 2111 | * arithmetic overflow in the fmeter_update() routine. |
| 2112 | * |
| 2113 | * Given the simple 32 bit integer arithmetic used, this meter works |
| 2114 | * best for reporting rates between one per millisecond (msec) and |
| 2115 | * one per 32 (approx) seconds. At constant rates faster than one |
| 2116 | * per msec it maxes out at values just under 1,000,000. At constant |
| 2117 | * rates between one per msec, and one per second it will stabilize |
| 2118 | * to a value N*1000, where N is the rate of events per second. |
| 2119 | * At constant rates between one per second and one per 32 seconds, |
| 2120 | * it will be choppy, moving up on the seconds that have an event, |
| 2121 | * and then decaying until the next event. At rates slower than |
| 2122 | * about one in 32 seconds, it decays all the way back to zero between |
| 2123 | * each event. |
| 2124 | */ |
| 2125 | |
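/*
 * Worked numbers for the filter above: each one-second tick scales
 * the level by FM_COEF/FM_SCALE = 933/1000, so ten idle ticks
 * multiply it by 0.933^10 ~= 0.50, giving the ten second half-life
 * quoted above.  A steady rate of N events per second settles where
 * val = 0.933*val + 0.067*N*1000, i.e. val = N*1000, matching the
 * stabilization rule described above.
 */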
| 2126 | #define FM_COEF 933 /* coefficient for half-life of 10 secs */ |
Arnd Bergmann | d2b43658 | 2015-11-25 16:16:55 +0100 | [diff] [blame] | 2127 | #define FM_MAXTICKS ((u32)99) /* useless computing more ticks than this */ |
Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 2128 | #define FM_MAXCNT 1000000 /* limit cnt to avoid overflow */ |
| 2129 | #define FM_SCALE 1000 /* faux fixed point scale */ |
| 2130 | |
| 2131 | /* Initialize a frequency meter */ |
| 2132 | static void fmeter_init(struct fmeter *fmp) |
| 2133 | { |
| 2134 | fmp->cnt = 0; |
| 2135 | fmp->val = 0; |
| 2136 | fmp->time = 0; |
| 2137 | spin_lock_init(&fmp->lock); |
| 2138 | } |
| 2139 | |
| 2140 | /* Internal meter update - process cnt events and update value */ |
| 2141 | static void fmeter_update(struct fmeter *fmp) |
| 2142 | { |
Arnd Bergmann | d2b43658 | 2015-11-25 16:16:55 +0100 | [diff] [blame] | 2143 | time64_t now; |
| 2144 | u32 ticks; |
| 2145 | |
| 2146 | now = ktime_get_seconds(); |
| 2147 | ticks = now - fmp->time; |
Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 2148 | |
| 2149 | if (ticks == 0) |
| 2150 | return; |
| 2151 | |
| 2152 | ticks = min(FM_MAXTICKS, ticks); |
| 2153 | while (ticks-- > 0) |
| 2154 | fmp->val = (FM_COEF * fmp->val) / FM_SCALE; |
| 2155 | fmp->time = now; |
| 2156 | |
| 2157 | fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE; |
| 2158 | fmp->cnt = 0; |
| 2159 | } |
| 2160 | |
| 2161 | /* Process any previous ticks, then bump cnt by one (times scale). */ |
| 2162 | static void fmeter_markevent(struct fmeter *fmp) |
| 2163 | { |
| 2164 | spin_lock(&fmp->lock); |
| 2165 | fmeter_update(fmp); |
| 2166 | fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE); |
| 2167 | spin_unlock(&fmp->lock); |
| 2168 | } |
| 2169 | |
| 2170 | /* Process any previous ticks, then return current value. */ |
| 2171 | static int fmeter_getrate(struct fmeter *fmp) |
| 2172 | { |
| 2173 | int val; |
| 2174 | |
| 2175 | spin_lock(&fmp->lock); |
| 2176 | fmeter_update(fmp); |
| 2177 | val = fmp->val; |
| 2178 | spin_unlock(&fmp->lock); |
| 2179 | return val; |
| 2180 | } |
| 2181 | |
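/*
 * Minimal usage sketch of the fmeter API (hypothetical caller shown
 * for illustration; in this file the "memory_pressure" file reads
 * the rate via fmeter_getrate(), see cpuset_read_u64() below):
 *
 *	static struct fmeter fm;
 *	int rate;
 *
 *	fmeter_init(&fm);		(once, at setup)
 *	fmeter_markevent(&fm);		(on each event)
 *	rate = fmeter_getrate(&fm);	(recent events/sec, scaled by 1000)
 */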
Tejun Heo | 57fce0a | 2014-02-13 06:58:41 -0500 | [diff] [blame] | 2182 | static struct cpuset *cpuset_attach_old_cs; |
| 2183 | |
Waiman Long | b94f9ac | 2021-09-09 22:42:56 -0400 | [diff] [blame] | 2184 | /* Called by cgroups to determine if a cpuset is usable; takes cpuset_rwsem */ |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 2185 | static int cpuset_can_attach(struct cgroup_taskset *tset) |
Ben Blum | f780bdb | 2011-05-26 16:25:19 -0700 | [diff] [blame] | 2186 | { |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 2187 | struct cgroup_subsys_state *css; |
| 2188 | struct cpuset *cs; |
Tejun Heo | bb9d97b | 2011-12-12 18:12:21 -0800 | [diff] [blame] | 2189 | struct task_struct *task; |
| 2190 | int ret; |
Ben Blum | f780bdb | 2011-05-26 16:25:19 -0700 | [diff] [blame] | 2191 | |
Tejun Heo | 57fce0a | 2014-02-13 06:58:41 -0500 | [diff] [blame] | 2192 | /* used later by cpuset_attach() */ |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 2193 | cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css)); |
| 2194 | cs = css_cs(css); |
Tejun Heo | 57fce0a | 2014-02-13 06:58:41 -0500 | [diff] [blame] | 2195 | |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2196 | percpu_down_write(&cpuset_rwsem); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2197 | |
Tejun Heo | aa6ec29 | 2014-07-09 10:08:08 -0400 | [diff] [blame] | 2198 | /* allow moving tasks into an empty cpuset if on the default hierarchy */ |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2199 | ret = -ENOSPC; |
Waiman Long | b8d1b8e | 2017-08-17 15:33:10 -0400 | [diff] [blame] | 2200 | if (!is_in_v2_mode() && |
Li Zefan | 88fa523 | 2013-06-09 17:16:46 +0800 | [diff] [blame] | 2201 | (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))) |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2202 | goto out_unlock; |
Ben Blum | be367d0 | 2009-09-23 15:56:31 -0700 | [diff] [blame] | 2203 | |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 2204 | cgroup_taskset_for_each(task, css, tset) { |
Juri Lelli | 7f51412 | 2014-09-19 10:22:40 +0100 | [diff] [blame] | 2205 | ret = task_can_attach(task, cs->cpus_allowed); |
| 2206 | if (ret) |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2207 | goto out_unlock; |
| 2208 | ret = security_task_setscheduler(task); |
| 2209 | if (ret) |
| 2210 | goto out_unlock; |
Tejun Heo | bb9d97b | 2011-12-12 18:12:21 -0800 | [diff] [blame] | 2211 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2212 | |
Tejun Heo | 452477f | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2213 | /* |
| 2214 | * Mark that an attach is in progress. This makes validate_change() |
| 2215 | * fail any changes that zero cpus/mems_allowed. |
| 2216 | */ |
| 2217 | cs->attach_in_progress++; |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2218 | ret = 0; |
| 2219 | out_unlock: |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2220 | percpu_up_write(&cpuset_rwsem); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2221 | return ret; |
Tejun Heo | 4e4c9a1 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2222 | } |
| 2223 | |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 2224 | static void cpuset_cancel_attach(struct cgroup_taskset *tset) |
Tejun Heo | 452477f | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2225 | { |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 2226 | struct cgroup_subsys_state *css; |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 2227 | |
| 2228 | cgroup_taskset_first(tset, &css); |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 2229 | |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2230 | percpu_down_write(&cpuset_rwsem); |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 2231 | css_cs(css)->attach_in_progress--; |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2232 | percpu_up_write(&cpuset_rwsem); |
Tejun Heo | 452477f | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2233 | } |
| 2234 | |
Tejun Heo | 4e4c9a1 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2235 | /* |
Waiman Long | b94f9ac | 2021-09-09 22:42:56 -0400 | [diff] [blame] | 2236 | * Protected by cpuset_rwsem. cpus_attach is used only by cpuset_attach() |
Tejun Heo | 4e4c9a1 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2237 | * but we can't allocate it dynamically there. Define it globally and |
| 2238 | * allocate it from cpuset_init(). |
| 2239 | */ |
| 2240 | static cpumask_var_t cpus_attach; |
| 2241 | |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 2242 | static void cpuset_attach(struct cgroup_taskset *tset) |
Tejun Heo | 4e4c9a1 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2243 | { |
Waiman Long | b94f9ac | 2021-09-09 22:42:56 -0400 | [diff] [blame] | 2244 | /* static buf protected by cpuset_rwsem */ |
Tejun Heo | 4e4c9a1 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2245 | static nodemask_t cpuset_attach_nodemask_to; |
Tejun Heo | 4e4c9a1 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2246 | struct task_struct *task; |
Tejun Heo | 4530edd | 2015-09-11 15:00:19 -0400 | [diff] [blame] | 2247 | struct task_struct *leader; |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 2248 | struct cgroup_subsys_state *css; |
| 2249 | struct cpuset *cs; |
Tejun Heo | 57fce0a | 2014-02-13 06:58:41 -0500 | [diff] [blame] | 2250 | struct cpuset *oldcs = cpuset_attach_old_cs; |
Tejun Heo | 4e4c9a1 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2251 | |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 2252 | cgroup_taskset_first(tset, &css); |
| 2253 | cs = css_cs(css); |
| 2254 | |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2255 | percpu_down_write(&cpuset_rwsem); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2256 | |
Li Zefan | ae1c802 | 2014-07-09 16:48:32 +0800 | [diff] [blame] | 2257 | guarantee_online_mems(cs, &cpuset_attach_nodemask_to); |
Tejun Heo | 94196f5 | 2011-12-12 18:12:22 -0800 | [diff] [blame] | 2258 | |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 2259 | cgroup_taskset_for_each(task, css, tset) { |
Will Deacon | 431c69f | 2021-07-30 12:24:30 +0100 | [diff] [blame] | 2260 | if (cs != &top_cpuset) |
| 2261 | guarantee_online_cpus(task, cpus_attach); |
| 2262 | else |
| 2263 | cpumask_copy(cpus_attach, task_cpu_possible_mask(task)); |
Tejun Heo | bb9d97b | 2011-12-12 18:12:21 -0800 | [diff] [blame] | 2264 | /* |
| 2265 | * can_attach beforehand should guarantee that this doesn't |
| 2266 | * fail. TODO: have a better way to handle failure here |
| 2267 | */ |
| 2268 | WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach)); |
| 2269 | |
| 2270 | cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to); |
| 2271 | cpuset_update_task_spread_flag(cs, task); |
| 2272 | } |
David Quigley | 22fb52d | 2006-06-23 02:04:00 -0700 | [diff] [blame] | 2273 | |
Ben Blum | f780bdb | 2011-05-26 16:25:19 -0700 | [diff] [blame] | 2274 | /* |
Tejun Heo | 4530edd | 2015-09-11 15:00:19 -0400 | [diff] [blame] | 2275 | * Change mm for all threadgroup leaders. This is expensive and may |
| 2276 | * sleep, so it should be moved outside the migration path proper. |
Ben Blum | f780bdb | 2011-05-26 16:25:19 -0700 | [diff] [blame] | 2277 | */ |
Li Zefan | ae1c802 | 2014-07-09 16:48:32 +0800 | [diff] [blame] | 2278 | cpuset_attach_nodemask_to = cs->effective_mems; |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 2279 | cgroup_taskset_for_each_leader(leader, css, tset) { |
Tejun Heo | 3df9ca0 | 2015-09-11 15:00:18 -0400 | [diff] [blame] | 2280 | struct mm_struct *mm = get_task_mm(leader); |
Li Zefan | f047cec | 2013-06-13 15:11:44 +0800 | [diff] [blame] | 2281 | |
Tejun Heo | 3df9ca0 | 2015-09-11 15:00:18 -0400 | [diff] [blame] | 2282 | if (mm) { |
| 2283 | mpol_rebind_mm(mm, &cpuset_attach_nodemask_to); |
| 2284 | |
| 2285 | /* |
| 2286 | * old_mems_allowed is the same as mems_allowed |
| 2287 | * here, except if this task is being moved |
| 2288 | * automatically due to hotplug. In that case |
| 2289 | * @mems_allowed has been updated and is empty, so |
| 2290 | * @old_mems_allowed is the right nodemask to |
| 2291 | * migrate the mm from. |
| 2292 | */ |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 2293 | if (is_memory_migrate(cs)) |
Tejun Heo | 3df9ca0 | 2015-09-11 15:00:18 -0400 | [diff] [blame] | 2294 | cpuset_migrate_mm(mm, &oldcs->old_mems_allowed, |
| 2295 | &cpuset_attach_nodemask_to); |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 2296 | else |
| 2297 | mmput(mm); |
Li Zefan | f047cec | 2013-06-13 15:11:44 +0800 | [diff] [blame] | 2298 | } |
Paul Jackson | 4225399 | 2006-01-08 01:01:59 -0800 | [diff] [blame] | 2299 | } |
Tejun Heo | 452477f | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2300 | |
Li Zefan | 33ad801 | 2013-06-09 17:15:08 +0800 | [diff] [blame] | 2301 | cs->old_mems_allowed = cpuset_attach_nodemask_to; |
Tejun Heo | 02bb586 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2302 | |
Tejun Heo | 452477f | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2303 | cs->attach_in_progress--; |
Li Zefan | e44193d | 2013-06-09 17:14:22 +0800 | [diff] [blame] | 2304 | if (!cs->attach_in_progress) |
| 2305 | wake_up(&cpuset_attach_wq); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2306 | |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2307 | percpu_up_write(&cpuset_rwsem); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2308 | } |
| 2309 | |
| 2310 | /* The various types of files and directories in a cpuset file system */ |
| 2311 | |
| 2312 | typedef enum { |
Paul Jackson | 45b07ef | 2006-01-08 01:00:56 -0800 | [diff] [blame] | 2313 | FILE_MEMORY_MIGRATE, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2314 | FILE_CPULIST, |
| 2315 | FILE_MEMLIST, |
Li Zefan | afd1a8b | 2014-07-09 16:49:25 +0800 | [diff] [blame] | 2316 | FILE_EFFECTIVE_CPULIST, |
| 2317 | FILE_EFFECTIVE_MEMLIST, |
Waiman Long | 5cf8114 | 2018-11-08 10:08:46 -0500 | [diff] [blame] | 2318 | FILE_SUBPARTS_CPULIST, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2319 | FILE_CPU_EXCLUSIVE, |
| 2320 | FILE_MEM_EXCLUSIVE, |
Paul Menage | 7860836 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2321 | FILE_MEM_HARDWALL, |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 2322 | FILE_SCHED_LOAD_BALANCE, |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 2323 | FILE_PARTITION_ROOT, |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 2324 | FILE_SCHED_RELAX_DOMAIN_LEVEL, |
Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 2325 | FILE_MEMORY_PRESSURE_ENABLED, |
| 2326 | FILE_MEMORY_PRESSURE, |
Paul Jackson | 825a46a | 2006-03-24 03:16:03 -0800 | [diff] [blame] | 2327 | FILE_SPREAD_PAGE, |
| 2328 | FILE_SPREAD_SLAB, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2329 | } cpuset_filetype_t; |
| 2330 | |
Tejun Heo | 182446d | 2013-08-08 20:11:24 -0400 | [diff] [blame] | 2331 | static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft, |
| 2332 | u64 val) |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 2333 | { |
Tejun Heo | 182446d | 2013-08-08 20:11:24 -0400 | [diff] [blame] | 2334 | struct cpuset *cs = css_cs(css); |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 2335 | cpuset_filetype_t type = cft->private; |
Li Zefan | a903f08 | 2013-08-13 10:05:59 +0800 | [diff] [blame] | 2336 | int retval = 0; |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 2337 | |
Sebastian Andrzej Siewior | c5c63b9 | 2021-08-03 16:16:07 +0200 | [diff] [blame] | 2338 | cpus_read_lock(); |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2339 | percpu_down_write(&cpuset_rwsem); |
Li Zefan | a903f08 | 2013-08-13 10:05:59 +0800 | [diff] [blame] | 2340 | if (!is_cpuset_online(cs)) { |
| 2341 | retval = -ENODEV; |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2342 | goto out_unlock; |
Li Zefan | a903f08 | 2013-08-13 10:05:59 +0800 | [diff] [blame] | 2343 | } |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 2344 | |
| 2345 | switch (type) { |
| 2346 | case FILE_CPU_EXCLUSIVE: |
| 2347 | retval = update_flag(CS_CPU_EXCLUSIVE, cs, val); |
| 2348 | break; |
| 2349 | case FILE_MEM_EXCLUSIVE: |
| 2350 | retval = update_flag(CS_MEM_EXCLUSIVE, cs, val); |
| 2351 | break; |
Paul Menage | 7860836 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2352 | case FILE_MEM_HARDWALL: |
| 2353 | retval = update_flag(CS_MEM_HARDWALL, cs, val); |
| 2354 | break; |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 2355 | case FILE_SCHED_LOAD_BALANCE: |
| 2356 | retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val); |
| 2357 | break; |
| 2358 | case FILE_MEMORY_MIGRATE: |
| 2359 | retval = update_flag(CS_MEMORY_MIGRATE, cs, val); |
| 2360 | break; |
| 2361 | case FILE_MEMORY_PRESSURE_ENABLED: |
| 2362 | cpuset_memory_pressure_enabled = !!val; |
| 2363 | break; |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 2364 | case FILE_SPREAD_PAGE: |
| 2365 | retval = update_flag(CS_SPREAD_PAGE, cs, val); |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 2366 | break; |
| 2367 | case FILE_SPREAD_SLAB: |
| 2368 | retval = update_flag(CS_SPREAD_SLAB, cs, val); |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 2369 | break; |
| 2370 | default: |
| 2371 | retval = -EINVAL; |
| 2372 | break; |
| 2373 | } |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2374 | out_unlock: |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2375 | percpu_up_write(&cpuset_rwsem); |
Sebastian Andrzej Siewior | c5c63b9 | 2021-08-03 16:16:07 +0200 | [diff] [blame] | 2376 | cpus_read_unlock(); |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 2377 | return retval; |
| 2378 | } |
| 2379 | |
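/*
 * Example flow, assuming a legacy-hierarchy mount: writing "1" to
 * the "mem_hardwall" file reaches cpuset_write_u64() with
 * cft->private == FILE_MEM_HARDWALL and val == 1, which boils down
 * to update_flag(CS_MEM_HARDWALL, cs, 1).
 */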
Tejun Heo | 182446d | 2013-08-08 20:11:24 -0400 | [diff] [blame] | 2380 | static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft, |
| 2381 | s64 val) |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 2382 | { |
Tejun Heo | 182446d | 2013-08-08 20:11:24 -0400 | [diff] [blame] | 2383 | struct cpuset *cs = css_cs(css); |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 2384 | cpuset_filetype_t type = cft->private; |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2385 | int retval = -ENODEV; |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 2386 | |
Sebastian Andrzej Siewior | c5c63b9 | 2021-08-03 16:16:07 +0200 | [diff] [blame] | 2387 | cpus_read_lock(); |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2388 | percpu_down_write(&cpuset_rwsem); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2389 | if (!is_cpuset_online(cs)) |
| 2390 | goto out_unlock; |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 2391 | |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 2392 | switch (type) { |
| 2393 | case FILE_SCHED_RELAX_DOMAIN_LEVEL: |
| 2394 | retval = update_relax_domain_level(cs, val); |
| 2395 | break; |
| 2396 | default: |
| 2397 | retval = -EINVAL; |
| 2398 | break; |
| 2399 | } |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2400 | out_unlock: |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2401 | percpu_up_write(&cpuset_rwsem); |
Sebastian Andrzej Siewior | c5c63b9 | 2021-08-03 16:16:07 +0200 | [diff] [blame] | 2402 | cpus_read_unlock(); |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 2403 | return retval; |
| 2404 | } |
| 2405 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2406 | /* |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 2407 | * Common handling for a write to a "cpus" or "mems" file. |
| 2408 | */ |
Tejun Heo | 451af50 | 2014-05-13 12:16:21 -0400 | [diff] [blame] | 2409 | static ssize_t cpuset_write_resmask(struct kernfs_open_file *of, |
| 2410 | char *buf, size_t nbytes, loff_t off) |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 2411 | { |
Tejun Heo | 451af50 | 2014-05-13 12:16:21 -0400 | [diff] [blame] | 2412 | struct cpuset *cs = css_cs(of_css(of)); |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 2413 | struct cpuset *trialcs; |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2414 | int retval = -ENODEV; |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 2415 | |
Tejun Heo | 451af50 | 2014-05-13 12:16:21 -0400 | [diff] [blame] | 2416 | buf = strstrip(buf); |
| 2417 | |
Tejun Heo | 3a5a6d0 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2418 | /* |
| 2419 | * CPU or memory hotunplug may leave @cs w/o any execution |
| 2420 | * resources, in which case the hotplug code asynchronously updates |
| 2421 | * configuration and transfers all tasks to the nearest ancestor |
| 2422 | * which can execute. |
| 2423 | * |
| 2424 | * As writes to "cpus" or "mems" may restore @cs's execution |
| 2425 | * resources, wait for the previously scheduled operations before |
| 2426 | * proceeding, so that we don't end up keep removing tasks added |
| 2427 | * proceeding, so that we don't keep removing tasks added |
Tejun Heo | 76bb5ab | 2014-06-30 15:47:32 -0400 | [diff] [blame] | 2428 | * |
| 2429 | * cpuset_hotplug_work calls back into cgroup core via |
| 2430 | * cgroup_transfer_tasks() and waiting for it from a cgroupfs |
| 2431 | * operation like this one can lead to a deadlock through kernfs |
| 2432 | * active_ref protection. Let's break the protection. Losing the |
| 2433 | * protection is okay as we check whether @cs is online after |
Waiman Long | b94f9ac | 2021-09-09 22:42:56 -0400 | [diff] [blame] | 2434 | * grabbing cpuset_rwsem anyway. This only happens on the legacy |
Tejun Heo | 76bb5ab | 2014-06-30 15:47:32 -0400 | [diff] [blame] | 2435 | * hierarchies. |
Tejun Heo | 3a5a6d0 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2436 | */ |
Tejun Heo | 76bb5ab | 2014-06-30 15:47:32 -0400 | [diff] [blame] | 2437 | css_get(&cs->css); |
| 2438 | kernfs_break_active_protection(of->kn); |
Tejun Heo | 3a5a6d0 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2439 | flush_work(&cpuset_hotplug_work); |
| 2440 | |
Sebastian Andrzej Siewior | c5c63b9 | 2021-08-03 16:16:07 +0200 | [diff] [blame] | 2441 | cpus_read_lock(); |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2442 | percpu_down_write(&cpuset_rwsem); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2443 | if (!is_cpuset_online(cs)) |
| 2444 | goto out_unlock; |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 2445 | |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 2446 | trialcs = alloc_trial_cpuset(cs); |
Li Zefan | b75f38d | 2011-03-04 17:36:21 -0800 | [diff] [blame] | 2447 | if (!trialcs) { |
| 2448 | retval = -ENOMEM; |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2449 | goto out_unlock; |
Li Zefan | b75f38d | 2011-03-04 17:36:21 -0800 | [diff] [blame] | 2450 | } |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 2451 | |
Tejun Heo | 451af50 | 2014-05-13 12:16:21 -0400 | [diff] [blame] | 2452 | switch (of_cft(of)->private) { |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 2453 | case FILE_CPULIST: |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 2454 | retval = update_cpumask(cs, trialcs, buf); |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 2455 | break; |
| 2456 | case FILE_MEMLIST: |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 2457 | retval = update_nodemask(cs, trialcs, buf); |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 2458 | break; |
| 2459 | default: |
| 2460 | retval = -EINVAL; |
| 2461 | break; |
| 2462 | } |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 2463 | |
Waiman Long | bf92370 | 2018-11-08 10:08:37 -0500 | [diff] [blame] | 2464 | free_cpuset(trialcs); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2465 | out_unlock: |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2466 | percpu_up_write(&cpuset_rwsem); |
Sebastian Andrzej Siewior | c5c63b9 | 2021-08-03 16:16:07 +0200 | [diff] [blame] | 2467 | cpus_read_unlock(); |
Tejun Heo | 76bb5ab | 2014-06-30 15:47:32 -0400 | [diff] [blame] | 2468 | kernfs_unbreak_active_protection(of->kn); |
| 2469 | css_put(&cs->css); |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 2470 | flush_workqueue(cpuset_migrate_mm_wq); |
Tejun Heo | 451af50 | 2014-05-13 12:16:21 -0400 | [diff] [blame] | 2471 | return retval ?: nbytes; |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 2472 | } |
| 2473 | |
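/*
 * Illustrative flow; the mount path below is an assumption.  Writing
 * a cpulist such as:
 *
 *	echo "0-3,8" > /sys/fs/cgroup/cpuset/mycpuset/cpuset.cpus
 *
 * lands here with of_cft(of)->private == FILE_CPULIST and is handed
 * to update_cpumask(), which parses the usual cpulist range syntax.
 */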
| 2474 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2475 | * These ascii lists should be read in a single call, by using a user |
| 2476 | * buffer large enough to hold the entire map. If read in smaller |
| 2477 | * chunks, there is no guarantee of atomicity. Since the display |
| 2478 | * format (a list of ranges of sequential numbers) is variable |
| 2479 | * length, and since these maps can change dynamically, partial |
| 2480 | * reads while a list is changing could return gibberish. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2481 | */ |
Tejun Heo | 2da8ca8 | 2013-12-05 12:28:04 -0500 | [diff] [blame] | 2482 | static int cpuset_common_seq_show(struct seq_file *sf, void *v) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2483 | { |
Tejun Heo | 2da8ca8 | 2013-12-05 12:28:04 -0500 | [diff] [blame] | 2484 | struct cpuset *cs = css_cs(seq_css(sf)); |
| 2485 | cpuset_filetype_t type = seq_cft(sf)->private; |
Tejun Heo | 51ffe41 | 2013-12-05 12:28:02 -0500 | [diff] [blame] | 2486 | int ret = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2487 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2488 | spin_lock_irq(&callback_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2489 | |
| 2490 | switch (type) { |
| 2491 | case FILE_CPULIST: |
Tejun Heo | e8e6d97 | 2015-02-13 14:37:23 -0800 | [diff] [blame] | 2492 | seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2493 | break; |
| 2494 | case FILE_MEMLIST: |
Tejun Heo | e8e6d97 | 2015-02-13 14:37:23 -0800 | [diff] [blame] | 2495 | seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2496 | break; |
Li Zefan | afd1a8b | 2014-07-09 16:49:25 +0800 | [diff] [blame] | 2497 | case FILE_EFFECTIVE_CPULIST: |
Tejun Heo | e8e6d97 | 2015-02-13 14:37:23 -0800 | [diff] [blame] | 2498 | seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus)); |
Li Zefan | afd1a8b | 2014-07-09 16:49:25 +0800 | [diff] [blame] | 2499 | break; |
| 2500 | case FILE_EFFECTIVE_MEMLIST: |
Tejun Heo | e8e6d97 | 2015-02-13 14:37:23 -0800 | [diff] [blame] | 2501 | seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems)); |
Li Zefan | afd1a8b | 2014-07-09 16:49:25 +0800 | [diff] [blame] | 2502 | break; |
Waiman Long | 5cf8114 | 2018-11-08 10:08:46 -0500 | [diff] [blame] | 2503 | case FILE_SUBPARTS_CPULIST: |
| 2504 | seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->subparts_cpus)); |
| 2505 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2506 | default: |
Tejun Heo | 51ffe41 | 2013-12-05 12:28:02 -0500 | [diff] [blame] | 2507 | ret = -EINVAL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2508 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2509 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2510 | spin_unlock_irq(&callback_lock); |
Tejun Heo | 51ffe41 | 2013-12-05 12:28:02 -0500 | [diff] [blame] | 2511 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2512 | } |
| 2513 | |
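/*
 * The "%*pbl" format above prints a bitmap as a ranged list: for
 * example, a cpus_allowed containing CPUs 0, 1, 2 and 5 is emitted
 * as "0-2,5".
 */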
Tejun Heo | 182446d | 2013-08-08 20:11:24 -0400 | [diff] [blame] | 2514 | static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft) |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 2515 | { |
Tejun Heo | 182446d | 2013-08-08 20:11:24 -0400 | [diff] [blame] | 2516 | struct cpuset *cs = css_cs(css); |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 2517 | cpuset_filetype_t type = cft->private; |
| 2518 | switch (type) { |
| 2519 | case FILE_CPU_EXCLUSIVE: |
| 2520 | return is_cpu_exclusive(cs); |
| 2521 | case FILE_MEM_EXCLUSIVE: |
| 2522 | return is_mem_exclusive(cs); |
Paul Menage | 7860836 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2523 | case FILE_MEM_HARDWALL: |
| 2524 | return is_mem_hardwall(cs); |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 2525 | case FILE_SCHED_LOAD_BALANCE: |
| 2526 | return is_sched_load_balance(cs); |
| 2527 | case FILE_MEMORY_MIGRATE: |
| 2528 | return is_memory_migrate(cs); |
| 2529 | case FILE_MEMORY_PRESSURE_ENABLED: |
| 2530 | return cpuset_memory_pressure_enabled; |
| 2531 | case FILE_MEMORY_PRESSURE: |
| 2532 | return fmeter_getrate(&cs->fmeter); |
| 2533 | case FILE_SPREAD_PAGE: |
| 2534 | return is_spread_page(cs); |
| 2535 | case FILE_SPREAD_SLAB: |
| 2536 | return is_spread_slab(cs); |
| 2537 | default: |
| 2538 | BUG(); |
| 2539 | } |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 2540 | |
| 2541 | /* Unreachable but makes gcc happy */ |
| 2542 | return 0; |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 2543 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2544 | |
Tejun Heo | 182446d | 2013-08-08 20:11:24 -0400 | [diff] [blame] | 2545 | static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft) |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 2546 | { |
Tejun Heo | 182446d | 2013-08-08 20:11:24 -0400 | [diff] [blame] | 2547 | struct cpuset *cs = css_cs(css); |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 2548 | cpuset_filetype_t type = cft->private; |
| 2549 | switch (type) { |
| 2550 | case FILE_SCHED_RELAX_DOMAIN_LEVEL: |
| 2551 | return cs->relax_domain_level; |
| 2552 | default: |
| 2553 | BUG(); |
| 2554 | } |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 2555 | |
Lu Jialin | d95af61 | 2021-04-08 16:03:46 +0800 | [diff] [blame] | 2556 | /* Unreachable but makes gcc happy */ |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 2557 | return 0; |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 2558 | } |
| 2559 | |
Waiman Long | bb5b553 | 2018-11-08 10:08:44 -0500 | [diff] [blame] | 2560 | static int sched_partition_show(struct seq_file *seq, void *v) |
| 2561 | { |
| 2562 | struct cpuset *cs = css_cs(seq_css(seq)); |
| 2563 | |
| 2564 | switch (cs->partition_root_state) { |
| 2565 | case PRS_ENABLED: |
| 2566 | seq_puts(seq, "root\n"); |
| 2567 | break; |
| 2568 | case PRS_DISABLED: |
| 2569 | seq_puts(seq, "member\n"); |
| 2570 | break; |
| 2571 | case PRS_ERROR: |
| 2572 | seq_puts(seq, "root invalid\n"); |
| 2573 | break; |
| 2574 | } |
| 2575 | return 0; |
| 2576 | } |
| 2577 | |
| 2578 | static ssize_t sched_partition_write(struct kernfs_open_file *of, char *buf, |
| 2579 | size_t nbytes, loff_t off) |
| 2580 | { |
| 2581 | struct cpuset *cs = css_cs(of_css(of)); |
| 2582 | int val; |
| 2583 | int retval = -ENODEV; |
| 2584 | |
| 2585 | buf = strstrip(buf); |
| 2586 | |
| 2587 | /* |
Tejun Heo | b1e3aeb | 2018-11-13 12:03:33 -0800 | [diff] [blame] | 2588 | * Convert "root" to ENABLED, and convert "member" to DISABLED. |
Waiman Long | bb5b553 | 2018-11-08 10:08:44 -0500 | [diff] [blame] | 2589 | */ |
Tejun Heo | b1e3aeb | 2018-11-13 12:03:33 -0800 | [diff] [blame] | 2590 | if (!strcmp(buf, "root")) |
Waiman Long | bb5b553 | 2018-11-08 10:08:44 -0500 | [diff] [blame] | 2591 | val = PRS_ENABLED; |
Tejun Heo | b1e3aeb | 2018-11-13 12:03:33 -0800 | [diff] [blame] | 2592 | else if (!strcmp(buf, "member")) |
Waiman Long | bb5b553 | 2018-11-08 10:08:44 -0500 | [diff] [blame] | 2593 | val = PRS_DISABLED; |
| 2594 | else |
| 2595 | return -EINVAL; |
| 2596 | |
| 2597 | css_get(&cs->css); |
Sebastian Andrzej Siewior | c5c63b9 | 2021-08-03 16:16:07 +0200 | [diff] [blame] | 2598 | cpus_read_lock(); |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2599 | percpu_down_write(&cpuset_rwsem); |
Waiman Long | bb5b553 | 2018-11-08 10:08:44 -0500 | [diff] [blame] | 2600 | if (!is_cpuset_online(cs)) |
| 2601 | goto out_unlock; |
| 2602 | |
| 2603 | retval = update_prstate(cs, val); |
| 2604 | out_unlock: |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2605 | percpu_up_write(&cpuset_rwsem); |
Sebastian Andrzej Siewior | c5c63b9 | 2021-08-03 16:16:07 +0200 | [diff] [blame] | 2606 | cpus_read_unlock(); |
Waiman Long | bb5b553 | 2018-11-08 10:08:44 -0500 | [diff] [blame] | 2607 | css_put(&cs->css); |
| 2608 | return retval ?: nbytes; |
| 2609 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2610 | |
| 2611 | /* |
| 2612 | * for the common functions, 'private' gives the type of file |
| 2613 | */ |
| 2614 | |
Waiman Long | 4ec22e9 | 2018-11-08 10:08:35 -0500 | [diff] [blame] | 2615 | static struct cftype legacy_files[] = { |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2616 | { |
| 2617 | .name = "cpus", |
Tejun Heo | 2da8ca8 | 2013-12-05 12:28:04 -0500 | [diff] [blame] | 2618 | .seq_show = cpuset_common_seq_show, |
Tejun Heo | 451af50 | 2014-05-13 12:16:21 -0400 | [diff] [blame] | 2619 | .write = cpuset_write_resmask, |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 2620 | .max_write_len = (100U + 6 * NR_CPUS), |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2621 | .private = FILE_CPULIST, |
| 2622 | }, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2623 | |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2624 | { |
| 2625 | .name = "mems", |
Tejun Heo | 2da8ca8 | 2013-12-05 12:28:04 -0500 | [diff] [blame] | 2626 | .seq_show = cpuset_common_seq_show, |
Tejun Heo | 451af50 | 2014-05-13 12:16:21 -0400 | [diff] [blame] | 2627 | .write = cpuset_write_resmask, |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 2628 | .max_write_len = (100U + 6 * MAX_NUMNODES), |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2629 | .private = FILE_MEMLIST, |
| 2630 | }, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2631 | |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2632 | { |
Li Zefan | afd1a8b | 2014-07-09 16:49:25 +0800 | [diff] [blame] | 2633 | .name = "effective_cpus", |
| 2634 | .seq_show = cpuset_common_seq_show, |
| 2635 | .private = FILE_EFFECTIVE_CPULIST, |
| 2636 | }, |
| 2637 | |
| 2638 | { |
| 2639 | .name = "effective_mems", |
| 2640 | .seq_show = cpuset_common_seq_show, |
| 2641 | .private = FILE_EFFECTIVE_MEMLIST, |
| 2642 | }, |
| 2643 | |
| 2644 | { |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2645 | .name = "cpu_exclusive", |
| 2646 | .read_u64 = cpuset_read_u64, |
| 2647 | .write_u64 = cpuset_write_u64, |
| 2648 | .private = FILE_CPU_EXCLUSIVE, |
| 2649 | }, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2650 | |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2651 | { |
| 2652 | .name = "mem_exclusive", |
| 2653 | .read_u64 = cpuset_read_u64, |
| 2654 | .write_u64 = cpuset_write_u64, |
| 2655 | .private = FILE_MEM_EXCLUSIVE, |
| 2656 | }, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2657 | |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2658 | { |
Paul Menage | 7860836 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2659 | .name = "mem_hardwall", |
| 2660 | .read_u64 = cpuset_read_u64, |
| 2661 | .write_u64 = cpuset_write_u64, |
| 2662 | .private = FILE_MEM_HARDWALL, |
| 2663 | }, |
| 2664 | |
| 2665 | { |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2666 | .name = "sched_load_balance", |
| 2667 | .read_u64 = cpuset_read_u64, |
| 2668 | .write_u64 = cpuset_write_u64, |
| 2669 | .private = FILE_SCHED_LOAD_BALANCE, |
| 2670 | }, |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 2671 | |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2672 | { |
| 2673 | .name = "sched_relax_domain_level", |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 2674 | .read_s64 = cpuset_read_s64, |
| 2675 | .write_s64 = cpuset_write_s64, |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2676 | .private = FILE_SCHED_RELAX_DOMAIN_LEVEL, |
| 2677 | }, |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 2678 | |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2679 | { |
| 2680 | .name = "memory_migrate", |
| 2681 | .read_u64 = cpuset_read_u64, |
| 2682 | .write_u64 = cpuset_write_u64, |
| 2683 | .private = FILE_MEMORY_MIGRATE, |
| 2684 | }, |
| 2685 | |
| 2686 | { |
| 2687 | .name = "memory_pressure", |
| 2688 | .read_u64 = cpuset_read_u64, |
Waiman Long | 1c08c22 | 2017-08-24 12:04:29 -0400 | [diff] [blame] | 2689 | .private = FILE_MEMORY_PRESSURE, |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2690 | }, |
| 2691 | |
| 2692 | { |
| 2693 | .name = "memory_spread_page", |
| 2694 | .read_u64 = cpuset_read_u64, |
| 2695 | .write_u64 = cpuset_write_u64, |
| 2696 | .private = FILE_SPREAD_PAGE, |
| 2697 | }, |
| 2698 | |
| 2699 | { |
| 2700 | .name = "memory_spread_slab", |
| 2701 | .read_u64 = cpuset_read_u64, |
| 2702 | .write_u64 = cpuset_write_u64, |
| 2703 | .private = FILE_SPREAD_SLAB, |
| 2704 | }, |
Tejun Heo | 4baf6e3 | 2012-04-01 12:09:55 -0700 | [diff] [blame] | 2705 | |
| 2706 | { |
| 2707 | .name = "memory_pressure_enabled", |
| 2708 | .flags = CFTYPE_ONLY_ON_ROOT, |
| 2709 | .read_u64 = cpuset_read_u64, |
| 2710 | .write_u64 = cpuset_write_u64, |
| 2711 | .private = FILE_MEMORY_PRESSURE_ENABLED, |
| 2712 | }, |
| 2713 | |
| 2714 | { } /* terminate */ |
Paul Jackson | 45b07ef | 2006-01-08 01:00:56 -0800 | [diff] [blame] | 2715 | }; |
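
/*
 * Note (illustrative): on a v1 hierarchy the entries above are created
 * with the subsystem prefix, e.g. "cpuset.cpus" and "cpuset.mems"; a
 * legacy mount with the "noprefix" option (as the old cpuset filesystem
 * wrapper used) exposes them as plain "cpus" and "mems".
 */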
| 2716 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2717 | /* |
Waiman Long | 4ec22e9 | 2018-11-08 10:08:35 -0500 | [diff] [blame] | 2718 | * This is currently a minimal set for the default hierarchy. It can be |
| 2719 | * expanded later on by migrating more features and control files from v1. |
| 2720 | */ |
| 2721 | static struct cftype dfl_files[] = { |
| 2722 | { |
| 2723 | .name = "cpus", |
| 2724 | .seq_show = cpuset_common_seq_show, |
| 2725 | .write = cpuset_write_resmask, |
| 2726 | .max_write_len = (100U + 6 * NR_CPUS), |
| 2727 | .private = FILE_CPULIST, |
| 2728 | .flags = CFTYPE_NOT_ON_ROOT, |
| 2729 | }, |
| 2730 | |
| 2731 | { |
| 2732 | .name = "mems", |
| 2733 | .seq_show = cpuset_common_seq_show, |
| 2734 | .write = cpuset_write_resmask, |
| 2735 | .max_write_len = (100U + 6 * MAX_NUMNODES), |
| 2736 | .private = FILE_MEMLIST, |
| 2737 | .flags = CFTYPE_NOT_ON_ROOT, |
| 2738 | }, |
| 2739 | |
| 2740 | { |
| 2741 | .name = "cpus.effective", |
| 2742 | .seq_show = cpuset_common_seq_show, |
| 2743 | .private = FILE_EFFECTIVE_CPULIST, |
Waiman Long | 4ec22e9 | 2018-11-08 10:08:35 -0500 | [diff] [blame] | 2744 | }, |
| 2745 | |
| 2746 | { |
| 2747 | .name = "mems.effective", |
| 2748 | .seq_show = cpuset_common_seq_show, |
| 2749 | .private = FILE_EFFECTIVE_MEMLIST, |
Waiman Long | 4ec22e9 | 2018-11-08 10:08:35 -0500 | [diff] [blame] | 2750 | }, |
| 2751 | |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 2752 | { |
Tejun Heo | b1e3aeb | 2018-11-13 12:03:33 -0800 | [diff] [blame] | 2753 | .name = "cpus.partition", |
Waiman Long | bb5b553 | 2018-11-08 10:08:44 -0500 | [diff] [blame] | 2754 | .seq_show = sched_partition_show, |
| 2755 | .write = sched_partition_write, |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 2756 | .private = FILE_PARTITION_ROOT, |
| 2757 | .flags = CFTYPE_NOT_ON_ROOT, |
Waiman Long | e7cc988 | 2021-08-10 23:06:02 -0400 | [diff] [blame] | 2758 | .file_offset = offsetof(struct cpuset, partition_file), |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 2759 | }, |
| 2760 | |
Waiman Long | 5cf8114 | 2018-11-08 10:08:46 -0500 | [diff] [blame] | 2761 | { |
| 2762 | .name = "cpus.subpartitions", |
| 2763 | .seq_show = cpuset_common_seq_show, |
| 2764 | .private = FILE_SUBPARTS_CPULIST, |
| 2765 | .flags = CFTYPE_DEBUG, |
| 2766 | }, |
| 2767 | |
Waiman Long | 4ec22e9 | 2018-11-08 10:08:35 -0500 | [diff] [blame] | 2768 | { } /* terminate */ |
| 2769 | }; |
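
/*
 * Sketch (illustrative) of how the entries above surface on the default
 * hierarchy once the cgroup core adds the "cpuset." prefix:
 *
 *	cpuset.cpus, cpuset.mems,
 *	cpuset.cpus.effective, cpuset.mems.effective,
 *	cpuset.cpus.partition
 *
 * cpuset.cpus.subpartitions is marked CFTYPE_DEBUG and is only created
 * when cgroup debugging is enabled.
 */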
| 2770 | |
| 2771 | |
| 2772 | /* |
Tejun Heo | 92fb974 | 2012-11-19 08:13:38 -0800 | [diff] [blame] | 2773 | * cpuset_css_alloc - allocate a cpuset css |
Li Zefan | c9e5fe6 | 2013-06-14 11:18:27 +0800 | [diff] [blame] | 2774 | * @parent_css: css of the parent cpuset; NULL when allocating top_cpuset's css |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2775 | */ |
| 2776 | |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 2777 | static struct cgroup_subsys_state * |
| 2778 | cpuset_css_alloc(struct cgroup_subsys_state *parent_css) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2779 | { |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2780 | struct cpuset *cs; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2781 | |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 2782 | if (!parent_css) |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 2783 | return &top_cpuset.css; |
Tejun Heo | 033fa1c | 2012-11-19 08:13:39 -0800 | [diff] [blame] | 2784 | |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2785 | cs = kzalloc(sizeof(*cs), GFP_KERNEL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2786 | if (!cs) |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 2787 | return ERR_PTR(-ENOMEM); |
Waiman Long | bf92370 | 2018-11-08 10:08:37 -0500 | [diff] [blame] | 2788 | |
| 2789 | if (alloc_cpumasks(cs, NULL)) { |
| 2790 | kfree(cs); |
| 2791 | return ERR_PTR(-ENOMEM); |
| 2792 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2793 | |
Waiman Long | ee9707e | 2021-08-11 15:57:07 -0400 | [diff] [blame] | 2794 | __set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); |
Mike Travis | f9a86fc | 2008-04-04 18:11:07 -0700 | [diff] [blame] | 2795 | nodes_clear(cs->mems_allowed); |
Li Zefan | e2b9a3d | 2014-07-09 16:47:03 +0800 | [diff] [blame] | 2796 | nodes_clear(cs->effective_mems); |
Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 2797 | fmeter_init(&cs->fmeter); |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 2798 | cs->relax_domain_level = -1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2799 | |
Waiman Long | ee9707e | 2021-08-11 15:57:07 -0400 | [diff] [blame] | 2800 | /* Set CS_MEMORY_MIGRATE for default hierarchy */ |
| 2801 | if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) |
| 2802 | __set_bit(CS_MEMORY_MIGRATE, &cs->flags); |
| 2803 | |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2804 | return &cs->css; |
| 2805 | } |
| 2806 | |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 2807 | static int cpuset_css_online(struct cgroup_subsys_state *css) |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2808 | { |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 2809 | struct cpuset *cs = css_cs(css); |
Tejun Heo | c431069 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2810 | struct cpuset *parent = parent_cs(cs); |
Tejun Heo | ae8086c | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2811 | struct cpuset *tmp_cs; |
Tejun Heo | 492eb21 | 2013-08-08 20:11:25 -0400 | [diff] [blame] | 2812 | struct cgroup_subsys_state *pos_css; |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2813 | |
| 2814 | if (!parent) |
| 2815 | return 0; |
| 2816 | |
Sebastian Andrzej Siewior | c5c63b9 | 2021-08-03 16:16:07 +0200 | [diff] [blame] | 2817 | cpus_read_lock(); |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2818 | percpu_down_write(&cpuset_rwsem); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2819 | |
Tejun Heo | efeb77b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2820 | set_bit(CS_ONLINE, &cs->flags); |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2821 | if (is_spread_page(parent)) |
| 2822 | set_bit(CS_SPREAD_PAGE, &cs->flags); |
| 2823 | if (is_spread_slab(parent)) |
| 2824 | set_bit(CS_SPREAD_SLAB, &cs->flags); |
| 2825 | |
Mel Gorman | 664eedd | 2014-06-04 16:10:08 -0700 | [diff] [blame] | 2826 | cpuset_inc(); |
Tejun Heo | 033fa1c | 2012-11-19 08:13:39 -0800 | [diff] [blame] | 2827 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2828 | spin_lock_irq(&callback_lock); |
Waiman Long | b8d1b8e | 2017-08-17 15:33:10 -0400 | [diff] [blame] | 2829 | if (is_in_v2_mode()) { |
Li Zefan | e2b9a3d | 2014-07-09 16:47:03 +0800 | [diff] [blame] | 2830 | cpumask_copy(cs->effective_cpus, parent->effective_cpus); |
| 2831 | cs->effective_mems = parent->effective_mems; |
Waiman Long | 4716909 | 2018-11-08 10:08:40 -0500 | [diff] [blame] | 2832 | cs->use_parent_ecpus = true; |
| 2833 | parent->child_ecpus_count++; |
Li Zefan | e2b9a3d | 2014-07-09 16:47:03 +0800 | [diff] [blame] | 2834 | } |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2835 | spin_unlock_irq(&callback_lock); |
Li Zefan | e2b9a3d | 2014-07-09 16:47:03 +0800 | [diff] [blame] | 2836 | |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 2837 | if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags)) |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2838 | goto out_unlock; |
Tejun Heo | 033fa1c | 2012-11-19 08:13:39 -0800 | [diff] [blame] | 2839 | |
| 2840 | /* |
| 2841 | * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is |
| 2842 | * set. This flag handling is implemented in cgroup core for |
| 2843 | * historical reasons - the flag may be specified during mount. |
| 2844 | * |
| 2845 | * Currently, if any sibling cpusets have exclusive cpus or mem, we |
| 2846 | * refuse to clone the configuration - thereby refusing to admit the |
| 2847 | * task, and as a result failing the sys_unshare() or |
| 2848 | * clone() which initiated it. If this becomes a problem for some |
| 2849 | * users who wish to allow that scenario, then this could be |
| 2850 | * changed to grant parent->cpus_allowed-sibling_cpus_exclusive |
| 2851 | * (and likewise for mems) to the new cgroup. |
| 2852 | */ |
Tejun Heo | ae8086c | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2853 | rcu_read_lock(); |
Tejun Heo | 492eb21 | 2013-08-08 20:11:25 -0400 | [diff] [blame] | 2854 | cpuset_for_each_child(tmp_cs, pos_css, parent) { |
Tejun Heo | ae8086c | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2855 | if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) { |
| 2856 | rcu_read_unlock(); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2857 | goto out_unlock; |
Tejun Heo | ae8086c | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2858 | } |
Tejun Heo | 033fa1c | 2012-11-19 08:13:39 -0800 | [diff] [blame] | 2859 | } |
Tejun Heo | ae8086c | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2860 | rcu_read_unlock(); |
Tejun Heo | 033fa1c | 2012-11-19 08:13:39 -0800 | [diff] [blame] | 2861 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2862 | spin_lock_irq(&callback_lock); |
Tejun Heo | 033fa1c | 2012-11-19 08:13:39 -0800 | [diff] [blame] | 2863 | cs->mems_allowed = parent->mems_allowed; |
Zefan Li | 790317e | 2015-02-13 11:19:49 +0800 | [diff] [blame] | 2864 | cs->effective_mems = parent->mems_allowed; |
Tejun Heo | 033fa1c | 2012-11-19 08:13:39 -0800 | [diff] [blame] | 2865 | cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); |
Zefan Li | 790317e | 2015-02-13 11:19:49 +0800 | [diff] [blame] | 2866 | cpumask_copy(cs->effective_cpus, parent->cpus_allowed); |
Dan Carpenter | cea7446 | 2014-10-27 16:27:02 +0300 | [diff] [blame] | 2867 | spin_unlock_irq(&callback_lock); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2868 | out_unlock: |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2869 | percpu_up_write(&cpuset_rwsem); |
Sebastian Andrzej Siewior | c5c63b9 | 2021-08-03 16:16:07 +0200 | [diff] [blame] | 2870 | cpus_read_unlock(); |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2871 | return 0; |
| 2872 | } |
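
/*
 * Illustrative only: CGRP_CPUSET_CLONE_CHILDREN corresponds to the v1
 * "cgroup.clone_children" knob, e.g.
 *
 *	# echo 1 > /sys/fs/cgroup/cpuset/cgroup.clone_children
 *	# mkdir /sys/fs/cgroup/cpuset/child
 *
 * after which the new child starts with its parent's cpus_allowed and
 * mems_allowed, subject to the exclusivity check above.
 */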
| 2873 | |
Zhao Hongjiang | 0b9e696 | 2013-07-27 11:56:53 +0800 | [diff] [blame] | 2874 | /* |
| 2875 | * If the cpuset being removed has its flag 'sched_load_balance' |
| 2876 | * enabled, then simulate turning sched_load_balance off, which |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 2877 | * will call rebuild_sched_domains_locked(). That is not needed |
| 2878 | * in the default hierarchy where only changes in partition |
| 2879 | * will cause repartitioning. |
| 2880 | * |
| 2881 | * If the cpuset has the 'cpus.partition' flag enabled, simulate |
| 2882 | * turning 'cpus.partition' off. |
Zhao Hongjiang | 0b9e696 | 2013-07-27 11:56:53 +0800 | [diff] [blame] | 2883 | */ |
| 2884 | |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 2885 | static void cpuset_css_offline(struct cgroup_subsys_state *css) |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2886 | { |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 2887 | struct cpuset *cs = css_cs(css); |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2888 | |
Sebastian Andrzej Siewior | c5c63b9 | 2021-08-03 16:16:07 +0200 | [diff] [blame] | 2889 | cpus_read_lock(); |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2890 | percpu_down_write(&cpuset_rwsem); |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2891 | |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 2892 | if (is_partition_root(cs)) |
| 2893 | update_prstate(cs, 0); |
| 2894 | |
| 2895 | if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && |
| 2896 | is_sched_load_balance(cs)) |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2897 | update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); |
| 2898 | |
Waiman Long | 4716909 | 2018-11-08 10:08:40 -0500 | [diff] [blame] | 2899 | if (cs->use_parent_ecpus) { |
| 2900 | struct cpuset *parent = parent_cs(cs); |
| 2901 | |
| 2902 | cs->use_parent_ecpus = false; |
| 2903 | parent->child_ecpus_count--; |
| 2904 | } |
| 2905 | |
Mel Gorman | 664eedd | 2014-06-04 16:10:08 -0700 | [diff] [blame] | 2906 | cpuset_dec(); |
Tejun Heo | efeb77b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2907 | clear_bit(CS_ONLINE, &cs->flags); |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2908 | |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2909 | percpu_up_write(&cpuset_rwsem); |
Sebastian Andrzej Siewior | c5c63b9 | 2021-08-03 16:16:07 +0200 | [diff] [blame] | 2910 | cpus_read_unlock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2911 | } |
| 2912 | |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 2913 | static void cpuset_css_free(struct cgroup_subsys_state *css) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2914 | { |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 2915 | struct cpuset *cs = css_cs(css); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2916 | |
Waiman Long | bf92370 | 2018-11-08 10:08:37 -0500 | [diff] [blame] | 2917 | free_cpuset(cs); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2918 | } |
| 2919 | |
Li Zefan | 39bd0d1 | 2014-07-09 16:48:01 +0800 | [diff] [blame] | 2920 | static void cpuset_bind(struct cgroup_subsys_state *root_css) |
| 2921 | { |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2922 | percpu_down_write(&cpuset_rwsem); |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2923 | spin_lock_irq(&callback_lock); |
Li Zefan | 39bd0d1 | 2014-07-09 16:48:01 +0800 | [diff] [blame] | 2924 | |
Waiman Long | b8d1b8e | 2017-08-17 15:33:10 -0400 | [diff] [blame] | 2925 | if (is_in_v2_mode()) { |
Li Zefan | 39bd0d1 | 2014-07-09 16:48:01 +0800 | [diff] [blame] | 2926 | cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask); |
| 2927 | top_cpuset.mems_allowed = node_possible_map; |
| 2928 | } else { |
| 2929 | cpumask_copy(top_cpuset.cpus_allowed, |
| 2930 | top_cpuset.effective_cpus); |
| 2931 | top_cpuset.mems_allowed = top_cpuset.effective_mems; |
| 2932 | } |
| 2933 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2934 | spin_unlock_irq(&callback_lock); |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2935 | percpu_up_write(&cpuset_rwsem); |
Li Zefan | 39bd0d1 | 2014-07-09 16:48:01 +0800 | [diff] [blame] | 2936 | } |
| 2937 | |
Zefan Li | 06f4e94 | 2016-08-09 11:25:01 +0800 | [diff] [blame] | 2938 | /* |
| 2939 | * Make sure the new task conforms to the current state of its parent, |
| 2940 | * which could have been changed by cpuset just after it inherits the |
| 2941 | * state from the parent and before it sits on the cgroup's task list. |
| 2942 | */ |
Wei Yongjun | 8a15b81 | 2016-09-16 13:02:37 +0000 | [diff] [blame] | 2943 | static void cpuset_fork(struct task_struct *task) |
Zefan Li | 06f4e94 | 2016-08-09 11:25:01 +0800 | [diff] [blame] | 2944 | { |
| 2945 | if (task_css_is_root(task, cpuset_cgrp_id)) |
| 2946 | return; |
| 2947 | |
Sebastian Andrzej Siewior | 3bd3706 | 2019-04-23 16:26:36 +0200 | [diff] [blame] | 2948 | set_cpus_allowed_ptr(task, current->cpus_ptr); |
Zefan Li | 06f4e94 | 2016-08-09 11:25:01 +0800 | [diff] [blame] | 2949 | task->mems_allowed = current->mems_allowed; |
| 2950 | } |
| 2951 | |
Tejun Heo | 073219e | 2014-02-08 10:36:58 -0500 | [diff] [blame] | 2952 | struct cgroup_subsys cpuset_cgrp_subsys = { |
Li Zefan | 39bd0d1 | 2014-07-09 16:48:01 +0800 | [diff] [blame] | 2953 | .css_alloc = cpuset_css_alloc, |
| 2954 | .css_online = cpuset_css_online, |
| 2955 | .css_offline = cpuset_css_offline, |
| 2956 | .css_free = cpuset_css_free, |
| 2957 | .can_attach = cpuset_can_attach, |
| 2958 | .cancel_attach = cpuset_cancel_attach, |
| 2959 | .attach = cpuset_attach, |
Tejun Heo | 5cf1cac | 2016-04-21 19:06:48 -0400 | [diff] [blame] | 2960 | .post_attach = cpuset_post_attach, |
Li Zefan | 39bd0d1 | 2014-07-09 16:48:01 +0800 | [diff] [blame] | 2961 | .bind = cpuset_bind, |
Zefan Li | 06f4e94 | 2016-08-09 11:25:01 +0800 | [diff] [blame] | 2962 | .fork = cpuset_fork, |
Waiman Long | 4ec22e9 | 2018-11-08 10:08:35 -0500 | [diff] [blame] | 2963 | .legacy_cftypes = legacy_files, |
| 2964 | .dfl_cftypes = dfl_files, |
Tejun Heo | b38e42e | 2016-02-23 10:00:50 -0500 | [diff] [blame] | 2965 | .early_init = true, |
Waiman Long | 4ec22e9 | 2018-11-08 10:08:35 -0500 | [diff] [blame] | 2966 | .threaded = true, |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 2967 | }; |
| 2968 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2969 | /** |
| 2970 | * cpuset_init - initialize cpusets at system boot |
| 2971 | * |
Al Viro | d5f68d3 | 2019-05-13 12:33:22 -0400 | [diff] [blame] | 2972 | * Description: Initialize top_cpuset |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2973 | **/ |
| 2974 | |
| 2975 | int __init cpuset_init(void) |
| 2976 | { |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2977 | BUG_ON(percpu_init_rwsem(&cpuset_rwsem)); |
| 2978 | |
Nicholas Mc Guire | 75fa8e5 | 2017-03-26 18:24:06 +0200 | [diff] [blame] | 2979 | BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL)); |
| 2980 | BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL)); |
Waiman Long | bf92370 | 2018-11-08 10:08:37 -0500 | [diff] [blame] | 2981 | BUG_ON(!zalloc_cpumask_var(&top_cpuset.subparts_cpus, GFP_KERNEL)); |
Miao Xie | 58568d2 | 2009-06-16 15:31:49 -0700 | [diff] [blame] | 2982 | |
Li Zefan | 300ed6c | 2009-01-07 18:08:44 -0800 | [diff] [blame] | 2983 | cpumask_setall(top_cpuset.cpus_allowed); |
Mike Travis | f9a86fc | 2008-04-04 18:11:07 -0700 | [diff] [blame] | 2984 | nodes_setall(top_cpuset.mems_allowed); |
Li Zefan | e2b9a3d | 2014-07-09 16:47:03 +0800 | [diff] [blame] | 2985 | cpumask_setall(top_cpuset.effective_cpus); |
| 2986 | nodes_setall(top_cpuset.effective_mems); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2987 | |
Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 2988 | fmeter_init(&top_cpuset.fmeter); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 2989 | set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags); |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 2990 | top_cpuset.relax_domain_level = -1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2991 | |
Nicholas Mc Guire | 75fa8e5 | 2017-03-26 18:24:06 +0200 | [diff] [blame] | 2992 | BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL)); |
Li Zefan | 2341d1b | 2009-01-07 18:08:42 -0800 | [diff] [blame] | 2993 | |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 2994 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2995 | } |
| 2996 | |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 2997 | /* |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 2998 | * If CPU and/or memory hotplug handlers, below, unplug any CPUs |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 2999 | * or memory nodes, we need to walk over the cpuset hierarchy, |
| 3000 | * removing that CPU or node from all cpusets. If this removes the |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 3001 | * last CPU or node from a cpuset, then move the tasks in the empty |
| 3002 | * cpuset to its next-highest non-empty parent. |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 3003 | */ |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 3004 | static void remove_tasks_in_empty_cpuset(struct cpuset *cs) |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 3005 | { |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 3006 | struct cpuset *parent; |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 3007 | |
Paul Jackson | c8d9c90 | 2008-02-07 00:14:46 -0800 | [diff] [blame] | 3008 | /* |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 3009 | * Find its next-highest non-empty parent (top cpuset |
| 3010 | * has online cpus, so can't be empty). |
| 3011 | */ |
Tejun Heo | c431069 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 3012 | parent = parent_cs(cs); |
Li Zefan | 300ed6c | 2009-01-07 18:08:44 -0800 | [diff] [blame] | 3013 | while (cpumask_empty(parent->cpus_allowed) || |
Paul Jackson | b450129 | 2008-02-07 00:14:47 -0800 | [diff] [blame] | 3014 | nodes_empty(parent->mems_allowed)) |
Tejun Heo | c431069 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 3015 | parent = parent_cs(parent); |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 3016 | |
Tejun Heo | 8cc9934 | 2013-04-07 09:29:50 -0700 | [diff] [blame] | 3017 | if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) { |
Fabian Frederick | 12d3089 | 2014-05-05 19:49:00 +0200 | [diff] [blame] | 3018 | pr_err("cpuset: failed to transfer tasks out of empty cpuset "); |
Tejun Heo | e61734c | 2014-02-12 09:29:50 -0500 | [diff] [blame] | 3019 | pr_cont_cgroup_name(cs->css.cgroup); |
| 3020 | pr_cont("\n"); |
Tejun Heo | 8cc9934 | 2013-04-07 09:29:50 -0700 | [diff] [blame] | 3021 | } |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 3022 | } |
| 3023 | |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 3024 | static void |
| 3025 | hotplug_update_tasks_legacy(struct cpuset *cs, |
| 3026 | struct cpumask *new_cpus, nodemask_t *new_mems, |
| 3027 | bool cpus_updated, bool mems_updated) |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 3028 | { |
| 3029 | bool is_empty; |
| 3030 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 3031 | spin_lock_irq(&callback_lock); |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 3032 | cpumask_copy(cs->cpus_allowed, new_cpus); |
| 3033 | cpumask_copy(cs->effective_cpus, new_cpus); |
| 3034 | cs->mems_allowed = *new_mems; |
| 3035 | cs->effective_mems = *new_mems; |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 3036 | spin_unlock_irq(&callback_lock); |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 3037 | |
| 3038 | /* |
| 3039 | * Don't call update_tasks_cpumask() if the cpuset becomes empty, |
| 3040 | * as the tasks will be migrated to an ancestor. |
| 3041 | */ |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 3042 | if (cpus_updated && !cpumask_empty(cs->cpus_allowed)) |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 3043 | update_tasks_cpumask(cs); |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 3044 | if (mems_updated && !nodes_empty(cs->mems_allowed)) |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 3045 | update_tasks_nodemask(cs); |
| 3046 | |
| 3047 | is_empty = cpumask_empty(cs->cpus_allowed) || |
| 3048 | nodes_empty(cs->mems_allowed); |
| 3049 | |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 3050 | percpu_up_write(&cpuset_rwsem); |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 3051 | |
| 3052 | /* |
| 3053 | * Move tasks to the nearest ancestor with execution resources. |
| 3054 | * This is a full cgroup operation which will also call back into |
| 3055 | * cpuset. Should be done outside any lock. |
| 3056 | */ |
| 3057 | if (is_empty) |
| 3058 | remove_tasks_in_empty_cpuset(cs); |
| 3059 | |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 3060 | percpu_down_write(&cpuset_rwsem); |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 3061 | } |
| 3062 | |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 3063 | static void |
| 3064 | hotplug_update_tasks(struct cpuset *cs, |
| 3065 | struct cpumask *new_cpus, nodemask_t *new_mems, |
| 3066 | bool cpus_updated, bool mems_updated) |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 3067 | { |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 3068 | if (cpumask_empty(new_cpus)) |
| 3069 | cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus); |
| 3070 | if (nodes_empty(*new_mems)) |
| 3071 | *new_mems = parent_cs(cs)->effective_mems; |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 3072 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 3073 | spin_lock_irq(&callback_lock); |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 3074 | cpumask_copy(cs->effective_cpus, new_cpus); |
| 3075 | cs->effective_mems = *new_mems; |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 3076 | spin_unlock_irq(&callback_lock); |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 3077 | |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 3078 | if (cpus_updated) |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 3079 | update_tasks_cpumask(cs); |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 3080 | if (mems_updated) |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 3081 | update_tasks_nodemask(cs); |
| 3082 | } |
| 3083 | |
Waiman Long | 4b842da | 2018-11-08 10:08:41 -0500 | [diff] [blame] | 3084 | static bool force_rebuild; |
| 3085 | |
| 3086 | void cpuset_force_rebuild(void) |
| 3087 | { |
| 3088 | force_rebuild = true; |
| 3089 | } |
| 3090 | |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3091 | /** |
Li Zefan | 388afd8 | 2013-06-09 17:14:47 +0800 | [diff] [blame] | 3092 | * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3093 | * @cs: cpuset in interest |
Waiman Long | 4b842da | 2018-11-08 10:08:41 -0500 | [diff] [blame] | 3094 | * @tmp: the tmpmasks structure pointer |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 3095 | * |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3096 | * Compare @cs's cpu and mem masks against top_cpuset and if some have gone |
| 3097 | * offline, update @cs accordingly. If @cs ends up with no CPU or memory, |
| 3098 | * all its tasks are moved to the nearest ancestor with both resources. |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 3099 | */ |
Waiman Long | 4b842da | 2018-11-08 10:08:41 -0500 | [diff] [blame] | 3100 | static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp) |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 3101 | { |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 3102 | static cpumask_t new_cpus; |
| 3103 | static nodemask_t new_mems; |
| 3104 | bool cpus_updated; |
| 3105 | bool mems_updated; |
Waiman Long | 4b842da | 2018-11-08 10:08:41 -0500 | [diff] [blame] | 3106 | struct cpuset *parent; |
Li Zefan | e44193d | 2013-06-09 17:14:22 +0800 | [diff] [blame] | 3107 | retry: |
| 3108 | wait_event(cpuset_attach_wq, cs->attach_in_progress == 0); |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 3109 | |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 3110 | percpu_down_write(&cpuset_rwsem); |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 3111 | |
Li Zefan | e44193d | 2013-06-09 17:14:22 +0800 | [diff] [blame] | 3112 | /* |
| 3113 | * We have raced with task attaching. We wait until attaching |
| 3114 | * is finished, so we won't attach a task to an empty cpuset. |
| 3115 | */ |
| 3116 | if (cs->attach_in_progress) { |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 3117 | percpu_up_write(&cpuset_rwsem); |
Li Zefan | e44193d | 2013-06-09 17:14:22 +0800 | [diff] [blame] | 3118 | goto retry; |
| 3119 | } |
| 3120 | |
Waiman Long | 0f3adb8 | 2021-07-20 10:18:26 -0400 | [diff] [blame] | 3121 | parent = parent_cs(cs); |
Waiman Long | 4b842da | 2018-11-08 10:08:41 -0500 | [diff] [blame] | 3122 | compute_effective_cpumask(&new_cpus, cs, parent); |
| 3123 | nodes_and(new_mems, cs->mems_allowed, parent->effective_mems); |
Paul Jackson | b450129 | 2008-02-07 00:14:47 -0800 | [diff] [blame] | 3124 | |
Waiman Long | 4b842da | 2018-11-08 10:08:41 -0500 | [diff] [blame] | 3125 | if (cs->nr_subparts_cpus) |
| 3126 | /* |
| 3127 | * Make sure that CPUs allocated to child partitions |
| 3128 | * do not show up in effective_cpus. |
| 3129 | */ |
| 3130 | cpumask_andnot(&new_cpus, &new_cpus, cs->subparts_cpus); |
| 3131 | |
| 3132 | if (!tmp || !cs->partition_root_state) |
| 3133 | goto update_tasks; |
| 3134 | |
| 3135 | /* |
| 3136 | * In the unlikely event that a partition root has empty |
| 3137 | * effective_cpus or its parent becomes erroneous, we have to |
| 3138 | * transition it to the erroneous state. |
| 3139 | */ |
| 3140 | if (is_partition_root(cs) && (cpumask_empty(&new_cpus) || |
| 3141 | (parent->partition_root_state == PRS_ERROR))) { |
| 3142 | if (cs->nr_subparts_cpus) { |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 3143 | spin_lock_irq(&callback_lock); |
Waiman Long | 4b842da | 2018-11-08 10:08:41 -0500 | [diff] [blame] | 3144 | cs->nr_subparts_cpus = 0; |
| 3145 | cpumask_clear(cs->subparts_cpus); |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 3146 | spin_unlock_irq(&callback_lock); |
Waiman Long | 4b842da | 2018-11-08 10:08:41 -0500 | [diff] [blame] | 3147 | compute_effective_cpumask(&new_cpus, cs, parent); |
| 3148 | } |
| 3149 | |
| 3150 | /* |
| 3151 | * If the effective_cpus is empty because the child |
| 3152 | * partitions take away all the CPUs, we can keep |
| 3153 | * the current partition and let the child partitions |
| 3154 | * fight for available CPUs. |
| 3155 | */ |
| 3156 | if ((parent->partition_root_state == PRS_ERROR) || |
| 3157 | cpumask_empty(&new_cpus)) { |
Waiman Long | e7cc988 | 2021-08-10 23:06:02 -0400 | [diff] [blame] | 3158 | int old_prs; |
| 3159 | |
Waiman Long | 4b842da | 2018-11-08 10:08:41 -0500 | [diff] [blame] | 3160 | update_parent_subparts_cpumask(cs, partcmd_disable, |
| 3161 | NULL, tmp); |
Waiman Long | e7cc988 | 2021-08-10 23:06:02 -0400 | [diff] [blame] | 3162 | old_prs = cs->partition_root_state; |
| 3163 | if (old_prs != PRS_ERROR) { |
| 3164 | spin_lock_irq(&callback_lock); |
| 3165 | cs->partition_root_state = PRS_ERROR; |
| 3166 | spin_unlock_irq(&callback_lock); |
| 3167 | notify_partition_change(cs, old_prs, PRS_ERROR); |
| 3168 | } |
Waiman Long | 4b842da | 2018-11-08 10:08:41 -0500 | [diff] [blame] | 3169 | } |
| 3170 | cpuset_force_rebuild(); |
| 3171 | } |
| 3172 | |
| 3173 | /* |
| 3174 | * On the other hand, an erroneous partition root may be transitioned |
| 3175 | * back to a regular one or a partition root with no CPU allocated |
| 3176 | * from the parent may change to erroneous. |
| 3177 | */ |
| 3178 | if (is_partition_root(parent) && |
| 3179 | ((cs->partition_root_state == PRS_ERROR) || |
| 3180 | !cpumask_intersects(&new_cpus, parent->subparts_cpus)) && |
| 3181 | update_parent_subparts_cpumask(cs, partcmd_update, NULL, tmp)) |
| 3182 | cpuset_force_rebuild(); |
| 3183 | |
| 3184 | update_tasks: |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 3185 | cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus); |
| 3186 | mems_updated = !nodes_equal(new_mems, cs->effective_mems); |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 3187 | |
Feng Tang | 8ca1b5a | 2021-11-05 13:40:34 -0700 | [diff] [blame] | 3188 | if (mems_updated) |
| 3189 | check_insane_mems_config(&new_mems); |
| 3190 | |
Waiman Long | b8d1b8e | 2017-08-17 15:33:10 -0400 | [diff] [blame] | 3191 | if (is_in_v2_mode()) |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 3192 | hotplug_update_tasks(cs, &new_cpus, &new_mems, |
| 3193 | cpus_updated, mems_updated); |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 3194 | else |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 3195 | hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems, |
| 3196 | cpus_updated, mems_updated); |
Tejun Heo | 8d03394 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3197 | |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 3198 | percpu_up_write(&cpuset_rwsem); |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3199 | } |
Paul Jackson | b450129 | 2008-02-07 00:14:47 -0800 | [diff] [blame] | 3200 | |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3201 | /** |
Tejun Heo | 2b729fe | 2020-04-03 11:32:13 -0400 | [diff] [blame] | 3202 | * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3203 | * |
| 3204 | * This function is called after either CPU or memory configuration has |
| 3205 | * changed and updates cpuset accordingly. The top_cpuset is always |
| 3206 | * synchronized to cpu_active_mask and N_MEMORY, which is necessary in |
| 3207 | * order to make cpusets transparent (of no effect) on systems that are |
| 3208 | * actively using CPU hotplug but making no active use of cpusets. |
| 3209 | * |
| 3210 | * Non-root cpusets are only affected by offlining. If any CPUs or memory |
Li Zefan | 388afd8 | 2013-06-09 17:14:47 +0800 | [diff] [blame] | 3211 | * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on |
| 3212 | * all descendants. |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3213 | * |
| 3214 | * Note that CPU offlining during suspend is ignored. We don't modify |
| 3215 | * cpusets across suspend/resume cycles at all. |
| 3216 | */ |
Tejun Heo | 2b729fe | 2020-04-03 11:32:13 -0400 | [diff] [blame] | 3217 | static void cpuset_hotplug_workfn(struct work_struct *work) |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3218 | { |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 3219 | static cpumask_t new_cpus; |
| 3220 | static nodemask_t new_mems; |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3221 | bool cpus_updated, mems_updated; |
Waiman Long | b8d1b8e | 2017-08-17 15:33:10 -0400 | [diff] [blame] | 3222 | bool on_dfl = is_in_v2_mode(); |
Waiman Long | 4b842da | 2018-11-08 10:08:41 -0500 | [diff] [blame] | 3223 | struct tmpmasks tmp, *ptmp = NULL; |
| 3224 | |
| 3225 | if (on_dfl && !alloc_cpumasks(NULL, &tmp)) |
| 3226 | ptmp = &tmp; |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 3227 | |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 3228 | percpu_down_write(&cpuset_rwsem); |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 3229 | |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3230 | /* fetch the available cpus/mems and find out which changed how */ |
| 3231 | cpumask_copy(&new_cpus, cpu_active_mask); |
| 3232 | new_mems = node_states[N_MEMORY]; |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 3233 | |
Waiman Long | 4b842da | 2018-11-08 10:08:41 -0500 | [diff] [blame] | 3234 | /* |
| 3235 | * If subparts_cpus is populated, it is likely that the check below |
| 3236 | * will produce a false positive on cpus_updated when the cpu list |
| 3237 | * isn't changed. It is extra work, but it is better to be safe. |
| 3238 | */ |
Li Zefan | 7e88291 | 2014-07-09 16:48:42 +0800 | [diff] [blame] | 3239 | cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus); |
| 3240 | mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems); |
Srivatsa S. Bhat | 7ddf96b | 2012-05-24 19:46:55 +0530 | [diff] [blame] | 3241 | |
Waiman Long | 15d428e | 2021-07-20 10:18:27 -0400 | [diff] [blame] | 3242 | /* |
| 3243 | * In the rare case that hotplug removes all the cpus in subparts_cpus, |
| 3244 | * we assume that cpus are updated. |
| 3245 | */ |
| 3246 | if (!cpus_updated && top_cpuset.nr_subparts_cpus) |
| 3247 | cpus_updated = true; |
| 3248 | |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3249 | /* synchronize cpus_allowed to cpu_active_mask */ |
| 3250 | if (cpus_updated) { |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 3251 | spin_lock_irq(&callback_lock); |
Li Zefan | 7e88291 | 2014-07-09 16:48:42 +0800 | [diff] [blame] | 3252 | if (!on_dfl) |
| 3253 | cpumask_copy(top_cpuset.cpus_allowed, &new_cpus); |
Waiman Long | 4b842da | 2018-11-08 10:08:41 -0500 | [diff] [blame] | 3254 | /* |
| 3255 | * Make sure that CPUs allocated to child partitions |
| 3256 | * do not show up in effective_cpus. If no CPU is left, |
| 3257 | * we clear the subparts_cpus & let the child partitions |
| 3258 | * fight for the CPUs again. |
| 3259 | */ |
| 3260 | if (top_cpuset.nr_subparts_cpus) { |
| 3261 | if (cpumask_subset(&new_cpus, |
| 3262 | top_cpuset.subparts_cpus)) { |
| 3263 | top_cpuset.nr_subparts_cpus = 0; |
| 3264 | cpumask_clear(top_cpuset.subparts_cpus); |
| 3265 | } else { |
| 3266 | cpumask_andnot(&new_cpus, &new_cpus, |
| 3267 | top_cpuset.subparts_cpus); |
| 3268 | } |
| 3269 | } |
Li Zefan | 1344ab9 | 2014-07-09 16:47:16 +0800 | [diff] [blame] | 3270 | cpumask_copy(top_cpuset.effective_cpus, &new_cpus); |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 3271 | spin_unlock_irq(&callback_lock); |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3272 | /* we don't mess with cpumasks of tasks in top_cpuset */ |
| 3273 | } |
Srivatsa S. Bhat | 7ddf96b | 2012-05-24 19:46:55 +0530 | [diff] [blame] | 3274 | |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3275 | /* synchronize mems_allowed to N_MEMORY */ |
| 3276 | if (mems_updated) { |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 3277 | spin_lock_irq(&callback_lock); |
Li Zefan | 7e88291 | 2014-07-09 16:48:42 +0800 | [diff] [blame] | 3278 | if (!on_dfl) |
| 3279 | top_cpuset.mems_allowed = new_mems; |
Li Zefan | 1344ab9 | 2014-07-09 16:47:16 +0800 | [diff] [blame] | 3280 | top_cpuset.effective_mems = new_mems; |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 3281 | spin_unlock_irq(&callback_lock); |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 3282 | update_tasks_nodemask(&top_cpuset); |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3283 | } |
| 3284 | |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 3285 | percpu_up_write(&cpuset_rwsem); |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3286 | |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 3287 | /* if cpus or mems changed, we need to propagate to descendants */ |
| 3288 | if (cpus_updated || mems_updated) { |
Lai Jiangshan | 38d7bee | 2012-12-12 13:51:24 -0800 | [diff] [blame] | 3289 | struct cpuset *cs; |
Tejun Heo | 492eb21 | 2013-08-08 20:11:25 -0400 | [diff] [blame] | 3290 | struct cgroup_subsys_state *pos_css; |
Paul Jackson | b450129 | 2008-02-07 00:14:47 -0800 | [diff] [blame] | 3291 | |
Paul Jackson | b450129 | 2008-02-07 00:14:47 -0800 | [diff] [blame] | 3292 | rcu_read_lock(); |
Tejun Heo | 492eb21 | 2013-08-08 20:11:25 -0400 | [diff] [blame] | 3293 | cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { |
Tejun Heo | ec903c0 | 2014-05-13 12:11:01 -0400 | [diff] [blame] | 3294 | if (cs == &top_cpuset || !css_tryget_online(&cs->css)) |
Li Zefan | 388afd8 | 2013-06-09 17:14:47 +0800 | [diff] [blame] | 3295 | continue; |
| 3296 | rcu_read_unlock(); |
| 3297 | |
Waiman Long | 4b842da | 2018-11-08 10:08:41 -0500 | [diff] [blame] | 3298 | cpuset_hotplug_update_tasks(cs, ptmp); |
Li Zefan | 388afd8 | 2013-06-09 17:14:47 +0800 | [diff] [blame] | 3299 | |
| 3300 | rcu_read_lock(); |
| 3301 | css_put(&cs->css); |
| 3302 | } |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3303 | rcu_read_unlock(); |
| 3304 | } |
Tejun Heo | 8d03394 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3305 | |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3306 | /* rebuild sched domains if cpus_allowed has changed */ |
Peter Zijlstra | 50e7663 | 2017-09-07 11:13:38 +0200 | [diff] [blame] | 3307 | if (cpus_updated || force_rebuild) { |
| 3308 | force_rebuild = false; |
Tejun Heo | 2b729fe | 2020-04-03 11:32:13 -0400 | [diff] [blame] | 3309 | rebuild_sched_domains(); |
Peter Zijlstra | 50e7663 | 2017-09-07 11:13:38 +0200 | [diff] [blame] | 3310 | } |
Waiman Long | 4b842da | 2018-11-08 10:08:41 -0500 | [diff] [blame] | 3311 | |
| 3312 | free_cpumasks(NULL, ptmp); |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 3313 | } |
| 3314 | |
Prateek Sood | a49e462 | 2020-01-24 20:37:29 +0530 | [diff] [blame] | 3315 | void cpuset_update_active_cpus(void) |
Peter Zijlstra | 50e7663 | 2017-09-07 11:13:38 +0200 | [diff] [blame] | 3316 | { |
Tejun Heo | 2b729fe | 2020-04-03 11:32:13 -0400 | [diff] [blame] | 3317 | /* |
| 3318 | * We're inside cpu hotplug critical region which usually nests |
| 3319 | * inside cgroup synchronization. Bounce actual hotplug processing |
| 3320 | * to a work item to avoid reverse locking order. |
| 3321 | */ |
| 3322 | schedule_work(&cpuset_hotplug_work); |
| 3323 | } |
| 3324 | |
| 3325 | void cpuset_wait_for_hotplug(void) |
| 3326 | { |
| 3327 | flush_work(&cpuset_hotplug_work); |
Peter Zijlstra | 50e7663 | 2017-09-07 11:13:38 +0200 | [diff] [blame] | 3328 | } |
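
/*
 * Usage sketch (the real callers live outside this file, in the
 * scheduler and PM code):
 *
 *	cpuset_update_active_cpus();	// from a CPU hotplug callback:
 *					// queues the asynchronous update
 *	...
 *	cpuset_wait_for_hotplug();	// drains it before proceeding,
 *					// e.g. around suspend/resume
 */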
| 3329 | |
Paul Jackson | 38837fc | 2006-09-29 02:01:16 -0700 | [diff] [blame] | 3330 | /* |
Lai Jiangshan | 38d7bee | 2012-12-12 13:51:24 -0800 | [diff] [blame] | 3331 | * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY]. |
| 3332 | * Call this routine anytime after node_states[N_MEMORY] changes. |
Srivatsa S. Bhat | a1cd2b1 | 2012-05-24 19:47:03 +0530 | [diff] [blame] | 3333 | * See cpuset_update_active_cpus() for CPU hotplug handling. |
Paul Jackson | 38837fc | 2006-09-29 02:01:16 -0700 | [diff] [blame] | 3334 | */ |
Miao Xie | f481891 | 2008-11-19 15:36:30 -0800 | [diff] [blame] | 3335 | static int cpuset_track_online_nodes(struct notifier_block *self, |
| 3336 | unsigned long action, void *arg) |
Paul Jackson | 38837fc | 2006-09-29 02:01:16 -0700 | [diff] [blame] | 3337 | { |
Tejun Heo | 3a5a6d0 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3338 | schedule_work(&cpuset_hotplug_work); |
Miao Xie | f481891 | 2008-11-19 15:36:30 -0800 | [diff] [blame] | 3339 | return NOTIFY_OK; |
Paul Jackson | 38837fc | 2006-09-29 02:01:16 -0700 | [diff] [blame] | 3340 | } |
Andrew Morton | d8f10cb | 2013-04-29 15:08:08 -0700 | [diff] [blame] | 3341 | |
| 3342 | static struct notifier_block cpuset_track_online_nodes_nb = { |
| 3343 | .notifier_call = cpuset_track_online_nodes, |
| 3344 | .priority = 10, /* ??! */ |
| 3345 | }; |
Paul Jackson | 38837fc | 2006-09-29 02:01:16 -0700 | [diff] [blame] | 3346 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3347 | /** |
| 3348 | * cpuset_init_smp - initialize cpus_allowed |
| 3349 | * |
| 3350 | * Description: Finish top cpuset after cpu, node maps are initialized |
Andrew Morton | d8f10cb | 2013-04-29 15:08:08 -0700 | [diff] [blame] | 3351 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3352 | void __init cpuset_init_smp(void) |
| 3353 | { |
Peter Zijlstra | 6ad4c18 | 2009-11-25 13:31:39 +0100 | [diff] [blame] | 3354 | cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask); |
Lai Jiangshan | 38d7bee | 2012-12-12 13:51:24 -0800 | [diff] [blame] | 3355 | top_cpuset.mems_allowed = node_states[N_MEMORY]; |
Li Zefan | 33ad801 | 2013-06-09 17:15:08 +0800 | [diff] [blame] | 3356 | top_cpuset.old_mems_allowed = top_cpuset.mems_allowed; |
Paul Jackson | 4c4d50f | 2006-08-27 01:23:51 -0700 | [diff] [blame] | 3357 | |
Li Zefan | e2b9a3d | 2014-07-09 16:47:03 +0800 | [diff] [blame] | 3358 | cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask); |
| 3359 | top_cpuset.effective_mems = node_states[N_MEMORY]; |
| 3360 | |
Andrew Morton | d8f10cb | 2013-04-29 15:08:08 -0700 | [diff] [blame] | 3361 | register_hotmemory_notifier(&cpuset_track_online_nodes_nb); |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 3362 | |
| 3363 | cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0); |
| 3364 | BUG_ON(!cpuset_migrate_mm_wq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3365 | } |
| 3366 | |
| 3367 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3368 | * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset. |
| 3369 | * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed. |
Li Zefan | 6af866a | 2009-01-07 18:08:45 -0800 | [diff] [blame] | 3370 | * @pmask: pointer to struct cpumask variable to receive cpus_allowed set. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3371 | * |
Li Zefan | 300ed6c | 2009-01-07 18:08:44 -0800 | [diff] [blame] | 3372 | * Description: Returns the cpumask_var_t cpus_allowed of the cpuset |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3373 | * attached to the specified @tsk. Guaranteed to return some non-empty |
Rusty Russell | 5f054e3 | 2012-03-29 15:38:31 +1030 | [diff] [blame] | 3374 | * subset of cpu_online_mask, even if this means going outside the |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3375 | * task's cpuset. |
| 3376 | **/ |
| 3377 | |
Li Zefan | 6af866a | 2009-01-07 18:08:45 -0800 | [diff] [blame] | 3378 | void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3379 | { |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 3380 | unsigned long flags; |
| 3381 | |
| 3382 | spin_lock_irqsave(&callback_lock, flags); |
Will Deacon | 431c69f | 2021-07-30 12:24:30 +0100 | [diff] [blame] | 3383 | guarantee_online_cpus(tsk, pmask); |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 3384 | spin_unlock_irqrestore(&callback_lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3385 | } |
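
/*
 * Minimal usage sketch (kernel context assumed, @p a valid task):
 *
 *	cpumask_var_t mask;
 *
 *	if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
 *		cpuset_cpus_allowed(p, mask);	// non-empty subset of
 *						// cpu_online_mask
 *		free_cpumask_var(mask);
 *	}
 */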
| 3386 | |
Joel Savitz | d477f8c | 2019-06-12 11:50:48 -0400 | [diff] [blame] | 3387 | /** |
| 3388 | * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe. |
| 3389 | * @tsk: pointer to task_struct with which the scheduler is struggling |
| 3390 | * |
| 3391 | * Description: In the case that the scheduler cannot find an allowed cpu in |
| 3392 | * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy |
| 3393 | * mode however, this value is the same as task_cs(tsk)->effective_cpus, |
| 3394 | * which will not contain a sane cpumask during cases such as cpu hotplugging. |
| 3395 | * This is the absolute last resort for the scheduler and it is only used if |
| 3396 | * _every_ other avenue has been traveled. |
Will Deacon | 97c0054 | 2021-07-30 12:24:31 +0100 | [diff] [blame] | 3397 | * |
| 3398 | * Returns true if the affinity of @tsk was changed, false otherwise. |
Joel Savitz | d477f8c | 2019-06-12 11:50:48 -0400 | [diff] [blame] | 3399 | **/ |
| 3400 | |
Will Deacon | 97c0054 | 2021-07-30 12:24:31 +0100 | [diff] [blame] | 3401 | bool cpuset_cpus_allowed_fallback(struct task_struct *tsk) |
Oleg Nesterov | 9084bb8 | 2010-03-15 10:10:27 +0100 | [diff] [blame] | 3402 | { |
Will Deacon | d4b96fb | 2021-07-30 12:24:29 +0100 | [diff] [blame] | 3403 | const struct cpumask *possible_mask = task_cpu_possible_mask(tsk); |
| 3404 | const struct cpumask *cs_mask; |
Will Deacon | 97c0054 | 2021-07-30 12:24:31 +0100 | [diff] [blame] | 3405 | bool changed = false; |
Will Deacon | d4b96fb | 2021-07-30 12:24:29 +0100 | [diff] [blame] | 3406 | |
Oleg Nesterov | 9084bb8 | 2010-03-15 10:10:27 +0100 | [diff] [blame] | 3407 | rcu_read_lock(); |
Will Deacon | d4b96fb | 2021-07-30 12:24:29 +0100 | [diff] [blame] | 3408 | cs_mask = task_cs(tsk)->cpus_allowed; |
Will Deacon | 97c0054 | 2021-07-30 12:24:31 +0100 | [diff] [blame] | 3409 | if (is_in_v2_mode() && cpumask_subset(cs_mask, possible_mask)) { |
Will Deacon | d4b96fb | 2021-07-30 12:24:29 +0100 | [diff] [blame] | 3410 | do_set_cpus_allowed(tsk, cs_mask); |
Will Deacon | 97c0054 | 2021-07-30 12:24:31 +0100 | [diff] [blame] | 3411 | changed = true; |
| 3412 | } |
Oleg Nesterov | 9084bb8 | 2010-03-15 10:10:27 +0100 | [diff] [blame] | 3413 | rcu_read_unlock(); |
| 3414 | |
| 3415 | /* |
| 3416 | * We own tsk->cpus_allowed, nobody can change it under us. |
| 3417 | * |
| 3418 | * But we used cs && cs->cpus_allowed lockless and thus can |
| 3419 | * race with cgroup_attach_task() or update_cpumask() and get |
| 3420 | * the wrong tsk->cpus_allowed. However, both cases imply the |
| 3421 | * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr() |
| 3422 | * which takes task_rq_lock(). |
| 3423 | * |
| 3424 | * If we are called after it dropped the lock we must see all |
| 3425 | * changes in task_cs()->cpus_allowed. Otherwise we can temporarily |
| 3426 | * set any mask even if it is not right from task_cs() pov, |
| 3427 | * the pending set_cpus_allowed_ptr() will fix things. |
Peter Zijlstra | 2baab4e | 2012-03-20 15:57:01 +0100 | [diff] [blame] | 3428 | * |
| 3429 | * select_fallback_rq() will fix things up and set cpu_possible_mask |
| 3430 | * if required. |
Oleg Nesterov | 9084bb8 | 2010-03-15 10:10:27 +0100 | [diff] [blame] | 3431 | */ |
Will Deacon | 97c0054 | 2021-07-30 12:24:31 +0100 | [diff] [blame] | 3432 | return changed; |
Oleg Nesterov | 9084bb8 | 2010-03-15 10:10:27 +0100 | [diff] [blame] | 3433 | } |
| 3434 | |
Rasmus Villemoes | 8f4ab07 | 2015-02-12 15:00:16 -0800 | [diff] [blame] | 3435 | void __init cpuset_init_current_mems_allowed(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3436 | { |
Mike Travis | f9a86fc | 2008-04-04 18:11:07 -0700 | [diff] [blame] | 3437 | nodes_setall(current->mems_allowed); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3438 | } |
| 3439 | |
Randy Dunlap | d9fd8a6 | 2005-07-27 11:45:11 -0700 | [diff] [blame] | 3440 | /** |
Paul Jackson | 909d75a | 2006-01-08 01:01:55 -0800 | [diff] [blame] | 3441 | * cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset. |
| 3442 | * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed. |
| 3443 | * |
| 3444 | * Description: Returns the nodemask_t mems_allowed of the cpuset |
| 3445 | * attached to the specified @tsk. Guaranteed to return some non-empty |
Lai Jiangshan | 38d7bee | 2012-12-12 13:51:24 -0800 | [diff] [blame] | 3446 | * subset of node_states[N_MEMORY], even if this means going outside the |
Paul Jackson | 909d75a | 2006-01-08 01:01:55 -0800 | [diff] [blame] | 3447 | * task's cpuset. |
| 3448 | **/ |
| 3449 | |
| 3450 | nodemask_t cpuset_mems_allowed(struct task_struct *tsk) |
| 3451 | { |
| 3452 | nodemask_t mask; |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 3453 | unsigned long flags; |
Paul Jackson | 909d75a | 2006-01-08 01:01:55 -0800 | [diff] [blame] | 3454 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 3455 | spin_lock_irqsave(&callback_lock, flags); |
Li Zefan | b8dadcb | 2014-03-03 17:28:36 -0500 | [diff] [blame] | 3456 | rcu_read_lock(); |
Li Zefan | ae1c802 | 2014-07-09 16:48:32 +0800 | [diff] [blame] | 3457 | guarantee_online_mems(task_cs(tsk), &mask); |
Li Zefan | b8dadcb | 2014-03-03 17:28:36 -0500 | [diff] [blame] | 3458 | rcu_read_unlock(); |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 3459 | spin_unlock_irqrestore(&callback_lock, flags); |
Paul Jackson | 909d75a | 2006-01-08 01:01:55 -0800 | [diff] [blame] | 3460 | |
| 3461 | return mask; |
| 3462 | } |
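
/*
 * Usage sketch (kernel context assumed):
 *
 *	nodemask_t nodes = cpuset_mems_allowed(current);
 *	// nodes is a non-empty subset of node_states[N_MEMORY]
 */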
| 3463 | |
| 3464 | /** |
Zhen Lei | 08b2b6f | 2021-05-24 16:29:43 +0800 | [diff] [blame] | 3465 | * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed |
Mel Gorman | 19770b3 | 2008-04-28 02:12:18 -0700 | [diff] [blame] | 3466 | * @nodemask: the nodemask to be checked |
Randy Dunlap | d9fd8a6 | 2005-07-27 11:45:11 -0700 | [diff] [blame] | 3467 | * |
Mel Gorman | 19770b3 | 2008-04-28 02:12:18 -0700 | [diff] [blame] | 3468 | * Are any of the nodes in the nodemask allowed in current->mems_allowed? |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3469 | */ |
Mel Gorman | 19770b3 | 2008-04-28 02:12:18 -0700 | [diff] [blame] | 3470 | int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3471 | { |
Mel Gorman | 19770b3 | 2008-04-28 02:12:18 -0700 | [diff] [blame] | 3472 | return nodes_intersects(*nodemask, current->mems_allowed); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3473 | } |
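
/*
 * Illustrative caller (hypothetical): reject a memory policy whose nodes
 * all fall outside the current task's cpuset:
 *
 *	if (!cpuset_nodemask_valid_mems_allowed(&policy_nodes))
 *		return -EINVAL;
 */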
| 3474 | |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 3475 | /* |
Paul Menage | 7860836 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 3476 | * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or |
| 3477 | * mem_hardwall ancestor to the specified cpuset. Must be called |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 3478 | * holding callback_lock. If no ancestor is mem_exclusive or mem_hardwall |
Paul Menage | 7860836 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 3479 | * (an unusual configuration), then returns the root cpuset. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3480 | */ |
Tejun Heo | c9710d8 | 2013-08-08 20:11:22 -0400 | [diff] [blame] | 3481 | static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3482 | { |
Tejun Heo | c431069 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 3483 | while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs)) |
| 3484 | cs = parent_cs(cs); |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 3485 | return cs; |
| 3486 | } |
| 3487 | |
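/*
 * Illustrative sketch, not part of the original file: given the
 * hypothetical hierarchy
 *
 *	/		(root)
 *	/a		mem_hardwall set
 *	/a/b
 *	/a/b/c		<-- cs
 *
 * nearest_hardwall_ancestor(cs) walks up from /a/b/c and returns /a.
 * If no ancestor were exclusive or hardwalled, the loop would stop at
 * the root cpuset, whose parent_cs() is NULL.
 */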
| 3488 | /** |
Vladimir Davydov | 344736f | 2014-10-20 15:50:30 +0400 | [diff] [blame] | 3489 | * cpuset_node_allowed - Can we allocate on a memory node? |
David Rientjes | a1bc5a4 | 2009-04-02 16:57:54 -0700 | [diff] [blame] | 3490 | * @node: is this an allowed node? |
Paul Jackson | 02a0e53 | 2006-12-13 00:34:25 -0800 | [diff] [blame] | 3491 | * @gfp_mask: memory allocation flags |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 3492 | * |
David Rientjes | 6e276d2 | 2015-04-14 15:47:01 -0700 | [diff] [blame] | 3493 | * If we're in interrupt, yes, we can always allocate. If @node is set in |
| 3494 | * current's mems_allowed, yes. If it's not a __GFP_HARDWALL request and this |
| 3495 | * node is set in the nearest hardwalled cpuset ancestor to current's cpuset, |
Michal Hocko | da99ecf | 2017-09-06 16:24:53 -0700 | [diff] [blame] | 3496 | * yes. If current has access to memory reserves as an oom victim, yes. |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 3497 | * Otherwise, no. |
| 3498 | * |
| 3499 | * GFP_USER allocations are marked with the __GFP_HARDWALL bit, |
David Rientjes | c596d9f | 2007-05-06 14:49:32 -0700 | [diff] [blame] | 3500 | * and do not allow allocations outside the current task's cpuset |
Michal Hocko | da99ecf | 2017-09-06 16:24:53 -0700 | [diff] [blame] | 3501 | * unless the task has been OOM killed. |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 3502 | * GFP_KERNEL allocations are not so marked, so can escape to the |
Paul Menage | 7860836 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 3503 | * nearest enclosing hardwalled ancestor cpuset. |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 3504 | * |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 3505 | * Scanning up parent cpusets requires callback_lock. The |
Paul Jackson | 02a0e53 | 2006-12-13 00:34:25 -0800 | [diff] [blame] | 3506 | * __alloc_pages() routine only calls here with __GFP_HARDWALL bit |
| 3507 | * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the |
| 3508 | * current task's mems_allowed came up empty on the first pass over |
| 3509 | * the zonelist. So only GFP_KERNEL allocations, if all nodes in the |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 3510 | * cpuset are short of memory, might require taking the callback_lock. |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 3511 | * |
Paul Jackson | 36be57f | 2006-05-20 15:00:10 -0700 | [diff] [blame] | 3512 | * The first call here from mm/page_alloc:get_page_from_freelist() |
Paul Jackson | 02a0e53 | 2006-12-13 00:34:25 -0800 | [diff] [blame] | 3513 | * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets, |
| 3514 | * so no allocation on a node outside the cpuset is allowed (unless |
| 3515 | * in interrupt, of course). |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 3516 | * |
Paul Jackson | 36be57f | 2006-05-20 15:00:10 -0700 | [diff] [blame] | 3517 | * The second pass through get_page_from_freelist() doesn't even call |
| 3518 | * here for GFP_ATOMIC calls. For those calls, the __alloc_pages() |
| 3519 | * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set |
| 3520 | * in alloc_flags. That logic and the checks below have the combined |
| 3521 | * effect that: |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 3522 | * in_interrupt - any node ok (current task context irrelevant) |
| 3523 | * GFP_ATOMIC - any node ok |
Michal Hocko | da99ecf | 2017-09-06 16:24:53 -0700 | [diff] [blame] | 3524 | * tsk_is_oom_victim - any node ok |
Paul Menage | 7860836 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 3525 | * GFP_KERNEL - any node in enclosing hardwalled cpuset ok |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 3526 | * GFP_USER - only nodes in the current task's mems_allowed ok. |
Paul Jackson | 02a0e53 | 2006-12-13 00:34:25 -0800 | [diff] [blame] | 3527 | */ |
Vlastimil Babka | 002f290 | 2016-05-19 17:14:30 -0700 | [diff] [blame] | 3528 | bool __cpuset_node_allowed(int node, gfp_t gfp_mask) |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 3529 | { |
Tejun Heo | c9710d8 | 2013-08-08 20:11:22 -0400 | [diff] [blame] | 3530 | struct cpuset *cs; /* current cpuset ancestors */ |
Qi Zheng | d4296fa | 2021-12-19 10:41:54 +0800 | [diff] [blame] | 3531 | bool allowed; /* is allocation on @node allowed? */ |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 3532 | unsigned long flags; |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 3533 | |
David Rientjes | 6e276d2 | 2015-04-14 15:47:01 -0700 | [diff] [blame] | 3534 | if (in_interrupt()) |
Vlastimil Babka | 002f290 | 2016-05-19 17:14:30 -0700 | [diff] [blame] | 3535 | return true; |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 3536 | if (node_isset(node, current->mems_allowed)) |
Vlastimil Babka | 002f290 | 2016-05-19 17:14:30 -0700 | [diff] [blame] | 3537 | return true; |
David Rientjes | c596d9f | 2007-05-06 14:49:32 -0700 | [diff] [blame] | 3538 | /* |
| 3539 | * Allow tasks that have access to memory reserves because they have |
| 3540 | * been OOM killed to get memory anywhere. |
| 3541 | */ |
Michal Hocko | da99ecf | 2017-09-06 16:24:53 -0700 | [diff] [blame] | 3542 | if (unlikely(tsk_is_oom_victim(current))) |
Vlastimil Babka | 002f290 | 2016-05-19 17:14:30 -0700 | [diff] [blame] | 3543 | return true; |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 3544 | if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */ |
Vlastimil Babka | 002f290 | 2016-05-19 17:14:30 -0700 | [diff] [blame] | 3545 | return false; |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 3546 | |
Bob Picco | 5563e77 | 2005-11-13 16:06:35 -0800 | [diff] [blame] | 3547 | if (current->flags & PF_EXITING) /* Let dying task have memory */ |
Vlastimil Babka | 002f290 | 2016-05-19 17:14:30 -0700 | [diff] [blame] | 3548 | return true; |
Bob Picco | 5563e77 | 2005-11-13 16:06:35 -0800 | [diff] [blame] | 3549 | |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 3550 | /* Not hardwall and node outside mems_allowed: scan up cpusets */ |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 3551 | spin_lock_irqsave(&callback_lock, flags); |
Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 3552 | |
Li Zefan | b8dadcb | 2014-03-03 17:28:36 -0500 | [diff] [blame] | 3553 | rcu_read_lock(); |
Paul Menage | 7860836 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 3554 | cs = nearest_hardwall_ancestor(task_cs(current)); |
Li Zefan | 99afb0f | 2014-02-27 18:19:36 +0800 | [diff] [blame] | 3555 | allowed = node_isset(node, cs->mems_allowed); |
Li Zefan | b8dadcb | 2014-03-03 17:28:36 -0500 | [diff] [blame] | 3556 | rcu_read_unlock(); |
Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 3557 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 3558 | spin_unlock_irqrestore(&callback_lock, flags); |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 3559 | return allowed; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3560 | } |
| 3561 | |
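/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * caller mirroring the decision table above. Requests that cannot
 * sleep (no __GFP_DIRECT_RECLAIM, e.g. GFP_ATOMIC) take the "any node
 * ok" row; everything else goes through the hardwall logic above.
 */
static bool __maybe_unused example_node_usable(int nid, gfp_t gfp_mask)
{
	if (!(gfp_mask & __GFP_DIRECT_RECLAIM))	/* GFP_ATOMIC-style */
		return true;
	return __cpuset_node_allowed(nid, gfp_mask);
}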
Paul Jackson | ef08e3b | 2005-09-06 15:18:13 -0700 | [diff] [blame] | 3562 | /** |
Jack Steiner | 6adef3e | 2010-05-26 14:42:49 -0700 | [diff] [blame] | 3563 | * cpuset_mem_spread_node() - On which node to begin search for a file page |
| 3564 | * cpuset_slab_spread_node() - On which node to begin search for a slab page |
Paul Jackson | 825a46a | 2006-03-24 03:16:03 -0800 | [diff] [blame] | 3565 | * |
| 3566 | * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for |
| 3567 | * tasks in a cpuset with is_spread_page or is_spread_slab set), |
| 3568 | * and if the memory allocation used cpuset_mem_spread_node() |
| 3569 | * to determine on which node to start looking (as it will for |
| 3570 | * certain page cache or slab cache pages, such as those used for |
| 3571 | * file system buffers and inode caches), then instead of starting |
| 3572 | * the search for a free page on the local node, spread the starting |
| 3573 | * node around the task's mems_allowed nodes. |
| 3574 | * |
| 3575 | * We don't have to worry about the returned node being offline |
| 3576 | * because "it can't happen", and even if it did, it would be ok. |
| 3577 | * |
| 3578 | * The routines calling guarantee_online_mems() are careful to |
| 3579 | * only set nodes in task->mems_allowed that are online. So it |
| 3580 | * should not be possible for the following code to return an |
| 3581 | * offline node. But if it did, that would be ok, as this routine |
| 3582 | * is not returning the node where the allocation must be, only |
| 3583 | * the node where the search should start. The zonelist passed to |
| 3584 | * __alloc_pages() will include all nodes. If the slab allocator |
| 3585 | * is passed an offline node, it will fall back to the local node. |
| 3586 | * See kmem_cache_alloc_node(). |
| 3587 | */ |
| 3588 | |
Jack Steiner | 6adef3e | 2010-05-26 14:42:49 -0700 | [diff] [blame] | 3589 | static int cpuset_spread_node(int *rotor) |
Paul Jackson | 825a46a | 2006-03-24 03:16:03 -0800 | [diff] [blame] | 3590 | { |
Andrew Morton | 0edaf86 | 2016-05-19 17:10:58 -0700 | [diff] [blame] | 3591 | return *rotor = next_node_in(*rotor, current->mems_allowed); |
Paul Jackson | 825a46a | 2006-03-24 03:16:03 -0800 | [diff] [blame] | 3592 | } |
Jack Steiner | 6adef3e | 2010-05-26 14:42:49 -0700 | [diff] [blame] | 3593 | |
| 3594 | int cpuset_mem_spread_node(void) |
| 3595 | { |
Michal Hocko | 778d3b0 | 2011-07-26 16:08:30 -0700 | [diff] [blame] | 3596 | if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE) |
| 3597 | current->cpuset_mem_spread_rotor = |
| 3598 | node_random(¤t->mems_allowed); |
| 3599 | |
Jack Steiner | 6adef3e | 2010-05-26 14:42:49 -0700 | [diff] [blame] | 3600 | return cpuset_spread_node(¤t->cpuset_mem_spread_rotor); |
| 3601 | } |
| 3602 | |
| 3603 | int cpuset_slab_spread_node(void) |
| 3604 | { |
Michal Hocko | 778d3b0 | 2011-07-26 16:08:30 -0700 | [diff] [blame] | 3605 | if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE) |
| 3606 | current->cpuset_slab_spread_rotor = |
| 3607 | node_random(¤t->mems_allowed); |
| 3608 | |
Jack Steiner | 6adef3e | 2010-05-26 14:42:49 -0700 | [diff] [blame] | 3609 | return cpuset_spread_node(¤t->cpuset_slab_spread_rotor); |
| 3610 | } |
| 3611 | |
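/*
 * Illustrative sketch, not part of the original file: repeated calls
 * cycle the per-task rotor round-robin through the allowed nodes.
 * With mems_allowed = {0,2} and the rotor randomly seeded at node 0,
 * successive calls return 2, 0, 2, 0, ... (next_node_in() wraps).
 */
static void __maybe_unused example_show_spread(void)
{
	int i;

	for (i = 0; i < 4; i++)
		pr_debug("spread step %d -> node %d\n",
			 i, cpuset_mem_spread_node());
}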
Paul Jackson | 825a46a | 2006-03-24 03:16:03 -0800 | [diff] [blame] | 3612 | EXPORT_SYMBOL_GPL(cpuset_mem_spread_node); |
| 3613 | |
| 3614 | /** |
David Rientjes | bbe373f | 2007-10-16 23:25:58 -0700 | [diff] [blame] | 3615 | * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's? |
| 3616 | * @tsk1: pointer to task_struct of some task. |
| 3617 | * @tsk2: pointer to task_struct of some other task. |
Paul Jackson | ef08e3b | 2005-09-06 15:18:13 -0700 | [diff] [blame] | 3618 | * |
David Rientjes | bbe373f | 2007-10-16 23:25:58 -0700 | [diff] [blame] | 3619 | * Description: Return true if @tsk1's mems_allowed intersects the |
| 3620 | * mems_allowed of @tsk2. Used by the OOM killer to determine if |
| 3621 | * one task's memory usage might impact the memory available |
| 3622 | * to the other. |
Paul Jackson | ef08e3b | 2005-09-06 15:18:13 -0700 | [diff] [blame] | 3623 | **/ |
| 3624 | |
David Rientjes | bbe373f | 2007-10-16 23:25:58 -0700 | [diff] [blame] | 3625 | int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, |
| 3626 | const struct task_struct *tsk2) |
Paul Jackson | ef08e3b | 2005-09-06 15:18:13 -0700 | [diff] [blame] | 3627 | { |
David Rientjes | bbe373f | 2007-10-16 23:25:58 -0700 | [diff] [blame] | 3628 | return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed); |
Paul Jackson | ef08e3b | 2005-09-06 15:18:13 -0700 | [diff] [blame] | 3629 | } |
| 3630 | |
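/*
 * Illustrative sketch, not part of the original file: an OOM-style
 * filter (helper name hypothetical). Killing @candidate can only help
 * @victim if their allowed node sets overlap; otherwise the freed
 * pages would land on nodes @victim cannot use anyway.
 */
static bool __maybe_unused example_kill_may_help(const struct task_struct *victim,
						 const struct task_struct *candidate)
{
	return cpuset_mems_allowed_intersects(victim, candidate);
}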
David Rientjes | 75aa199 | 2009-01-06 14:39:01 -0800 | [diff] [blame] | 3631 | /** |
David Rientjes | da39da3 | 2015-11-05 18:48:05 -0800 | [diff] [blame] | 3632 | * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed |
David Rientjes | 75aa199 | 2009-01-06 14:39:01 -0800 | [diff] [blame] | 3633 | * |
David Rientjes | da39da3 | 2015-11-05 18:48:05 -0800 | [diff] [blame] | 3634 | * Description: Prints current's name, cpuset name, and cached copy of its |
Li Zefan | b8dadcb | 2014-03-03 17:28:36 -0500 | [diff] [blame] | 3635 | * mems_allowed to the kernel log. |
David Rientjes | 75aa199 | 2009-01-06 14:39:01 -0800 | [diff] [blame] | 3636 | */ |
David Rientjes | da39da3 | 2015-11-05 18:48:05 -0800 | [diff] [blame] | 3637 | void cpuset_print_current_mems_allowed(void) |
David Rientjes | 75aa199 | 2009-01-06 14:39:01 -0800 | [diff] [blame] | 3638 | { |
Li Zefan | b8dadcb | 2014-03-03 17:28:36 -0500 | [diff] [blame] | 3639 | struct cgroup *cgrp; |
David Rientjes | 75aa199 | 2009-01-06 14:39:01 -0800 | [diff] [blame] | 3640 | |
Li Zefan | b8dadcb | 2014-03-03 17:28:36 -0500 | [diff] [blame] | 3641 | rcu_read_lock(); |
Li Zefan | 63f43f5 | 2013-01-25 16:08:01 +0800 | [diff] [blame] | 3642 | |
David Rientjes | da39da3 | 2015-11-05 18:48:05 -0800 | [diff] [blame] | 3643 | cgrp = task_cs(current)->css.cgroup; |
yuzhoujian | ef8444e | 2018-12-28 00:36:07 -0800 | [diff] [blame] | 3644 | pr_cont(",cpuset="); |
Tejun Heo | e61734c | 2014-02-12 09:29:50 -0500 | [diff] [blame] | 3645 | pr_cont_cgroup_name(cgrp); |
yuzhoujian | ef8444e | 2018-12-28 00:36:07 -0800 | [diff] [blame] | 3646 | pr_cont(",mems_allowed=%*pbl", |
David Rientjes | da39da3 | 2015-11-05 18:48:05 -0800 | [diff] [blame] | 3647 | nodemask_pr_args(¤t->mems_allowed)); |
Li Zefan | f440d98 | 2013-03-01 15:02:15 +0800 | [diff] [blame] | 3648 | |
Li Zefan | cfb5966 | 2013-03-12 10:28:39 +0800 | [diff] [blame] | 3649 | rcu_read_unlock(); |
David Rientjes | 75aa199 | 2009-01-06 14:39:01 -0800 | [diff] [blame] | 3650 | } |
| 3651 | |
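/*
 * Illustrative sketch, not part of the original file: sample output as
 * appended to an in-progress OOM report line (cpuset name and nodes
 * hypothetical):
 *
 *	...,cpuset=foo,mems_allowed=0-1
 */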
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3652 | /* |
Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 3653 | * Collection of memory_pressure is suppressed unless |
| 3654 | * this flag is enabled by writing "1" to the special |
| 3655 | * cpuset file 'memory_pressure_enabled' in the root cpuset. |
| 3656 | */ |
| 3657 | |
Paul Jackson | c5b2aff8 | 2006-01-08 01:01:51 -0800 | [diff] [blame] | 3658 | int cpuset_memory_pressure_enabled __read_mostly; |
Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 3659 | |
| 3660 | /** |
| 3661 | * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims. |
| 3662 | * |
| 3663 | * Keep a running average of the rate of synchronous (direct) |
| 3664 | * page reclaim efforts initiated by tasks in each cpuset. |
| 3665 | * |
| 3666 | * This represents the rate at which some task in the cpuset |
| 3667 | * ran low on memory on all nodes it was allowed to use, and |
| 3668 | * had to enter the kernel's page reclaim code in an effort to |
| 3669 | * create more free memory by tossing clean pages or swapping |
| 3670 | * or writing dirty pages. |
| 3671 | * |
| 3672 | * Display to user space in the per-cpuset read-only file |
| 3673 | * "memory_pressure". Value displayed is an integer |
| 3674 | * representing the recent rate of entry into the synchronous |
| 3675 | * (direct) page reclaim by any task attached to the cpuset. |
| 3676 | **/ |
| 3677 | |
| 3678 | void __cpuset_memory_pressure_bump(void) |
| 3679 | { |
Li Zefan | b8dadcb | 2014-03-03 17:28:36 -0500 | [diff] [blame] | 3680 | rcu_read_lock(); |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 3681 | fmeter_markevent(&task_cs(current)->fmeter); |
Li Zefan | b8dadcb | 2014-03-03 17:28:36 -0500 | [diff] [blame] | 3682 | rcu_read_unlock(); |
Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 3683 | } |
| 3684 | |
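/*
 * Illustrative sketch, not part of the original file: the intended
 * call pattern from the direct-reclaim path, mirroring the
 * cpuset_memory_pressure_bump() wrapper in <linux/cpuset.h>. The
 * cheap flag test keeps the common (disabled) case to a single
 * global load.
 */
static inline void __maybe_unused example_reclaim_enter(void)
{
	if (cpuset_memory_pressure_enabled)
		__cpuset_memory_pressure_bump();
}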
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 3685 | #ifdef CONFIG_PROC_PID_CPUSET |
Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 3686 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3687 | * proc_cpuset_show() |
| 3688 | * - Print task's cpuset path into seq_file. |
| 3689 | * - Used for /proc/<pid>/cpuset. |
Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 3690 | * - No need to task_lock(tsk) on this tsk->cpuset reference, as it |
| 3691 | * doesn't really matter if tsk->cpuset changes after we read it, |
Waiman Long | b94f9ac | 2021-09-09 22:42:56 -0400 | [diff] [blame] | 3692 | * and we take cpuset_rwsem, keeping cpuset_attach() from changing it |
Paul Menage | 2df167a | 2008-02-07 00:14:45 -0800 | [diff] [blame] | 3693 | * anyway. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3694 | */ |
Zefan Li | 52de477 | 2014-09-18 16:03:36 +0800 | [diff] [blame] | 3695 | int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns, |
| 3696 | struct pid *pid, struct task_struct *tsk) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3697 | { |
Tejun Heo | 4c737b4 | 2016-08-10 11:23:44 -0400 | [diff] [blame] | 3698 | char *buf; |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 3699 | struct cgroup_subsys_state *css; |
Eric W. Biederman | 99f8955 | 2006-06-26 00:25:55 -0700 | [diff] [blame] | 3700 | int retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3701 | |
Eric W. Biederman | 99f8955 | 2006-06-26 00:25:55 -0700 | [diff] [blame] | 3702 | retval = -ENOMEM; |
Tejun Heo | e61734c | 2014-02-12 09:29:50 -0500 | [diff] [blame] | 3703 | buf = kmalloc(PATH_MAX, GFP_KERNEL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3704 | if (!buf) |
Eric W. Biederman | 99f8955 | 2006-06-26 00:25:55 -0700 | [diff] [blame] | 3705 | goto out; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3706 | |
Aditya Kali | a79a908 | 2016-01-29 02:54:06 -0600 | [diff] [blame] | 3707 | css = task_get_css(tsk, cpuset_cgrp_id); |
Tejun Heo | 4c737b4 | 2016-08-10 11:23:44 -0400 | [diff] [blame] | 3708 | retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX, |
| 3709 | current->nsproxy->cgroup_ns); |
Aditya Kali | a79a908 | 2016-01-29 02:54:06 -0600 | [diff] [blame] | 3710 | css_put(css); |
Tejun Heo | 4c737b4 | 2016-08-10 11:23:44 -0400 | [diff] [blame] | 3711 | if (retval >= PATH_MAX) |
Tejun Heo | 679a5e3 | 2016-09-29 11:58:36 +0200 | [diff] [blame] | 3712 | retval = -ENAMETOOLONG; |
| 3713 | if (retval < 0) |
Zefan Li | 52de477 | 2014-09-18 16:03:36 +0800 | [diff] [blame] | 3714 | goto out_free; |
Tejun Heo | 4c737b4 | 2016-08-10 11:23:44 -0400 | [diff] [blame] | 3715 | seq_puts(m, buf); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3716 | seq_putc(m, '\n'); |
Tejun Heo | e61734c | 2014-02-12 09:29:50 -0500 | [diff] [blame] | 3717 | retval = 0; |
Eric W. Biederman | 99f8955 | 2006-06-26 00:25:55 -0700 | [diff] [blame] | 3718 | out_free: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3719 | kfree(buf); |
Eric W. Biederman | 99f8955 | 2006-06-26 00:25:55 -0700 | [diff] [blame] | 3720 | out: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3721 | return retval; |
| 3722 | } |
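/*
 * Illustrative sketch, not part of the original file: what the file
 * reads back from userspace (cpuset name hypothetical). For a task in
 * cpuset "foo" viewed from the root cgroup namespace:
 *
 *	$ cat /proc/self/cpuset
 *	/foo
 */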
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 3723 | #endif /* CONFIG_PROC_PID_CPUSET */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3724 | |
Heiko Carstens | d01d482 | 2009-09-21 11:06:27 +0200 | [diff] [blame] | 3725 | /* Display task mems_allowed in /proc/<pid>/status file. */ |
Eric W. Biederman | df5f831 | 2008-02-08 04:18:33 -0800 | [diff] [blame] | 3726 | void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3727 | { |
Tejun Heo | e8e6d97 | 2015-02-13 14:37:23 -0800 | [diff] [blame] | 3728 | seq_printf(m, "Mems_allowed:\t%*pb\n", |
| 3729 | nodemask_pr_args(&task->mems_allowed)); |
| 3730 | seq_printf(m, "Mems_allowed_list:\t%*pbl\n", |
| 3731 | nodemask_pr_args(&task->mems_allowed)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3732 | } |
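/*
 * Illustrative sketch, not part of the original file: the two lines
 * this adds to /proc/<pid>/status for a task allowed nodes 0-1,
 * assuming a kernel configured with MAX_NUMNODES = 32 (the hex mask
 * width varies with configuration):
 *
 *	Mems_allowed:	00000003
 *	Mems_allowed_list:	0-1
 */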