/*
 * kernel/cpuset.c
 *
 * Processor and Memory placement constraints for sets of tasks.
 *
 * Copyright (C) 2003 BULL SA.
 * Copyright (C) 2004-2007 Silicon Graphics, Inc.
 * Copyright (C) 2006 Google, Inc
 *
 * Portions derived from Patrick Mochel's sysfs code.
 * sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 * 2003-10-10 Written by Simon Derr.
 * 2003-10-22 Updates by Stephen Hemminger.
 * 2004 May-July Rework by Paul Jackson.
 * 2006 Rework by Paul Menage to use generic cgroups
 * 2008 Rework of the scheduler domains and CPU hotplug handling
 *      by Max Krasnyansky
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/fs_context.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/deadline.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/seq_file.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/time64.h>
#include <linux/backing-dev.h>
#include <linux/sort.h>
#include <linux/oom.h>
#include <linux/sched/isolation.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/cgroup.h>
#include <linux/wait.h>

DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);

/* See "Frequency meter" comments, below. */

struct fmeter {
	int cnt;		/* unprocessed events count */
	int val;		/* most recent output value */
	time64_t time;		/* clock (secs) when val computed */
	spinlock_t lock;	/* guards read or write of above */
};

struct cpuset {
	struct cgroup_subsys_state css;

	unsigned long flags;		/* "unsigned long" so bitops work */

	/*
	 * On default hierarchy:
	 *
	 * The user-configured masks can only be changed by writing to
	 * cpuset.cpus and cpuset.mems, and won't be limited by the
	 * parent masks.
	 *
	 * The effective masks are the real masks that apply to the tasks
	 * in the cpuset.  They may be changed if the configured masks are
	 * changed or hotplug happens.
	 *
	 * effective_mask == configured_mask & parent's effective_mask,
	 * and if it ends up empty, it will inherit the parent's mask.
	 *
	 *
	 * On legacy hierarchy:
	 *
	 * The user-configured masks are always the same as the effective
	 * masks.
	 */
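	/*
	 * Example (illustrative): with cpuset.cpus set to 0-3 and a
	 * parent whose effective_cpus is 2-7, this cpuset's
	 * effective_cpus is 2-3; were the intersection empty, the
	 * parent's effective mask would be inherited instead.
	 */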

	/* user-configured CPUs and Memory Nodes allowed to tasks */
	cpumask_var_t cpus_allowed;
	nodemask_t mems_allowed;

	/* effective CPUs and Memory Nodes allowed to tasks */
	cpumask_var_t effective_cpus;
	nodemask_t effective_mems;

	/*
	 * CPUs allocated to child sub-partitions (default hierarchy only)
	 * - CPUs granted by the parent = effective_cpus U subparts_cpus
	 * - effective_cpus and subparts_cpus are mutually exclusive.
	 *
	 * effective_cpus contains only onlined CPUs, but subparts_cpus
	 * may have offlined ones.
	 */
	cpumask_var_t subparts_cpus;

	/*
	 * The old Memory Nodes that tasks in this cpuset took on.
	 *
	 * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
	 * - A new cpuset's old_mems_allowed is initialized when some
	 *   task is moved into it.
	 * - old_mems_allowed is used in cpuset_migrate_mm() when we change
	 *   cpuset.mems_allowed and have tasks' nodemasks updated, and
	 *   then old_mems_allowed is updated to mems_allowed.
	 */
	nodemask_t old_mems_allowed;

	struct fmeter fmeter;		/* memory_pressure filter */

	/*
	 * Tasks are being attached to this cpuset.  Used to prevent
	 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
	 */
	int attach_in_progress;

	/* partition number for rebuild_sched_domains() */
	int pn;

	/* for custom sched domain */
	int relax_domain_level;

	/* number of CPUs in subparts_cpus */
	int nr_subparts_cpus;

	/* partition root state */
	int partition_root_state;

	/*
	 * Default hierarchy only:
	 * use_parent_ecpus - set if using parent's effective_cpus
	 * child_ecpus_count - # of children with use_parent_ecpus set
	 */
	int use_parent_ecpus;
	int child_ecpus_count;

	/* Handle for cpuset.cpus.partition */
	struct cgroup_file partition_file;
};

/*
 * Partition root states:
 *
 *   0 - not a partition root
 *
 *   1 - partition root
 *
 *  -1 - invalid partition root
 *	 None of the cpus in cpus_allowed can be put into the parent's
 *	 subparts_cpus.  In this case, the cpuset is not a real partition
 *	 root anymore.  However, the CPU_EXCLUSIVE bit will still be set
 *	 and the cpuset can be restored back to a partition root if the
 *	 parent cpuset can give more CPUs back to this child cpuset.
 */
#define PRS_DISABLED		0
#define PRS_ENABLED		1
#define PRS_ERROR		-1

/*
 * Temporary cpumasks for working with partitions that are passed among
 * functions to avoid memory allocation in inner functions.
 */
struct tmpmasks {
	cpumask_var_t addmask, delmask;	/* For partition root */
	cpumask_var_t new_cpus;		/* For update_cpumasks_hier() */
};

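/* Convert a cgroup_subsys_state to the cpuset that contains it */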
static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct cpuset, css) : NULL;
}

/* Retrieve the cpuset for a task */
static inline struct cpuset *task_cs(struct task_struct *task)
{
	return css_cs(task_css(task, cpuset_cgrp_id));
}

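/* Return the parent cpuset of @cs, or NULL if @cs is the top cpuset */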
static inline struct cpuset *parent_cs(struct cpuset *cs)
{
	return css_cs(cs->css.parent);
}

/* bits in struct cpuset flags field */
typedef enum {
	CS_ONLINE,
	CS_CPU_EXCLUSIVE,
	CS_MEM_EXCLUSIVE,
	CS_MEM_HARDWALL,
	CS_MEMORY_MIGRATE,
	CS_SCHED_LOAD_BALANCE,
	CS_SPREAD_PAGE,
	CS_SPREAD_SLAB,
} cpuset_flagbits_t;

/* convenient tests for these bits */
static inline bool is_cpuset_online(struct cpuset *cs)
{
	return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
}

static inline int is_cpu_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_hardwall(const struct cpuset *cs)
{
	return test_bit(CS_MEM_HARDWALL, &cs->flags);
}

static inline int is_sched_load_balance(const struct cpuset *cs)
{
	return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
}

static inline int is_memory_migrate(const struct cpuset *cs)
{
	return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
}

static inline int is_spread_page(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_PAGE, &cs->flags);
}

static inline int is_spread_slab(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_SLAB, &cs->flags);
}

static inline int is_partition_root(const struct cpuset *cs)
{
	return cs->partition_root_state > 0;
}

/*
 * Send a notification event whenever partition_root_state changes.
 */
static inline void notify_partition_change(struct cpuset *cs,
					   int old_prs, int new_prs)
{
	if (old_prs != new_prs)
		cgroup_file_notify(&cs->partition_file);
}

static struct cpuset top_cpuset = {
	.flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
		  (1 << CS_MEM_EXCLUSIVE)),
	.partition_root_state = PRS_ENABLED,
};

/**
 * cpuset_for_each_child - traverse online children of a cpuset
 * @child_cs: loop cursor pointing to the current child
 * @pos_css: used for iteration
 * @parent_cs: target cpuset to walk children of
 *
 * Walk @child_cs through the online children of @parent_cs.  Must be used
 * with RCU read locked.
 */
#define cpuset_for_each_child(child_cs, pos_css, parent_cs)		\
	css_for_each_child((pos_css), &(parent_cs)->css)		\
		if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))

/**
 * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
 * @des_cs: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @root_cs: target cpuset to walk the descendants of
 *
 * Walk @des_cs through the online descendants of @root_cs.  Must be used
 * with RCU read locked.  The caller may modify @pos_css by calling
 * css_rightmost_descendant() to skip a subtree.  @root_cs is included in
 * the iteration and is the first node to be visited.  A usage sketch
 * follows these definitions.
 */
#define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs)	\
	css_for_each_descendant_pre((pos_css), &(root_cs)->css)	\
		if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
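
/*
 * A minimal usage sketch for the iterators above, mirroring how this
 * file itself uses them (e.g. in validate_change()); inspect() stands
 * in for whatever the caller does with each cpuset:
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct cpuset *child;
 *
 *	rcu_read_lock();
 *	cpuset_for_each_child(child, pos_css, parent)
 *		inspect(child);
 *	rcu_read_unlock();
 */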

/*
 * There are two global locks guarding cpuset structures - cpuset_mutex and
 * callback_lock.  We also require taking task_lock() when dereferencing a
 * task's cpuset pointer.  See "The task_lock() exception", at the end of
 * this comment.
 *
 * A task must hold both locks to modify cpusets.  If a task holds
 * cpuset_mutex, then it blocks others wanting that mutex, ensuring that it
 * is the only task able to also acquire callback_lock and be able to
 * modify cpusets.  It can perform various checks on the cpuset structure
 * first, knowing nothing will change.  It can also allocate memory while
 * just holding cpuset_mutex.  While it is performing these checks, various
 * callback routines can briefly acquire callback_lock to query cpusets.
 * Once it is ready to make the changes, it takes callback_lock, blocking
 * everyone else.
 *
 * Calls to the kernel memory allocator can not be made while holding
 * callback_lock, as that would risk double tripping on callback_lock
 * from one of the callbacks into the cpuset code from within
 * __alloc_pages().
 *
 * If a task is only holding callback_lock, then it has read-only
 * access to cpusets.
 *
 * The task_struct fields mems_allowed and mempolicy may be changed by
 * other tasks, so we use alloc_lock in the task_struct to protect them.
 *
 * The cpuset_common_file_read() handlers only hold callback_lock across
 * small pieces of code, such as when reading out possibly multi-word
 * cpumasks and nodemasks.
 *
 * Accessing a task's cpuset should be done in accordance with the
 * guidelines for accessing subsystem state in kernel/cgroup.c
 */

DEFINE_STATIC_PERCPU_RWSEM(cpuset_rwsem);

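/*
 * Take cpuset_rwsem for reading; callers (e.g. scheduler code) see a
 * stable cpuset configuration while holding it.
 */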
void cpuset_read_lock(void)
{
	percpu_down_read(&cpuset_rwsem);
}

void cpuset_read_unlock(void)
{
	percpu_up_read(&cpuset_rwsem);
}

static DEFINE_SPINLOCK(callback_lock);

static struct workqueue_struct *cpuset_migrate_mm_wq;

/*
 * CPU / memory hotplug is handled asynchronously.
 */
static void cpuset_hotplug_workfn(struct work_struct *work);
static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);

static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);

/*
 * Cgroup v2 behavior is used on the "cpus" and "mems" control files when
 * on default hierarchy or when the cpuset_v2_mode flag is set by mounting
 * the v1 cpuset cgroup filesystem with the "cpuset_v2_mode" mount option.
 * With v2 behavior, "cpus" and "mems" are always what the users have
 * requested and won't be changed by hotplug events.  Only the effective
 * cpus or mems will be affected.
 */
static inline bool is_in_v2_mode(void)
{
	return cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
	      (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE);
}

/*
 * Return in pmask the portion of a cpuset's effective_cpus that are
 * online.  If none are online, walk up the cpuset hierarchy until we
 * find one that does have some online cpus.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of cpu_online_mask.
 *
 * Call with callback_lock or cpuset_mutex held.
 */
static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
{
	while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) {
		cs = parent_cs(cs);
		if (unlikely(!cs)) {
			/*
			 * The top cpuset doesn't have any online cpu as a
			 * consequence of a race between cpuset_hotplug_work
			 * and the cpu hotplug notifier.  But we know the top
			 * cpuset's effective_cpus is on its way to being
			 * identical to cpu_online_mask.
			 */
			cpumask_copy(pmask, cpu_online_mask);
			return;
		}
	}
	cpumask_and(pmask, cs->effective_cpus, cpu_online_mask);
}

/*
 * Return in *pmask the portion of a cpuset's effective_mems that are
 * online, with memory.  If none are online with memory, walk up the
 * cpuset hierarchy until we find one that does have some online mems.
 * The top cpuset always has some mems online.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of node_states[N_MEMORY].
 *
 * Call with callback_lock or cpuset_mutex held.
 */
static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
{
	while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))
		cs = parent_cs(cs);
	nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
}

/*
 * update task's spread flag if cpuset's page/slab spread flag is set
 *
 * Call with callback_lock or cpuset_mutex held.
 */
static void cpuset_update_task_spread_flag(struct cpuset *cs,
					struct task_struct *tsk)
{
	if (is_spread_page(cs))
		task_set_spread_page(tsk);
	else
		task_clear_spread_page(tsk);

	if (is_spread_slab(cs))
		task_set_spread_slab(tsk);
	else
		task_clear_spread_slab(tsk);
}

/*
 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
 *
 * One cpuset is a subset of another if all its allowed CPUs and
 * Memory Nodes are a subset of the other, and its exclusive flags
 * are only set if the other's are set.  Call holding cpuset_mutex.
 */
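/*
 * For example (illustrative): a cpuset with cpus_allowed 0-1,
 * mems_allowed 0 and no exclusive flags set is a subset of one with
 * cpus_allowed 0-3 and mems_allowed 0-1, regardless of the latter's
 * exclusive flags.
 */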

static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
{
	return	cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
		nodes_subset(p->mems_allowed, q->mems_allowed) &&
		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
		is_mem_exclusive(p) <= is_mem_exclusive(q);
}

/**
 * alloc_cpumasks - allocate three cpumasks for cpuset
 * @cs: the cpuset whose cpumasks are to be allocated
 * @tmp: the tmpmasks structure pointer
 * Return: 0 if successful, -ENOMEM otherwise.
 *
 * Only one of the two input arguments should be non-NULL.
 */
static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
{
	cpumask_var_t *pmask1, *pmask2, *pmask3;

	if (cs) {
		pmask1 = &cs->cpus_allowed;
		pmask2 = &cs->effective_cpus;
		pmask3 = &cs->subparts_cpus;
	} else {
		pmask1 = &tmp->new_cpus;
		pmask2 = &tmp->addmask;
		pmask3 = &tmp->delmask;
	}

	if (!zalloc_cpumask_var(pmask1, GFP_KERNEL))
		return -ENOMEM;

	if (!zalloc_cpumask_var(pmask2, GFP_KERNEL))
		goto free_one;

	if (!zalloc_cpumask_var(pmask3, GFP_KERNEL))
		goto free_two;

	return 0;

free_two:
	free_cpumask_var(*pmask2);
free_one:
	free_cpumask_var(*pmask1);
	return -ENOMEM;
}

/**
 * free_cpumasks - free cpumasks in a cpuset or a tmpmasks structure
 * @cs: the cpuset whose cpumasks are to be freed
 * @tmp: the tmpmasks structure pointer
 */
static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
{
	if (cs) {
		free_cpumask_var(cs->cpus_allowed);
		free_cpumask_var(cs->effective_cpus);
		free_cpumask_var(cs->subparts_cpus);
	}
	if (tmp) {
		free_cpumask_var(tmp->new_cpus);
		free_cpumask_var(tmp->addmask);
		free_cpumask_var(tmp->delmask);
	}
}

/**
 * alloc_trial_cpuset - allocate a trial cpuset
 * @cs: the cpuset that the trial cpuset duplicates
 */
static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
{
	struct cpuset *trial;

	trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
	if (!trial)
		return NULL;

	if (alloc_cpumasks(trial, NULL)) {
		kfree(trial);
		return NULL;
	}

	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
	cpumask_copy(trial->effective_cpus, cs->effective_cpus);
	return trial;
}

/**
 * free_cpuset - free the cpuset
 * @cs: the cpuset to be freed
 */
static inline void free_cpuset(struct cpuset *cs)
{
	free_cpumasks(cs, NULL);
	kfree(cs);
}

/*
 * validate_change() - Used to validate that any proposed cpuset change
 * follows the structural rules for cpusets.
 *
 * If we replaced the flag and mask values of the current cpuset
 * (cur) with those values in the trial cpuset (trial), would
 * our various subset and exclusive rules still be valid?  Presumes
 * cpuset_mutex held.
 *
 * 'cur' is the address of an actual, in-use cpuset.  Operations
 * such as list traversal that depend on the actual address of the
 * cpuset in the list must use cur below, not trial.
 *
 * 'trial' is the address of a bulk structure copy of cur, with
 * perhaps one or more of the fields cpus_allowed, mems_allowed,
 * or flags changed to new, trial values.
 *
 * Return 0 if valid, -errno if not.
 */
static int validate_change(struct cpuset *cur, struct cpuset *trial)
{
	struct cgroup_subsys_state *css;
	struct cpuset *c, *par;
	int ret;

	rcu_read_lock();

	/* Each of our child cpusets must be a subset of us */
	ret = -EBUSY;
	cpuset_for_each_child(c, css, cur)
		if (!is_cpuset_subset(c, trial))
			goto out;

	/* Remaining checks don't apply to root cpuset */
	ret = 0;
	if (cur == &top_cpuset)
		goto out;

	par = parent_cs(cur);

	/* On legacy hierarchy, we must be a subset of our parent cpuset. */
	ret = -EACCES;
	if (!is_in_v2_mode() && !is_cpuset_subset(trial, par))
		goto out;

	/*
	 * If either I or some sibling (!= me) is exclusive, we can't
	 * overlap
	 */
	ret = -EINVAL;
	cpuset_for_each_child(c, css, par) {
		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
		    c != cur &&
		    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
			goto out;
		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
		    c != cur &&
		    nodes_intersects(trial->mems_allowed, c->mems_allowed))
			goto out;
	}

	/*
	 * Cpusets with tasks - existing or newly being attached - can't
	 * be changed to have empty cpus_allowed or mems_allowed.
	 */
	ret = -ENOSPC;
	if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) {
		if (!cpumask_empty(cur->cpus_allowed) &&
		    cpumask_empty(trial->cpus_allowed))
			goto out;
		if (!nodes_empty(cur->mems_allowed) &&
		    nodes_empty(trial->mems_allowed))
			goto out;
	}

	/*
	 * We can't shrink if we won't have enough room for SCHED_DEADLINE
	 * tasks.
	 */
	ret = -EBUSY;
	if (is_cpu_exclusive(cur) &&
	    !cpuset_cpumask_can_shrink(cur->cpus_allowed,
				       trial->cpus_allowed))
		goto out;

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

#ifdef CONFIG_SMP
/*
 * Helper routine for generate_sched_domains().
 * Do cpusets a, b have overlapping effective_cpus masks?
 */
static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
{
	return cpumask_intersects(a->effective_cpus, b->effective_cpus);
}

static void
update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
{
	if (dattr->relax_domain_level < c->relax_domain_level)
		dattr->relax_domain_level = c->relax_domain_level;
	return;
}

static void update_domain_attr_tree(struct sched_domain_attr *dattr,
				    struct cpuset *root_cs)
{
	struct cpuset *cp;
	struct cgroup_subsys_state *pos_css;

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
		/* skip the whole subtree if @cp doesn't have any CPU */
		if (cpumask_empty(cp->cpus_allowed)) {
			pos_css = css_rightmost_descendant(pos_css);
			continue;
		}

		if (is_sched_load_balance(cp))
			update_domain_attr(dattr, cp);
	}
	rcu_read_unlock();
}

/* Must be called with cpuset_mutex held. */
static inline int nr_cpusets(void)
{
	/* jump label reference count + the top-level cpuset */
	return static_key_count(&cpusets_enabled_key.key) + 1;
}

Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 697 | /* |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 698 | * generate_sched_domains() |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 699 | * |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 700 | * This function builds a partial partition of the systems CPUs |
| 701 | * A 'partial partition' is a set of non-overlapping subsets whose |
| 702 | * union is a subset of that set. |
Viresh Kumar | 0a0fca9 | 2013-06-04 13:10:24 +0530 | [diff] [blame] | 703 | * The output of this function needs to be passed to kernel/sched/core.c |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 704 | * partition_sched_domains() routine, which will rebuild the scheduler's |
| 705 | * load balancing domains (sched domains) as specified by that partial |
| 706 | * partition. |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 707 | * |
Mauro Carvalho Chehab | da82c92 | 2019-06-27 13:08:35 -0300 | [diff] [blame] | 708 | * See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 709 | * for a background explanation of this. |
| 710 | * |
| 711 | * Does not return errors, on the theory that the callers of this |
| 712 | * routine would rather not worry about failures to rebuild sched |
| 713 | * domains when operating in the severe memory shortage situations |
| 714 | * that could cause allocation failures below. |
| 715 | * |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 716 | * Must be called with cpuset_mutex held. |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 717 | * |
| 718 | * The three key local variables below are: |
Juri Lelli | b6fbbf3 | 2018-12-19 14:34:44 +0100 | [diff] [blame] | 719 | * cp - cpuset pointer, used (together with pos_css) to perform a |
| 720 | * top-down scan of all cpusets. For our purposes, rebuilding |
| 721 | * the schedulers sched domains, we can ignore !is_sched_load_ |
| 722 | * balance cpusets. |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 723 | * csa - (for CpuSet Array) Array of pointers to all the cpusets |
| 724 | * that need to be load balanced, for convenient iterative |
| 725 | * access by the subsequent code that finds the best partition, |
| 726 | * i.e the set of domains (subsets) of CPUs such that the |
| 727 | * cpus_allowed of every cpuset marked is_sched_load_balance |
| 728 | * is a subset of one of these domains, while there are as |
| 729 | * many such domains as possible, each as small as possible. |
| 730 | * doms - Conversion of 'csa' to an array of cpumasks, for passing to |
Viresh Kumar | 0a0fca9 | 2013-06-04 13:10:24 +0530 | [diff] [blame] | 731 | * the kernel/sched/core.c routine partition_sched_domains() in a |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 732 | * convenient format, that can be easily compared to the prior |
| 733 | * value to determine what partition elements (sched domains) |
| 734 | * were changed (added or removed.) |
| 735 | * |
| 736 | * Finding the best partition (set of domains): |
| 737 | * The triple nested loops below over i, j, k scan over the |
| 738 | * load balanced cpusets (using the array of cpuset pointers in |
| 739 | * csa[]) looking for pairs of cpusets that have overlapping |
| 740 | * cpus_allowed, but which don't have the same 'pn' partition |
| 741 | * number and gives them in the same partition number. It keeps |
| 742 | * looping on the 'restart' label until it can no longer find |
| 743 | * any such pairs. |
| 744 | * |
| 745 | * The union of the cpus_allowed masks from the set of |
| 746 | * all cpusets having the same 'pn' value then form the one |
| 747 | * element of the partition (one sched domain) to be passed to |
| 748 | * partition_sched_domains(). |
| 749 | */ |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 750 | static int generate_sched_domains(cpumask_var_t **domains, |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 751 | struct sched_domain_attr **attributes) |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 752 | { |
Juri Lelli | b6fbbf3 | 2018-12-19 14:34:44 +0100 | [diff] [blame] | 753 | struct cpuset *cp; /* top-down scan of cpusets */ |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 754 | struct cpuset **csa; /* array of all cpuset ptrs */ |
| 755 | int csn; /* how many cpuset ptrs in csa so far */ |
| 756 | int i, j, k; /* indices for partition finding loops */ |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 757 | cpumask_var_t *doms; /* resulting partition; i.e. sched domains */ |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 758 | struct sched_domain_attr *dattr; /* attributes for custom domains */ |
Ingo Molnar | 1583715 | 2008-11-25 10:27:49 +0100 | [diff] [blame] | 759 | int ndoms = 0; /* number of sched domains in result */ |
Li Zefan | 6af866a | 2009-01-07 18:08:45 -0800 | [diff] [blame] | 760 | int nslot; /* next empty doms[] struct cpumask slot */ |
Tejun Heo | 492eb21 | 2013-08-08 20:11:25 -0400 | [diff] [blame] | 761 | struct cgroup_subsys_state *pos_css; |
Waiman Long | 0ccea8f | 2018-11-08 10:08:42 -0500 | [diff] [blame] | 762 | bool root_load_balance = is_sched_load_balance(&top_cpuset); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 763 | |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 764 | doms = NULL; |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 765 | dattr = NULL; |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 766 | csa = NULL; |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 767 | |
| 768 | /* Special case for the 99% of systems with one, full, sched domain */ |
Waiman Long | 0ccea8f | 2018-11-08 10:08:42 -0500 | [diff] [blame] | 769 | if (root_load_balance && !top_cpuset.nr_subparts_cpus) { |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 770 | ndoms = 1; |
| 771 | doms = alloc_sched_domains(ndoms); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 772 | if (!doms) |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 773 | goto done; |
| 774 | |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 775 | dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL); |
| 776 | if (dattr) { |
| 777 | *dattr = SD_ATTR_INIT; |
Li Zefan | 93a6557 | 2008-07-29 22:33:23 -0700 | [diff] [blame] | 778 | update_domain_attr_tree(dattr, &top_cpuset); |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 779 | } |
Rik van Riel | 47b8ea7 | 2015-03-09 12:12:08 -0400 | [diff] [blame] | 780 | cpumask_and(doms[0], top_cpuset.effective_cpus, |
Frederic Weisbecker | edb9382 | 2017-10-27 04:42:37 +0200 | [diff] [blame] | 781 | housekeeping_cpumask(HK_FLAG_DOMAIN)); |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 782 | |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 783 | goto done; |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 784 | } |
| 785 | |
Kees Cook | 6da2ec5 | 2018-06-12 13:55:00 -0700 | [diff] [blame] | 786 | csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 787 | if (!csa) |
| 788 | goto done; |
| 789 | csn = 0; |
| 790 | |
Tejun Heo | fc560a2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 791 | rcu_read_lock(); |
Waiman Long | 0ccea8f | 2018-11-08 10:08:42 -0500 | [diff] [blame] | 792 | if (root_load_balance) |
| 793 | csa[csn++] = &top_cpuset; |
Tejun Heo | 492eb21 | 2013-08-08 20:11:25 -0400 | [diff] [blame] | 794 | cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) { |
Tejun Heo | bd8815a | 2013-08-08 20:11:27 -0400 | [diff] [blame] | 795 | if (cp == &top_cpuset) |
| 796 | continue; |
Lai Jiangshan | f539369 | 2008-07-29 22:33:22 -0700 | [diff] [blame] | 797 | /* |
Tejun Heo | fc560a2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 798 | * Continue traversing beyond @cp iff @cp has some CPUs and |
| 799 | * isn't load balancing. The former is obvious. The |
| 800 | * latter: All child cpusets contain a subset of the |
| 801 | * parent's cpus, so just skip them, and then we call |
| 802 | * update_domain_attr_tree() to calc relax_domain_level of |
| 803 | * the corresponding sched domain. |
Waiman Long | 0ccea8f | 2018-11-08 10:08:42 -0500 | [diff] [blame] | 804 | * |
| 805 | * If root is load-balancing, we can skip @cp if it |
| 806 | * is a subset of the root's effective_cpus. |
Lai Jiangshan | f539369 | 2008-07-29 22:33:22 -0700 | [diff] [blame] | 807 | */ |
Tejun Heo | fc560a2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 808 | if (!cpumask_empty(cp->cpus_allowed) && |
Rik van Riel | 47b8ea7 | 2015-03-09 12:12:08 -0400 | [diff] [blame] | 809 | !(is_sched_load_balance(cp) && |
Frederic Weisbecker | edb9382 | 2017-10-27 04:42:37 +0200 | [diff] [blame] | 810 | cpumask_intersects(cp->cpus_allowed, |
| 811 | housekeeping_cpumask(HK_FLAG_DOMAIN)))) |
Lai Jiangshan | f539369 | 2008-07-29 22:33:22 -0700 | [diff] [blame] | 812 | continue; |
Lai Jiangshan | 489a539 | 2008-07-25 01:47:23 -0700 | [diff] [blame] | 813 | |
Waiman Long | 0ccea8f | 2018-11-08 10:08:42 -0500 | [diff] [blame] | 814 | if (root_load_balance && |
| 815 | cpumask_subset(cp->cpus_allowed, top_cpuset.effective_cpus)) |
| 816 | continue; |
| 817 | |
Valentin Schneider | cd1cb33 | 2019-10-23 16:37:44 +0100 | [diff] [blame] | 818 | if (is_sched_load_balance(cp) && |
| 819 | !cpumask_empty(cp->effective_cpus)) |
Tejun Heo | fc560a2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 820 | csa[csn++] = cp; |
| 821 | |
Waiman Long | 0ccea8f | 2018-11-08 10:08:42 -0500 | [diff] [blame] | 822 | /* skip @cp's subtree if not a partition root */ |
| 823 | if (!is_partition_root(cp)) |
| 824 | pos_css = css_rightmost_descendant(pos_css); |
Tejun Heo | fc560a2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 825 | } |
| 826 | rcu_read_unlock(); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 827 | |
| 828 | for (i = 0; i < csn; i++) |
| 829 | csa[i]->pn = i; |
| 830 | ndoms = csn; |
| 831 | |
| 832 | restart: |
| 833 | /* Find the best partition (set of sched domains) */ |
| 834 | for (i = 0; i < csn; i++) { |
| 835 | struct cpuset *a = csa[i]; |
| 836 | int apn = a->pn; |
| 837 | |
| 838 | for (j = 0; j < csn; j++) { |
| 839 | struct cpuset *b = csa[j]; |
| 840 | int bpn = b->pn; |
| 841 | |
| 842 | if (apn != bpn && cpusets_overlap(a, b)) { |
| 843 | for (k = 0; k < csn; k++) { |
| 844 | struct cpuset *c = csa[k]; |
| 845 | |
| 846 | if (c->pn == bpn) |
| 847 | c->pn = apn; |
| 848 | } |
| 849 | ndoms--; /* one less element */ |
| 850 | goto restart; |
| 851 | } |
| 852 | } |
| 853 | } |
| 854 | |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 855 | /* |
| 856 | * Now we know how many domains to create. |
| 857 | * Convert <csn, csa> to <ndoms, doms> and populate cpu masks. |
| 858 | */ |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 859 | doms = alloc_sched_domains(ndoms); |
Li Zefan | 700018e | 2008-11-18 14:02:03 +0800 | [diff] [blame] | 860 | if (!doms) |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 861 | goto done; |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 862 | |
| 863 | /* |
| 864 | * The rest of the code, including the scheduler, can deal with the |
| 865 | * dattr==NULL case, so there is no need to abort if the allocation fails. |
| 866 | */ |
Kees Cook | 6da2ec5 | 2018-06-12 13:55:00 -0700 | [diff] [blame] | 867 | dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr), |
| 868 | GFP_KERNEL); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 869 | |
| 870 | for (nslot = 0, i = 0; i < csn; i++) { |
| 871 | struct cpuset *a = csa[i]; |
Li Zefan | 6af866a | 2009-01-07 18:08:45 -0800 | [diff] [blame] | 872 | struct cpumask *dp; |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 873 | int apn = a->pn; |
| 874 | |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 875 | if (apn < 0) { |
| 876 | /* Skip completed partitions */ |
| 877 | continue; |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 878 | } |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 879 | |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 880 | dp = doms[nslot]; |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 881 | |
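| | /* |
| | * We should never produce more domains than ndoms; if we somehow |
| | * do, warn (at most 10 times) and skip this cpuset. |
| | */ |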
| 882 | if (nslot == ndoms) { |
| 883 | static int warnings = 10; |
| 884 | if (warnings) { |
Fabian Frederick | 12d3089 | 2014-05-05 19:49:00 +0200 | [diff] [blame] | 885 | pr_warn("rebuild_sched_domains confused: nslot %d, ndoms %d, csn %d, i %d, apn %d\n", |
| 886 | nslot, ndoms, csn, i, apn); |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 887 | warnings--; |
| 888 | } |
| 889 | continue; |
| 890 | } |
| 891 | |
Li Zefan | 6af866a | 2009-01-07 18:08:45 -0800 | [diff] [blame] | 892 | cpumask_clear(dp); |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 893 | if (dattr) |
| 894 | *(dattr + nslot) = SD_ATTR_INIT; |
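| | /* |
| | * Union the effective CPUs of every cpuset left in partition apn |
| | * into this domain's mask, restricted to housekeeping CPUs. |
| | */ |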
| 895 | for (j = i; j < csn; j++) { |
| 896 | struct cpuset *b = csa[j]; |
| 897 | |
| 898 | if (apn == b->pn) { |
Li Zefan | 8b5f1c5 | 2014-07-09 16:47:50 +0800 | [diff] [blame] | 899 | cpumask_or(dp, dp, b->effective_cpus); |
Frederic Weisbecker | edb9382 | 2017-10-27 04:42:37 +0200 | [diff] [blame] | 900 | cpumask_and(dp, dp, housekeeping_cpumask(HK_FLAG_DOMAIN)); |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 901 | if (dattr) |
| 902 | update_domain_attr_tree(dattr + nslot, b); |
| 903 | |
| 904 | /* Done with this partition */ |
| 905 | b->pn = -1; |
| 906 | } |
| 907 | } |
| 908 | nslot++; |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 909 | } |
| 910 | BUG_ON(nslot != ndoms); |
| 911 | |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 912 | done: |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 913 | kfree(csa); |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 914 | |
Li Zefan | 700018e | 2008-11-18 14:02:03 +0800 | [diff] [blame] | 915 | /* |
| 916 | * Fall back to the default domain if kmalloc() failed. |
| 917 | * See comments in partition_sched_domains(). |
| 918 | */ |
| 919 | if (doms == NULL) |
| 920 | ndoms = 1; |
| 921 | |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 922 | *domains = doms; |
| 923 | *attributes = dattr; |
| 924 | return ndoms; |
| 925 | } |
| 926 | |
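| | /* |
| | * Re-add the deadline bandwidth of every task in @cs to the root |
| | * domain of the task's runqueue. |
| | */ |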
Mathieu Poirier | f9a25f7 | 2019-07-19 15:59:55 +0200 | [diff] [blame] | 927 | static void update_tasks_root_domain(struct cpuset *cs) |
| 928 | { |
| 929 | struct css_task_iter it; |
| 930 | struct task_struct *task; |
| 931 | |
| 932 | css_task_iter_start(&cs->css, 0, &it); |
| 933 | |
| 934 | while ((task = css_task_iter_next(&it))) |
| 935 | dl_add_task_root_domain(task); |
| 936 | |
| 937 | css_task_iter_end(&it); |
| 938 | } |
| 939 | |
| 940 | static void rebuild_root_domains(void) |
| 941 | { |
| 942 | struct cpuset *cs = NULL; |
| 943 | struct cgroup_subsys_state *pos_css; |
| 944 | |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 945 | percpu_rwsem_assert_held(&cpuset_rwsem); |
Mathieu Poirier | f9a25f7 | 2019-07-19 15:59:55 +0200 | [diff] [blame] | 946 | lockdep_assert_cpus_held(); |
| 947 | lockdep_assert_held(&sched_domains_mutex); |
| 948 | |
Mathieu Poirier | f9a25f7 | 2019-07-19 15:59:55 +0200 | [diff] [blame] | 949 | rcu_read_lock(); |
| 950 | |
| 951 | /* |
| 952 | * Clear the default root domain's DL accounting; it will be computed |
| 953 | * again if a task belongs to it. |
| 954 | */ |
| 955 | dl_clear_root_domain(&def_root_domain); |
| 956 | |
| 957 | cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { |
| 958 | |
| 959 | if (cpumask_empty(cs->effective_cpus)) { |
| 960 | pos_css = css_rightmost_descendant(pos_css); |
| 961 | continue; |
| 962 | } |
| 963 | |
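| | /* |
| | * Pin @cs so it cannot go away while we drop the RCU read lock |
| | * to update its tasks. |
| | */ |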
| 964 | css_get(&cs->css); |
| 965 | |
| 966 | rcu_read_unlock(); |
| 967 | |
| 968 | update_tasks_root_domain(cs); |
| 969 | |
| 970 | rcu_read_lock(); |
| 971 | css_put(&cs->css); |
| 972 | } |
| 973 | rcu_read_unlock(); |
| 974 | } |
| 975 | |
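| | /* |
| | * Rebuild the sched domains for the new partition layout, then the |
| | * root-domain deadline accounting, both under sched_domains_mutex |
| | * so the two stay consistent. |
| | */ |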
| 976 | static void |
| 977 | partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[], |
| 978 | struct sched_domain_attr *dattr_new) |
| 979 | { |
| 980 | mutex_lock(&sched_domains_mutex); |
| 981 | partition_sched_domains_locked(ndoms_new, doms_new, dattr_new); |
| 982 | rebuild_root_domains(); |
| 983 | mutex_unlock(&sched_domains_mutex); |
| 984 | } |
| 985 | |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 986 | /* |
| 987 | * Rebuild scheduler domains. |
| 988 | * |
Tejun Heo | 699140b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 989 | * If the flag 'sched_load_balance' of any cpuset with non-empty |
| 990 | * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset |
| 991 | * which has that flag enabled, or if any cpuset with a non-empty |
| 992 | * 'cpus' is removed, then call this routine to rebuild the |
| 993 | * scheduler's dynamic sched domains. |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 994 | * |
Sebastian Andrzej Siewior | c5c63b9 | 2021-08-03 16:16:07 +0200 | [diff] [blame] | 995 | * Call with cpuset_rwsem held; cpus_read_lock() must also be held. |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 996 | */ |
Tejun Heo | 699140b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 997 | static void rebuild_sched_domains_locked(void) |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 998 | { |
Daniel Jordan | 406100f | 2020-11-12 12:17:11 -0500 | [diff] [blame] | 999 | struct cgroup_subsys_state *pos_css; |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 1000 | struct sched_domain_attr *attr; |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 1001 | cpumask_var_t *doms; |
Daniel Jordan | 406100f | 2020-11-12 12:17:11 -0500 | [diff] [blame] | 1002 | struct cpuset *cs; |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 1003 | int ndoms; |
| 1004 | |
Juri Lelli | d74b27d | 2019-07-19 15:59:58 +0200 | [diff] [blame] | 1005 | lockdep_assert_cpus_held(); |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 1006 | percpu_rwsem_assert_held(&cpuset_rwsem); |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 1007 | |
Li Zefan | 5b16c2a | 2013-04-27 06:52:43 -0700 | [diff] [blame] | 1008 | /* |
Daniel Jordan | 406100f | 2020-11-12 12:17:11 -0500 | [diff] [blame] | 1009 | * If we have raced with CPU hotplug, return early to avoid |
Li Zefan | 5b16c2a | 2013-04-27 06:52:43 -0700 | [diff] [blame] | 1010 | * passing doms with an offlined CPU to partition_sched_domains(). |
Daniel Jordan | 406100f | 2020-11-12 12:17:11 -0500 | [diff] [blame] | 1011 | * In any case, cpuset_hotplug_workfn() will rebuild the sched domains. |
| 1012 | * |
| 1013 | * With no CPUs in any subpartitions, top_cpuset's effective CPUs |
| 1014 | * should be the same as the active CPUs, so checking only top_cpuset |
| 1015 | * is enough to detect racing CPU offlines. |
Li Zefan | 5b16c2a | 2013-04-27 06:52:43 -0700 | [diff] [blame] | 1016 | */ |
Waiman Long | 0ccea8f | 2018-11-08 10:08:42 -0500 | [diff] [blame] | 1017 | if (!top_cpuset.nr_subparts_cpus && |
| 1018 | !cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask)) |
Juri Lelli | d74b27d | 2019-07-19 15:59:58 +0200 | [diff] [blame] | 1019 | return; |
Waiman Long | 0ccea8f | 2018-11-08 10:08:42 -0500 | [diff] [blame] | 1020 | |
Daniel Jordan | 406100f | 2020-11-12 12:17:11 -0500 | [diff] [blame] | 1021 | /* |
| 1022 | * With subpartition CPUs, however, the effective CPUs of a partition |
| 1023 | * root should be only a subset of the active CPUs. Since a CPU in any |
| 1024 | * partition root could be offlined, all must be checked. |
| 1025 | */ |
| 1026 | if (top_cpuset.nr_subparts_cpus) { |
| 1027 | rcu_read_lock(); |
| 1028 | cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { |
| 1029 | if (!is_partition_root(cs)) { |
| 1030 | pos_css = css_rightmost_descendant(pos_css); |
| 1031 | continue; |
| 1032 | } |
| 1033 | if (!cpumask_subset(cs->effective_cpus, |
| 1034 | cpu_active_mask)) { |
| 1035 | rcu_read_unlock(); |
| 1036 | return; |
| 1037 | } |
| 1038 | } |
| 1039 | rcu_read_unlock(); |
| 1040 | } |
Li Zefan | 5b16c2a | 2013-04-27 06:52:43 -0700 | [diff] [blame] | 1041 | |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 1042 | /* Generate domain masks and attrs */ |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 1043 | ndoms = generate_sched_domains(&doms, &attr); |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 1044 | |
| 1045 | /* Have scheduler rebuild the domains */ |
Mathieu Poirier | f9a25f7 | 2019-07-19 15:59:55 +0200 | [diff] [blame] | 1046 | partition_and_rebuild_sched_domains(ndoms, doms, attr); |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 1047 | } |
Paul Menage | db7f47c | 2009-04-02 16:57:55 -0700 | [diff] [blame] | 1048 | #else /* !CONFIG_SMP */ |
Tejun Heo | 699140b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1049 | static void rebuild_sched_domains_locked(void) |
Paul Menage | db7f47c | 2009-04-02 16:57:55 -0700 | [diff] [blame] | 1050 | { |
| 1051 | } |
Paul Menage | db7f47c | 2009-04-02 16:57:55 -0700 | [diff] [blame] | 1052 | #endif /* CONFIG_SMP */ |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 1053 | |
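| | /* |
| | * Rebuild sched domains with the proper locking: take cpus_read_lock() |
| | * and then cpuset_rwsem before calling rebuild_sched_domains_locked(). |
| | */ |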
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 1054 | void rebuild_sched_domains(void) |
| 1055 | { |
Sebastian Andrzej Siewior | c5c63b9 | 2021-08-03 16:16:07 +0200 | [diff] [blame] | 1056 | cpus_read_lock(); |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 1057 | percpu_down_write(&cpuset_rwsem); |
Tejun Heo | 699140b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1058 | rebuild_sched_domains_locked(); |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 1059 | percpu_up_write(&cpuset_rwsem); |
Sebastian Andrzej Siewior | c5c63b9 | 2021-08-03 16:16:07 +0200 | [diff] [blame] | 1060 | cpus_read_unlock(); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 1061 | } |
| 1062 | |
Cliff Wickman | 58f4790 | 2008-02-07 00:14:44 -0800 | [diff] [blame] | 1063 | /** |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1064 | * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset. |
| 1065 | * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed |
| 1066 | * |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1067 | * Iterate through each task of @cs updating its cpus_allowed to the |
| 1068 | * effective cpuset's. As this function is called with cpuset_mutex held, |
| 1069 | * cpuset membership stays stable. |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1070 | */ |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1071 | static void update_tasks_cpumask(struct cpuset *cs) |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1072 | { |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1073 | struct css_task_iter it; |
| 1074 | struct task_struct *task; |
| 1075 | |
Tejun Heo | bc2fb7e | 2017-05-15 09:34:01 -0400 | [diff] [blame] | 1076 | css_task_iter_start(&cs->css, 0, &it); |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1077 | while ((task = css_task_iter_next(&it))) |
Li Zefan | ae1c802 | 2014-07-09 16:48:32 +0800 | [diff] [blame] | 1078 | set_cpus_allowed_ptr(task, cs->effective_cpus); |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1079 | css_task_iter_end(&it); |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1080 | } |
| 1081 | |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1082 | /** |
| 1083 | * compute_effective_cpumask - Compute the effective cpumask of the cpuset |
| 1084 | * @new_cpus: the temp variable for the new effective_cpus mask |
| 1085 | * @cs: the cpuset that needs to recompute the new effective_cpus mask |
| 1086 | * @parent: the parent cpuset |
| 1087 | * |
| 1088 | * If the parent has subpartition CPUs, include them in the list of |
Waiman Long | 4b842da | 2018-11-08 10:08:41 -0500 | [diff] [blame] | 1089 | * allowable CPUs in computing the new effective_cpus mask. Since offlined |
| 1090 | * CPUs are not removed from subparts_cpus, we have to use cpu_active_mask |
| 1091 | * to mask those out. |
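| | * |
| | * For example, if parent->effective_cpus is 0-3, parent->subparts_cpus |
| | * is 4-5 and cs->cpus_allowed is 2-4, the new effective_cpus becomes |
| | * 2-4 (assuming all of those CPUs are in cpu_active_mask). |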
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1092 | */ |
| 1093 | static void compute_effective_cpumask(struct cpumask *new_cpus, |
| 1094 | struct cpuset *cs, struct cpuset *parent) |
| 1095 | { |
| 1096 | if (parent->nr_subparts_cpus) { |
| 1097 | cpumask_or(new_cpus, parent->effective_cpus, |
| 1098 | parent->subparts_cpus); |
| 1099 | cpumask_and(new_cpus, new_cpus, cs->cpus_allowed); |
Waiman Long | 4b842da | 2018-11-08 10:08:41 -0500 | [diff] [blame] | 1100 | cpumask_and(new_cpus, new_cpus, cpu_active_mask); |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1101 | } else { |
| 1102 | cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus); |
| 1103 | } |
| 1104 | } |
| 1105 | |
| 1106 | /* |
| 1107 | * Commands for update_parent_subparts_cpumask |
| 1108 | */ |
| 1109 | enum subparts_cmd { |
| 1110 | partcmd_enable, /* Enable partition root */ |
| 1111 | partcmd_disable, /* Disable partition root */ |
| 1112 | partcmd_update, /* Update parent's subparts_cpus */ |
| 1113 | }; |
| 1114 | |
| 1115 | /** |
| 1116 | * update_parent_subparts_cpumask - update subparts_cpus mask of parent cpuset |
| 1117 | * @cpuset: The cpuset that requests change in partition root state |
| 1118 | * @cmd: Partition root state change command |
| 1119 | * @newmask: Optional new cpumask for partcmd_update |
| 1120 | * @tmp: Temporary addmask and delmask |
| 1121 | * Return: 0, 1 or an error code |
| 1122 | * |
| 1123 | * For partcmd_enable, the cpuset is being transformed from a non-partition |
| 1124 | * root to a partition root. The cpus_allowed mask of the given cpuset will |
| 1125 | * be put into parent's subparts_cpus and taken away from parent's |
| 1126 | * effective_cpus. The function will return 0 if all the CPUs listed in |
| 1127 | * cpus_allowed can be granted, or an error code otherwise. |
| 1128 | * |
| 1129 | * For partcmd_disable, the cpuset is being transformed from a partition |
Waiman Long | 0f3adb8 | 2021-07-20 10:18:26 -0400 | [diff] [blame] | 1130 | * root back to a non-partition root. Any CPUs in cpus_allowed that are in |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1131 | * parent's subparts_cpus will be taken away from that cpumask and put back |
| 1132 | * into parent's effective_cpus. 0 should always be returned. |
| 1133 | * |
| 1134 | * For partcmd_update, if the optional newmask is specified, the cpu |
| 1135 | * list is to be changed from cpus_allowed to newmask. Otherwise, |
Waiman Long | 3881b86 | 2018-11-08 10:08:39 -0500 | [diff] [blame] | 1136 | * cpus_allowed is assumed to remain the same. The cpuset should either |
| 1137 | * be a partition root or an invalid partition root. The partition root |
| 1138 | * state may change if newmask is NULL and none of the requested CPUs can |
| 1139 | * be granted by the parent. The function will return 1 if changes to |
| 1140 | * parent's subparts_cpus and effective_cpus happen, or 0 otherwise. |
| 1141 | * An error code should only be returned when newmask is non-NULL. |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1142 | * |
| 1143 | * The partcmd_enable and partcmd_disable commands are used by |
| 1144 | * update_prstate(). The partcmd_update command is used by |
| 1145 | * update_cpumasks_hier() with newmask NULL and update_cpumask() with |
| 1146 | * newmask set. |
| 1147 | * |
| 1148 | * The checking is stricter when enabling partition root than for the |
| 1149 | * other two commands. |
| 1150 | * |
| 1151 | * Because of the implicit cpu exclusive nature of a partition root, |
| 1152 | * cpumask changes that violate the cpu exclusivity rule will not be |
| 1153 | * permitted when checked by validate_change(). The validate_change() |
| 1154 | * function will also prevent any changes to the cpu list if it is not |
| 1155 | * a superset of children's cpu lists. |
| 1156 | */ |
| 1157 | static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd, |
| 1158 | struct cpumask *newmask, |
| 1159 | struct tmpmasks *tmp) |
| 1160 | { |
| 1161 | struct cpuset *parent = parent_cs(cpuset); |
| 1162 | int adding; /* Moving cpus from effective_cpus to subparts_cpus */ |
| 1163 | int deleting; /* Moving cpus from subparts_cpus to effective_cpus */ |
Waiman Long | e7cc988 | 2021-08-10 23:06:02 -0400 | [diff] [blame] | 1164 | int old_prs, new_prs; |
Waiman Long | 3881b86 | 2018-11-08 10:08:39 -0500 | [diff] [blame] | 1165 | bool part_error = false; /* Partition error? */ |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1166 | |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 1167 | percpu_rwsem_assert_held(&cpuset_rwsem); |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1168 | |
| 1169 | /* |
| 1170 | * The parent must be a partition root. |
| 1171 | * The new cpumask, if present, or the current cpus_allowed must |
| 1172 | * not be empty. |
| 1173 | */ |
| 1174 | if (!is_partition_root(parent) || |
| 1175 | (newmask && cpumask_empty(newmask)) || |
| 1176 | (!newmask && cpumask_empty(cpuset->cpus_allowed))) |
| 1177 | return -EINVAL; |
| 1178 | |
| 1179 | /* |
| 1180 | * Enabling/disabling partition root is not allowed if there are |
| 1181 | * online children. |
| 1182 | */ |
| 1183 | if ((cmd != partcmd_update) && css_has_online_children(&cpuset->css)) |
| 1184 | return -EBUSY; |
| 1185 | |
| 1186 | /* |
| 1187 | * Enabling partition root is not allowed if not all the CPUs |
| 1188 | * can be granted from parent's effective_cpus, or if granting |
| 1189 | * them would leave the parent without any effective CPU. |
| 1190 | */ |
| 1191 | if ((cmd == partcmd_enable) && |
| 1192 | (!cpumask_subset(cpuset->cpus_allowed, parent->effective_cpus) || |
| 1193 | cpumask_equal(cpuset->cpus_allowed, parent->effective_cpus))) |
| 1194 | return -EINVAL; |
| 1195 | |
| 1196 | /* |
| 1197 | * A cpumask update cannot make parent's effective_cpus become empty. |
| 1198 | */ |
| 1199 | adding = deleting = false; |
Waiman Long | e7cc988 | 2021-08-10 23:06:02 -0400 | [diff] [blame] | 1200 | old_prs = new_prs = cpuset->partition_root_state; |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1201 | if (cmd == partcmd_enable) { |
| 1202 | cpumask_copy(tmp->addmask, cpuset->cpus_allowed); |
| 1203 | adding = true; |
| 1204 | } else if (cmd == partcmd_disable) { |
| 1205 | deleting = cpumask_and(tmp->delmask, cpuset->cpus_allowed, |
| 1206 | parent->subparts_cpus); |
| 1207 | } else if (newmask) { |
| 1208 | /* |
| 1209 | * partcmd_update with newmask: |
| 1210 | * |
| 1211 | * delmask = cpus_allowed & ~newmask & parent->subparts_cpus |
| 1212 | * addmask = newmask & parent->effective_cpus |
| 1213 | * & ~parent->subparts_cpus |
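| | * |
| | * For example, with cpus_allowed = 2-5, newmask = 4-7, |
| | * parent->subparts_cpus = 2-3 and parent->effective_cpus = 4-6, |
| | * this gives delmask = 2-3 and addmask = 4-6. |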
| 1214 | */ |
| 1215 | cpumask_andnot(tmp->delmask, cpuset->cpus_allowed, newmask); |
| 1216 | deleting = cpumask_and(tmp->delmask, tmp->delmask, |
| 1217 | parent->subparts_cpus); |
| 1218 | |
| 1219 | cpumask_and(tmp->addmask, newmask, parent->effective_cpus); |
| 1220 | adding = cpumask_andnot(tmp->addmask, tmp->addmask, |
| 1221 | parent->subparts_cpus); |
| 1222 | /* |
| 1223 | * Return error if the new effective_cpus could become empty. |
| 1224 | */ |
Waiman Long | 4b842da | 2018-11-08 10:08:41 -0500 | [diff] [blame] | 1225 | if (adding && |
| 1226 | cpumask_equal(parent->effective_cpus, tmp->addmask)) { |
| 1227 | if (!deleting) |
| 1228 | return -EINVAL; |
| 1229 | /* |
| 1230 | * As some of the CPUs in subparts_cpus might have |
| 1231 | * been offlined, we need to compute the real delmask |
| 1232 | * to confirm that. |
| 1233 | */ |
| 1234 | if (!cpumask_and(tmp->addmask, tmp->delmask, |
| 1235 | cpu_active_mask)) |
| 1236 | return -EINVAL; |
| 1237 | cpumask_copy(tmp->addmask, parent->effective_cpus); |
| 1238 | } |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1239 | } else { |
| 1240 | /* |
| 1241 | * partcmd_update w/o newmask: |
| 1242 | * |
Waiman Long | 0f3adb8 | 2021-07-20 10:18:26 -0400 | [diff] [blame] | 1243 | * addmask = cpus_allowed & parent->effective_cpus |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1244 | * |
| 1245 | * Note that parent's subparts_cpus may have been |
Waiman Long | 3881b86 | 2018-11-08 10:08:39 -0500 | [diff] [blame] | 1246 | * pre-shrunk in case there is a change in the cpu list. |
| 1247 | * So no deletion is needed. |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1248 | */ |
| 1249 | adding = cpumask_and(tmp->addmask, cpuset->cpus_allowed, |
| 1250 | parent->effective_cpus); |
Waiman Long | 3881b86 | 2018-11-08 10:08:39 -0500 | [diff] [blame] | 1251 | part_error = cpumask_equal(tmp->addmask, |
| 1252 | parent->effective_cpus); |
| 1253 | } |
| 1254 | |
| 1255 | if (cmd == partcmd_update) { |
| 1256 | int prev_prs = cpuset->partition_root_state; |
| 1257 | |
| 1258 | /* |
| 1259 | * Check for possible transition between PRS_ENABLED |
| 1260 | * and PRS_ERROR. |
| 1261 | */ |
| 1262 | switch (cpuset->partition_root_state) { |
| 1263 | case PRS_ENABLED: |
| 1264 | if (part_error) |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 1265 | new_prs = PRS_ERROR; |
Waiman Long | 3881b86 | 2018-11-08 10:08:39 -0500 | [diff] [blame] | 1266 | break; |
| 1267 | case PRS_ERROR: |
| 1268 | if (!part_error) |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 1269 | new_prs = PRS_ENABLED; |
Waiman Long | 3881b86 | 2018-11-08 10:08:39 -0500 | [diff] [blame] | 1270 | break; |
| 1271 | } |
| 1272 | /* |
| 1273 | * Set part_error if previously in invalid state. |
| 1274 | */ |
| 1275 | part_error = (prev_prs == PRS_ERROR); |
| 1276 | } |
| 1277 | |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 1278 | if (!part_error && (new_prs == PRS_ERROR)) |
Waiman Long | 3881b86 | 2018-11-08 10:08:39 -0500 | [diff] [blame] | 1279 | return 0; /* Nothing needs to be done */ |
| 1280 | |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 1281 | if (new_prs == PRS_ERROR) { |
Waiman Long | 3881b86 | 2018-11-08 10:08:39 -0500 | [diff] [blame] | 1282 | /* |
| 1283 | * Remove all its cpus from parent's subparts_cpus. |
| 1284 | */ |
| 1285 | adding = false; |
| 1286 | deleting = cpumask_and(tmp->delmask, cpuset->cpus_allowed, |
| 1287 | parent->subparts_cpus); |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1288 | } |
| 1289 | |
Waiman Long | e7cc988 | 2021-08-10 23:06:02 -0400 | [diff] [blame] | 1290 | if (!adding && !deleting && (new_prs == old_prs)) |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1291 | return 0; |
| 1292 | |
| 1293 | /* |
| 1294 | * Change the parent's subparts_cpus. |
| 1295 | * Newly added CPUs will be removed from effective_cpus and |
| 1296 | * newly deleted ones will be added back to effective_cpus. |
| 1297 | */ |
| 1298 | spin_lock_irq(&callback_lock); |
| 1299 | if (adding) { |
| 1300 | cpumask_or(parent->subparts_cpus, |
| 1301 | parent->subparts_cpus, tmp->addmask); |
| 1302 | cpumask_andnot(parent->effective_cpus, |
| 1303 | parent->effective_cpus, tmp->addmask); |
| 1304 | } |
| 1305 | if (deleting) { |
| 1306 | cpumask_andnot(parent->subparts_cpus, |
| 1307 | parent->subparts_cpus, tmp->delmask); |
Waiman Long | 4b842da | 2018-11-08 10:08:41 -0500 | [diff] [blame] | 1308 | /* |
| 1309 | * Some of the CPUs in subparts_cpus might have been offlined. |
| 1310 | */ |
| 1311 | cpumask_and(tmp->delmask, tmp->delmask, cpu_active_mask); |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1312 | cpumask_or(parent->effective_cpus, |
| 1313 | parent->effective_cpus, tmp->delmask); |
| 1314 | } |
| 1315 | |
| 1316 | parent->nr_subparts_cpus = cpumask_weight(parent->subparts_cpus); |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 1317 | |
Waiman Long | e7cc988 | 2021-08-10 23:06:02 -0400 | [diff] [blame] | 1318 | if (old_prs != new_prs) |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 1319 | cpuset->partition_root_state = new_prs; |
Waiman Long | e7cc988 | 2021-08-10 23:06:02 -0400 | [diff] [blame] | 1320 | |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1321 | spin_unlock_irq(&callback_lock); |
Waiman Long | e7cc988 | 2021-08-10 23:06:02 -0400 | [diff] [blame] | 1322 | notify_partition_change(cpuset, old_prs, new_prs); |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1323 | |
| 1324 | return cmd == partcmd_update; |
| 1325 | } |
| 1326 | |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1327 | /* |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1328 | * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1329 | * @cs: the cpuset to consider |
| 1330 | * @tmp: temp variables for calculating effective_cpus & partition setup |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1331 | * |
Aubrey Li | 415de5f | 2021-01-13 12:37:41 +0800 | [diff] [blame] | 1332 | * When configured cpumask is changed, the effective cpumasks of this cpuset |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1333 | * and all its descendants need to be updated. |
| 1334 | * |
Aubrey Li | 415de5f | 2021-01-13 12:37:41 +0800 | [diff] [blame] | 1335 | * On legacy hierarchy, effective_cpus will be the same as cpus_allowed. |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1336 | * |
| 1337 | * Called with cpuset_mutex held |
| 1338 | */ |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1339 | static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp) |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1340 | { |
| 1341 | struct cpuset *cp; |
Tejun Heo | 492eb21 | 2013-08-08 20:11:25 -0400 | [diff] [blame] | 1342 | struct cgroup_subsys_state *pos_css; |
Li Zefan | 8b5f1c5 | 2014-07-09 16:47:50 +0800 | [diff] [blame] | 1343 | bool need_rebuild_sched_domains = false; |
Waiman Long | e7cc988 | 2021-08-10 23:06:02 -0400 | [diff] [blame] | 1344 | int old_prs, new_prs; |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1345 | |
| 1346 | rcu_read_lock(); |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1347 | cpuset_for_each_descendant_pre(cp, pos_css, cs) { |
| 1348 | struct cpuset *parent = parent_cs(cp); |
| 1349 | |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1350 | compute_effective_cpumask(tmp->new_cpus, cp, parent); |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1351 | |
Li Zefan | 554b0d1 | 2014-07-09 16:47:41 +0800 | [diff] [blame] | 1352 | /* |
| 1353 | * If it becomes empty, inherit the effective mask of the |
| 1354 | * parent, which is guaranteed to have some CPUs. |
| 1355 | */ |
Waiman Long | 4716909 | 2018-11-08 10:08:40 -0500 | [diff] [blame] | 1356 | if (is_in_v2_mode() && cpumask_empty(tmp->new_cpus)) { |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1357 | cpumask_copy(tmp->new_cpus, parent->effective_cpus); |
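| | /* child_ecpus_count tracks how many children use the parent's effective_cpus */ |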
Waiman Long | 4716909 | 2018-11-08 10:08:40 -0500 | [diff] [blame] | 1358 | if (!cp->use_parent_ecpus) { |
| 1359 | cp->use_parent_ecpus = true; |
| 1360 | parent->child_ecpus_count++; |
| 1361 | } |
| 1362 | } else if (cp->use_parent_ecpus) { |
| 1363 | cp->use_parent_ecpus = false; |
| 1364 | WARN_ON_ONCE(!parent->child_ecpus_count); |
| 1365 | parent->child_ecpus_count--; |
| 1366 | } |
Li Zefan | 554b0d1 | 2014-07-09 16:47:41 +0800 | [diff] [blame] | 1367 | |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1368 | /* |
| 1369 | * Skip the whole subtree if the cpumask remains the same |
| 1370 | * and the cpuset has no partition root state. |
| 1371 | */ |
Waiman Long | 3881b86 | 2018-11-08 10:08:39 -0500 | [diff] [blame] | 1372 | if (!cp->partition_root_state && |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1373 | cpumask_equal(tmp->new_cpus, cp->effective_cpus)) { |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1374 | pos_css = css_rightmost_descendant(pos_css); |
| 1375 | continue; |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1376 | } |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1377 | |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1378 | /* |
| 1379 | * update_parent_subparts_cpumask() should have been called |
| 1380 | * for cs already in update_cpumask(). We should also call |
| 1381 | * update_tasks_cpumask() again for tasks in the parent |
| 1382 | * cpuset if the parent's subparts_cpus changes. |
| 1383 | */ |
Waiman Long | e7cc988 | 2021-08-10 23:06:02 -0400 | [diff] [blame] | 1384 | old_prs = new_prs = cp->partition_root_state; |
| 1385 | if ((cp != cs) && old_prs) { |
Waiman Long | 3881b86 | 2018-11-08 10:08:39 -0500 | [diff] [blame] | 1386 | switch (parent->partition_root_state) { |
| 1387 | case PRS_DISABLED: |
| 1388 | /* |
| 1389 | * The parent is no longer a partition root, |
Waiman Long | 0f3adb8 | 2021-07-20 10:18:26 -0400 | [diff] [blame] | 1390 | * so @cp cannot be one either: clear its |
| 1391 | * state and its CS_CPU_EXCLUSIVE flag. |
Waiman Long | 3881b86 | 2018-11-08 10:08:39 -0500 | [diff] [blame] | 1392 | */ |
| 1393 | WARN_ON_ONCE(cp->partition_root_state |
| 1394 | != PRS_ERROR); |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 1395 | new_prs = PRS_DISABLED; |
Waiman Long | 3881b86 | 2018-11-08 10:08:39 -0500 | [diff] [blame] | 1396 | |
| 1397 | /* |
| 1398 | * clear_bit() is an atomic operation and |
| 1399 | * readers aren't interested in the state |
| 1400 | * of CS_CPU_EXCLUSIVE anyway. So we can |
| 1401 | * just update the flag without holding |
| 1402 | * the callback_lock. |
| 1403 | */ |
| 1404 | clear_bit(CS_CPU_EXCLUSIVE, &cp->flags); |
| 1405 | break; |
| 1406 | |
| 1407 | case PRS_ENABLED: |
| 1408 | if (update_parent_subparts_cpumask(cp, partcmd_update, NULL, tmp)) |
| 1409 | update_tasks_cpumask(parent); |
| 1410 | break; |
| 1411 | |
| 1412 | case PRS_ERROR: |
| 1413 | /* |
| 1414 | * When the parent is invalid, @cp has to be invalid too. |
| 1415 | */ |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 1416 | new_prs = PRS_ERROR; |
Waiman Long | 3881b86 | 2018-11-08 10:08:39 -0500 | [diff] [blame] | 1417 | break; |
| 1418 | } |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1419 | } |
| 1420 | |
Tejun Heo | ec903c0 | 2014-05-13 12:11:01 -0400 | [diff] [blame] | 1421 | if (!css_tryget_online(&cp->css)) |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1422 | continue; |
| 1423 | rcu_read_unlock(); |
| 1424 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1425 | spin_lock_irq(&callback_lock); |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1426 | |
| 1427 | cpumask_copy(cp->effective_cpus, tmp->new_cpus); |
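| | /* If @cp is no longer a valid partition root, release its subpartition CPUs. */ |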
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 1428 | if (cp->nr_subparts_cpus && (new_prs != PRS_ENABLED)) { |
Waiman Long | 3881b86 | 2018-11-08 10:08:39 -0500 | [diff] [blame] | 1429 | cp->nr_subparts_cpus = 0; |
| 1430 | cpumask_clear(cp->subparts_cpus); |
| 1431 | } else if (cp->nr_subparts_cpus) { |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1432 | /* |
| 1433 | * Make sure that effective_cpus & subparts_cpus |
| 1434 | * are mutually exclusive. |
Waiman Long | 3881b86 | 2018-11-08 10:08:39 -0500 | [diff] [blame] | 1435 | * |
| 1436 | * In the unlikely event that effective_cpus |
| 1437 | * becomes empty, we clear cp->nr_subparts_cpus and |
| 1438 | * let its child partition roots compete for |
| 1439 | * CPUs again. |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1440 | */ |
| 1441 | cpumask_andnot(cp->effective_cpus, cp->effective_cpus, |
| 1442 | cp->subparts_cpus); |
Waiman Long | 3881b86 | 2018-11-08 10:08:39 -0500 | [diff] [blame] | 1443 | if (cpumask_empty(cp->effective_cpus)) { |
| 1444 | cpumask_copy(cp->effective_cpus, tmp->new_cpus); |
| 1445 | cpumask_clear(cp->subparts_cpus); |
| 1446 | cp->nr_subparts_cpus = 0; |
| 1447 | } else if (!cpumask_subset(cp->subparts_cpus, |
| 1448 | tmp->new_cpus)) { |
| 1449 | cpumask_andnot(cp->subparts_cpus, |
| 1450 | cp->subparts_cpus, tmp->new_cpus); |
| 1451 | cp->nr_subparts_cpus |
| 1452 | = cpumask_weight(cp->subparts_cpus); |
| 1453 | } |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1454 | } |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 1455 | |
Waiman Long | e7cc988 | 2021-08-10 23:06:02 -0400 | [diff] [blame] | 1456 | if (new_prs != old_prs) |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 1457 | cp->partition_root_state = new_prs; |
| 1458 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1459 | spin_unlock_irq(&callback_lock); |
Waiman Long | e7cc988 | 2021-08-10 23:06:02 -0400 | [diff] [blame] | 1460 | notify_partition_change(cp, old_prs, new_prs); |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1461 | |
Waiman Long | b8d1b8e | 2017-08-17 15:33:10 -0400 | [diff] [blame] | 1462 | WARN_ON(!is_in_v2_mode() && |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1463 | !cpumask_equal(cp->cpus_allowed, cp->effective_cpus)); |
| 1464 | |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1465 | update_tasks_cpumask(cp); |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1466 | |
Li Zefan | 8b5f1c5 | 2014-07-09 16:47:50 +0800 | [diff] [blame] | 1467 | /* |
Waiman Long | 0ccea8f | 2018-11-08 10:08:42 -0500 | [diff] [blame] | 1468 | * On legacy hierarchy, if the effective cpumask of any non- |
| 1469 | * empty cpuset is changed, we need to rebuild sched domains. |
| 1470 | * On default hierarchy, the cpuset needs to be a partition |
| 1471 | * root as well. |
Li Zefan | 8b5f1c5 | 2014-07-09 16:47:50 +0800 | [diff] [blame] | 1472 | */ |
| 1473 | if (!cpumask_empty(cp->cpus_allowed) && |
Waiman Long | 0ccea8f | 2018-11-08 10:08:42 -0500 | [diff] [blame] | 1474 | is_sched_load_balance(cp) && |
| 1475 | (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) || |
| 1476 | is_partition_root(cp))) |
Li Zefan | 8b5f1c5 | 2014-07-09 16:47:50 +0800 | [diff] [blame] | 1477 | need_rebuild_sched_domains = true; |
| 1478 | |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1479 | rcu_read_lock(); |
| 1480 | css_put(&cp->css); |
| 1481 | } |
| 1482 | rcu_read_unlock(); |
Li Zefan | 8b5f1c5 | 2014-07-09 16:47:50 +0800 | [diff] [blame] | 1483 | |
| 1484 | if (need_rebuild_sched_domains) |
| 1485 | rebuild_sched_domains_locked(); |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1486 | } |
| 1487 | |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1488 | /** |
Waiman Long | 4716909 | 2018-11-08 10:08:40 -0500 | [diff] [blame] | 1489 | * update_sibling_cpumasks - Update siblings' cpumasks |
| 1490 | * @parent: Parent cpuset |
| 1491 | * @cs: Current cpuset |
| 1492 | * @tmp: Temp variables |
| 1493 | */ |
| 1494 | static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, |
| 1495 | struct tmpmasks *tmp) |
| 1496 | { |
| 1497 | struct cpuset *sibling; |
| 1498 | struct cgroup_subsys_state *pos_css; |
| 1499 | |
| 1500 | /* |
| 1501 | * Check all its siblings and call update_cpumasks_hier() |
| 1502 | * if their use_parent_ecpus flag is set in order for them |
| 1503 | * to use the right effective_cpus value. |
| 1504 | */ |
| 1505 | rcu_read_lock(); |
| 1506 | cpuset_for_each_child(sibling, pos_css, parent) { |
| 1507 | if (sibling == cs) |
| 1508 | continue; |
| 1509 | if (!sibling->use_parent_ecpus) |
| 1510 | continue; |
| 1511 | |
| 1512 | update_cpumasks_hier(sibling, tmp); |
| 1513 | } |
| 1514 | rcu_read_unlock(); |
| 1515 | } |
| 1516 | |
| 1517 | /** |
Cliff Wickman | 58f4790 | 2008-02-07 00:14:44 -0800 | [diff] [blame] | 1518 | * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it |
| 1519 | * @cs: the cpuset to consider |
Fabian Frederick | fc34ac1 | 2014-05-05 19:46:55 +0200 | [diff] [blame] | 1520 | * @trialcs: trial cpuset |
Cliff Wickman | 58f4790 | 2008-02-07 00:14:44 -0800 | [diff] [blame] | 1521 | * @buf: buffer of cpu numbers written to this cpuset |
| 1522 | */ |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1523 | static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, |
| 1524 | const char *buf) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1525 | { |
Cliff Wickman | 58f4790 | 2008-02-07 00:14:44 -0800 | [diff] [blame] | 1526 | int retval; |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1527 | struct tmpmasks tmp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1528 | |
Rusty Russell | 5f054e3 | 2012-03-29 15:38:31 +1030 | [diff] [blame] | 1529 | /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */ |
Paul Jackson | 4c4d50f | 2006-08-27 01:23:51 -0700 | [diff] [blame] | 1530 | if (cs == &top_cpuset) |
| 1531 | return -EACCES; |
| 1532 | |
David Rientjes | 6f7f02e | 2007-05-08 00:31:43 -0700 | [diff] [blame] | 1533 | /* |
Paul Jackson | c8d9c90 | 2008-02-07 00:14:46 -0800 | [diff] [blame] | 1534 | * An empty cpus_allowed is ok only if the cpuset has no tasks. |
Paul Jackson | 020958b | 2007-10-18 23:40:21 -0700 | [diff] [blame] | 1535 | * Since cpulist_parse() fails on an empty mask, we special case |
| 1536 | * that parsing. The validate_change() call ensures that cpusets |
| 1537 | * with tasks have cpus. |
David Rientjes | 6f7f02e | 2007-05-08 00:31:43 -0700 | [diff] [blame] | 1538 | */ |
Paul Jackson | 020958b | 2007-10-18 23:40:21 -0700 | [diff] [blame] | 1539 | if (!*buf) { |
Li Zefan | 300ed6c | 2009-01-07 18:08:44 -0800 | [diff] [blame] | 1540 | cpumask_clear(trialcs->cpus_allowed); |
David Rientjes | 6f7f02e | 2007-05-08 00:31:43 -0700 | [diff] [blame] | 1541 | } else { |
Li Zefan | 300ed6c | 2009-01-07 18:08:44 -0800 | [diff] [blame] | 1542 | retval = cpulist_parse(buf, trialcs->cpus_allowed); |
David Rientjes | 6f7f02e | 2007-05-08 00:31:43 -0700 | [diff] [blame] | 1543 | if (retval < 0) |
| 1544 | return retval; |
Lai Jiangshan | 3734074 | 2008-06-05 22:46:32 -0700 | [diff] [blame] | 1545 | |
Li Zefan | 5d8ba82 | 2014-07-09 16:49:12 +0800 | [diff] [blame] | 1546 | if (!cpumask_subset(trialcs->cpus_allowed, |
| 1547 | top_cpuset.cpus_allowed)) |
Lai Jiangshan | 3734074 | 2008-06-05 22:46:32 -0700 | [diff] [blame] | 1548 | return -EINVAL; |
David Rientjes | 6f7f02e | 2007-05-08 00:31:43 -0700 | [diff] [blame] | 1549 | } |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 1550 | |
Paul Menage | 8707d8b | 2007-10-18 23:40:22 -0700 | [diff] [blame] | 1551 | /* Nothing to do if the cpus didn't change */ |
Li Zefan | 300ed6c | 2009-01-07 18:08:44 -0800 | [diff] [blame] | 1552 | if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed)) |
Paul Menage | 8707d8b | 2007-10-18 23:40:22 -0700 | [diff] [blame] | 1553 | return 0; |
Cliff Wickman | 58f4790 | 2008-02-07 00:14:44 -0800 | [diff] [blame] | 1554 | |
Li Zefan | a73456f | 2013-06-05 17:15:59 +0800 | [diff] [blame] | 1555 | retval = validate_change(cs, trialcs); |
| 1556 | if (retval < 0) |
| 1557 | return retval; |
| 1558 | |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1559 | #ifdef CONFIG_CPUMASK_OFFSTACK |
| 1560 | /* |
| 1561 | * Use the cpumasks in trialcs for tmpmasks when they are pointers |
| 1562 | * to allocated cpumasks. |
| 1563 | */ |
| 1564 | tmp.addmask = trialcs->subparts_cpus; |
| 1565 | tmp.delmask = trialcs->effective_cpus; |
| 1566 | tmp.new_cpus = trialcs->cpus_allowed; |
| 1567 | #endif |
| 1568 | |
| 1569 | if (cs->partition_root_state) { |
| 1570 | /* Cpumask of a partition root cannot be empty */ |
| 1571 | if (cpumask_empty(trialcs->cpus_allowed)) |
| 1572 | return -EINVAL; |
| 1573 | if (update_parent_subparts_cpumask(cs, partcmd_update, |
| 1574 | trialcs->cpus_allowed, &tmp) < 0) |
| 1575 | return -EINVAL; |
| 1576 | } |
| 1577 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1578 | spin_lock_irq(&callback_lock); |
Li Zefan | 300ed6c | 2009-01-07 18:08:44 -0800 | [diff] [blame] | 1579 | cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1580 | |
| 1581 | /* |
| 1582 | * Make sure that subparts_cpus is a subset of cpus_allowed. |
| 1583 | */ |
| 1584 | if (cs->nr_subparts_cpus) { |
| 1585 | cpumask_andnot(cs->subparts_cpus, cs->subparts_cpus, |
| 1586 | cs->cpus_allowed); |
| 1587 | cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus); |
| 1588 | } |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1589 | spin_unlock_irq(&callback_lock); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 1590 | |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1591 | update_cpumasks_hier(cs, &tmp); |
Waiman Long | 4716909 | 2018-11-08 10:08:40 -0500 | [diff] [blame] | 1592 | |
| 1593 | if (cs->partition_root_state) { |
| 1594 | struct cpuset *parent = parent_cs(cs); |
| 1595 | |
| 1596 | /* |
| 1597 | * For partition root, update the cpumasks of sibling |
| 1598 | * cpusets if they use parent's effective_cpus. |
| 1599 | */ |
| 1600 | if (parent->child_ecpus_count) |
| 1601 | update_sibling_cpumasks(parent, cs, &tmp); |
| 1602 | } |
Dinakar Guniguntala | 85d7b94 | 2005-06-25 14:57:34 -0700 | [diff] [blame] | 1603 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1604 | } |
| 1605 | |
Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1606 | /* |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 1607 | * Migrate a memory region from one set of nodes to another. This is |
| 1608 | * performed asynchronously as it can be called from the process migration path |
| 1609 | * holding locks involved in process management. All mm migrations are |
| 1610 | * performed in the queued order and can be waited for by flushing |
| 1611 | * cpuset_migrate_mm_wq. |
Paul Jackson | e4e364e | 2006-03-31 02:30:52 -0800 | [diff] [blame] | 1612 | */ |
| 1613 | |
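| | /* One queued mm migration: the mm to migrate plus the from/to nodemasks. */ |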
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 1614 | struct cpuset_migrate_mm_work { |
| 1615 | struct work_struct work; |
| 1616 | struct mm_struct *mm; |
| 1617 | nodemask_t from; |
| 1618 | nodemask_t to; |
| 1619 | }; |
| 1620 | |
| 1621 | static void cpuset_migrate_mm_workfn(struct work_struct *work) |
| 1622 | { |
| 1623 | struct cpuset_migrate_mm_work *mwork = |
| 1624 | container_of(work, struct cpuset_migrate_mm_work, work); |
| 1625 | |
| 1626 | /* on a wq worker, no need to worry about %current's mems_allowed */ |
| 1627 | do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL); |
| 1628 | mmput(mwork->mm); |
| 1629 | kfree(mwork); |
| 1630 | } |
| 1631 | |
Paul Jackson | e4e364e | 2006-03-31 02:30:52 -0800 | [diff] [blame] | 1632 | static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from, |
| 1633 | const nodemask_t *to) |
| 1634 | { |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 1635 | struct cpuset_migrate_mm_work *mwork; |
Paul Jackson | e4e364e | 2006-03-31 02:30:52 -0800 | [diff] [blame] | 1636 | |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 1637 | mwork = kzalloc(sizeof(*mwork), GFP_KERNEL); |
| 1638 | if (mwork) { |
| 1639 | mwork->mm = mm; |
| 1640 | mwork->from = *from; |
| 1641 | mwork->to = *to; |
| 1642 | INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn); |
| 1643 | queue_work(cpuset_migrate_mm_wq, &mwork->work); |
| 1644 | } else { |
| 1645 | mmput(mm); |
| 1646 | } |
| 1647 | } |
Paul Jackson | e4e364e | 2006-03-31 02:30:52 -0800 | [diff] [blame] | 1648 | |
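| | /* Wait for the mm migrations queued during attach to finish. */ |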
Tejun Heo | 5cf1cac | 2016-04-21 19:06:48 -0400 | [diff] [blame] | 1649 | static void cpuset_post_attach(void) |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 1650 | { |
| 1651 | flush_workqueue(cpuset_migrate_mm_wq); |
Paul Jackson | e4e364e | 2006-03-31 02:30:52 -0800 | [diff] [blame] | 1652 | } |
| 1653 | |
Li Zefan | 3b6766f | 2009-04-02 16:57:51 -0700 | [diff] [blame] | 1654 | /* |
Miao Xie | 58568d2 | 2009-06-16 15:31:49 -0700 | [diff] [blame] | 1655 | * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy |
| 1656 | * @tsk: the task to change |
| 1657 | * @newmems: new nodes that the task will be set |
| 1658 | * |
Vlastimil Babka | 5f155f2 | 2017-07-06 15:40:09 -0700 | [diff] [blame] | 1659 | * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed |
| 1660 | * and rebind the task's mempolicy, if any. If the task is allocating in |
| 1661 | * parallel, it might temporarily see an empty intersection, which results in |
| 1662 | * a seqlock check and retry before OOM or allocation failure. |
Miao Xie | 58568d2 | 2009-06-16 15:31:49 -0700 | [diff] [blame] | 1663 | */ |
| 1664 | static void cpuset_change_task_nodemask(struct task_struct *tsk, |
| 1665 | nodemask_t *newmems) |
| 1666 | { |
Miao Xie | c0ff745 | 2010-05-24 14:32:08 -0700 | [diff] [blame] | 1667 | task_lock(tsk); |
Mel Gorman | cc9a6c8 | 2012-03-21 16:34:11 -0700 | [diff] [blame] | 1668 | |
Vlastimil Babka | 5f155f2 | 2017-07-06 15:40:09 -0700 | [diff] [blame] | 1669 | local_irq_disable(); |
| 1670 | write_seqcount_begin(&tsk->mems_allowed_seq); |
Mel Gorman | cc9a6c8 | 2012-03-21 16:34:11 -0700 | [diff] [blame] | 1671 | |
Miao Xie | 58568d2 | 2009-06-16 15:31:49 -0700 | [diff] [blame] | 1672 | nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems); |
Vlastimil Babka | 213980c | 2017-07-06 15:40:06 -0700 | [diff] [blame] | 1673 | mpol_rebind_task(tsk, newmems); |
Miao Xie | 58568d2 | 2009-06-16 15:31:49 -0700 | [diff] [blame] | 1674 | tsk->mems_allowed = *newmems; |
Mel Gorman | cc9a6c8 | 2012-03-21 16:34:11 -0700 | [diff] [blame] | 1675 | |
Vlastimil Babka | 5f155f2 | 2017-07-06 15:40:09 -0700 | [diff] [blame] | 1676 | write_seqcount_end(&tsk->mems_allowed_seq); |
| 1677 | local_irq_enable(); |
Mel Gorman | cc9a6c8 | 2012-03-21 16:34:11 -0700 | [diff] [blame] | 1678 | |
Miao Xie | c0ff745 | 2010-05-24 14:32:08 -0700 | [diff] [blame] | 1679 | task_unlock(tsk); |
Miao Xie | 58568d2 | 2009-06-16 15:31:49 -0700 | [diff] [blame] | 1680 | } |
| 1681 | |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 1682 | static void *cpuset_being_rebound; |
| 1683 | |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1684 | /** |
| 1685 | * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset. |
| 1686 | * @cs: the cpuset in which each task's mems_allowed mask needs to be changed |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1687 | * |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1688 | * Iterate through each task of @cs updating its mems_allowed to the |
| 1689 | * effective cpuset's. As this function is called with cpuset_mutex held, |
| 1690 | * cpuset membership stays stable. |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1691 | */ |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1692 | static void update_tasks_nodemask(struct cpuset *cs) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1693 | { |
Li Zefan | 33ad801 | 2013-06-09 17:15:08 +0800 | [diff] [blame] | 1694 | static nodemask_t newmems; /* protected by cpuset_mutex */ |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1695 | struct css_task_iter it; |
| 1696 | struct task_struct *task; |
Paul Jackson | 59dac16 | 2006-01-08 01:01:52 -0800 | [diff] [blame] | 1697 | |
Lee Schermerhorn | 846a16b | 2008-04-28 02:13:09 -0700 | [diff] [blame] | 1698 | cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ |
Paul Jackson | 4225399 | 2006-01-08 01:01:59 -0800 | [diff] [blame] | 1699 | |
Li Zefan | ae1c802 | 2014-07-09 16:48:32 +0800 | [diff] [blame] | 1700 | guarantee_online_mems(cs, &newmems); |
Li Zefan | 33ad801 | 2013-06-09 17:15:08 +0800 | [diff] [blame] | 1701 | |
Paul Jackson | 4225399 | 2006-01-08 01:01:59 -0800 | [diff] [blame] | 1702 | /* |
Michel Lespinasse | c1e8d7c | 2020-06-08 21:33:54 -0700 | [diff] [blame] | 1703 | * The mpol_rebind_mm() call takes mmap_lock, which we couldn't |
Li Zefan | 3b6766f | 2009-04-02 16:57:51 -0700 | [diff] [blame] | 1704 | * take while holding tasklist_lock. Forks can happen - the |
| 1705 | * mpol_dup() cpuset_being_rebound check will catch such forks, |
| 1706 | * and rebind their vma mempolicies too. Because we still hold |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1707 | * the global cpuset_mutex, we know that no other rebind effort |
Li Zefan | 3b6766f | 2009-04-02 16:57:51 -0700 | [diff] [blame] | 1708 | * will be contending for the global variable cpuset_being_rebound. |
Paul Jackson | 4225399 | 2006-01-08 01:01:59 -0800 | [diff] [blame] | 1709 | * It's ok if we rebind the same mm twice; mpol_rebind_mm() |
Paul Jackson | 04c19fa | 2006-01-08 01:02:00 -0800 | [diff] [blame] | 1710 | * is idempotent. Also migrate pages in each mm to new nodes. |
Paul Jackson | 4225399 | 2006-01-08 01:01:59 -0800 | [diff] [blame] | 1711 | */ |
Tejun Heo | bc2fb7e | 2017-05-15 09:34:01 -0400 | [diff] [blame] | 1712 | css_task_iter_start(&cs->css, 0, &it); |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1713 | while ((task = css_task_iter_next(&it))) { |
| 1714 | struct mm_struct *mm; |
| 1715 | bool migrate; |
| 1716 | |
| 1717 | cpuset_change_task_nodemask(task, &newmems); |
| 1718 | |
| 1719 | mm = get_task_mm(task); |
| 1720 | if (!mm) |
| 1721 | continue; |
| 1722 | |
| 1723 | migrate = is_memory_migrate(cs); |
| 1724 | |
| 1725 | mpol_rebind_mm(mm, &cs->mems_allowed); |
| 1726 | if (migrate) |
| 1727 | cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems); |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 1728 | else |
| 1729 | mmput(mm); |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1730 | } |
| 1731 | css_task_iter_end(&it); |
Paul Jackson | 4225399 | 2006-01-08 01:01:59 -0800 | [diff] [blame] | 1732 | |
Li Zefan | 33ad801 | 2013-06-09 17:15:08 +0800 | [diff] [blame] | 1733 | /* |
| 1734 | * All the tasks' nodemasks have been updated, update |
| 1735 | * cs->old_mems_allowed. |
| 1736 | */ |
| 1737 | cs->old_mems_allowed = newmems; |
| 1738 | |
Paul Menage | 2df167a | 2008-02-07 00:14:45 -0800 | [diff] [blame] | 1739 | /* We're done rebinding vmas to this cpuset's new mems_allowed. */ |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 1740 | cpuset_being_rebound = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1741 | } |
| 1742 | |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1743 | /* |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1744 | * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree |
| 1745 | * @cs: the cpuset to consider |
| 1746 | * @new_mems: a temp variable for calculating new effective_mems |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1747 | * |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1748 | * When configured nodemask is changed, the effective nodemasks of this cpuset |
| 1749 | * and all its descendants need to be updated. |
| 1750 | * |
Lu Jialin | d95af61 | 2021-04-08 16:03:46 +0800 | [diff] [blame] | 1751 | * On legacy hierarchy, effective_mems will be the same as mems_allowed. |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1752 | * |
| 1753 | * Called with cpuset_mutex held |
| 1754 | */ |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1755 | static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1756 | { |
| 1757 | struct cpuset *cp; |
Tejun Heo | 492eb21 | 2013-08-08 20:11:25 -0400 | [diff] [blame] | 1758 | struct cgroup_subsys_state *pos_css; |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1759 | |
| 1760 | rcu_read_lock(); |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1761 | cpuset_for_each_descendant_pre(cp, pos_css, cs) { |
| 1762 | struct cpuset *parent = parent_cs(cp); |
| 1763 | |
| 1764 | nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems); |
| 1765 | |
Li Zefan | 554b0d1 | 2014-07-09 16:47:41 +0800 | [diff] [blame] | 1766 | /* |
| 1767 | * If it becomes empty, inherit the effective mask of the |
| 1768 | * parent, which is guaranteed to have some MEMs. |
| 1769 | */ |
Waiman Long | b8d1b8e | 2017-08-17 15:33:10 -0400 | [diff] [blame] | 1770 | if (is_in_v2_mode() && nodes_empty(*new_mems)) |
Li Zefan | 554b0d1 | 2014-07-09 16:47:41 +0800 | [diff] [blame] | 1771 | *new_mems = parent->effective_mems; |
| 1772 | |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1773 | /* Skip the whole subtree if the nodemask remains the same. */ |
| 1774 | if (nodes_equal(*new_mems, cp->effective_mems)) { |
| 1775 | pos_css = css_rightmost_descendant(pos_css); |
| 1776 | continue; |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1777 | } |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1778 | |
Tejun Heo | ec903c0 | 2014-05-13 12:11:01 -0400 | [diff] [blame] | 1779 | if (!css_tryget_online(&cp->css)) |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1780 | continue; |
| 1781 | rcu_read_unlock(); |
| 1782 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1783 | spin_lock_irq(&callback_lock); |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1784 | cp->effective_mems = *new_mems; |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1785 | spin_unlock_irq(&callback_lock); |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1786 | |
Waiman Long | b8d1b8e | 2017-08-17 15:33:10 -0400 | [diff] [blame] | 1787 | WARN_ON(!is_in_v2_mode() && |
Li Zefan | a138126 | 2014-07-30 15:07:13 +0800 | [diff] [blame] | 1788 | !nodes_equal(cp->mems_allowed, cp->effective_mems)); |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1789 | |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1790 | update_tasks_nodemask(cp); |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1791 | |
| 1792 | rcu_read_lock(); |
| 1793 | css_put(&cp->css); |
| 1794 | } |
| 1795 | rcu_read_unlock(); |
| 1796 | } |
| 1797 | |
| 1798 | /* |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1799 | * Handle user request to change the 'mems' memory placement |
| 1800 | * of a cpuset. Needs to validate the request, update the |
Miao Xie | 58568d2 | 2009-06-16 15:31:49 -0700 | [diff] [blame] | 1801 | * cpuset's mems_allowed, and for each task in the cpuset, |
| 1802 | * update mems_allowed and rebind the task's mempolicy and any vma |
| 1803 | * mempolicies, and if the cpuset is marked 'memory_migrate', |
| 1804 | * migrate the task's pages to the new memory. |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1805 | * |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1806 | * Call with cpuset_mutex held. May take callback_lock during call. |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1807 | * Will take tasklist_lock, scan tasklist for tasks in cpuset cs, |
Michel Lespinasse | c1e8d7c | 2020-06-08 21:33:54 -0700 | [diff] [blame] | 1808 | * lock each such task's mm->mmap_lock, scan its vmas and rebind |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1809 | * their mempolicies to the cpuset's new mems_allowed. |
| 1810 | */ |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1811 | static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, |
| 1812 | const char *buf) |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1813 | { |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1814 | int retval; |
| 1815 | |
| 1816 | /* |
Lai Jiangshan | 38d7bee | 2012-12-12 13:51:24 -0800 | [diff] [blame] | 1817 | * top_cpuset.mems_allowed tracks node_states[N_MEMORY]; |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1818 | * it's read-only |
| 1819 | */ |
Miao Xie | 53feb29 | 2010-03-23 13:35:35 -0700 | [diff] [blame] | 1820 | if (cs == &top_cpuset) { |
| 1821 | retval = -EACCES; |
| 1822 | goto done; |
| 1823 | } |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1824 | |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1825 | /* |
| 1826 | * An empty mems_allowed is ok iff there are no tasks in the cpuset. |
| 1827 | * Since nodelist_parse() fails on an empty mask, we special case |
| 1828 | * that parsing. The validate_change() call ensures that cpusets |
| 1829 | * with tasks have memory. |
| 1830 | */ |
| 1831 | if (!*buf) { |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1832 | nodes_clear(trialcs->mems_allowed); |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1833 | } else { |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1834 | retval = nodelist_parse(buf, trialcs->mems_allowed); |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1835 | if (retval < 0) |
| 1836 | goto done; |
| 1837 | |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1838 | if (!nodes_subset(trialcs->mems_allowed, |
Li Zefan | 5d8ba82 | 2014-07-09 16:49:12 +0800 | [diff] [blame] | 1839 | top_cpuset.mems_allowed)) { |
| 1840 | retval = -EINVAL; |
Miao Xie | 53feb29 | 2010-03-23 13:35:35 -0700 | [diff] [blame] | 1841 | goto done; |
| 1842 | } |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1843 | } |
Li Zefan | 33ad801 | 2013-06-09 17:15:08 +0800 | [diff] [blame] | 1844 | |
| 1845 | if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) { |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1846 | retval = 0; /* Too easy - nothing to do */ |
| 1847 | goto done; |
| 1848 | } |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1849 | retval = validate_change(cs, trialcs); |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1850 | if (retval < 0) |
| 1851 | goto done; |
| 1852 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1853 | spin_lock_irq(&callback_lock); |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1854 | cs->mems_allowed = trialcs->mems_allowed; |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1855 | spin_unlock_irq(&callback_lock); |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1856 | |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1857 | /* use trialcs->mems_allowed as a temp variable */ |
Alban Crequy | 24ee3cf | 2015-08-06 16:21:05 +0200 | [diff] [blame] | 1858 | update_nodemasks_hier(cs, &trialcs->mems_allowed); |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1859 | done: |
| 1860 | return retval; |
| 1861 | } |
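From userspace this path is exercised by writing a nodelist to a cpuset's mems file. A hedged sketch, assuming a cgroup v1 cpuset hierarchy mounted at /sys/fs/cgroup/cpuset with a child group named mygroup (path and group name are assumptions, not taken from this file):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/fs/cgroup/cpuset/mygroup/cpuset.mems";
	const char *nodes = "0-1";	/* nodelist_parse() range syntax */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Lands in update_nodemask(): -EINVAL if not a subset of
	 * top_cpuset.mems_allowed, -EACCES on the root cpuset. */
	if (write(fd, nodes, strlen(nodes)) < 0)
		perror("write");
	close(fd);
	return 0;
}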
| 1862 | |
Yaowei Bai | 77ef80c | 2018-02-06 15:41:24 -0800 | [diff] [blame] | 1863 | bool current_cpuset_is_being_rebound(void) |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 1864 | { |
Yaowei Bai | 77ef80c | 2018-02-06 15:41:24 -0800 | [diff] [blame] | 1865 | bool ret; |
Gu Zheng | 391acf9 | 2014-06-25 09:57:18 +0800 | [diff] [blame] | 1866 | |
| 1867 | rcu_read_lock(); |
| 1868 | ret = task_cs(current) == cpuset_being_rebound; |
| 1869 | rcu_read_unlock(); |
| 1870 | |
| 1871 | return ret; |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 1872 | } |
| 1873 | |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 1874 | static int update_relax_domain_level(struct cpuset *cs, s64 val) |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 1875 | { |
Paul Menage | db7f47c | 2009-04-02 16:57:55 -0700 | [diff] [blame] | 1876 | #ifdef CONFIG_SMP |
Peter Zijlstra | 60495e7 | 2011-04-07 14:10:04 +0200 | [diff] [blame] | 1877 | if (val < -1 || val >= sched_domain_level_max) |
Li Zefan | 30e0e17 | 2008-05-13 10:27:17 +0800 | [diff] [blame] | 1878 | return -EINVAL; |
Paul Menage | db7f47c | 2009-04-02 16:57:55 -0700 | [diff] [blame] | 1879 | #endif |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 1880 | |
| 1881 | if (val != cs->relax_domain_level) { |
| 1882 | cs->relax_domain_level = val; |
Li Zefan | 300ed6c | 2009-01-07 18:08:44 -0800 | [diff] [blame] | 1883 | if (!cpumask_empty(cs->cpus_allowed) && |
| 1884 | is_sched_load_balance(cs)) |
Tejun Heo | 699140b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1885 | rebuild_sched_domains_locked(); |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 1886 | } |
| 1887 | |
| 1888 | return 0; |
| 1889 | } |
| 1890 | |
Tejun Heo | 72ec702 | 2013-08-08 20:11:26 -0400 | [diff] [blame] | 1891 | /** |
Miao Xie | 950592f | 2009-06-16 15:31:47 -0700 | [diff] [blame] | 1892 | * update_tasks_flags - update the spread flags of tasks in the cpuset. |
| 1893 | * @cs: the cpuset in which each task's spread flags need to be changed |
Miao Xie | 950592f | 2009-06-16 15:31:47 -0700 | [diff] [blame] | 1894 | * |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1895 | * Iterate through each task of @cs updating its spread flags. As this |
| 1896 | * function is called with cpuset_mutex held, cpuset membership stays |
| 1897 | * stable. |
Miao Xie | 950592f | 2009-06-16 15:31:47 -0700 | [diff] [blame] | 1898 | */ |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1899 | static void update_tasks_flags(struct cpuset *cs) |
Miao Xie | 950592f | 2009-06-16 15:31:47 -0700 | [diff] [blame] | 1900 | { |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1901 | struct css_task_iter it; |
| 1902 | struct task_struct *task; |
| 1903 | |
Tejun Heo | bc2fb7e | 2017-05-15 09:34:01 -0400 | [diff] [blame] | 1904 | css_task_iter_start(&cs->css, 0, &it); |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1905 | while ((task = css_task_iter_next(&it))) |
| 1906 | cpuset_update_task_spread_flag(cs, task); |
| 1907 | css_task_iter_end(&it); |
Miao Xie | 950592f | 2009-06-16 15:31:47 -0700 | [diff] [blame] | 1908 | } |
| 1909 | |
| 1910 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1911 | * update_flag - read a 0 or a 1 in a file and update associated flag |
Paul Menage | 7860836 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 1912 | * bit: the bit to update (see cpuset_flagbits_t) |
| 1913 | * cs: the cpuset to update |
| 1914 | * turning_on: whether the flag is being set or cleared |
Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1915 | * |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1916 | * Call with cpuset_mutex held. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1917 | */ |
| 1918 | |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 1919 | static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, |
| 1920 | int turning_on) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1921 | { |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1922 | struct cpuset *trialcs; |
Rakib Mullick | 40b6a76 | 2008-10-18 20:28:18 -0700 | [diff] [blame] | 1923 | int balance_flag_changed; |
Miao Xie | 950592f | 2009-06-16 15:31:47 -0700 | [diff] [blame] | 1924 | int spread_flag_changed; |
Miao Xie | 950592f | 2009-06-16 15:31:47 -0700 | [diff] [blame] | 1925 | int err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1926 | |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1927 | trialcs = alloc_trial_cpuset(cs); |
| 1928 | if (!trialcs) |
| 1929 | return -ENOMEM; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1930 | |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1931 | if (turning_on) |
| 1932 | set_bit(bit, &trialcs->flags); |
| 1933 | else |
| 1934 | clear_bit(bit, &trialcs->flags); |
| 1935 | |
| 1936 | err = validate_change(cs, trialcs); |
Dinakar Guniguntala | 85d7b94 | 2005-06-25 14:57:34 -0700 | [diff] [blame] | 1937 | if (err < 0) |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1938 | goto out; |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 1939 | |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 1940 | balance_flag_changed = (is_sched_load_balance(cs) != |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1941 | is_sched_load_balance(trialcs)); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 1942 | |
Miao Xie | 950592f | 2009-06-16 15:31:47 -0700 | [diff] [blame] | 1943 | spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) |
| 1944 | || (is_spread_page(cs) != is_spread_page(trialcs))); |
| 1945 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1946 | spin_lock_irq(&callback_lock); |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1947 | cs->flags = trialcs->flags; |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1948 | spin_unlock_irq(&callback_lock); |
Dinakar Guniguntala | 85d7b94 | 2005-06-25 14:57:34 -0700 | [diff] [blame] | 1949 | |
Li Zefan | 300ed6c | 2009-01-07 18:08:44 -0800 | [diff] [blame] | 1950 | if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) |
Tejun Heo | 699140b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1951 | rebuild_sched_domains_locked(); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 1952 | |
Miao Xie | 950592f | 2009-06-16 15:31:47 -0700 | [diff] [blame] | 1953 | if (spread_flag_changed) |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1954 | update_tasks_flags(cs); |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1955 | out: |
Waiman Long | bf92370 | 2018-11-08 10:08:37 -0500 | [diff] [blame] | 1956 | free_cpuset(trialcs); |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1957 | return err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1958 | } |
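/*
 * The cpus/mems/flags writers above all share the same trial-cpuset shape:
 * copy the cpuset, mutate the copy, run validate_change() on it, and only
 * then commit under callback_lock, so half-validated state never becomes
 * visible to readers.
 */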
| 1959 | |
Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1960 | /* |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1961 | * update_prstate - update partition_root_state |
Waiman Long | 0f3adb8 | 2021-07-20 10:18:26 -0400 | [diff] [blame] | 1962 | * cs: the cpuset to update |
| 1963 | * new_prs: new partition root state |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1964 | * |
| 1965 | * Call with cpuset_mutex held. |
| 1966 | */ |
Waiman Long | 0f3adb8 | 2021-07-20 10:18:26 -0400 | [diff] [blame] | 1967 | static int update_prstate(struct cpuset *cs, int new_prs) |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1968 | { |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 1969 | int err, old_prs = cs->partition_root_state; |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1970 | struct cpuset *parent = parent_cs(cs); |
Waiman Long | 0f3adb8 | 2021-07-20 10:18:26 -0400 | [diff] [blame] | 1971 | struct tmpmasks tmpmask; |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1972 | |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 1973 | if (old_prs == new_prs) |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1974 | return 0; |
| 1975 | |
| 1976 | /* |
Waiman Long | 3881b86 | 2018-11-08 10:08:39 -0500 | [diff] [blame] | 1977 | * Cannot force a partial or invalid partition root to a full |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1978 | * partition root. |
| 1979 | */ |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 1980 | if (new_prs && (old_prs == PRS_ERROR)) |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1981 | return -EINVAL; |
| 1982 | |
Waiman Long | 0f3adb8 | 2021-07-20 10:18:26 -0400 | [diff] [blame] | 1983 | if (alloc_cpumasks(NULL, &tmpmask)) |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1984 | return -ENOMEM; |
| 1985 | |
| 1986 | err = -EINVAL; |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 1987 | if (!old_prs) { |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 1988 | /* |
| 1989 | * Turning on partition root requires setting the |
| 1990 | * CS_CPU_EXCLUSIVE bit implicitly as well, and cpus_allowed |
| 1991 | * cannot be empty. |
| 1992 | */ |
| 1993 | if (cpumask_empty(cs->cpus_allowed)) |
| 1994 | goto out; |
| 1995 | |
| 1996 | err = update_flag(CS_CPU_EXCLUSIVE, cs, 1); |
| 1997 | if (err) |
| 1998 | goto out; |
| 1999 | |
| 2000 | err = update_parent_subparts_cpumask(cs, partcmd_enable, |
Waiman Long | 0f3adb8 | 2021-07-20 10:18:26 -0400 | [diff] [blame] | 2001 | NULL, &tmpmask); |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 2002 | if (err) { |
| 2003 | update_flag(CS_CPU_EXCLUSIVE, cs, 0); |
| 2004 | goto out; |
| 2005 | } |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 2006 | } else { |
Waiman Long | 3881b86 | 2018-11-08 10:08:39 -0500 | [diff] [blame] | 2007 | /* |
| 2008 | * Turning off partition root will clear the |
| 2009 | * CS_CPU_EXCLUSIVE bit. |
| 2010 | */ |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 2011 | if (old_prs == PRS_ERROR) { |
Waiman Long | 3881b86 | 2018-11-08 10:08:39 -0500 | [diff] [blame] | 2012 | update_flag(CS_CPU_EXCLUSIVE, cs, 0); |
| 2013 | err = 0; |
| 2014 | goto out; |
| 2015 | } |
| 2016 | |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 2017 | err = update_parent_subparts_cpumask(cs, partcmd_disable, |
Waiman Long | 0f3adb8 | 2021-07-20 10:18:26 -0400 | [diff] [blame] | 2018 | NULL, &tmpmask); |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 2019 | if (err) |
| 2020 | goto out; |
| 2021 | |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 2022 | /* Turning off CS_CPU_EXCLUSIVE will not return error */ |
| 2023 | update_flag(CS_CPU_EXCLUSIVE, cs, 0); |
| 2024 | } |
| 2025 | |
| 2026 | /* |
| 2027 | * Update cpumask of parent's tasks except when it is the top |
| 2028 | * cpuset, as some system daemons cannot be mapped to other CPUs. |
| 2029 | */ |
| 2030 | if (parent != &top_cpuset) |
| 2031 | update_tasks_cpumask(parent); |
| 2032 | |
Waiman Long | 4716909 | 2018-11-08 10:08:40 -0500 | [diff] [blame] | 2033 | if (parent->child_ecpus_count) |
Waiman Long | 0f3adb8 | 2021-07-20 10:18:26 -0400 | [diff] [blame] | 2034 | update_sibling_cpumasks(parent, cs, &tmpmask); |
Waiman Long | 4716909 | 2018-11-08 10:08:40 -0500 | [diff] [blame] | 2035 | |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 2036 | rebuild_sched_domains_locked(); |
| 2037 | out: |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 2038 | if (!err) { |
| 2039 | spin_lock_irq(&callback_lock); |
| 2040 | cs->partition_root_state = new_prs; |
| 2041 | spin_unlock_irq(&callback_lock); |
Waiman Long | e7cc988 | 2021-08-10 23:06:02 -0400 | [diff] [blame] | 2042 | notify_partition_change(cs, old_prs, new_prs); |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 2043 | } |
| 2044 | |
Waiman Long | 0f3adb8 | 2021-07-20 10:18:26 -0400 | [diff] [blame] | 2045 | free_cpumasks(NULL, &tmpmask); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2046 | return err; |
| 2047 | } |
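/*
 * Transitions handled by update_prstate(), as implemented above:
 * PRS_DISABLED -> PRS_ENABLED needs a non-empty cpus_allowed and implicitly
 * sets CS_CPU_EXCLUSIVE; PRS_ENABLED or PRS_ERROR -> PRS_DISABLED clears
 * CS_CPU_EXCLUSIVE; forcing PRS_ERROR -> PRS_ENABLED is rejected with
 * -EINVAL.
 */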
| 2048 | |
Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 2049 | /* |
Adrian Bunk | 80f7228 | 2006-06-30 18:27:16 +0200 | [diff] [blame] | 2050 | * Frequency meter - How fast is some event occurring? |
Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 2051 | * |
| 2052 | * These routines manage a digitally filtered, constant time based, |
| 2053 | * event frequency meter. There are four routines: |
| 2054 | * fmeter_init() - initialize a frequency meter. |
| 2055 | * fmeter_markevent() - called each time the event happens. |
| 2056 | * fmeter_getrate() - returns the recent rate of such events. |
| 2057 | * fmeter_update() - internal routine used to update fmeter. |
| 2058 | * |
| 2059 | * A common data structure is passed to each of these routines, |
| 2060 | * which is used to keep track of the state required to manage the |
| 2061 | * frequency meter and its digital filter. |
| 2062 | * |
| 2063 | * The filter works on the number of events marked per unit time. |
| 2064 | * The filter is single-pole low-pass recursive (IIR). The time unit |
| 2065 | * is 1 second. Arithmetic is done using 32-bit integers scaled to |
| 2066 | * simulate 3 decimal digits of precision (multiplied by 1000). |
| 2067 | * |
| 2068 | * With an FM_COEF of 933, and a time base of 1 second, the filter |
| 2069 | * has a half-life of 10 seconds, meaning that if the events quit |
| 2070 | * happening, then the rate returned from fmeter_getrate() |
| 2071 | * will be cut in half every 10 seconds, until it converges to zero. |
| 2072 | * |
| 2073 | * It is not worth doing a real infinitely recursive filter. If more |
| 2074 | * than FM_MAXTICKS ticks have elapsed since the last filter event, |
| 2075 | * just compute FM_MAXTICKS ticks worth, by which point the level |
| 2076 | * will be stable. |
| 2077 | * |
| 2078 | * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid |
| 2079 | * arithmetic overflow in the fmeter_update() routine. |
| 2080 | * |
| 2081 | * Given the simple 32 bit integer arithmetic used, this meter works |
| 2082 | * best for reporting rates between one per millisecond (msec) and |
| 2083 | * one per 32 (approx) seconds. At constant rates faster than one |
| 2084 | * per msec it maxes out at values just under 1,000,000. At constant |
| 2085 | * rates between one per msec, and one per second it will stabilize |
| 2086 | * to a value N*1000, where N is the rate of events per second. |
| 2087 | * At constant rates between one per second and one per 32 seconds, |
| 2088 | * it will be choppy, moving up on the seconds that have an event, |
| 2089 | * and then decaying until the next event. At rates slower than |
| 2090 | * about one in 32 seconds, it decays all the way back to zero between |
| 2091 | * each event. |
| 2092 | */ |
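/*
 * Worked check of the constants below: each elapsed second scales the
 * level by FM_COEF/FM_SCALE = 0.933, and 0.933^10 ~= 0.5, which is the
 * 10 second half-life described above.
 */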
| 2093 | |
| 2094 | #define FM_COEF 933 /* coefficient for half-life of 10 secs */ |
Arnd Bergmann | d2b43658 | 2015-11-25 16:16:55 +0100 | [diff] [blame] | 2095 | #define FM_MAXTICKS ((u32)99) /* useless computing more ticks than this */ |
Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 2096 | #define FM_MAXCNT 1000000 /* limit cnt to avoid overflow */ |
| 2097 | #define FM_SCALE 1000 /* faux fixed point scale */ |
| 2098 | |
| 2099 | /* Initialize a frequency meter */ |
| 2100 | static void fmeter_init(struct fmeter *fmp) |
| 2101 | { |
| 2102 | fmp->cnt = 0; |
| 2103 | fmp->val = 0; |
| 2104 | fmp->time = 0; |
| 2105 | spin_lock_init(&fmp->lock); |
| 2106 | } |
| 2107 | |
| 2108 | /* Internal meter update - process cnt events and update value */ |
| 2109 | static void fmeter_update(struct fmeter *fmp) |
| 2110 | { |
Arnd Bergmann | d2b43658 | 2015-11-25 16:16:55 +0100 | [diff] [blame] | 2111 | time64_t now; |
| 2112 | u32 ticks; |
| 2113 | |
| 2114 | now = ktime_get_seconds(); |
| 2115 | ticks = now - fmp->time; |
Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 2116 | |
| 2117 | if (ticks == 0) |
| 2118 | return; |
| 2119 | |
| 2120 | ticks = min(FM_MAXTICKS, ticks); |
| 2121 | while (ticks-- > 0) |
| 2122 | fmp->val = (FM_COEF * fmp->val) / FM_SCALE; |
| 2123 | fmp->time = now; |
| 2124 | |
| 2125 | fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE; |
| 2126 | fmp->cnt = 0; |
| 2127 | } |
| 2128 | |
| 2129 | /* Process any previous ticks, then bump cnt by one (times scale). */ |
| 2130 | static void fmeter_markevent(struct fmeter *fmp) |
| 2131 | { |
| 2132 | spin_lock(&fmp->lock); |
| 2133 | fmeter_update(fmp); |
| 2134 | fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE); |
| 2135 | spin_unlock(&fmp->lock); |
| 2136 | } |
| 2137 | |
| 2138 | /* Process any previous ticks, then return current value. */ |
| 2139 | static int fmeter_getrate(struct fmeter *fmp) |
| 2140 | { |
| 2141 | int val; |
| 2142 | |
| 2143 | spin_lock(&fmp->lock); |
| 2144 | fmeter_update(fmp); |
| 2145 | val = fmp->val; |
| 2146 | spin_unlock(&fmp->lock); |
| 2147 | return val; |
| 2148 | } |
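A runnable sanity check of those constants, as a standalone userspace sketch (toy_decay is a hypothetical name; no locking or timekeeping, elapsed ticks are passed in explicitly):

#include <stdio.h>

#define FM_COEF  933	/* same constants as above */
#define FM_SCALE 1000

/* The decay loop from fmeter_update(), lifted out for demonstration. */
static unsigned int toy_decay(unsigned int val, int ticks)
{
	while (ticks-- > 0)
		val = (FM_COEF * val) / FM_SCALE;
	return val;
}

int main(void)
{
	unsigned int val = 0;

	/* Half-life: a level of 1,000,000 drops to ~500,000 in 10 ticks. */
	printf("after 10s idle: %u\n", toy_decay(1000000, 10));

	/*
	 * One event per second: decay one tick, then add the scaled event
	 * contribution (FM_SCALE - FM_COEF) * cnt / FM_SCALE with
	 * cnt == FM_SCALE. Stabilizes near 1 * FM_SCALE (986 here, a little
	 * low because of integer truncation).
	 */
	for (int sec = 0; sec < 200; sec++)
		val = toy_decay(val, 1) + (FM_SCALE - FM_COEF);
	printf("steady state: %u\n", val);
	return 0;
}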
| 2149 | |
Tejun Heo | 57fce0a | 2014-02-13 06:58:41 -0500 | [diff] [blame] | 2150 | static struct cpuset *cpuset_attach_old_cs; |
| 2151 | |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2152 | /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */ |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 2153 | static int cpuset_can_attach(struct cgroup_taskset *tset) |
Ben Blum | f780bdb | 2011-05-26 16:25:19 -0700 | [diff] [blame] | 2154 | { |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 2155 | struct cgroup_subsys_state *css; |
| 2156 | struct cpuset *cs; |
Tejun Heo | bb9d97b | 2011-12-12 18:12:21 -0800 | [diff] [blame] | 2157 | struct task_struct *task; |
| 2158 | int ret; |
Ben Blum | f780bdb | 2011-05-26 16:25:19 -0700 | [diff] [blame] | 2159 | |
Tejun Heo | 57fce0a | 2014-02-13 06:58:41 -0500 | [diff] [blame] | 2160 | /* used later by cpuset_attach() */ |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 2161 | cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css)); |
| 2162 | cs = css_cs(css); |
Tejun Heo | 57fce0a | 2014-02-13 06:58:41 -0500 | [diff] [blame] | 2163 | |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2164 | percpu_down_write(&cpuset_rwsem); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2165 | |
Tejun Heo | aa6ec29 | 2014-07-09 10:08:08 -0400 | [diff] [blame] | 2166 | /* allow moving tasks into an empty cpuset if on default hierarchy */ |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2167 | ret = -ENOSPC; |
Waiman Long | b8d1b8e | 2017-08-17 15:33:10 -0400 | [diff] [blame] | 2168 | if (!is_in_v2_mode() && |
Li Zefan | 88fa523 | 2013-06-09 17:16:46 +0800 | [diff] [blame] | 2169 | (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))) |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2170 | goto out_unlock; |
Ben Blum | be367d0 | 2009-09-23 15:56:31 -0700 | [diff] [blame] | 2171 | |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 2172 | cgroup_taskset_for_each(task, css, tset) { |
Juri Lelli | 7f51412 | 2014-09-19 10:22:40 +0100 | [diff] [blame] | 2173 | ret = task_can_attach(task, cs->cpus_allowed); |
| 2174 | if (ret) |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2175 | goto out_unlock; |
| 2176 | ret = security_task_setscheduler(task); |
| 2177 | if (ret) |
| 2178 | goto out_unlock; |
Tejun Heo | bb9d97b | 2011-12-12 18:12:21 -0800 | [diff] [blame] | 2179 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2180 | |
Tejun Heo | 452477f | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2181 | /* |
| 2182 | * Mark that an attach is in progress. This makes validate_change() reject |
| 2183 | * changes which zero cpus/mems_allowed. |
| 2184 | */ |
| 2185 | cs->attach_in_progress++; |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2186 | ret = 0; |
| 2187 | out_unlock: |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2188 | percpu_up_write(&cpuset_rwsem); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2189 | return ret; |
Tejun Heo | 4e4c9a1 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2190 | } |
| 2191 | |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 2192 | static void cpuset_cancel_attach(struct cgroup_taskset *tset) |
Tejun Heo | 452477f | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2193 | { |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 2194 | struct cgroup_subsys_state *css; |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 2195 | |
| 2196 | cgroup_taskset_first(tset, &css); |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 2197 | |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2198 | percpu_down_write(&cpuset_rwsem); |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 2199 | css_cs(css)->attach_in_progress--; |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2200 | percpu_up_write(&cpuset_rwsem); |
Tejun Heo | 452477f | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2201 | } |
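/*
 * Summary of the attach handshake implemented above: cpuset_can_attach()
 * validates the destination and bumps ->attach_in_progress so that
 * validate_change() keeps rejecting changes that would empty cpus/mems
 * mid-migration; exactly one of cpuset_cancel_attach() or cpuset_attach()
 * later drops the count, and cpuset_attach() wakes cpuset_attach_wq once
 * it reaches zero.
 */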
| 2202 | |
Tejun Heo | 4e4c9a1 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2203 | /* |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2204 | * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach() |
Tejun Heo | 4e4c9a1 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2205 | * but we can't allocate it dynamically there. Define it globally and |
| 2206 | * allocate it from cpuset_init(). |
| 2207 | */ |
| 2208 | static cpumask_var_t cpus_attach; |
| 2209 | |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 2210 | static void cpuset_attach(struct cgroup_taskset *tset) |
Tejun Heo | 4e4c9a1 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2211 | { |
Li Zefan | 67bd2c5 | 2013-06-05 17:15:35 +0800 | [diff] [blame] | 2212 | /* static buf protected by cpuset_mutex */ |
Tejun Heo | 4e4c9a1 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2213 | static nodemask_t cpuset_attach_nodemask_to; |
Tejun Heo | 4e4c9a1 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2214 | struct task_struct *task; |
Tejun Heo | 4530edd | 2015-09-11 15:00:19 -0400 | [diff] [blame] | 2215 | struct task_struct *leader; |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 2216 | struct cgroup_subsys_state *css; |
| 2217 | struct cpuset *cs; |
Tejun Heo | 57fce0a | 2014-02-13 06:58:41 -0500 | [diff] [blame] | 2218 | struct cpuset *oldcs = cpuset_attach_old_cs; |
Tejun Heo | 4e4c9a1 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2219 | |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 2220 | cgroup_taskset_first(tset, &css); |
| 2221 | cs = css_cs(css); |
| 2222 | |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2223 | percpu_down_write(&cpuset_rwsem); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2224 | |
Tejun Heo | 94196f5 | 2011-12-12 18:12:22 -0800 | [diff] [blame] | 2225 | /* prepare for attach */ |
Ben Blum | f780bdb | 2011-05-26 16:25:19 -0700 | [diff] [blame] | 2226 | if (cs == &top_cpuset) |
| 2227 | cpumask_copy(cpus_attach, cpu_possible_mask); |
| 2228 | else |
Li Zefan | ae1c802 | 2014-07-09 16:48:32 +0800 | [diff] [blame] | 2229 | guarantee_online_cpus(cs, cpus_attach); |
Ben Blum | f780bdb | 2011-05-26 16:25:19 -0700 | [diff] [blame] | 2230 | |
Li Zefan | ae1c802 | 2014-07-09 16:48:32 +0800 | [diff] [blame] | 2231 | guarantee_online_mems(cs, &cpuset_attach_nodemask_to); |
Tejun Heo | 94196f5 | 2011-12-12 18:12:22 -0800 | [diff] [blame] | 2232 | |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 2233 | cgroup_taskset_for_each(task, css, tset) { |
Tejun Heo | bb9d97b | 2011-12-12 18:12:21 -0800 | [diff] [blame] | 2234 | /* |
| 2235 | * can_attach beforehand should guarantee that this doesn't |
| 2236 | * fail. TODO: have a better way to handle failure here |
| 2237 | */ |
| 2238 | WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach)); |
| 2239 | |
| 2240 | cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to); |
| 2241 | cpuset_update_task_spread_flag(cs, task); |
| 2242 | } |
David Quigley | 22fb52d | 2006-06-23 02:04:00 -0700 | [diff] [blame] | 2243 | |
Ben Blum | f780bdb | 2011-05-26 16:25:19 -0700 | [diff] [blame] | 2244 | /* |
Tejun Heo | 4530edd | 2015-09-11 15:00:19 -0400 | [diff] [blame] | 2245 | * Change mm for all threadgroup leaders. This is expensive and may |
| 2246 | * sleep, so it should be moved outside the migration path proper. |
Ben Blum | f780bdb | 2011-05-26 16:25:19 -0700 | [diff] [blame] | 2247 | */ |
Li Zefan | ae1c802 | 2014-07-09 16:48:32 +0800 | [diff] [blame] | 2248 | cpuset_attach_nodemask_to = cs->effective_mems; |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 2249 | cgroup_taskset_for_each_leader(leader, css, tset) { |
Tejun Heo | 3df9ca0 | 2015-09-11 15:00:18 -0400 | [diff] [blame] | 2250 | struct mm_struct *mm = get_task_mm(leader); |
Li Zefan | f047cec | 2013-06-13 15:11:44 +0800 | [diff] [blame] | 2251 | |
Tejun Heo | 3df9ca0 | 2015-09-11 15:00:18 -0400 | [diff] [blame] | 2252 | if (mm) { |
| 2253 | mpol_rebind_mm(mm, &cpuset_attach_nodemask_to); |
| 2254 | |
| 2255 | /* |
| 2256 | * old_mems_allowed is the same as mems_allowed |
| 2257 | * here, except if this task is being moved |
| 2258 | * automatically due to hotplug. In that case |
| 2259 | * @mems_allowed has been updated and is empty, so |
| 2260 | * @old_mems_allowed is the right nodemask that we |
| 2261 | * migrate the mm from. |
| 2262 | */ |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 2263 | if (is_memory_migrate(cs)) |
Tejun Heo | 3df9ca0 | 2015-09-11 15:00:18 -0400 | [diff] [blame] | 2264 | cpuset_migrate_mm(mm, &oldcs->old_mems_allowed, |
| 2265 | &cpuset_attach_nodemask_to); |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 2266 | else |
| 2267 | mmput(mm); |
Li Zefan | f047cec | 2013-06-13 15:11:44 +0800 | [diff] [blame] | 2268 | } |
Paul Jackson | 4225399 | 2006-01-08 01:01:59 -0800 | [diff] [blame] | 2269 | } |
Tejun Heo | 452477f | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2270 | |
Li Zefan | 33ad801 | 2013-06-09 17:15:08 +0800 | [diff] [blame] | 2271 | cs->old_mems_allowed = cpuset_attach_nodemask_to; |
Tejun Heo | 02bb586 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2272 | |
Tejun Heo | 452477f | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2273 | cs->attach_in_progress--; |
Li Zefan | e44193d | 2013-06-09 17:14:22 +0800 | [diff] [blame] | 2274 | if (!cs->attach_in_progress) |
| 2275 | wake_up(&cpuset_attach_wq); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2276 | |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2277 | percpu_up_write(&cpuset_rwsem); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2278 | } |
| 2279 | |
| 2280 | /* The various types of files and directories in a cpuset file system */ |
| 2281 | |
| 2282 | typedef enum { |
Paul Jackson | 45b07ef | 2006-01-08 01:00:56 -0800 | [diff] [blame] | 2283 | FILE_MEMORY_MIGRATE, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2284 | FILE_CPULIST, |
| 2285 | FILE_MEMLIST, |
Li Zefan | afd1a8b | 2014-07-09 16:49:25 +0800 | [diff] [blame] | 2286 | FILE_EFFECTIVE_CPULIST, |
| 2287 | FILE_EFFECTIVE_MEMLIST, |
Waiman Long | 5cf8114 | 2018-11-08 10:08:46 -0500 | [diff] [blame] | 2288 | FILE_SUBPARTS_CPULIST, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2289 | FILE_CPU_EXCLUSIVE, |
| 2290 | FILE_MEM_EXCLUSIVE, |
Paul Menage | 7860836 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2291 | FILE_MEM_HARDWALL, |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 2292 | FILE_SCHED_LOAD_BALANCE, |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 2293 | FILE_PARTITION_ROOT, |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 2294 | FILE_SCHED_RELAX_DOMAIN_LEVEL, |
Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 2295 | FILE_MEMORY_PRESSURE_ENABLED, |
| 2296 | FILE_MEMORY_PRESSURE, |
Paul Jackson | 825a46a | 2006-03-24 03:16:03 -0800 | [diff] [blame] | 2297 | FILE_SPREAD_PAGE, |
| 2298 | FILE_SPREAD_SLAB, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2299 | } cpuset_filetype_t; |
| 2300 | |
Tejun Heo | 182446d | 2013-08-08 20:11:24 -0400 | [diff] [blame] | 2301 | static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft, |
| 2302 | u64 val) |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 2303 | { |
Tejun Heo | 182446d | 2013-08-08 20:11:24 -0400 | [diff] [blame] | 2304 | struct cpuset *cs = css_cs(css); |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 2305 | cpuset_filetype_t type = cft->private; |
Li Zefan | a903f08 | 2013-08-13 10:05:59 +0800 | [diff] [blame] | 2306 | int retval = 0; |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 2307 | |
Sebastian Andrzej Siewior | c5c63b9 | 2021-08-03 16:16:07 +0200 | [diff] [blame] | 2308 | cpus_read_lock(); |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2309 | percpu_down_write(&cpuset_rwsem); |
Li Zefan | a903f08 | 2013-08-13 10:05:59 +0800 | [diff] [blame] | 2310 | if (!is_cpuset_online(cs)) { |
| 2311 | retval = -ENODEV; |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2312 | goto out_unlock; |
Li Zefan | a903f08 | 2013-08-13 10:05:59 +0800 | [diff] [blame] | 2313 | } |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 2314 | |
| 2315 | switch (type) { |
| 2316 | case FILE_CPU_EXCLUSIVE: |
| 2317 | retval = update_flag(CS_CPU_EXCLUSIVE, cs, val); |
| 2318 | break; |
| 2319 | case FILE_MEM_EXCLUSIVE: |
| 2320 | retval = update_flag(CS_MEM_EXCLUSIVE, cs, val); |
| 2321 | break; |
Paul Menage | 7860836 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2322 | case FILE_MEM_HARDWALL: |
| 2323 | retval = update_flag(CS_MEM_HARDWALL, cs, val); |
| 2324 | break; |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 2325 | case FILE_SCHED_LOAD_BALANCE: |
| 2326 | retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val); |
| 2327 | break; |
| 2328 | case FILE_MEMORY_MIGRATE: |
| 2329 | retval = update_flag(CS_MEMORY_MIGRATE, cs, val); |
| 2330 | break; |
| 2331 | case FILE_MEMORY_PRESSURE_ENABLED: |
| 2332 | cpuset_memory_pressure_enabled = !!val; |
| 2333 | break; |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 2334 | case FILE_SPREAD_PAGE: |
| 2335 | retval = update_flag(CS_SPREAD_PAGE, cs, val); |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 2336 | break; |
| 2337 | case FILE_SPREAD_SLAB: |
| 2338 | retval = update_flag(CS_SPREAD_SLAB, cs, val); |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 2339 | break; |
| 2340 | default: |
| 2341 | retval = -EINVAL; |
| 2342 | break; |
| 2343 | } |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2344 | out_unlock: |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2345 | percpu_up_write(&cpuset_rwsem); |
Sebastian Andrzej Siewior | c5c63b9 | 2021-08-03 16:16:07 +0200 | [diff] [blame] | 2346 | cpus_read_unlock(); |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 2347 | return retval; |
| 2348 | } |
| 2349 | |
Tejun Heo | 182446d | 2013-08-08 20:11:24 -0400 | [diff] [blame] | 2350 | static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft, |
| 2351 | s64 val) |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 2352 | { |
Tejun Heo | 182446d | 2013-08-08 20:11:24 -0400 | [diff] [blame] | 2353 | struct cpuset *cs = css_cs(css); |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 2354 | cpuset_filetype_t type = cft->private; |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2355 | int retval = -ENODEV; |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 2356 | |
Sebastian Andrzej Siewior | c5c63b9 | 2021-08-03 16:16:07 +0200 | [diff] [blame] | 2357 | cpus_read_lock(); |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2358 | percpu_down_write(&cpuset_rwsem); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2359 | if (!is_cpuset_online(cs)) |
| 2360 | goto out_unlock; |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 2361 | |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 2362 | switch (type) { |
| 2363 | case FILE_SCHED_RELAX_DOMAIN_LEVEL: |
| 2364 | retval = update_relax_domain_level(cs, val); |
| 2365 | break; |
| 2366 | default: |
| 2367 | retval = -EINVAL; |
| 2368 | break; |
| 2369 | } |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2370 | out_unlock: |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2371 | percpu_up_write(&cpuset_rwsem); |
Sebastian Andrzej Siewior | c5c63b9 | 2021-08-03 16:16:07 +0200 | [diff] [blame] | 2372 | cpus_read_unlock(); |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 2373 | return retval; |
| 2374 | } |
| 2375 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2376 | /* |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 2377 | * Common handling for a write to a "cpus" or "mems" file. |
| 2378 | */ |
Tejun Heo | 451af50 | 2014-05-13 12:16:21 -0400 | [diff] [blame] | 2379 | static ssize_t cpuset_write_resmask(struct kernfs_open_file *of, |
| 2380 | char *buf, size_t nbytes, loff_t off) |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 2381 | { |
Tejun Heo | 451af50 | 2014-05-13 12:16:21 -0400 | [diff] [blame] | 2382 | struct cpuset *cs = css_cs(of_css(of)); |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 2383 | struct cpuset *trialcs; |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2384 | int retval = -ENODEV; |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 2385 | |
Tejun Heo | 451af50 | 2014-05-13 12:16:21 -0400 | [diff] [blame] | 2386 | buf = strstrip(buf); |
| 2387 | |
Tejun Heo | 3a5a6d0 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2388 | /* |
| 2389 | * CPU or memory hotunplug may leave @cs w/o any execution |
| 2390 | * resources, in which case the hotplug code asynchronously updates |
| 2391 | * configuration and transfers all tasks to the nearest ancestor |
| 2392 | * which can execute. |
| 2393 | * |
| 2394 | * As writes to "cpus" or "mems" may restore @cs's execution |
| 2395 | * resources, wait for the previously scheduled operations before |
| 2396 | * proceeding, so that we don't end up keep removing tasks added |
| 2397 | * proceeding, so that we don't end up repeatedly removing tasks added |
Tejun Heo | 76bb5ab | 2014-06-30 15:47:32 -0400 | [diff] [blame] | 2398 | * |
| 2399 | * cpuset_hotplug_work calls back into cgroup core via |
| 2400 | * cgroup_transfer_tasks() and waiting for it from a cgroupfs |
| 2401 | * operation like this one can lead to a deadlock through kernfs |
| 2402 | * active_ref protection. Let's break the protection. Losing the |
| 2403 | * protection is okay as we check whether @cs is online after |
| 2404 | * grabbing cpuset_mutex anyway. This only happens on the legacy |
| 2405 | * hierarchies. |
Tejun Heo | 3a5a6d0 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2406 | */ |
Tejun Heo | 76bb5ab | 2014-06-30 15:47:32 -0400 | [diff] [blame] | 2407 | css_get(&cs->css); |
| 2408 | kernfs_break_active_protection(of->kn); |
Tejun Heo | 3a5a6d0 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2409 | flush_work(&cpuset_hotplug_work); |
| 2410 | |
Sebastian Andrzej Siewior | c5c63b9 | 2021-08-03 16:16:07 +0200 | [diff] [blame] | 2411 | cpus_read_lock(); |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2412 | percpu_down_write(&cpuset_rwsem); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2413 | if (!is_cpuset_online(cs)) |
| 2414 | goto out_unlock; |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 2415 | |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 2416 | trialcs = alloc_trial_cpuset(cs); |
Li Zefan | b75f38d | 2011-03-04 17:36:21 -0800 | [diff] [blame] | 2417 | if (!trialcs) { |
| 2418 | retval = -ENOMEM; |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2419 | goto out_unlock; |
Li Zefan | b75f38d | 2011-03-04 17:36:21 -0800 | [diff] [blame] | 2420 | } |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 2421 | |
Tejun Heo | 451af50 | 2014-05-13 12:16:21 -0400 | [diff] [blame] | 2422 | switch (of_cft(of)->private) { |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 2423 | case FILE_CPULIST: |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 2424 | retval = update_cpumask(cs, trialcs, buf); |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 2425 | break; |
| 2426 | case FILE_MEMLIST: |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 2427 | retval = update_nodemask(cs, trialcs, buf); |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 2428 | break; |
| 2429 | default: |
| 2430 | retval = -EINVAL; |
| 2431 | break; |
| 2432 | } |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 2433 | |
Waiman Long | bf92370 | 2018-11-08 10:08:37 -0500 | [diff] [blame] | 2434 | free_cpuset(trialcs); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2435 | out_unlock: |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2436 | percpu_up_write(&cpuset_rwsem); |
Sebastian Andrzej Siewior | c5c63b9 | 2021-08-03 16:16:07 +0200 | [diff] [blame] | 2437 | cpus_read_unlock(); |
Tejun Heo | 76bb5ab | 2014-06-30 15:47:32 -0400 | [diff] [blame] | 2438 | kernfs_unbreak_active_protection(of->kn); |
| 2439 | css_put(&cs->css); |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 2440 | flush_workqueue(cpuset_migrate_mm_wq); |
Tejun Heo | 451af50 | 2014-05-13 12:16:21 -0400 | [diff] [blame] | 2441 | return retval ?: nbytes; |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 2442 | } |
| 2443 | |
| 2444 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2445 | * These ascii lists should be read in a single call, by using a user |
| 2446 | * buffer large enough to hold the entire map. If read in smaller |
| 2447 | * chunks, there is no guarantee of atomicity. Since the display format |
| 2448 | * used, a list of ranges of sequential numbers, is variable length, |
| 2449 | * and since these maps can change value dynamically, one could read |
| 2450 | * gibberish by doing partial reads while a list is changing. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2451 | */ |
Tejun Heo | 2da8ca8 | 2013-12-05 12:28:04 -0500 | [diff] [blame] | 2452 | static int cpuset_common_seq_show(struct seq_file *sf, void *v) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2453 | { |
Tejun Heo | 2da8ca8 | 2013-12-05 12:28:04 -0500 | [diff] [blame] | 2454 | struct cpuset *cs = css_cs(seq_css(sf)); |
| 2455 | cpuset_filetype_t type = seq_cft(sf)->private; |
Tejun Heo | 51ffe41 | 2013-12-05 12:28:02 -0500 | [diff] [blame] | 2456 | int ret = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2457 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2458 | spin_lock_irq(&callback_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2459 | |
| 2460 | switch (type) { |
| 2461 | case FILE_CPULIST: |
Tejun Heo | e8e6d97 | 2015-02-13 14:37:23 -0800 | [diff] [blame] | 2462 | seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2463 | break; |
| 2464 | case FILE_MEMLIST: |
Tejun Heo | e8e6d97 | 2015-02-13 14:37:23 -0800 | [diff] [blame] | 2465 | seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2466 | break; |
Li Zefan | afd1a8b | 2014-07-09 16:49:25 +0800 | [diff] [blame] | 2467 | case FILE_EFFECTIVE_CPULIST: |
Tejun Heo | e8e6d97 | 2015-02-13 14:37:23 -0800 | [diff] [blame] | 2468 | seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus)); |
Li Zefan | afd1a8b | 2014-07-09 16:49:25 +0800 | [diff] [blame] | 2469 | break; |
| 2470 | case FILE_EFFECTIVE_MEMLIST: |
Tejun Heo | e8e6d97 | 2015-02-13 14:37:23 -0800 | [diff] [blame] | 2471 | seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems)); |
Li Zefan | afd1a8b | 2014-07-09 16:49:25 +0800 | [diff] [blame] | 2472 | break; |
Waiman Long | 5cf8114 | 2018-11-08 10:08:46 -0500 | [diff] [blame] | 2473 | case FILE_SUBPARTS_CPULIST: |
| 2474 | seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->subparts_cpus)); |
| 2475 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2476 | default: |
Tejun Heo | 51ffe41 | 2013-12-05 12:28:02 -0500 | [diff] [blame] | 2477 | ret = -EINVAL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2478 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2479 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2480 | spin_unlock_irq(&callback_lock); |
Tejun Heo | 51ffe41 | 2013-12-05 12:28:02 -0500 | [diff] [blame] | 2481 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2482 | } |
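The single-call convention documented above cpuset_common_seq_show() looks like this from userspace; a sketch assuming a cgroup v2 mount at /sys/fs/cgroup with a child group named mygroup (both assumptions):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];		/* large enough for the whole ranges list */
	int fd = open("/sys/fs/cgroup/mygroup/cpuset.cpus.effective", O_RDONLY);
	ssize_t n;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* One large read: partial reads of a changing list may mix old and
	 * new contents, the "gibberish" the comment above warns about. */
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
	}
	close(fd);
	return 0;
}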
| 2483 | |
Tejun Heo | 182446d | 2013-08-08 20:11:24 -0400 | [diff] [blame] | 2484 | static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft) |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 2485 | { |
Tejun Heo | 182446d | 2013-08-08 20:11:24 -0400 | [diff] [blame] | 2486 | struct cpuset *cs = css_cs(css); |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 2487 | cpuset_filetype_t type = cft->private; |
| 2488 | switch (type) { |
| 2489 | case FILE_CPU_EXCLUSIVE: |
| 2490 | return is_cpu_exclusive(cs); |
| 2491 | case FILE_MEM_EXCLUSIVE: |
| 2492 | return is_mem_exclusive(cs); |
Paul Menage | 7860836 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2493 | case FILE_MEM_HARDWALL: |
| 2494 | return is_mem_hardwall(cs); |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 2495 | case FILE_SCHED_LOAD_BALANCE: |
| 2496 | return is_sched_load_balance(cs); |
| 2497 | case FILE_MEMORY_MIGRATE: |
| 2498 | return is_memory_migrate(cs); |
| 2499 | case FILE_MEMORY_PRESSURE_ENABLED: |
| 2500 | return cpuset_memory_pressure_enabled; |
| 2501 | case FILE_MEMORY_PRESSURE: |
| 2502 | return fmeter_getrate(&cs->fmeter); |
| 2503 | case FILE_SPREAD_PAGE: |
| 2504 | return is_spread_page(cs); |
| 2505 | case FILE_SPREAD_SLAB: |
| 2506 | return is_spread_slab(cs); |
| 2507 | default: |
| 2508 | BUG(); |
| 2509 | } |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 2510 | |
| 2511 | /* Unreachable but makes gcc happy */ |
| 2512 | return 0; |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 2513 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2514 | |
Tejun Heo | 182446d | 2013-08-08 20:11:24 -0400 | [diff] [blame] | 2515 | static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft) |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 2516 | { |
Tejun Heo | 182446d | 2013-08-08 20:11:24 -0400 | [diff] [blame] | 2517 | struct cpuset *cs = css_cs(css); |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 2518 | cpuset_filetype_t type = cft->private; |
| 2519 | switch (type) { |
| 2520 | case FILE_SCHED_RELAX_DOMAIN_LEVEL: |
| 2521 | return cs->relax_domain_level; |
| 2522 | default: |
| 2523 | BUG(); |
| 2524 | } |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 2525 | |
Lu Jialin | d95af61 | 2021-04-08 16:03:46 +0800 | [diff] [blame] | 2526 | /* Unreachable but makes gcc happy */ |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 2527 | return 0; |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 2528 | } |
| 2529 | |
Waiman Long | bb5b553 | 2018-11-08 10:08:44 -0500 | [diff] [blame] | 2530 | static int sched_partition_show(struct seq_file *seq, void *v) |
| 2531 | { |
| 2532 | struct cpuset *cs = css_cs(seq_css(seq)); |
| 2533 | |
| 2534 | switch (cs->partition_root_state) { |
| 2535 | case PRS_ENABLED: |
| 2536 | seq_puts(seq, "root\n"); |
| 2537 | break; |
| 2538 | case PRS_DISABLED: |
| 2539 | seq_puts(seq, "member\n"); |
| 2540 | break; |
| 2541 | case PRS_ERROR: |
| 2542 | seq_puts(seq, "root invalid\n"); |
| 2543 | break; |
| 2544 | } |
| 2545 | return 0; |
| 2546 | } |
| 2547 | |
| 2548 | static ssize_t sched_partition_write(struct kernfs_open_file *of, char *buf, |
| 2549 | size_t nbytes, loff_t off) |
| 2550 | { |
| 2551 | struct cpuset *cs = css_cs(of_css(of)); |
| 2552 | int val; |
| 2553 | int retval = -ENODEV; |
| 2554 | |
| 2555 | buf = strstrip(buf); |
| 2556 | |
| 2557 | /* |
Tejun Heo | b1e3aeb | 2018-11-13 12:03:33 -0800 | [diff] [blame] | 2558 | * Convert "root" to ENABLED, and convert "member" to DISABLED. |
Waiman Long | bb5b553 | 2018-11-08 10:08:44 -0500 | [diff] [blame] | 2559 | */ |
Tejun Heo | b1e3aeb | 2018-11-13 12:03:33 -0800 | [diff] [blame] | 2560 | if (!strcmp(buf, "root")) |
Waiman Long | bb5b553 | 2018-11-08 10:08:44 -0500 | [diff] [blame] | 2561 | val = PRS_ENABLED; |
Tejun Heo | b1e3aeb | 2018-11-13 12:03:33 -0800 | [diff] [blame] | 2562 | else if (!strcmp(buf, "member")) |
Waiman Long | bb5b553 | 2018-11-08 10:08:44 -0500 | [diff] [blame] | 2563 | val = PRS_DISABLED; |
| 2564 | else |
| 2565 | return -EINVAL; |
| 2566 | |
| 2567 | css_get(&cs->css); |
Sebastian Andrzej Siewior | c5c63b9 | 2021-08-03 16:16:07 +0200 | [diff] [blame] | 2568 | cpus_read_lock(); |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2569 | percpu_down_write(&cpuset_rwsem); |
Waiman Long | bb5b553 | 2018-11-08 10:08:44 -0500 | [diff] [blame] | 2570 | if (!is_cpuset_online(cs)) |
| 2571 | goto out_unlock; |
| 2572 | |
| 2573 | retval = update_prstate(cs, val); |
| 2574 | out_unlock: |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2575 | percpu_up_write(&cpuset_rwsem); |
Sebastian Andrzej Siewior | c5c63b9 | 2021-08-03 16:16:07 +0200 | [diff] [blame] | 2576 | cpus_read_unlock(); |
Waiman Long | bb5b553 | 2018-11-08 10:08:44 -0500 | [diff] [blame] | 2577 | css_put(&cs->css); |
| 2578 | return retval ?: nbytes; |
| 2579 | } |
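Putting sched_partition_write() to use: a hedged userspace sketch that turns a v2 cpuset into a partition root. The cgroup path is an assumption; the accepted strings ("root", "member") come from the parser above.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/fs/cgroup/mygroup/cpuset.cpus.partition";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* update_prstate() additionally requires a non-empty cpuset.cpus
	 * and takes CS_CPU_EXCLUSIVE implicitly; strings other than
	 * "root"/"member" get -EINVAL. */
	if (write(fd, "root", strlen("root")) < 0)
		perror("write");
	close(fd);
	return 0;
}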
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2580 | |
| 2581 | /* |
| 2582 | * for the common functions, 'private' gives the type of file |
| 2583 | */ |
| 2584 | |
Waiman Long | 4ec22e9 | 2018-11-08 10:08:35 -0500 | [diff] [blame] | 2585 | static struct cftype legacy_files[] = { |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2586 | { |
| 2587 | .name = "cpus", |
Tejun Heo | 2da8ca8 | 2013-12-05 12:28:04 -0500 | [diff] [blame] | 2588 | .seq_show = cpuset_common_seq_show, |
Tejun Heo | 451af50 | 2014-05-13 12:16:21 -0400 | [diff] [blame] | 2589 | .write = cpuset_write_resmask, |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 2590 | .max_write_len = (100U + 6 * NR_CPUS), |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2591 | .private = FILE_CPULIST, |
| 2592 | }, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2593 | |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2594 | { |
| 2595 | .name = "mems", |
Tejun Heo | 2da8ca8 | 2013-12-05 12:28:04 -0500 | [diff] [blame] | 2596 | .seq_show = cpuset_common_seq_show, |
Tejun Heo | 451af50 | 2014-05-13 12:16:21 -0400 | [diff] [blame] | 2597 | .write = cpuset_write_resmask, |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 2598 | .max_write_len = (100U + 6 * MAX_NUMNODES), |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2599 | .private = FILE_MEMLIST, |
| 2600 | }, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2601 | |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2602 | { |
Li Zefan | afd1a8b | 2014-07-09 16:49:25 +0800 | [diff] [blame] | 2603 | .name = "effective_cpus", |
| 2604 | .seq_show = cpuset_common_seq_show, |
| 2605 | .private = FILE_EFFECTIVE_CPULIST, |
| 2606 | }, |
| 2607 | |
| 2608 | { |
| 2609 | .name = "effective_mems", |
| 2610 | .seq_show = cpuset_common_seq_show, |
| 2611 | .private = FILE_EFFECTIVE_MEMLIST, |
| 2612 | }, |
| 2613 | |
| 2614 | { |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2615 | .name = "cpu_exclusive", |
| 2616 | .read_u64 = cpuset_read_u64, |
| 2617 | .write_u64 = cpuset_write_u64, |
| 2618 | .private = FILE_CPU_EXCLUSIVE, |
| 2619 | }, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2620 | |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2621 | { |
| 2622 | .name = "mem_exclusive", |
| 2623 | .read_u64 = cpuset_read_u64, |
| 2624 | .write_u64 = cpuset_write_u64, |
| 2625 | .private = FILE_MEM_EXCLUSIVE, |
| 2626 | }, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2627 | |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2628 | { |
Paul Menage | 7860836 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2629 | .name = "mem_hardwall", |
| 2630 | .read_u64 = cpuset_read_u64, |
| 2631 | .write_u64 = cpuset_write_u64, |
| 2632 | .private = FILE_MEM_HARDWALL, |
| 2633 | }, |
| 2634 | |
| 2635 | { |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2636 | .name = "sched_load_balance", |
| 2637 | .read_u64 = cpuset_read_u64, |
| 2638 | .write_u64 = cpuset_write_u64, |
| 2639 | .private = FILE_SCHED_LOAD_BALANCE, |
| 2640 | }, |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 2641 | |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2642 | { |
| 2643 | .name = "sched_relax_domain_level", |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 2644 | .read_s64 = cpuset_read_s64, |
| 2645 | .write_s64 = cpuset_write_s64, |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2646 | .private = FILE_SCHED_RELAX_DOMAIN_LEVEL, |
| 2647 | }, |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 2648 | |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2649 | { |
| 2650 | .name = "memory_migrate", |
| 2651 | .read_u64 = cpuset_read_u64, |
| 2652 | .write_u64 = cpuset_write_u64, |
| 2653 | .private = FILE_MEMORY_MIGRATE, |
| 2654 | }, |
| 2655 | |
| 2656 | { |
| 2657 | .name = "memory_pressure", |
| 2658 | .read_u64 = cpuset_read_u64, |
Waiman Long | 1c08c22 | 2017-08-24 12:04:29 -0400 | [diff] [blame] | 2659 | .private = FILE_MEMORY_PRESSURE, |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2660 | }, |
| 2661 | |
| 2662 | { |
| 2663 | .name = "memory_spread_page", |
| 2664 | .read_u64 = cpuset_read_u64, |
| 2665 | .write_u64 = cpuset_write_u64, |
| 2666 | .private = FILE_SPREAD_PAGE, |
| 2667 | }, |
| 2668 | |
| 2669 | { |
| 2670 | .name = "memory_spread_slab", |
| 2671 | .read_u64 = cpuset_read_u64, |
| 2672 | .write_u64 = cpuset_write_u64, |
| 2673 | .private = FILE_SPREAD_SLAB, |
| 2674 | }, |
Tejun Heo | 4baf6e3 | 2012-04-01 12:09:55 -0700 | [diff] [blame] | 2675 | |
| 2676 | { |
| 2677 | .name = "memory_pressure_enabled", |
| 2678 | .flags = CFTYPE_ONLY_ON_ROOT, |
| 2679 | .read_u64 = cpuset_read_u64, |
| 2680 | .write_u64 = cpuset_write_u64, |
| 2681 | .private = FILE_MEMORY_PRESSURE_ENABLED, |
| 2682 | }, |
| 2683 | |
| 2684 | { } /* terminate */ |
Paul Jackson | 45b07ef | 2006-01-08 01:00:56 -0800 | [diff] [blame] | 2685 | }; |
| 2686 | |
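/*
 * Illustrative userspace sketch (not kernel code; path and cgroup name
 * are assumptions): on a v1 hierarchy the cftypes above are exposed with
 * the subsystem prefix, e.g. "cpuset.mem_hardwall", and the
 * read_u64/write_u64 files take "0" or "1".
 */
#if 0	/* example only, not compiled into the kernel */
#include <fcntl.h>
#include <unistd.h>

int demo_set_mem_hardwall(void)
{
	int fd = open("/sys/fs/cgroup/cpuset/demo/cpuset.mem_hardwall",
		      O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, "1", 1) < 0) {	/* FILE_MEM_HARDWALL via cpuset_write_u64() */
		close(fd);
		return -1;
	}
	return close(fd);
}
#endif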
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2687 | /* |
Waiman Long | 4ec22e9 | 2018-11-08 10:08:35 -0500 | [diff] [blame] | 2688 | * This is currently a minimal set for the default hierarchy. It can be |
| 2689 | * expanded later on by migrating more features and control files from v1. |
| 2690 | */ |
| 2691 | static struct cftype dfl_files[] = { |
| 2692 | { |
| 2693 | .name = "cpus", |
| 2694 | .seq_show = cpuset_common_seq_show, |
| 2695 | .write = cpuset_write_resmask, |
| 2696 | .max_write_len = (100U + 6 * NR_CPUS), |
| 2697 | .private = FILE_CPULIST, |
| 2698 | .flags = CFTYPE_NOT_ON_ROOT, |
| 2699 | }, |
| 2700 | |
| 2701 | { |
| 2702 | .name = "mems", |
| 2703 | .seq_show = cpuset_common_seq_show, |
| 2704 | .write = cpuset_write_resmask, |
| 2705 | .max_write_len = (100U + 6 * MAX_NUMNODES), |
| 2706 | .private = FILE_MEMLIST, |
| 2707 | .flags = CFTYPE_NOT_ON_ROOT, |
| 2708 | }, |
| 2709 | |
| 2710 | { |
| 2711 | .name = "cpus.effective", |
| 2712 | .seq_show = cpuset_common_seq_show, |
| 2713 | .private = FILE_EFFECTIVE_CPULIST, |
Waiman Long | 4ec22e9 | 2018-11-08 10:08:35 -0500 | [diff] [blame] | 2714 | }, |
| 2715 | |
| 2716 | { |
| 2717 | .name = "mems.effective", |
| 2718 | .seq_show = cpuset_common_seq_show, |
| 2719 | .private = FILE_EFFECTIVE_MEMLIST, |
Waiman Long | 4ec22e9 | 2018-11-08 10:08:35 -0500 | [diff] [blame] | 2720 | }, |
| 2721 | |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 2722 | { |
Tejun Heo | b1e3aeb | 2018-11-13 12:03:33 -0800 | [diff] [blame] | 2723 | .name = "cpus.partition", |
Waiman Long | bb5b553 | 2018-11-08 10:08:44 -0500 | [diff] [blame] | 2724 | .seq_show = sched_partition_show, |
| 2725 | .write = sched_partition_write, |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 2726 | .private = FILE_PARTITION_ROOT, |
| 2727 | .flags = CFTYPE_NOT_ON_ROOT, |
Waiman Long | e7cc988 | 2021-08-10 23:06:02 -0400 | [diff] [blame] | 2728 | .file_offset = offsetof(struct cpuset, partition_file), |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 2729 | }, |
| 2730 | |
Waiman Long | 5cf8114 | 2018-11-08 10:08:46 -0500 | [diff] [blame] | 2731 | { |
| 2732 | .name = "cpus.subpartitions", |
| 2733 | .seq_show = cpuset_common_seq_show, |
| 2734 | .private = FILE_SUBPARTS_CPULIST, |
| 2735 | .flags = CFTYPE_DEBUG, |
| 2736 | }, |
| 2737 | |
Waiman Long | 4ec22e9 | 2018-11-08 10:08:35 -0500 | [diff] [blame] | 2738 | { } /* terminate */ |
| 2739 | }; |
| 2740 | |
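/*
 * Illustrative userspace sketch (not kernel code; paths, cgroup name and
 * CPU/node values are assumptions): on the default hierarchy the files
 * above appear as "cpuset.cpus", "cpuset.mems", "cpuset.cpus.partition",
 * etc., and writing "root" to cpuset.cpus.partition makes the cpuset a
 * partition root.
 */
#if 0	/* example only, not compiled into the kernel */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int demo_write(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int demo_make_partition(void)
{
	demo_write("/sys/fs/cgroup/demo/cpuset.cpus", "2-3");
	demo_write("/sys/fs/cgroup/demo/cpuset.mems", "0");
	return demo_write("/sys/fs/cgroup/demo/cpuset.cpus.partition", "root");
}
#endif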
| 2741 | |
| 2742 | /* |
Tejun Heo | 92fb974 | 2012-11-19 08:13:38 -0800 | [diff] [blame] | 2743 | * cpuset_css_alloc - allocate a cpuset css |
Li Zefan | c9e5fe6 | 2013-06-14 11:18:27 +0800 | [diff] [blame] | 2744 | * @parent_css: css of the parent control group that the new cpuset will be part of |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2745 | */ |
| 2746 | |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 2747 | static struct cgroup_subsys_state * |
| 2748 | cpuset_css_alloc(struct cgroup_subsys_state *parent_css) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2749 | { |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2750 | struct cpuset *cs; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2751 | |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 2752 | if (!parent_css) |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 2753 | return &top_cpuset.css; |
Tejun Heo | 033fa1c | 2012-11-19 08:13:39 -0800 | [diff] [blame] | 2754 | |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2755 | cs = kzalloc(sizeof(*cs), GFP_KERNEL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2756 | if (!cs) |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 2757 | return ERR_PTR(-ENOMEM); |
Waiman Long | bf92370 | 2018-11-08 10:08:37 -0500 | [diff] [blame] | 2758 | |
| 2759 | if (alloc_cpumasks(cs, NULL)) { |
| 2760 | kfree(cs); |
| 2761 | return ERR_PTR(-ENOMEM); |
| 2762 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2763 | |
Waiman Long | ee9707e | 2021-08-11 15:57:07 -0400 | [diff] [blame^] | 2764 | __set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); |
Mike Travis | f9a86fc | 2008-04-04 18:11:07 -0700 | [diff] [blame] | 2765 | nodes_clear(cs->mems_allowed); |
Li Zefan | e2b9a3d | 2014-07-09 16:47:03 +0800 | [diff] [blame] | 2766 | nodes_clear(cs->effective_mems); |
Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 2767 | fmeter_init(&cs->fmeter); |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 2768 | cs->relax_domain_level = -1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2769 | |
Waiman Long | ee9707e | 2021-08-11 15:57:07 -0400 | [diff] [blame^] | 2770 | /* Set CS_MEMORY_MIGRATE for default hierarchy */ |
| 2771 | if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) |
| 2772 | __set_bit(CS_MEMORY_MIGRATE, &cs->flags); |
| 2773 | |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2774 | return &cs->css; |
| 2775 | } |
| 2776 | |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 2777 | static int cpuset_css_online(struct cgroup_subsys_state *css) |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2778 | { |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 2779 | struct cpuset *cs = css_cs(css); |
Tejun Heo | c431069 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2780 | struct cpuset *parent = parent_cs(cs); |
Tejun Heo | ae8086c | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2781 | struct cpuset *tmp_cs; |
Tejun Heo | 492eb21 | 2013-08-08 20:11:25 -0400 | [diff] [blame] | 2782 | struct cgroup_subsys_state *pos_css; |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2783 | |
| 2784 | if (!parent) |
| 2785 | return 0; |
| 2786 | |
Sebastian Andrzej Siewior | c5c63b9 | 2021-08-03 16:16:07 +0200 | [diff] [blame] | 2787 | cpus_read_lock(); |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2788 | percpu_down_write(&cpuset_rwsem); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2789 | |
Tejun Heo | efeb77b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2790 | set_bit(CS_ONLINE, &cs->flags); |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2791 | if (is_spread_page(parent)) |
| 2792 | set_bit(CS_SPREAD_PAGE, &cs->flags); |
| 2793 | if (is_spread_slab(parent)) |
| 2794 | set_bit(CS_SPREAD_SLAB, &cs->flags); |
| 2795 | |
Mel Gorman | 664eedd | 2014-06-04 16:10:08 -0700 | [diff] [blame] | 2796 | cpuset_inc(); |
Tejun Heo | 033fa1c | 2012-11-19 08:13:39 -0800 | [diff] [blame] | 2797 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2798 | spin_lock_irq(&callback_lock); |
Waiman Long | b8d1b8e | 2017-08-17 15:33:10 -0400 | [diff] [blame] | 2799 | if (is_in_v2_mode()) { |
Li Zefan | e2b9a3d | 2014-07-09 16:47:03 +0800 | [diff] [blame] | 2800 | cpumask_copy(cs->effective_cpus, parent->effective_cpus); |
| 2801 | cs->effective_mems = parent->effective_mems; |
Waiman Long | 4716909 | 2018-11-08 10:08:40 -0500 | [diff] [blame] | 2802 | cs->use_parent_ecpus = true; |
| 2803 | parent->child_ecpus_count++; |
Li Zefan | e2b9a3d | 2014-07-09 16:47:03 +0800 | [diff] [blame] | 2804 | } |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2805 | spin_unlock_irq(&callback_lock); |
Li Zefan | e2b9a3d | 2014-07-09 16:47:03 +0800 | [diff] [blame] | 2806 | |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 2807 | if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags)) |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2808 | goto out_unlock; |
Tejun Heo | 033fa1c | 2012-11-19 08:13:39 -0800 | [diff] [blame] | 2809 | |
| 2810 | /* |
| 2811 | * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is |
| 2812 | * set. This flag handling is implemented in cgroup core for |
| 2813 | * historical reasons - the flag may be specified during mount. |
| 2814 | * |
| 2815 | * Currently, if any sibling cpusets have exclusive cpus or mem, we |
| 2816 | * refuse to clone the configuration - thereby refusing to admit |
| 2817 | * the task, and as a result failing the sys_unshare() or |
| 2818 | * clone() which initiated it. If this becomes a problem for some |
| 2819 | * users who wish to allow that scenario, then this could be |
| 2820 | * changed to grant parent->cpus_allowed-sibling_cpus_exclusive |
| 2821 | * (and likewise for mems) to the new cgroup. |
| 2822 | */ |
Tejun Heo | ae8086c | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2823 | rcu_read_lock(); |
Tejun Heo | 492eb21 | 2013-08-08 20:11:25 -0400 | [diff] [blame] | 2824 | cpuset_for_each_child(tmp_cs, pos_css, parent) { |
Tejun Heo | ae8086c | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2825 | if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) { |
| 2826 | rcu_read_unlock(); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2827 | goto out_unlock; |
Tejun Heo | ae8086c | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2828 | } |
Tejun Heo | 033fa1c | 2012-11-19 08:13:39 -0800 | [diff] [blame] | 2829 | } |
Tejun Heo | ae8086c | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2830 | rcu_read_unlock(); |
Tejun Heo | 033fa1c | 2012-11-19 08:13:39 -0800 | [diff] [blame] | 2831 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2832 | spin_lock_irq(&callback_lock); |
Tejun Heo | 033fa1c | 2012-11-19 08:13:39 -0800 | [diff] [blame] | 2833 | cs->mems_allowed = parent->mems_allowed; |
Zefan Li | 790317e | 2015-02-13 11:19:49 +0800 | [diff] [blame] | 2834 | cs->effective_mems = parent->mems_allowed; |
Tejun Heo | 033fa1c | 2012-11-19 08:13:39 -0800 | [diff] [blame] | 2835 | cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); |
Zefan Li | 790317e | 2015-02-13 11:19:49 +0800 | [diff] [blame] | 2836 | cpumask_copy(cs->effective_cpus, parent->cpus_allowed); |
Dan Carpenter | cea7446 | 2014-10-27 16:27:02 +0300 | [diff] [blame] | 2837 | spin_unlock_irq(&callback_lock); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2838 | out_unlock: |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2839 | percpu_up_write(&cpuset_rwsem); |
Sebastian Andrzej Siewior | c5c63b9 | 2021-08-03 16:16:07 +0200 | [diff] [blame] | 2840 | cpus_read_unlock(); |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2841 | return 0; |
| 2842 | } |
| 2843 | |
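/*
 * Illustrative userspace sketch (assumptions only) of the clone_children
 * behaviour handled in cpuset_css_online() above: with the v1 flag file
 * cgroup.clone_children set to 1, a mkdir()ed child inherits the parent's
 * cpus and mems instead of starting empty (unless a sibling cpuset is
 * cpu_exclusive or mem_exclusive).
 */
#if 0	/* example only, not compiled into the kernel */
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

int demo_clone_child(void)
{
	int fd = open("/sys/fs/cgroup/cpuset/parent/cgroup.clone_children",
		      O_WRONLY);

	if (fd < 0)
		return -1;
	write(fd, "1", 1);
	close(fd);
	/* child starts with parent's cpus_allowed/mems_allowed cloned */
	return mkdir("/sys/fs/cgroup/cpuset/parent/child", 0755);
}
#endif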
Zhao Hongjiang | 0b9e696 | 2013-07-27 11:56:53 +0800 | [diff] [blame] | 2844 | /* |
| 2845 | * If the cpuset being removed has its flag 'sched_load_balance' |
| 2846 | * enabled, then simulate turning sched_load_balance off, which |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 2847 | * will call rebuild_sched_domains_locked(). That is not needed |
| 2848 | * in the default hierarchy, where only changes in partition state |
| 2849 | * will cause repartitioning. |
| 2850 | * |
| 2851 | * If the cpuset has the 'sched.partition' flag enabled, simulate |
| 2852 | * turning 'sched.partition' off. |
Zhao Hongjiang | 0b9e696 | 2013-07-27 11:56:53 +0800 | [diff] [blame] | 2853 | */ |
| 2854 | |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 2855 | static void cpuset_css_offline(struct cgroup_subsys_state *css) |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2856 | { |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 2857 | struct cpuset *cs = css_cs(css); |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2858 | |
Sebastian Andrzej Siewior | c5c63b9 | 2021-08-03 16:16:07 +0200 | [diff] [blame] | 2859 | cpus_read_lock(); |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2860 | percpu_down_write(&cpuset_rwsem); |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2861 | |
Waiman Long | ee8dde0 | 2018-11-08 10:08:38 -0500 | [diff] [blame] | 2862 | if (is_partition_root(cs)) |
| 2863 | update_prstate(cs, 0); |
| 2864 | |
| 2865 | if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && |
| 2866 | is_sched_load_balance(cs)) |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2867 | update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); |
| 2868 | |
Waiman Long | 4716909 | 2018-11-08 10:08:40 -0500 | [diff] [blame] | 2869 | if (cs->use_parent_ecpus) { |
| 2870 | struct cpuset *parent = parent_cs(cs); |
| 2871 | |
| 2872 | cs->use_parent_ecpus = false; |
| 2873 | parent->child_ecpus_count--; |
| 2874 | } |
| 2875 | |
Mel Gorman | 664eedd | 2014-06-04 16:10:08 -0700 | [diff] [blame] | 2876 | cpuset_dec(); |
Tejun Heo | efeb77b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2877 | clear_bit(CS_ONLINE, &cs->flags); |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2878 | |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2879 | percpu_up_write(&cpuset_rwsem); |
Sebastian Andrzej Siewior | c5c63b9 | 2021-08-03 16:16:07 +0200 | [diff] [blame] | 2880 | cpus_read_unlock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2881 | } |
| 2882 | |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 2883 | static void cpuset_css_free(struct cgroup_subsys_state *css) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2884 | { |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 2885 | struct cpuset *cs = css_cs(css); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2886 | |
Waiman Long | bf92370 | 2018-11-08 10:08:37 -0500 | [diff] [blame] | 2887 | free_cpuset(cs); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2888 | } |
| 2889 | |
Li Zefan | 39bd0d1 | 2014-07-09 16:48:01 +0800 | [diff] [blame] | 2890 | static void cpuset_bind(struct cgroup_subsys_state *root_css) |
| 2891 | { |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2892 | percpu_down_write(&cpuset_rwsem); |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2893 | spin_lock_irq(&callback_lock); |
Li Zefan | 39bd0d1 | 2014-07-09 16:48:01 +0800 | [diff] [blame] | 2894 | |
Waiman Long | b8d1b8e | 2017-08-17 15:33:10 -0400 | [diff] [blame] | 2895 | if (is_in_v2_mode()) { |
Li Zefan | 39bd0d1 | 2014-07-09 16:48:01 +0800 | [diff] [blame] | 2896 | cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask); |
| 2897 | top_cpuset.mems_allowed = node_possible_map; |
| 2898 | } else { |
| 2899 | cpumask_copy(top_cpuset.cpus_allowed, |
| 2900 | top_cpuset.effective_cpus); |
| 2901 | top_cpuset.mems_allowed = top_cpuset.effective_mems; |
| 2902 | } |
| 2903 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2904 | spin_unlock_irq(&callback_lock); |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2905 | percpu_up_write(&cpuset_rwsem); |
Li Zefan | 39bd0d1 | 2014-07-09 16:48:01 +0800 | [diff] [blame] | 2906 | } |
| 2907 | |
Zefan Li | 06f4e94 | 2016-08-09 11:25:01 +0800 | [diff] [blame] | 2908 | /* |
| 2909 | * Make sure the new task conforms to the current state of its parent, |
| 2910 | * which could have been changed by cpuset just after it inherits the |
| 2911 | * state from the parent and before it sits on the cgroup's task list. |
| 2912 | */ |
Wei Yongjun | 8a15b81 | 2016-09-16 13:02:37 +0000 | [diff] [blame] | 2913 | static void cpuset_fork(struct task_struct *task) |
Zefan Li | 06f4e94 | 2016-08-09 11:25:01 +0800 | [diff] [blame] | 2914 | { |
| 2915 | if (task_css_is_root(task, cpuset_cgrp_id)) |
| 2916 | return; |
| 2917 | |
Sebastian Andrzej Siewior | 3bd3706 | 2019-04-23 16:26:36 +0200 | [diff] [blame] | 2918 | set_cpus_allowed_ptr(task, current->cpus_ptr); |
Zefan Li | 06f4e94 | 2016-08-09 11:25:01 +0800 | [diff] [blame] | 2919 | task->mems_allowed = current->mems_allowed; |
| 2920 | } |
| 2921 | |
Tejun Heo | 073219e | 2014-02-08 10:36:58 -0500 | [diff] [blame] | 2922 | struct cgroup_subsys cpuset_cgrp_subsys = { |
Li Zefan | 39bd0d1 | 2014-07-09 16:48:01 +0800 | [diff] [blame] | 2923 | .css_alloc = cpuset_css_alloc, |
| 2924 | .css_online = cpuset_css_online, |
| 2925 | .css_offline = cpuset_css_offline, |
| 2926 | .css_free = cpuset_css_free, |
| 2927 | .can_attach = cpuset_can_attach, |
| 2928 | .cancel_attach = cpuset_cancel_attach, |
| 2929 | .attach = cpuset_attach, |
Tejun Heo | 5cf1cac | 2016-04-21 19:06:48 -0400 | [diff] [blame] | 2930 | .post_attach = cpuset_post_attach, |
Li Zefan | 39bd0d1 | 2014-07-09 16:48:01 +0800 | [diff] [blame] | 2931 | .bind = cpuset_bind, |
Zefan Li | 06f4e94 | 2016-08-09 11:25:01 +0800 | [diff] [blame] | 2932 | .fork = cpuset_fork, |
Waiman Long | 4ec22e9 | 2018-11-08 10:08:35 -0500 | [diff] [blame] | 2933 | .legacy_cftypes = legacy_files, |
| 2934 | .dfl_cftypes = dfl_files, |
Tejun Heo | b38e42e | 2016-02-23 10:00:50 -0500 | [diff] [blame] | 2935 | .early_init = true, |
Waiman Long | 4ec22e9 | 2018-11-08 10:08:35 -0500 | [diff] [blame] | 2936 | .threaded = true, |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 2937 | }; |
| 2938 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2939 | /** |
| 2940 | * cpuset_init - initialize cpusets at system boot |
| 2941 | * |
Al Viro | d5f68d3 | 2019-05-13 12:33:22 -0400 | [diff] [blame] | 2942 | * Description: Initialize top_cpuset |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2943 | **/ |
| 2944 | |
| 2945 | int __init cpuset_init(void) |
| 2946 | { |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 2947 | BUG_ON(percpu_init_rwsem(&cpuset_rwsem)); |
| 2948 | |
Nicholas Mc Guire | 75fa8e5 | 2017-03-26 18:24:06 +0200 | [diff] [blame] | 2949 | BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL)); |
| 2950 | BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL)); |
Waiman Long | bf92370 | 2018-11-08 10:08:37 -0500 | [diff] [blame] | 2951 | BUG_ON(!zalloc_cpumask_var(&top_cpuset.subparts_cpus, GFP_KERNEL)); |
Miao Xie | 58568d2 | 2009-06-16 15:31:49 -0700 | [diff] [blame] | 2952 | |
Li Zefan | 300ed6c | 2009-01-07 18:08:44 -0800 | [diff] [blame] | 2953 | cpumask_setall(top_cpuset.cpus_allowed); |
Mike Travis | f9a86fc | 2008-04-04 18:11:07 -0700 | [diff] [blame] | 2954 | nodes_setall(top_cpuset.mems_allowed); |
Li Zefan | e2b9a3d | 2014-07-09 16:47:03 +0800 | [diff] [blame] | 2955 | cpumask_setall(top_cpuset.effective_cpus); |
| 2956 | nodes_setall(top_cpuset.effective_mems); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2957 | |
Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 2958 | fmeter_init(&top_cpuset.fmeter); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 2959 | set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags); |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 2960 | top_cpuset.relax_domain_level = -1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2961 | |
Nicholas Mc Guire | 75fa8e5 | 2017-03-26 18:24:06 +0200 | [diff] [blame] | 2962 | BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL)); |
Li Zefan | 2341d1b | 2009-01-07 18:08:42 -0800 | [diff] [blame] | 2963 | |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 2964 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2965 | } |
| 2966 | |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 2967 | /* |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 2968 | * If CPU and/or memory hotplug handlers, below, unplug any CPUs |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 2969 | * or memory nodes, we need to walk over the cpuset hierarchy, |
| 2970 | * removing that CPU or node from all cpusets. If this removes the |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 2971 | * last CPU or node from a cpuset, then move the tasks in the empty |
| 2972 | * cpuset to its next-highest non-empty parent. |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 2973 | */ |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 2974 | static void remove_tasks_in_empty_cpuset(struct cpuset *cs) |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 2975 | { |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 2976 | struct cpuset *parent; |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 2977 | |
Paul Jackson | c8d9c90 | 2008-02-07 00:14:46 -0800 | [diff] [blame] | 2978 | /* |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 2979 | * Find its next-highest non-empty parent (the top cpuset |
| 2980 | * has online cpus, so it can't be empty). |
| 2981 | */ |
Tejun Heo | c431069 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2982 | parent = parent_cs(cs); |
Li Zefan | 300ed6c | 2009-01-07 18:08:44 -0800 | [diff] [blame] | 2983 | while (cpumask_empty(parent->cpus_allowed) || |
Paul Jackson | b450129 | 2008-02-07 00:14:47 -0800 | [diff] [blame] | 2984 | nodes_empty(parent->mems_allowed)) |
Tejun Heo | c431069 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2985 | parent = parent_cs(parent); |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 2986 | |
Tejun Heo | 8cc9934 | 2013-04-07 09:29:50 -0700 | [diff] [blame] | 2987 | if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) { |
Fabian Frederick | 12d3089 | 2014-05-05 19:49:00 +0200 | [diff] [blame] | 2988 | pr_err("cpuset: failed to transfer tasks out of empty cpuset "); |
Tejun Heo | e61734c | 2014-02-12 09:29:50 -0500 | [diff] [blame] | 2989 | pr_cont_cgroup_name(cs->css.cgroup); |
| 2990 | pr_cont("\n"); |
Tejun Heo | 8cc9934 | 2013-04-07 09:29:50 -0700 | [diff] [blame] | 2991 | } |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 2992 | } |
| 2993 | |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 2994 | static void |
| 2995 | hotplug_update_tasks_legacy(struct cpuset *cs, |
| 2996 | struct cpumask *new_cpus, nodemask_t *new_mems, |
| 2997 | bool cpus_updated, bool mems_updated) |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 2998 | { |
| 2999 | bool is_empty; |
| 3000 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 3001 | spin_lock_irq(&callback_lock); |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 3002 | cpumask_copy(cs->cpus_allowed, new_cpus); |
| 3003 | cpumask_copy(cs->effective_cpus, new_cpus); |
| 3004 | cs->mems_allowed = *new_mems; |
| 3005 | cs->effective_mems = *new_mems; |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 3006 | spin_unlock_irq(&callback_lock); |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 3007 | |
| 3008 | /* |
| 3009 | * Don't call update_tasks_cpumask() if the cpuset becomes empty, |
| 3010 | * as the tasks will be migrated to an ancestor. |
| 3011 | */ |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 3012 | if (cpus_updated && !cpumask_empty(cs->cpus_allowed)) |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 3013 | update_tasks_cpumask(cs); |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 3014 | if (mems_updated && !nodes_empty(cs->mems_allowed)) |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 3015 | update_tasks_nodemask(cs); |
| 3016 | |
| 3017 | is_empty = cpumask_empty(cs->cpus_allowed) || |
| 3018 | nodes_empty(cs->mems_allowed); |
| 3019 | |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 3020 | percpu_up_write(&cpuset_rwsem); |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 3021 | |
| 3022 | /* |
| 3023 | * Move tasks to the nearest ancestor with execution resources. |
| 3024 | * This is a full cgroup operation which will also call back into |
| 3025 | * cpuset. Should be done outside any lock. |
| 3026 | */ |
| 3027 | if (is_empty) |
| 3028 | remove_tasks_in_empty_cpuset(cs); |
| 3029 | |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 3030 | percpu_down_write(&cpuset_rwsem); |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 3031 | } |
| 3032 | |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 3033 | static void |
| 3034 | hotplug_update_tasks(struct cpuset *cs, |
| 3035 | struct cpumask *new_cpus, nodemask_t *new_mems, |
| 3036 | bool cpus_updated, bool mems_updated) |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 3037 | { |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 3038 | if (cpumask_empty(new_cpus)) |
| 3039 | cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus); |
| 3040 | if (nodes_empty(*new_mems)) |
| 3041 | *new_mems = parent_cs(cs)->effective_mems; |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 3042 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 3043 | spin_lock_irq(&callback_lock); |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 3044 | cpumask_copy(cs->effective_cpus, new_cpus); |
| 3045 | cs->effective_mems = *new_mems; |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 3046 | spin_unlock_irq(&callback_lock); |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 3047 | |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 3048 | if (cpus_updated) |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 3049 | update_tasks_cpumask(cs); |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 3050 | if (mems_updated) |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 3051 | update_tasks_nodemask(cs); |
| 3052 | } |
| 3053 | |
Waiman Long | 4b842da | 2018-11-08 10:08:41 -0500 | [diff] [blame] | 3054 | static bool force_rebuild; |
| 3055 | |
| 3056 | void cpuset_force_rebuild(void) |
| 3057 | { |
| 3058 | force_rebuild = true; |
| 3059 | } |
| 3060 | |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3061 | /** |
Li Zefan | 388afd8 | 2013-06-09 17:14:47 +0800 | [diff] [blame] | 3062 | * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3063 | * @cs: cpuset in interest |
Waiman Long | 4b842da | 2018-11-08 10:08:41 -0500 | [diff] [blame] | 3064 | * @tmp: the tmpmasks structure pointer |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 3065 | * |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3066 | * Compare @cs's cpu and mem masks against top_cpuset and if some have gone |
| 3067 | * offline, update @cs accordingly. If @cs ends up with no CPU or memory, |
| 3068 | * all its tasks are moved to the nearest ancestor with both resources. |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 3069 | */ |
Waiman Long | 4b842da | 2018-11-08 10:08:41 -0500 | [diff] [blame] | 3070 | static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp) |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 3071 | { |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 3072 | static cpumask_t new_cpus; |
| 3073 | static nodemask_t new_mems; |
| 3074 | bool cpus_updated; |
| 3075 | bool mems_updated; |
Waiman Long | 4b842da | 2018-11-08 10:08:41 -0500 | [diff] [blame] | 3076 | struct cpuset *parent; |
Li Zefan | e44193d | 2013-06-09 17:14:22 +0800 | [diff] [blame] | 3077 | retry: |
| 3078 | wait_event(cpuset_attach_wq, cs->attach_in_progress == 0); |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 3079 | |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 3080 | percpu_down_write(&cpuset_rwsem); |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 3081 | |
Li Zefan | e44193d | 2013-06-09 17:14:22 +0800 | [diff] [blame] | 3082 | /* |
| 3083 | * We have raced with task attaching. We wait until attaching |
| 3084 | * is finished, so we won't attach a task to an empty cpuset. |
| 3085 | */ |
| 3086 | if (cs->attach_in_progress) { |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 3087 | percpu_up_write(&cpuset_rwsem); |
Li Zefan | e44193d | 2013-06-09 17:14:22 +0800 | [diff] [blame] | 3088 | goto retry; |
| 3089 | } |
| 3090 | |
Waiman Long | 0f3adb8 | 2021-07-20 10:18:26 -0400 | [diff] [blame] | 3091 | parent = parent_cs(cs); |
Waiman Long | 4b842da | 2018-11-08 10:08:41 -0500 | [diff] [blame] | 3092 | compute_effective_cpumask(&new_cpus, cs, parent); |
| 3093 | nodes_and(new_mems, cs->mems_allowed, parent->effective_mems); |
Paul Jackson | b450129 | 2008-02-07 00:14:47 -0800 | [diff] [blame] | 3094 | |
Waiman Long | 4b842da | 2018-11-08 10:08:41 -0500 | [diff] [blame] | 3095 | if (cs->nr_subparts_cpus) |
| 3096 | /* |
| 3097 | * Make sure that CPUs allocated to child partitions |
| 3098 | * do not show up in effective_cpus. |
| 3099 | */ |
| 3100 | cpumask_andnot(&new_cpus, &new_cpus, cs->subparts_cpus); |
| 3101 | |
| 3102 | if (!tmp || !cs->partition_root_state) |
| 3103 | goto update_tasks; |
| 3104 | |
| 3105 | /* |
| 3106 | * In the unlikely event that a partition root has empty |
| 3107 | * effective_cpus or its parent becomes erroneous, we have to |
| 3108 | * transition it to the erroneous state. |
| 3109 | */ |
| 3110 | if (is_partition_root(cs) && (cpumask_empty(&new_cpus) || |
| 3111 | (parent->partition_root_state == PRS_ERROR))) { |
| 3112 | if (cs->nr_subparts_cpus) { |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 3113 | spin_lock_irq(&callback_lock); |
Waiman Long | 4b842da | 2018-11-08 10:08:41 -0500 | [diff] [blame] | 3114 | cs->nr_subparts_cpus = 0; |
| 3115 | cpumask_clear(cs->subparts_cpus); |
Waiman Long | 6ba34d3 | 2021-07-20 10:18:28 -0400 | [diff] [blame] | 3116 | spin_unlock_irq(&callback_lock); |
Waiman Long | 4b842da | 2018-11-08 10:08:41 -0500 | [diff] [blame] | 3117 | compute_effective_cpumask(&new_cpus, cs, parent); |
| 3118 | } |
| 3119 | |
| 3120 | /* |
| 3121 | * If the effective_cpus is empty because the child |
| 3122 | * partitions take away all the CPUs, we can keep |
| 3123 | * the current partition and let the child partitions |
| 3124 | * fight for available CPUs. |
| 3125 | */ |
| 3126 | if ((parent->partition_root_state == PRS_ERROR) || |
| 3127 | cpumask_empty(&new_cpus)) { |
Waiman Long | e7cc988 | 2021-08-10 23:06:02 -0400 | [diff] [blame] | 3128 | int old_prs; |
| 3129 | |
Waiman Long | 4b842da | 2018-11-08 10:08:41 -0500 | [diff] [blame] | 3130 | update_parent_subparts_cpumask(cs, partcmd_disable, |
| 3131 | NULL, tmp); |
Waiman Long | e7cc988 | 2021-08-10 23:06:02 -0400 | [diff] [blame] | 3132 | old_prs = cs->partition_root_state; |
| 3133 | if (old_prs != PRS_ERROR) { |
| 3134 | spin_lock_irq(&callback_lock); |
| 3135 | cs->partition_root_state = PRS_ERROR; |
| 3136 | spin_unlock_irq(&callback_lock); |
| 3137 | notify_partition_change(cs, old_prs, PRS_ERROR); |
| 3138 | } |
Waiman Long | 4b842da | 2018-11-08 10:08:41 -0500 | [diff] [blame] | 3139 | } |
| 3140 | cpuset_force_rebuild(); |
| 3141 | } |
| 3142 | |
| 3143 | /* |
| 3144 | * On the other hand, an erroneous partition root may be transitioned |
| 3145 | * back to a regular one, or a partition root with no CPU allocated |
| 3146 | * from the parent may change to erroneous. |
| 3147 | */ |
| 3148 | if (is_partition_root(parent) && |
| 3149 | ((cs->partition_root_state == PRS_ERROR) || |
| 3150 | !cpumask_intersects(&new_cpus, parent->subparts_cpus)) && |
| 3151 | update_parent_subparts_cpumask(cs, partcmd_update, NULL, tmp)) |
| 3152 | cpuset_force_rebuild(); |
| 3153 | |
| 3154 | update_tasks: |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 3155 | cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus); |
| 3156 | mems_updated = !nodes_equal(new_mems, cs->effective_mems); |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 3157 | |
Waiman Long | b8d1b8e | 2017-08-17 15:33:10 -0400 | [diff] [blame] | 3158 | if (is_in_v2_mode()) |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 3159 | hotplug_update_tasks(cs, &new_cpus, &new_mems, |
| 3160 | cpus_updated, mems_updated); |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 3161 | else |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 3162 | hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems, |
| 3163 | cpus_updated, mems_updated); |
Tejun Heo | 8d03394 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3164 | |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 3165 | percpu_up_write(&cpuset_rwsem); |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3166 | } |
Paul Jackson | b450129 | 2008-02-07 00:14:47 -0800 | [diff] [blame] | 3167 | |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3168 | /** |
Tejun Heo | 2b729fe | 2020-04-03 11:32:13 -0400 | [diff] [blame] | 3169 | * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3170 | * |
| 3171 | * This function is called after either CPU or memory configuration has |
| 3172 | * changed and updates cpuset accordingly. The top_cpuset is always |
| 3173 | * synchronized to cpu_active_mask and N_MEMORY, which is necessary in |
| 3174 | * order to make cpusets transparent (of no effect) on systems that are |
| 3175 | * actively using CPU hotplug but making no active use of cpusets. |
| 3176 | * |
| 3177 | * Non-root cpusets are only affected by offlining. If any CPUs or memory |
Li Zefan | 388afd8 | 2013-06-09 17:14:47 +0800 | [diff] [blame] | 3178 | * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on |
| 3179 | * all descendants. |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3180 | * |
| 3181 | * Note that CPU offlining during suspend is ignored. We don't modify |
| 3182 | * cpusets across suspend/resume cycles at all. |
| 3183 | */ |
Tejun Heo | 2b729fe | 2020-04-03 11:32:13 -0400 | [diff] [blame] | 3184 | static void cpuset_hotplug_workfn(struct work_struct *work) |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3185 | { |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 3186 | static cpumask_t new_cpus; |
| 3187 | static nodemask_t new_mems; |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3188 | bool cpus_updated, mems_updated; |
Waiman Long | b8d1b8e | 2017-08-17 15:33:10 -0400 | [diff] [blame] | 3189 | bool on_dfl = is_in_v2_mode(); |
Waiman Long | 4b842da | 2018-11-08 10:08:41 -0500 | [diff] [blame] | 3190 | struct tmpmasks tmp, *ptmp = NULL; |
| 3191 | |
| 3192 | if (on_dfl && !alloc_cpumasks(NULL, &tmp)) |
| 3193 | ptmp = &tmp; |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 3194 | |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 3195 | percpu_down_write(&cpuset_rwsem); |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 3196 | |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3197 | /* fetch the available cpus/mems and find out which changed how */ |
| 3198 | cpumask_copy(&new_cpus, cpu_active_mask); |
| 3199 | new_mems = node_states[N_MEMORY]; |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 3200 | |
Waiman Long | 4b842da | 2018-11-08 10:08:41 -0500 | [diff] [blame] | 3201 | /* |
| 3202 | * If subparts_cpus is populated, it is likely that the check below |
| 3203 | * will produce a false positive on cpus_updated when the cpu list |
| 3204 | * isn't changed. It is extra work, but it is better to be safe. |
| 3205 | */ |
Li Zefan | 7e88291 | 2014-07-09 16:48:42 +0800 | [diff] [blame] | 3206 | cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus); |
| 3207 | mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems); |
Srivatsa S. Bhat | 7ddf96b | 2012-05-24 19:46:55 +0530 | [diff] [blame] | 3208 | |
Waiman Long | 15d428e | 2021-07-20 10:18:27 -0400 | [diff] [blame] | 3209 | /* |
| 3210 | * In the rare case that hotplug removes all the cpus in subparts_cpus, |
| 3211 | * we assume that cpus are updated. |
| 3212 | */ |
| 3213 | if (!cpus_updated && top_cpuset.nr_subparts_cpus) |
| 3214 | cpus_updated = true; |
| 3215 | |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3216 | /* synchronize cpus_allowed to cpu_active_mask */ |
| 3217 | if (cpus_updated) { |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 3218 | spin_lock_irq(&callback_lock); |
Li Zefan | 7e88291 | 2014-07-09 16:48:42 +0800 | [diff] [blame] | 3219 | if (!on_dfl) |
| 3220 | cpumask_copy(top_cpuset.cpus_allowed, &new_cpus); |
Waiman Long | 4b842da | 2018-11-08 10:08:41 -0500 | [diff] [blame] | 3221 | /* |
| 3222 | * Make sure that CPUs allocated to child partitions |
| 3223 | * do not show up in effective_cpus. If no CPU is left, |
| 3224 | * we clear the subparts_cpus & let the child partitions |
| 3225 | * fight for the CPUs again. |
| 3226 | */ |
| 3227 | if (top_cpuset.nr_subparts_cpus) { |
| 3228 | if (cpumask_subset(&new_cpus, |
| 3229 | top_cpuset.subparts_cpus)) { |
| 3230 | top_cpuset.nr_subparts_cpus = 0; |
| 3231 | cpumask_clear(top_cpuset.subparts_cpus); |
| 3232 | } else { |
| 3233 | cpumask_andnot(&new_cpus, &new_cpus, |
| 3234 | top_cpuset.subparts_cpus); |
| 3235 | } |
| 3236 | } |
Li Zefan | 1344ab9 | 2014-07-09 16:47:16 +0800 | [diff] [blame] | 3237 | cpumask_copy(top_cpuset.effective_cpus, &new_cpus); |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 3238 | spin_unlock_irq(&callback_lock); |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3239 | /* we don't mess with cpumasks of tasks in top_cpuset */ |
| 3240 | } |
Srivatsa S. Bhat | 7ddf96b | 2012-05-24 19:46:55 +0530 | [diff] [blame] | 3241 | |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3242 | /* synchronize mems_allowed to N_MEMORY */ |
| 3243 | if (mems_updated) { |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 3244 | spin_lock_irq(&callback_lock); |
Li Zefan | 7e88291 | 2014-07-09 16:48:42 +0800 | [diff] [blame] | 3245 | if (!on_dfl) |
| 3246 | top_cpuset.mems_allowed = new_mems; |
Li Zefan | 1344ab9 | 2014-07-09 16:47:16 +0800 | [diff] [blame] | 3247 | top_cpuset.effective_mems = new_mems; |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 3248 | spin_unlock_irq(&callback_lock); |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 3249 | update_tasks_nodemask(&top_cpuset); |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3250 | } |
| 3251 | |
Juri Lelli | 1243dc5 | 2019-07-19 15:59:57 +0200 | [diff] [blame] | 3252 | percpu_up_write(&cpuset_rwsem); |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3253 | |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 3254 | /* if cpus or mems changed, we need to propagate to descendants */ |
| 3255 | if (cpus_updated || mems_updated) { |
Lai Jiangshan | 38d7bee | 2012-12-12 13:51:24 -0800 | [diff] [blame] | 3256 | struct cpuset *cs; |
Tejun Heo | 492eb21 | 2013-08-08 20:11:25 -0400 | [diff] [blame] | 3257 | struct cgroup_subsys_state *pos_css; |
Paul Jackson | b450129 | 2008-02-07 00:14:47 -0800 | [diff] [blame] | 3258 | |
Paul Jackson | b450129 | 2008-02-07 00:14:47 -0800 | [diff] [blame] | 3259 | rcu_read_lock(); |
Tejun Heo | 492eb21 | 2013-08-08 20:11:25 -0400 | [diff] [blame] | 3260 | cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { |
Tejun Heo | ec903c0 | 2014-05-13 12:11:01 -0400 | [diff] [blame] | 3261 | if (cs == &top_cpuset || !css_tryget_online(&cs->css)) |
Li Zefan | 388afd8 | 2013-06-09 17:14:47 +0800 | [diff] [blame] | 3262 | continue; |
| 3263 | rcu_read_unlock(); |
| 3264 | |
Waiman Long | 4b842da | 2018-11-08 10:08:41 -0500 | [diff] [blame] | 3265 | cpuset_hotplug_update_tasks(cs, ptmp); |
Li Zefan | 388afd8 | 2013-06-09 17:14:47 +0800 | [diff] [blame] | 3266 | |
| 3267 | rcu_read_lock(); |
| 3268 | css_put(&cs->css); |
| 3269 | } |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3270 | rcu_read_unlock(); |
| 3271 | } |
Tejun Heo | 8d03394 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3272 | |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3273 | /* rebuild sched domains if cpus_allowed has changed */ |
Peter Zijlstra | 50e7663 | 2017-09-07 11:13:38 +0200 | [diff] [blame] | 3274 | if (cpus_updated || force_rebuild) { |
| 3275 | force_rebuild = false; |
Tejun Heo | 2b729fe | 2020-04-03 11:32:13 -0400 | [diff] [blame] | 3276 | rebuild_sched_domains(); |
Peter Zijlstra | 50e7663 | 2017-09-07 11:13:38 +0200 | [diff] [blame] | 3277 | } |
Waiman Long | 4b842da | 2018-11-08 10:08:41 -0500 | [diff] [blame] | 3278 | |
| 3279 | free_cpumasks(NULL, ptmp); |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 3280 | } |
| 3281 | |
Prateek Sood | a49e462 | 2020-01-24 20:37:29 +0530 | [diff] [blame] | 3282 | void cpuset_update_active_cpus(void) |
Peter Zijlstra | 50e7663 | 2017-09-07 11:13:38 +0200 | [diff] [blame] | 3283 | { |
Tejun Heo | 2b729fe | 2020-04-03 11:32:13 -0400 | [diff] [blame] | 3284 | /* |
| 3285 | * We're inside the cpu hotplug critical region, which usually nests |
| 3286 | * inside cgroup synchronization. Bounce actual hotplug processing |
| 3287 | * to a work item to avoid reverse locking order. |
| 3288 | */ |
| 3289 | schedule_work(&cpuset_hotplug_work); |
| 3290 | } |
| 3291 | |
| 3292 | void cpuset_wait_for_hotplug(void) |
| 3293 | { |
| 3294 | flush_work(&cpuset_hotplug_work); |
Peter Zijlstra | 50e7663 | 2017-09-07 11:13:38 +0200 | [diff] [blame] | 3295 | } |
| 3296 | |
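/*
 * The deferral pattern above in isolation (a sketch with hypothetical
 * names, not kernel code): a hotplug callback cannot take cgroup locks
 * directly, so it bounces the heavy work to a work item and exposes a
 * flush helper for callers that must synchronize with its completion.
 */
#if 0	/* example only, not compiled into the kernel */
static void demo_hotplug_workfn(struct work_struct *work)
{
	/* take cgroup/cpuset locks and do the real processing here */
}
static DECLARE_WORK(demo_hotplug_work, demo_hotplug_workfn);

static void demo_hotplug_event(void)
{
	schedule_work(&demo_hotplug_work);	/* async: avoids lock inversion */
}

static void demo_hotplug_wait(void)
{
	flush_work(&demo_hotplug_work);		/* wait for the bounced work */
}
#endif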
Paul Jackson | 38837fc | 2006-09-29 02:01:16 -0700 | [diff] [blame] | 3297 | /* |
Lai Jiangshan | 38d7bee | 2012-12-12 13:51:24 -0800 | [diff] [blame] | 3298 | * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY]. |
| 3299 | * Call this routine anytime after node_states[N_MEMORY] changes. |
Srivatsa S. Bhat | a1cd2b1 | 2012-05-24 19:47:03 +0530 | [diff] [blame] | 3300 | * See cpuset_update_active_cpus() for CPU hotplug handling. |
Paul Jackson | 38837fc | 2006-09-29 02:01:16 -0700 | [diff] [blame] | 3301 | */ |
Miao Xie | f481891 | 2008-11-19 15:36:30 -0800 | [diff] [blame] | 3302 | static int cpuset_track_online_nodes(struct notifier_block *self, |
| 3303 | unsigned long action, void *arg) |
Paul Jackson | 38837fc | 2006-09-29 02:01:16 -0700 | [diff] [blame] | 3304 | { |
Tejun Heo | 3a5a6d0 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 3305 | schedule_work(&cpuset_hotplug_work); |
Miao Xie | f481891 | 2008-11-19 15:36:30 -0800 | [diff] [blame] | 3306 | return NOTIFY_OK; |
Paul Jackson | 38837fc | 2006-09-29 02:01:16 -0700 | [diff] [blame] | 3307 | } |
Andrew Morton | d8f10cb | 2013-04-29 15:08:08 -0700 | [diff] [blame] | 3308 | |
| 3309 | static struct notifier_block cpuset_track_online_nodes_nb = { |
| 3310 | .notifier_call = cpuset_track_online_nodes, |
| 3311 | .priority = 10, /* ??! */ |
| 3312 | }; |
Paul Jackson | 38837fc | 2006-09-29 02:01:16 -0700 | [diff] [blame] | 3313 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3314 | /** |
| 3315 | * cpuset_init_smp - initialize cpus_allowed |
| 3316 | * |
| 3317 | * Description: Finish top cpuset after cpu, node maps are initialized |
Andrew Morton | d8f10cb | 2013-04-29 15:08:08 -0700 | [diff] [blame] | 3318 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3319 | void __init cpuset_init_smp(void) |
| 3320 | { |
Peter Zijlstra | 6ad4c18 | 2009-11-25 13:31:39 +0100 | [diff] [blame] | 3321 | cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask); |
Lai Jiangshan | 38d7bee | 2012-12-12 13:51:24 -0800 | [diff] [blame] | 3322 | top_cpuset.mems_allowed = node_states[N_MEMORY]; |
Li Zefan | 33ad801 | 2013-06-09 17:15:08 +0800 | [diff] [blame] | 3323 | top_cpuset.old_mems_allowed = top_cpuset.mems_allowed; |
Paul Jackson | 4c4d50f | 2006-08-27 01:23:51 -0700 | [diff] [blame] | 3324 | |
Li Zefan | e2b9a3d | 2014-07-09 16:47:03 +0800 | [diff] [blame] | 3325 | cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask); |
| 3326 | top_cpuset.effective_mems = node_states[N_MEMORY]; |
| 3327 | |
Andrew Morton | d8f10cb | 2013-04-29 15:08:08 -0700 | [diff] [blame] | 3328 | register_hotmemory_notifier(&cpuset_track_online_nodes_nb); |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 3329 | |
| 3330 | cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0); |
| 3331 | BUG_ON(!cpuset_migrate_mm_wq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3332 | } |
| 3333 | |
| 3334 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3335 | * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset. |
| 3336 | * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed. |
Li Zefan | 6af866a | 2009-01-07 18:08:45 -0800 | [diff] [blame] | 3337 | * @pmask: pointer to struct cpumask variable to receive cpus_allowed set. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3338 | * |
Li Zefan | 300ed6c | 2009-01-07 18:08:44 -0800 | [diff] [blame] | 3339 | * Description: Returns the cpumask_var_t cpus_allowed of the cpuset |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3340 | * attached to the specified @tsk. Guaranteed to return some non-empty |
Rusty Russell | 5f054e3 | 2012-03-29 15:38:31 +1030 | [diff] [blame] | 3341 | * subset of cpu_online_mask, even if this means going outside the |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3342 | * task's cpuset. |
| 3343 | **/ |
| 3344 | |
Li Zefan | 6af866a | 2009-01-07 18:08:45 -0800 | [diff] [blame] | 3345 | void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3346 | { |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 3347 | unsigned long flags; |
| 3348 | |
| 3349 | spin_lock_irqsave(&callback_lock, flags); |
Li Zefan | b8dadcb | 2014-03-03 17:28:36 -0500 | [diff] [blame] | 3350 | rcu_read_lock(); |
Li Zefan | ae1c802 | 2014-07-09 16:48:32 +0800 | [diff] [blame] | 3351 | guarantee_online_cpus(task_cs(tsk), pmask); |
Li Zefan | b8dadcb | 2014-03-03 17:28:36 -0500 | [diff] [blame] | 3352 | rcu_read_unlock(); |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 3353 | spin_unlock_irqrestore(&callback_lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3354 | } |
| 3355 | |
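/*
 * Hypothetical in-kernel usage sketch (demo_pick_cpu() is not a real
 * function): since cpuset_cpus_allowed() guarantees a non-empty mask,
 * a caller can safely pick the first permitted CPU.
 */
#if 0	/* example only */
static int demo_pick_cpu(struct task_struct *tsk)
{
	cpumask_var_t mask;
	int cpu;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpuset_cpus_allowed(tsk, mask);
	cpu = cpumask_first(mask);	/* mask is non-empty by contract */
	free_cpumask_var(mask);
	return cpu;
}
#endif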
Joel Savitz | d477f8c | 2019-06-12 11:50:48 -0400 | [diff] [blame] | 3356 | /** |
| 3357 | * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe. |
| 3358 | * @tsk: pointer to task_struct with which the scheduler is struggling |
| 3359 | * |
| 3360 | * Description: In the case that the scheduler cannot find an allowed cpu in |
| 3361 | * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy |
| 3362 | * mode however, this value is the same as task_cs(tsk)->effective_cpus, |
| 3363 | * which will not contain a sane cpumask during cases such as cpu hotplugging. |
| 3364 | * This is the absolute last resort for the scheduler and it is only used if |
| 3365 | * _every_ other avenue has been traveled. |
| 3366 | **/ |
| 3367 | |
Peter Zijlstra | 2baab4e | 2012-03-20 15:57:01 +0100 | [diff] [blame] | 3368 | void cpuset_cpus_allowed_fallback(struct task_struct *tsk) |
Oleg Nesterov | 9084bb8 | 2010-03-15 10:10:27 +0100 | [diff] [blame] | 3369 | { |
Oleg Nesterov | 9084bb8 | 2010-03-15 10:10:27 +0100 | [diff] [blame] | 3370 | rcu_read_lock(); |
Joel Savitz | d477f8c | 2019-06-12 11:50:48 -0400 | [diff] [blame] | 3371 | do_set_cpus_allowed(tsk, is_in_v2_mode() ? |
| 3372 | task_cs(tsk)->cpus_allowed : cpu_possible_mask); |
Oleg Nesterov | 9084bb8 | 2010-03-15 10:10:27 +0100 | [diff] [blame] | 3373 | rcu_read_unlock(); |
| 3374 | |
| 3375 | /* |
| 3376 | * We own tsk->cpus_allowed, nobody can change it under us. |
| 3377 | * |
| 3378 | * But we used cs && cs->cpus_allowed lockless and thus can |
| 3379 | * race with cgroup_attach_task() or update_cpumask() and get |
| 3380 | * the wrong tsk->cpus_allowed. However, both cases imply the |
| 3381 | * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr() |
| 3382 | * which takes task_rq_lock(). |
| 3383 | * |
| 3384 | * If we are called after it dropped the lock we must see all |
| 3385 | * changes in tsk_cs()->cpus_allowed. Otherwise we can temporarily |
| 3386 | * set any mask even if it is not right from task_cs() pov, |
| 3387 | * the pending set_cpus_allowed_ptr() will fix things. |
Peter Zijlstra | 2baab4e | 2012-03-20 15:57:01 +0100 | [diff] [blame] | 3388 | * |
| 3389 | * select_fallback_rq() will fix things up and set cpu_possible_mask |
| 3390 | * if required. |
Oleg Nesterov | 9084bb8 | 2010-03-15 10:10:27 +0100 | [diff] [blame] | 3391 | */ |
Oleg Nesterov | 9084bb8 | 2010-03-15 10:10:27 +0100 | [diff] [blame] | 3392 | } |
| 3393 | |
Rasmus Villemoes | 8f4ab07 | 2015-02-12 15:00:16 -0800 | [diff] [blame] | 3394 | void __init cpuset_init_current_mems_allowed(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3395 | { |
Mike Travis | f9a86fc | 2008-04-04 18:11:07 -0700 | [diff] [blame] | 3396 | nodes_setall(current->mems_allowed); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3397 | } |
| 3398 | |
Randy Dunlap | d9fd8a6 | 2005-07-27 11:45:11 -0700 | [diff] [blame] | 3399 | /** |
Paul Jackson | 909d75a | 2006-01-08 01:01:55 -0800 | [diff] [blame] | 3400 | * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset. |
| 3401 | * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed. |
| 3402 | * |
| 3403 | * Description: Returns the nodemask_t mems_allowed of the cpuset |
| 3404 | * attached to the specified @tsk. Guaranteed to return some non-empty |
Lai Jiangshan | 38d7bee | 2012-12-12 13:51:24 -0800 | [diff] [blame] | 3405 | * subset of node_states[N_MEMORY], even if this means going outside the |
Paul Jackson | 909d75a | 2006-01-08 01:01:55 -0800 | [diff] [blame] | 3406 | * task's cpuset. |
| 3407 | **/ |
| 3408 | |
| 3409 | nodemask_t cpuset_mems_allowed(struct task_struct *tsk) |
| 3410 | { |
| 3411 | nodemask_t mask; |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 3412 | unsigned long flags; |
Paul Jackson | 909d75a | 2006-01-08 01:01:55 -0800 | [diff] [blame] | 3413 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 3414 | spin_lock_irqsave(&callback_lock, flags); |
Li Zefan | b8dadcb | 2014-03-03 17:28:36 -0500 | [diff] [blame] | 3415 | rcu_read_lock(); |
Li Zefan | ae1c802 | 2014-07-09 16:48:32 +0800 | [diff] [blame] | 3416 | guarantee_online_mems(task_cs(tsk), &mask); |
Li Zefan | b8dadcb | 2014-03-03 17:28:36 -0500 | [diff] [blame] | 3417 | rcu_read_unlock(); |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 3418 | spin_unlock_irqrestore(&callback_lock, flags); |
Paul Jackson | 909d75a | 2006-01-08 01:01:55 -0800 | [diff] [blame] | 3419 | |
| 3420 | return mask; |
| 3421 | } |
| 3422 | |
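/*
 * Hypothetical usage sketch (demo_node_ok() is not a real function):
 * the returned nodemask is guaranteed non-empty, so membership tests
 * against it are always meaningful.
 */
#if 0	/* example only */
static bool demo_node_ok(struct task_struct *tsk, int node)
{
	nodemask_t mems = cpuset_mems_allowed(tsk);

	return node_isset(node, mems);
}
#endif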
| 3423 | /** |
Zhen Lei | 08b2b6f | 2021-05-24 16:29:43 +0800 | [diff] [blame] | 3424 | * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed |
Mel Gorman | 19770b3 | 2008-04-28 02:12:18 -0700 | [diff] [blame] | 3425 | * @nodemask: the nodemask to be checked |
Randy Dunlap | d9fd8a6 | 2005-07-27 11:45:11 -0700 | [diff] [blame] | 3426 | * |
Mel Gorman | 19770b3 | 2008-04-28 02:12:18 -0700 | [diff] [blame] | 3427 | * Are any of the nodes in the nodemask allowed in current->mems_allowed? |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3428 | */ |
Mel Gorman | 19770b3 | 2008-04-28 02:12:18 -0700 | [diff] [blame] | 3429 | int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3430 | { |
Mel Gorman | 19770b3 | 2008-04-28 02:12:18 -0700 | [diff] [blame] | 3431 | return nodes_intersects(*nodemask, current->mems_allowed); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3432 | } |
| 3433 | |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 3434 | /* |
Paul Menage | 7860836 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 3435 | * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or |
| 3436 | * mem_hardwall ancestor to the specified cpuset. Call holding |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 3437 | * callback_lock. If no ancestor is mem_exclusive or mem_hardwall |
Paul Menage | 7860836 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 3438 | * (an unusual configuration), then returns the root cpuset. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3439 | */ |
Tejun Heo | c9710d8 | 2013-08-08 20:11:22 -0400 | [diff] [blame] | 3440 | static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3441 | { |
Tejun Heo | c431069 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 3442 | while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs)) |
| 3443 | cs = parent_cs(cs); |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 3444 | return cs; |
| 3445 | } |
| 3446 | |
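/*
 * Compact summary (an editorial sketch, not the implementation) of the
 * decision order that __cpuset_node_allowed() below enforces:
 *
 *	in_interrupt()				-> allow
 *	node in current->mems_allowed		-> allow
 *	tsk_is_oom_victim(current)		-> allow
 *	gfp_mask & __GFP_HARDWALL		-> deny
 *	node in nearest hardwalled ancestor	-> allow
 *	otherwise				-> deny
 */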
| 3447 | /** |
Vladimir Davydov | 344736f | 2014-10-20 15:50:30 +0400 | [diff] [blame] | 3448 | * cpuset_node_allowed - Can we allocate on a memory node? |
David Rientjes | a1bc5a4 | 2009-04-02 16:57:54 -0700 | [diff] [blame] | 3449 | * @node: is this an allowed node? |
Paul Jackson | 02a0e53 | 2006-12-13 00:34:25 -0800 | [diff] [blame] | 3450 | * @gfp_mask: memory allocation flags |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 3451 | * |
David Rientjes | 6e276d2 | 2015-04-14 15:47:01 -0700 | [diff] [blame] | 3452 | * If we're in interrupt, yes, we can always allocate. If @node is set in |
| 3453 | * current's mems_allowed, yes. If it's not a __GFP_HARDWALL request and this |
| 3454 | * node is set in the nearest hardwalled cpuset ancestor to current's cpuset, |
Michal Hocko | da99ecf | 2017-09-06 16:24:53 -0700 | [diff] [blame] | 3455 | * yes. If current has access to memory reserves as an oom victim, yes. |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 3456 | * Otherwise, no. |
| 3457 | * |
| 3458 | * GFP_USER allocations are marked with the __GFP_HARDWALL bit, |
David Rientjes | c596d9f | 2007-05-06 14:49:32 -0700 | [diff] [blame] | 3459 | * and do not allow allocations outside the current task's cpuset |
Michal Hocko | da99ecf | 2017-09-06 16:24:53 -0700 | [diff] [blame] | 3460 | * unless the task has been OOM killed. |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 3461 | * GFP_KERNEL allocations are not so marked, so can escape to the |
Paul Menage | 7860836 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 3462 | * nearest enclosing hardwalled ancestor cpuset. |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 3463 | * |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 3464 | * Scanning up parent cpusets requires callback_lock. The |
Paul Jackson | 02a0e53 | 2006-12-13 00:34:25 -0800 | [diff] [blame] | 3465 | * __alloc_pages() routine only calls here with __GFP_HARDWALL bit |
| 3466 | * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the |
| 3467 | * current task's mems_allowed came up empty on the first pass over |
| 3468 | * the zonelist. So only GFP_KERNEL allocations, if all nodes in the |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 3469 | * cpuset are short of memory, might require taking the callback_lock. |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 3470 | * |
Paul Jackson | 36be57f | 2006-05-20 15:00:10 -0700 | [diff] [blame] | 3471 | * The first call here from mm/page_alloc:get_page_from_freelist() |
Paul Jackson | 02a0e53 | 2006-12-13 00:34:25 -0800 | [diff] [blame] | 3472 | * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets, |
| 3473 | * so no allocation on a node outside the cpuset is allowed (unless |
| 3474 | * in interrupt, of course). |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 3475 | * |
Paul Jackson | 36be57f | 2006-05-20 15:00:10 -0700 | [diff] [blame] | 3476 | * The second pass through get_page_from_freelist() doesn't even call |
| 3477 | * here for GFP_ATOMIC calls. For those calls, the __alloc_pages() |
| 3478 | * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set |
| 3479 | * in alloc_flags. That logic and the checks below have the combined |
| 3480 | * effect that: |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 3481 | * in_interrupt - any node ok (current task context irrelevant) |
| 3482 | * GFP_ATOMIC - any node ok |
Michal Hocko | da99ecf | 2017-09-06 16:24:53 -0700 | [diff] [blame] | 3483 | * tsk_is_oom_victim - any node ok |
Paul Menage | 7860836 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 3484 | * GFP_KERNEL - any node in enclosing hardwalled cpuset ok |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 3485 | * GFP_USER - only nodes in current task's mems_allowed ok. |
Paul Jackson | 02a0e53 | 2006-12-13 00:34:25 -0800 | [diff] [blame] | 3486 | */ |
Vlastimil Babka | 002f290 | 2016-05-19 17:14:30 -0700 | [diff] [blame] | 3487 | bool __cpuset_node_allowed(int node, gfp_t gfp_mask) |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 3488 | { |
Tejun Heo | c9710d8 | 2013-08-08 20:11:22 -0400 | [diff] [blame] | 3489 | struct cpuset *cs; /* current cpuset ancestors */ |
Paul Jackson | 29afd49 | 2006-03-24 03:16:12 -0800 | [diff] [blame] | 3490 | int allowed; /* is allocation on node @node allowed? */ |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 3491 | unsigned long flags; |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 3492 | |
David Rientjes | 6e276d2 | 2015-04-14 15:47:01 -0700 | [diff] [blame] | 3493 | if (in_interrupt()) |
Vlastimil Babka | 002f290 | 2016-05-19 17:14:30 -0700 | [diff] [blame] | 3494 | return true; |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 3495 | if (node_isset(node, current->mems_allowed)) |
Vlastimil Babka | 002f290 | 2016-05-19 17:14:30 -0700 | [diff] [blame] | 3496 | return true; |
David Rientjes | c596d9f | 2007-05-06 14:49:32 -0700 | [diff] [blame] | 3497 | /* |
| 3498 | * Allow tasks that have access to memory reserves because they have |
| 3499 | * been OOM killed to get memory anywhere. |
| 3500 | */ |
Michal Hocko | da99ecf | 2017-09-06 16:24:53 -0700 | [diff] [blame] | 3501 | if (unlikely(tsk_is_oom_victim(current))) |
Vlastimil Babka | 002f290 | 2016-05-19 17:14:30 -0700 | [diff] [blame] | 3502 | return true; |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 3503 | if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */ |
Vlastimil Babka | 002f290 | 2016-05-19 17:14:30 -0700 | [diff] [blame] | 3504 | return false; |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 3505 | |
Bob Picco | 5563e77 | 2005-11-13 16:06:35 -0800 | [diff] [blame] | 3506 | if (current->flags & PF_EXITING) /* Let dying task have memory */ |
Vlastimil Babka | 002f290 | 2016-05-19 17:14:30 -0700 | [diff] [blame] | 3507 | return true; |
Bob Picco | 5563e77 | 2005-11-13 16:06:35 -0800 | [diff] [blame] | 3508 | |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 3509 | /* Not hardwall and node outside mems_allowed: scan up cpusets */ |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 3510 | spin_lock_irqsave(&callback_lock, flags); |
Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 3511 | |
Li Zefan | b8dadcb | 2014-03-03 17:28:36 -0500 | [diff] [blame] | 3512 | rcu_read_lock(); |
Paul Menage | 7860836 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 3513 | cs = nearest_hardwall_ancestor(task_cs(current)); |
Li Zefan | 99afb0f | 2014-02-27 18:19:36 +0800 | [diff] [blame] | 3514 | allowed = node_isset(node, cs->mems_allowed); |
Li Zefan | b8dadcb | 2014-03-03 17:28:36 -0500 | [diff] [blame] | 3515 | rcu_read_unlock(); |
Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 3516 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 3517 | spin_unlock_irqrestore(&callback_lock, flags); |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 3518 | return allowed; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3519 | } |
| 3520 | |
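/*
 * Editor's illustrative sketch, not part of the original file: the rough
 * shape of an allocator-side check, loosely modelled on the page
 * allocator behaviour described above.  The function name is invented
 * for illustration only.
 */
#if 0	/* example only, never compiled */
static bool example_may_alloc_on_node(int nid, gfp_t gfp_mask)
{
	/*
	 * GFP_USER carries __GFP_HARDWALL, so this honours only
	 * current->mems_allowed; GFP_KERNEL lacks it and may fall back
	 * to the nearest hardwalled ancestor's mems_allowed.
	 */
	return __cpuset_node_allowed(nid, gfp_mask);
}
#endif
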
Paul Jackson | ef08e3b | 2005-09-06 15:18:13 -0700 | [diff] [blame] | 3521 | /** |
Jack Steiner | 6adef3e | 2010-05-26 14:42:49 -0700 | [diff] [blame] | 3522 | * cpuset_mem_spread_node() - On which node to begin search for a file page |
| 3523 | * cpuset_slab_spread_node() - On which node to begin search for a slab page |
Paul Jackson | 825a46a | 2006-03-24 03:16:03 -0800 | [diff] [blame] | 3524 | * |
| 3525 | * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for |
| 3526 | * tasks in a cpuset with is_spread_page or is_spread_slab set), |
| 3527 | * and if the memory allocation uses cpuset_mem_spread_node() to |
| 3528 | * choose the node on which to start looking, as it does for |
| 3529 | * certain page cache or slab cache pages such as file system |
| 3530 | * buffers and inode caches, then rather than always starting the |
| 3531 | * search for a free page on the local node, the starting node is |
| 3532 | * spread around the task's mems_allowed nodes. |
| 3533 | * |
| 3534 | * We don't have to worry about the returned node being offline |
| 3535 | * because "it can't happen", and even if it did, it would be ok. |
| 3536 | * |
| 3537 | * The routines calling guarantee_online_mems() are careful to |
| 3538 | * only set nodes in task->mems_allowed that are online. So it |
| 3539 | * should not be possible for the following code to return an |
| 3540 | * offline node. But if it did, that would be ok, as this routine |
| 3541 | * is not returning the node where the allocation must be, only |
| 3542 | * the node where the search should start. The zonelist passed to |
| 3543 | * __alloc_pages() will include all nodes. If the slab allocator |
| 3544 | * is passed an offline node, it will fall back to the local node. |
| 3545 | * See kmem_cache_alloc_node(). |
| 3546 | */ |
| 3547 | |
Jack Steiner | 6adef3e | 2010-05-26 14:42:49 -0700 | [diff] [blame] | 3548 | static int cpuset_spread_node(int *rotor) |
Paul Jackson | 825a46a | 2006-03-24 03:16:03 -0800 | [diff] [blame] | 3549 | { |
Andrew Morton | 0edaf86 | 2016-05-19 17:10:58 -0700 | [diff] [blame] | 3550 | return *rotor = next_node_in(*rotor, current->mems_allowed); |
Paul Jackson | 825a46a | 2006-03-24 03:16:03 -0800 | [diff] [blame] | 3551 | } |
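
/*
 * Editor's worked example, not part of the original file: with
 * current->mems_allowed = { 0, 2, 3 } and *rotor == 0, successive
 * calls return 2, 3, 0, 2, ... because next_node_in() wraps around
 * the allowed mask after the highest set node.
 */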
Jack Steiner | 6adef3e | 2010-05-26 14:42:49 -0700 | [diff] [blame] | 3552 | |
| 3553 | int cpuset_mem_spread_node(void) |
| 3554 | { |
Michal Hocko | 778d3b0 | 2011-07-26 16:08:30 -0700 | [diff] [blame] | 3555 | if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE) |
| 3556 | current->cpuset_mem_spread_rotor = |
| 3557 | node_random(¤t->mems_allowed); |
| 3558 | |
Jack Steiner | 6adef3e | 2010-05-26 14:42:49 -0700 | [diff] [blame] | 3559 | return cpuset_spread_node(¤t->cpuset_mem_spread_rotor); |
| 3560 | } |
| 3561 | |
| 3562 | int cpuset_slab_spread_node(void) |
| 3563 | { |
Michal Hocko | 778d3b0 | 2011-07-26 16:08:30 -0700 | [diff] [blame] | 3564 | if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE) |
| 3565 | current->cpuset_slab_spread_rotor = |
| 3566 | node_random(¤t->mems_allowed); |
| 3567 | |
Jack Steiner | 6adef3e | 2010-05-26 14:42:49 -0700 | [diff] [blame] | 3568 | return cpuset_spread_node(¤t->cpuset_slab_spread_rotor); |
| 3569 | } |
| 3570 | |
Paul Jackson | 825a46a | 2006-03-24 03:16:03 -0800 | [diff] [blame] | 3571 | EXPORT_SYMBOL_GPL(cpuset_mem_spread_node); |
| 3572 | |
| 3573 | /** |
David Rientjes | bbe373f | 2007-10-16 23:25:58 -0700 | [diff] [blame] | 3574 | * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's? |
| 3575 | * @tsk1: pointer to task_struct of some task. |
| 3576 | * @tsk2: pointer to task_struct of some other task. |
Paul Jackson | ef08e3b | 2005-09-06 15:18:13 -0700 | [diff] [blame] | 3577 | * |
David Rientjes | bbe373f | 2007-10-16 23:25:58 -0700 | [diff] [blame] | 3578 | * Description: Return true if @tsk1's mems_allowed intersects the |
| 3579 | * mems_allowed of @tsk2. Used by the OOM killer to determine if |
| 3580 | * one of the task's memory usage might impact the memory available |
| 3581 | * to the other. |
Paul Jackson | ef08e3b | 2005-09-06 15:18:13 -0700 | [diff] [blame] | 3582 | **/ |
| 3583 | |
David Rientjes | bbe373f | 2007-10-16 23:25:58 -0700 | [diff] [blame] | 3584 | int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, |
| 3585 | const struct task_struct *tsk2) |
Paul Jackson | ef08e3b | 2005-09-06 15:18:13 -0700 | [diff] [blame] | 3586 | { |
David Rientjes | bbe373f | 2007-10-16 23:25:58 -0700 | [diff] [blame] | 3587 | return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed); |
Paul Jackson | ef08e3b | 2005-09-06 15:18:13 -0700 | [diff] [blame] | 3588 | } |
| 3589 | |
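/*
 * Editor's worked example, not part of the original file: if tsk1 may
 * use nodes { 0, 1 } and tsk2 nodes { 1, 2 }, the masks share node 1
 * and this returns nonzero; with { 0 } versus { 2, 3 } it returns 0,
 * so the OOM killer can treat the two tasks' memory as disjoint.
 */
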
David Rientjes | 75aa199 | 2009-01-06 14:39:01 -0800 | [diff] [blame] | 3590 | /** |
David Rientjes | da39da3 | 2015-11-05 18:48:05 -0800 | [diff] [blame] | 3591 | * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed |
David Rientjes | 75aa199 | 2009-01-06 14:39:01 -0800 | [diff] [blame] | 3592 | * |
David Rientjes | da39da3 | 2015-11-05 18:48:05 -0800 | [diff] [blame] | 3593 | * Description: Appends current's cpuset name and a cached copy of its |
Li Zefan | b8dadcb | 2014-03-03 17:28:36 -0500 | [diff] [blame] | 3594 | * mems_allowed to the printk line being built (e.g. an OOM report). |
David Rientjes | 75aa199 | 2009-01-06 14:39:01 -0800 | [diff] [blame] | 3595 | */ |
David Rientjes | da39da3 | 2015-11-05 18:48:05 -0800 | [diff] [blame] | 3596 | void cpuset_print_current_mems_allowed(void) |
David Rientjes | 75aa199 | 2009-01-06 14:39:01 -0800 | [diff] [blame] | 3597 | { |
Li Zefan | b8dadcb | 2014-03-03 17:28:36 -0500 | [diff] [blame] | 3598 | struct cgroup *cgrp; |
David Rientjes | 75aa199 | 2009-01-06 14:39:01 -0800 | [diff] [blame] | 3599 | |
Li Zefan | b8dadcb | 2014-03-03 17:28:36 -0500 | [diff] [blame] | 3600 | rcu_read_lock(); |
Li Zefan | 63f43f5 | 2013-01-25 16:08:01 +0800 | [diff] [blame] | 3601 | |
David Rientjes | da39da3 | 2015-11-05 18:48:05 -0800 | [diff] [blame] | 3602 | cgrp = task_cs(current)->css.cgroup; |
yuzhoujian | ef8444e | 2018-12-28 00:36:07 -0800 | [diff] [blame] | 3603 | pr_cont(",cpuset="); |
Tejun Heo | e61734c | 2014-02-12 09:29:50 -0500 | [diff] [blame] | 3604 | pr_cont_cgroup_name(cgrp); |
yuzhoujian | ef8444e | 2018-12-28 00:36:07 -0800 | [diff] [blame] | 3605 | pr_cont(",mems_allowed=%*pbl", |
David Rientjes | da39da3 | 2015-11-05 18:48:05 -0800 | [diff] [blame] | 3606 | nodemask_pr_args(¤t->mems_allowed)); |
Li Zefan | f440d98 | 2013-03-01 15:02:15 +0800 | [diff] [blame] | 3607 | |
Li Zefan | cfb5966 | 2013-03-12 10:28:39 +0800 | [diff] [blame] | 3608 | rcu_read_unlock(); |
David Rientjes | 75aa199 | 2009-01-06 14:39:01 -0800 | [diff] [blame] | 3609 | } |
| 3610 | |
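/*
 * Editor's illustration with assumed values: for a task in a cpuset
 * cgroup named "charlie" that is allowed nodes 0-1, the pr_cont()
 * calls above append something like
 *
 *	,cpuset=charlie,mems_allowed=0-1
 *
 * to the report line already being built by the caller.
 */
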
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3611 | /* |
Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 3612 | * Collection of memory_pressure is suppressed unless |
| 3613 | * this flag is enabled by writing "1" to the special |
| 3614 | * cpuset file 'memory_pressure_enabled' in the root cpuset. |
| 3615 | */ |
| 3616 | |
Paul Jackson | c5b2aff8 | 2006-01-08 01:01:51 -0800 | [diff] [blame] | 3617 | int cpuset_memory_pressure_enabled __read_mostly; |
Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 3618 | |
| 3619 | /** |
| 3620 | * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims. |
| 3621 | * |
| 3622 | * Keep a running average of the rate of synchronous (direct) |
| 3623 | * page reclaim efforts initiated by tasks in each cpuset. |
| 3624 | * |
| 3625 | * This represents the rate at which some task in the cpuset |
| 3626 | * ran low on memory on all nodes it was allowed to use, and |
| 3627 | * had to enter the kernel's page reclaim code in an effort to |
| 3628 | * create more free memory by tossing clean pages or swapping |
| 3629 | * or writing dirty pages. |
| 3630 | * |
| 3631 | * Display to user space in the per-cpuset read-only file |
| 3632 | * "memory_pressure". Value displayed is an integer |
| 3633 | * representing the recent rate of entry into the synchronous |
| 3634 | * (direct) page reclaim by any task attached to the cpuset. |
| 3635 | **/ |
| 3636 | |
| 3637 | void __cpuset_memory_pressure_bump(void) |
| 3638 | { |
Li Zefan | b8dadcb | 2014-03-03 17:28:36 -0500 | [diff] [blame] | 3639 | rcu_read_lock(); |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 3640 | fmeter_markevent(&task_cs(current)->fmeter); |
Li Zefan | b8dadcb | 2014-03-03 17:28:36 -0500 | [diff] [blame] | 3641 | rcu_read_unlock(); |
Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 3642 | } |
| 3643 | |
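/*
 * Editor's usage sketch; the mount point is an assumption, the file
 * names follow the comments above.  With the v1 cpuset hierarchy
 * mounted at /sys/fs/cgroup/cpuset:
 *
 *	echo 1 > /sys/fs/cgroup/cpuset/cpuset.memory_pressure_enabled
 *	cat /sys/fs/cgroup/cpuset/mycpuset/cpuset.memory_pressure
 *
 * the first command enables collection in the root cpuset, the second
 * reads the fmeter-filtered rate of direct-reclaim entries for tasks
 * in the (hypothetical) cpuset "mycpuset".
 */
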
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 3644 | #ifdef CONFIG_PROC_PID_CPUSET |
Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 3645 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3646 | * proc_cpuset_show() |
| 3647 | * - Print task's cpuset path into seq_file. |
| 3648 | * - Used for /proc/<pid>/cpuset. |
Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 3649 | * - No need to task_lock(tsk) on this tsk->cpuset reference, as it |
| 3650 | * doesn't really matter if tsk->cpuset changes after we read it, |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 3651 | * and we take cpuset_mutex, keeping cpuset_attach() from changing it |
Paul Menage | 2df167a | 2008-02-07 00:14:45 -0800 | [diff] [blame] | 3652 | * anyway. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3653 | */ |
Zefan Li | 52de477 | 2014-09-18 16:03:36 +0800 | [diff] [blame] | 3654 | int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns, |
| 3655 | struct pid *pid, struct task_struct *tsk) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3656 | { |
Tejun Heo | 4c737b4 | 2016-08-10 11:23:44 -0400 | [diff] [blame] | 3657 | char *buf; |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 3658 | struct cgroup_subsys_state *css; |
Eric W. Biederman | 99f8955 | 2006-06-26 00:25:55 -0700 | [diff] [blame] | 3659 | int retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3660 | |
Eric W. Biederman | 99f8955 | 2006-06-26 00:25:55 -0700 | [diff] [blame] | 3661 | retval = -ENOMEM; |
Tejun Heo | e61734c | 2014-02-12 09:29:50 -0500 | [diff] [blame] | 3662 | buf = kmalloc(PATH_MAX, GFP_KERNEL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3663 | if (!buf) |
Eric W. Biederman | 99f8955 | 2006-06-26 00:25:55 -0700 | [diff] [blame] | 3664 | goto out; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3665 | |
Aditya Kali | a79a908 | 2016-01-29 02:54:06 -0600 | [diff] [blame] | 3666 | css = task_get_css(tsk, cpuset_cgrp_id); |
Tejun Heo | 4c737b4 | 2016-08-10 11:23:44 -0400 | [diff] [blame] | 3667 | retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX, |
| 3668 | current->nsproxy->cgroup_ns); |
Aditya Kali | a79a908 | 2016-01-29 02:54:06 -0600 | [diff] [blame] | 3669 | css_put(css); |
Tejun Heo | 4c737b4 | 2016-08-10 11:23:44 -0400 | [diff] [blame] | 3670 | if (retval >= PATH_MAX) |
Tejun Heo | 679a5e3 | 2016-09-29 11:58:36 +0200 | [diff] [blame] | 3671 | retval = -ENAMETOOLONG; |
| 3672 | if (retval < 0) |
Zefan Li | 52de477 | 2014-09-18 16:03:36 +0800 | [diff] [blame] | 3673 | goto out_free; |
Tejun Heo | 4c737b4 | 2016-08-10 11:23:44 -0400 | [diff] [blame] | 3674 | seq_puts(m, buf); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3675 | seq_putc(m, '\n'); |
Tejun Heo | e61734c | 2014-02-12 09:29:50 -0500 | [diff] [blame] | 3676 | retval = 0; |
Eric W. Biederman | 99f8955 | 2006-06-26 00:25:55 -0700 | [diff] [blame] | 3677 | out_free: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3678 | kfree(buf); |
Eric W. Biederman | 99f8955 | 2006-06-26 00:25:55 -0700 | [diff] [blame] | 3679 | out: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3680 | return retval; |
| 3681 | } |
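
/*
 * Editor's illustration with assumed values: for a task attached to
 * the cpuset cgroup /alpha/beta, "cat /proc/<pid>/cpuset" prints
 * "/alpha/beta", with the path resolved above by cgroup_path_ns()
 * relative to the reading task's cgroup namespace.
 */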
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 3682 | #endif /* CONFIG_PROC_PID_CPUSET */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3683 | |
Heiko Carstens | d01d482 | 2009-09-21 11:06:27 +0200 | [diff] [blame] | 3684 | /* Display task mems_allowed in /proc/<pid>/status file. */ |
Eric W. Biederman | df5f831 | 2008-02-08 04:18:33 -0800 | [diff] [blame] | 3685 | void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3686 | { |
Tejun Heo | e8e6d97 | 2015-02-13 14:37:23 -0800 | [diff] [blame] | 3687 | seq_printf(m, "Mems_allowed:\t%*pb\n", |
| 3688 | nodemask_pr_args(&task->mems_allowed)); |
| 3689 | seq_printf(m, "Mems_allowed_list:\t%*pbl\n", |
| 3690 | nodemask_pr_args(&task->mems_allowed)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3691 | } |
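
/*
 * Editor's illustration with assumed values: for a task allowed nodes
 * 0-3 on a kernel whose node mask fits one 32-bit word, the two
 * seq_printf() calls above yield in /proc/<pid>/status roughly:
 *
 *	Mems_allowed:	0000000f
 *	Mems_allowed_list:	0-3
 *
 * %*pb prints the raw bitmap in hex words, %*pbl the node list form.
 */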