/*
 *  kernel/cpuset.c
 *
 *  Processor and Memory placement constraints for sets of tasks.
 *
 *  Copyright (C) 2003 BULL SA.
 *  Copyright (C) 2004-2007 Silicon Graphics, Inc.
 *  Copyright (C) 2006 Google, Inc
 *
 *  Portions derived from Patrick Mochel's sysfs code.
 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 *  2003-10-10 Written by Simon Derr.
 *  2003-10-22 Updates by Stephen Hemminger.
 *  2004 May-July Rework by Paul Jackson.
 *  2006 Rework by Paul Menage to use generic cgroups
 *  2008 Rework of the scheduler domains and CPU hotplug handling
 *       by Max Krasnyansky
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of the Linux
 *  distribution for more details.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/time64.h>
#include <linux/backing-dev.h>
#include <linux/sort.h>

#include <asm/uaccess.h>
#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/cgroup.h>
#include <linux/wait.h>

DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);

/* See "Frequency meter" comments, below. */

struct fmeter {
	int cnt;		/* unprocessed events count */
	int val;		/* most recent output value */
	time64_t time;		/* clock (secs) when val computed */
	spinlock_t lock;	/* guards read or write of above */
};

struct cpuset {
	struct cgroup_subsys_state css;

	unsigned long flags;		/* "unsigned long" so bitops work */

	/*
	 * On default hierarchy:
	 *
	 * The user-configured masks can only be changed by writing to
	 * cpuset.cpus and cpuset.mems, and won't be limited by the
	 * parent masks.
	 *
	 * The effective masks are the real masks that apply to the tasks
	 * in the cpuset.  They may be changed if the configured masks are
	 * changed or hotplug happens.
	 *
	 * effective_mask == configured_mask & parent's effective_mask,
	 * and if it ends up empty, it will inherit the parent's mask.
	 *
	 * On legacy hierarchy:
	 *
	 * The user-configured masks are always the same as the effective
	 * masks.
	 */
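
	/*
	 * For example (illustrative values only): if the parent's
	 * effective mask is cpus 0-3 and this cpuset's configured mask
	 * is cpus 2-5, the rule above yields an effective mask of 2-3.
	 */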

	/* user-configured CPUs and Memory Nodes allowed to tasks */
	cpumask_var_t cpus_allowed;
	cpumask_var_t cpus_requested;
	nodemask_t mems_allowed;

	/* effective CPUs and Memory Nodes allowed to tasks */
	cpumask_var_t effective_cpus;
	nodemask_t effective_mems;

	/*
	 * These are the old Memory Nodes that tasks took on.
	 *
	 * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
	 * - A new cpuset's old_mems_allowed is initialized when some
	 *   task is moved into it.
	 * - old_mems_allowed is used in cpuset_migrate_mm() when we change
	 *   cpuset.mems_allowed and have tasks' nodemask updated, and
	 *   then old_mems_allowed is updated to mems_allowed.
	 */
	nodemask_t old_mems_allowed;

	struct fmeter fmeter;		/* memory_pressure filter */

	/*
	 * Tasks are being attached to this cpuset.  Used to prevent
	 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
	 */
	int attach_in_progress;

	/* partition number for rebuild_sched_domains() */
	int pn;

	/* for custom sched domain */
	int relax_domain_level;
};

static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct cpuset, css) : NULL;
}

/* Retrieve the cpuset for a task */
static inline struct cpuset *task_cs(struct task_struct *task)
{
	return css_cs(task_css(task, cpuset_cgrp_id));
}

static inline struct cpuset *parent_cs(struct cpuset *cs)
{
	return css_cs(cs->css.parent);
}

#ifdef CONFIG_NUMA
static inline bool task_has_mempolicy(struct task_struct *task)
{
	return task->mempolicy;
}
#else
static inline bool task_has_mempolicy(struct task_struct *task)
{
	return false;
}
#endif


/* bits in struct cpuset flags field */
typedef enum {
	CS_ONLINE,
	CS_CPU_EXCLUSIVE,
	CS_MEM_EXCLUSIVE,
	CS_MEM_HARDWALL,
	CS_MEMORY_MIGRATE,
	CS_SCHED_LOAD_BALANCE,
	CS_SPREAD_PAGE,
	CS_SPREAD_SLAB,
} cpuset_flagbits_t;

/* convenient tests for these bits */
static inline bool is_cpuset_online(struct cpuset *cs)
{
	return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
}

static inline int is_cpu_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_hardwall(const struct cpuset *cs)
{
	return test_bit(CS_MEM_HARDWALL, &cs->flags);
}

static inline int is_sched_load_balance(const struct cpuset *cs)
{
	return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
}

static inline int is_memory_migrate(const struct cpuset *cs)
{
	return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
}

static inline int is_spread_page(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_PAGE, &cs->flags);
}

static inline int is_spread_slab(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_SLAB, &cs->flags);
}

static struct cpuset top_cpuset = {
	.flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
		  (1 << CS_MEM_EXCLUSIVE)),
};

/**
 * cpuset_for_each_child - traverse online children of a cpuset
 * @child_cs: loop cursor pointing to the current child
 * @pos_css: used for iteration
 * @parent_cs: target cpuset to walk children of
 *
 * Walk @child_cs through the online children of @parent_cs.  Must be used
 * with RCU read locked.
 */
#define cpuset_for_each_child(child_cs, pos_css, parent_cs)		\
	css_for_each_child((pos_css), &(parent_cs)->css)		\
		if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))
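
/*
 * A minimal usage sketch (an illustration, not a real call site; it
 * mirrors how validate_change() below walks children, and any caller
 * must hold the RCU read lock):
 *
 *	rcu_read_lock();
 *	cpuset_for_each_child(child, pos_css, parent)
 *		... inspect child ...
 *	rcu_read_unlock();
 */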

/**
 * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
 * @des_cs: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @root_cs: target cpuset to walk descendants of
 *
 * Walk @des_cs through the online descendants of @root_cs.  Must be used
 * with RCU read locked.  The caller may modify @pos_css by calling
 * css_rightmost_descendant() to skip subtree.  @root_cs is included in the
 * iteration and the first node to be visited.
 */
#define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs)	\
	css_for_each_descendant_pre((pos_css), &(root_cs)->css)	\
		if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))

/*
 * There are two global locks guarding cpuset structures - cpuset_mutex and
 * callback_lock.  We also require taking task_lock() when dereferencing a
 * task's cpuset pointer.  See "The task_lock() exception", at the end of this
 * comment.
 *
 * A task must hold both locks to modify cpusets.  If a task holds
 * cpuset_mutex, then it blocks others wanting that mutex, ensuring that it
 * is the only task able to also acquire callback_lock and be able to
 * modify cpusets.  It can perform various checks on the cpuset structure
 * first, knowing nothing will change.  It can also allocate memory while
 * just holding cpuset_mutex.  While it is performing these checks, various
 * callback routines can briefly acquire callback_lock to query cpusets.
 * Once it is ready to make the changes, it takes callback_lock, blocking
 * everyone else.
 *
 * Calls to the kernel memory allocator cannot be made while holding
 * callback_lock, as that would risk double tripping on callback_lock
 * from one of the callbacks into the cpuset code from within
 * __alloc_pages().
 *
 * If a task is only holding callback_lock, then it has read-only
 * access to cpusets.
 *
 * Now, the task_struct fields mems_allowed and mempolicy may be changed
 * by another task, so we use alloc_lock in the task_struct to protect
 * them.
 *
 * The cpuset_common_file_read() handlers only hold callback_lock across
 * small pieces of code, such as when reading out possibly multi-word
 * cpumasks and nodemasks.
 *
 * Accessing a task's cpuset should be done in accordance with the
 * guidelines for accessing subsystem state in kernel/cgroup.c
 */

static DEFINE_MUTEX(cpuset_mutex);
static DEFINE_SPINLOCK(callback_lock);
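
/*
 * A minimal sketch of the writer-side pattern described above (an
 * illustration only, not a real call site; it assumes the caller may
 * sleep and uses the irq-disabling lock form taken by callback_lock
 * users elsewhere in this file):
 *
 *	mutex_lock(&cpuset_mutex);
 *	... validate and allocate; nothing can change under us ...
 *	spin_lock_irq(&callback_lock);
 *	... publish new cpus_allowed/mems_allowed ...
 *	spin_unlock_irq(&callback_lock);
 *	mutex_unlock(&cpuset_mutex);
 */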

static struct workqueue_struct *cpuset_migrate_mm_wq;

/*
 * CPU / memory hotplug is handled asynchronously.
 */
static void cpuset_hotplug_workfn(struct work_struct *work);
static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);

static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);

/*
 * This is ugly, but preserves the userspace API for existing cpuset
 * users.  If someone tries to mount the "cpuset" filesystem, we
 * silently switch it to mount "cgroup" instead.
 */
static struct dentry *cpuset_mount(struct file_system_type *fs_type,
			 int flags, const char *unused_dev_name, void *data)
{
	struct file_system_type *cgroup_fs = get_fs_type("cgroup");
	struct dentry *ret = ERR_PTR(-ENODEV);
	if (cgroup_fs) {
		char mountopts[] =
			"cpuset,noprefix,"
			"release_agent=/sbin/cpuset_release_agent";
		ret = cgroup_fs->mount(cgroup_fs, flags,
				       unused_dev_name, mountopts);
		put_filesystem(cgroup_fs);
	}
	return ret;
}

static struct file_system_type cpuset_fs_type = {
	.name = "cpuset",
	.mount = cpuset_mount,
};

/*
 * Return in pmask the portion of a cpuset's cpus_allowed that
 * are online.  If none are online, walk up the cpuset hierarchy
 * until we find one that does have some online cpus.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of cpu_online_mask.
 *
 * Call with callback_lock or cpuset_mutex held.
 */
static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
{
	while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) {
		cs = parent_cs(cs);
		if (unlikely(!cs)) {
			/*
			 * The top cpuset doesn't have any online cpu as a
			 * consequence of a race between cpuset_hotplug_work
			 * and the cpu hotplug notifier.  But we know the top
			 * cpuset's effective_cpus is on its way to being
			 * identical to cpu_online_mask.
			 */
			cpumask_copy(pmask, cpu_online_mask);
			return;
		}
	}
	cpumask_and(pmask, cs->effective_cpus, cpu_online_mask);
}

/*
 * Return in *pmask the portion of a cpuset's mems_allowed that
 * are online, with memory.  If none are online with memory, walk
 * up the cpuset hierarchy until we find one that does have some
 * online mems.  The top cpuset always has some mems online.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of node_states[N_MEMORY].
 *
 * Call with callback_lock or cpuset_mutex held.
 */
static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
{
	while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))
		cs = parent_cs(cs);
	nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
}

/*
 * update task's spread flag if cpuset's page/slab spread flag is set
 *
 * Call with callback_lock or cpuset_mutex held.
 */
static void cpuset_update_task_spread_flag(struct cpuset *cs,
					struct task_struct *tsk)
{
	if (is_spread_page(cs))
		task_set_spread_page(tsk);
	else
		task_clear_spread_page(tsk);

	if (is_spread_slab(cs))
		task_set_spread_slab(tsk);
	else
		task_clear_spread_slab(tsk);
}

/*
 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
 *
 * One cpuset is a subset of another if all its allowed CPUs and
 * Memory Nodes are a subset of the other, and its exclusive flags
 * are only set if the other's are set.  Call holding cpuset_mutex.
 */

static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
{
	return	cpumask_subset(p->cpus_requested, q->cpus_requested) &&
		nodes_subset(p->mems_allowed, q->mems_allowed) &&
		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
		is_mem_exclusive(p) <= is_mem_exclusive(q);
}
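
/*
 * For example (illustrative values only), a cpuset requesting cpus 0-1
 * is a subset of one requesting cpus 0-3, provided it also requests a
 * subset of the mems and sets no exclusive flag that the other lacks.
 */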

/**
 * alloc_trial_cpuset - allocate a trial cpuset
 * @cs: the cpuset that the trial cpuset duplicates
 */
static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
{
	struct cpuset *trial;

	trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
	if (!trial)
		return NULL;

	if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL))
		goto free_cs;
	if (!alloc_cpumask_var(&trial->cpus_requested, GFP_KERNEL))
		goto free_allowed;
	if (!alloc_cpumask_var(&trial->effective_cpus, GFP_KERNEL))
		goto free_cpus;

	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
	cpumask_copy(trial->cpus_requested, cs->cpus_requested);
	cpumask_copy(trial->effective_cpus, cs->effective_cpus);
	return trial;

free_cpus:
	free_cpumask_var(trial->cpus_requested);
free_allowed:
	free_cpumask_var(trial->cpus_allowed);
free_cs:
	kfree(trial);
	return NULL;
}

/**
 * free_trial_cpuset - free the trial cpuset
 * @trial: the trial cpuset to be freed
 */
static void free_trial_cpuset(struct cpuset *trial)
{
	free_cpumask_var(trial->effective_cpus);
	free_cpumask_var(trial->cpus_requested);
	free_cpumask_var(trial->cpus_allowed);
	kfree(trial);
}
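
/*
 * Typical lifecycle of a trial cpuset, as used by the update paths later
 * in this file (a sketch, not a real call site):
 *
 *	trial = alloc_trial_cpuset(cs);
 *	if (!trial)
 *		return -ENOMEM;
 *	... mutate trial->cpus_allowed or trial->mems_allowed ...
 *	retval = validate_change(cs, trial);
 *	... commit to cs only on success ...
 *	free_trial_cpuset(trial);
 */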

/*
 * validate_change() - Used to validate that any proposed cpuset change
 *		       follows the structural rules for cpusets.
 *
 * If we replaced the flag and mask values of the current cpuset
 * (cur) with those values in the trial cpuset (trial), would
 * our various subset and exclusive rules still be valid?  Presumes
 * cpuset_mutex held.
 *
 * 'cur' is the address of an actual, in-use cpuset.  Operations
 * such as list traversal that depend on the actual address of the
 * cpuset in the list must use cur below, not trial.
 *
 * 'trial' is the address of bulk structure copy of cur, with
 * perhaps one or more of the fields cpus_allowed, mems_allowed,
 * or flags changed to new, trial values.
 *
 * Return 0 if valid, -errno if not.
 */

static int validate_change(struct cpuset *cur, struct cpuset *trial)
{
	struct cgroup_subsys_state *css;
	struct cpuset *c, *par;
	int ret;

	rcu_read_lock();

	/* Each of our child cpusets must be a subset of us */
	ret = -EBUSY;
	cpuset_for_each_child(c, css, cur)
		if (!is_cpuset_subset(c, trial))
			goto out;

	/* Remaining checks don't apply to root cpuset */
	ret = 0;
	if (cur == &top_cpuset)
		goto out;

	par = parent_cs(cur);

	/* On legacy hierarchy, we must be a subset of our parent cpuset. */
	ret = -EACCES;
	if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
	    !is_cpuset_subset(trial, par))
		goto out;

	/*
	 * If either I or some sibling (!= me) is exclusive, we can't
	 * overlap
	 */
	ret = -EINVAL;
	cpuset_for_each_child(c, css, par) {
		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
		    c != cur &&
		    cpumask_intersects(trial->cpus_requested, c->cpus_requested))
			goto out;
		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
		    c != cur &&
		    nodes_intersects(trial->mems_allowed, c->mems_allowed))
			goto out;
	}

	/*
	 * Cpusets with tasks - existing or newly being attached - can't
	 * be changed to have empty cpus_allowed or mems_allowed.
	 */
	ret = -ENOSPC;
	if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) {
		if (!cpumask_empty(cur->cpus_allowed) &&
		    cpumask_empty(trial->cpus_allowed))
			goto out;
		if (!nodes_empty(cur->mems_allowed) &&
		    nodes_empty(trial->mems_allowed))
			goto out;
	}

	/*
	 * We can't shrink if we won't have enough room for SCHED_DEADLINE
	 * tasks.
	 */
	ret = -EBUSY;
	if (is_cpu_exclusive(cur) &&
	    !cpuset_cpumask_can_shrink(cur->cpus_allowed,
				       trial->cpus_allowed))
		goto out;

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}
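
/*
 * For example, on the legacy hierarchy, widening a child's masks beyond
 * its parent's fails the subset check above with -EACCES, and emptying
 * the cpus of a populated cpuset fails with -ENOSPC.
 */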

#ifdef CONFIG_SMP
/*
 * Helper routine for generate_sched_domains().
 * Do cpusets a, b have overlapping effective cpus_allowed masks?
 */
static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
{
	return cpumask_intersects(a->effective_cpus, b->effective_cpus);
}

static void
update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
{
	if (dattr->relax_domain_level < c->relax_domain_level)
		dattr->relax_domain_level = c->relax_domain_level;
	return;
}

static void update_domain_attr_tree(struct sched_domain_attr *dattr,
				    struct cpuset *root_cs)
{
	struct cpuset *cp;
	struct cgroup_subsys_state *pos_css;

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
		/* skip the whole subtree if @cp doesn't have any CPU */
		if (cpumask_empty(cp->cpus_allowed)) {
			pos_css = css_rightmost_descendant(pos_css);
			continue;
		}

		if (is_sched_load_balance(cp))
			update_domain_attr(dattr, cp);
	}
	rcu_read_unlock();
}

/*
 * generate_sched_domains()
 *
 * This function builds a partial partition of the system's CPUs.
 * A 'partial partition' is a set of non-overlapping subsets whose
 * union is a subset of that set.
 * The output of this function needs to be passed to kernel/sched/core.c
 * partition_sched_domains() routine, which will rebuild the scheduler's
 * load balancing domains (sched domains) as specified by that partial
 * partition.
 *
 * See "What is sched_load_balance" in Documentation/cgroups/cpusets.txt
 * for a background explanation of this.
 *
 * Does not return errors, on the theory that the callers of this
 * routine would rather not worry about failures to rebuild sched
 * domains when operating in the severe memory shortage situations
 * that could cause allocation failures below.
 *
 * Must be called with cpuset_mutex held.
 *
 * The three key local variables below are:
 *    q  - a linked-list queue of cpuset pointers, used to implement a
 *	   top-down scan of all cpusets.  This scan loads a pointer
 *	   to each cpuset marked is_sched_load_balance into the
 *	   array 'csa'.  For our purposes, rebuilding the scheduler's
 *	   sched domains, we can ignore !is_sched_load_balance cpusets.
 *  csa  - (for CpuSet Array) Array of pointers to all the cpusets
 *	   that need to be load balanced, for convenient iterative
 *	   access by the subsequent code that finds the best partition,
 *	   i.e. the set of domains (subsets) of CPUs such that the
 *	   cpus_allowed of every cpuset marked is_sched_load_balance
 *	   is a subset of one of these domains, while there are as
 *	   many such domains as possible, each as small as possible.
 * doms  - Conversion of 'csa' to an array of cpumasks, for passing to
 *	   the kernel/sched/core.c routine partition_sched_domains() in a
 *	   convenient format, that can be easily compared to the prior
 *	   value to determine what partition elements (sched domains)
 *	   were changed (added or removed.)
 *
 * Finding the best partition (set of domains):
 *	The triple nested loops below over i, j, k scan over the
 *	load balanced cpusets (using the array of cpuset pointers in
 *	csa[]) looking for pairs of cpusets that have overlapping
 *	cpus_allowed, but which don't have the same 'pn' partition
 *	number, and puts them in the same partition number.  It keeps
 *	looping on the 'restart' label until it can no longer find
 *	any such pairs.
 *
 *	The union of the cpus_allowed masks from the set of
 *	all cpusets having the same 'pn' value then form the one
 *	element of the partition (one sched domain) to be passed to
 *	partition_sched_domains().
 */
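
/*
 * Worked example (illustrative): given three load-balanced cpusets A, B
 * and C where only A and B have overlapping cpus_allowed, the loops below
 * merge A and B into one partition number, yielding two sched domains:
 * {A union B} and {C}.
 */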
static int generate_sched_domains(cpumask_var_t **domains,
			struct sched_domain_attr **attributes)
{
	struct cpuset *cp;	/* scans q */
	struct cpuset **csa;	/* array of all cpuset ptrs */
	int csn;		/* how many cpuset ptrs in csa so far */
	int i, j, k;		/* indices for partition finding loops */
	cpumask_var_t *doms;	/* resulting partition; i.e. sched domains */
	cpumask_var_t non_isolated_cpus;  /* load balanced CPUs */
	struct sched_domain_attr *dattr;  /* attributes for custom domains */
	int ndoms = 0;		/* number of sched domains in result */
	int nslot;		/* next empty doms[] struct cpumask slot */
	struct cgroup_subsys_state *pos_css;

	doms = NULL;
	dattr = NULL;
	csa = NULL;

	if (!alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL))
		goto done;
	cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);

	/* Special case for the 99% of systems with one, full, sched domain */
	if (is_sched_load_balance(&top_cpuset)) {
		ndoms = 1;
		doms = alloc_sched_domains(ndoms);
		if (!doms)
			goto done;

		dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
		if (dattr) {
			*dattr = SD_ATTR_INIT;
			update_domain_attr_tree(dattr, &top_cpuset);
		}
		cpumask_and(doms[0], top_cpuset.effective_cpus,
			    non_isolated_cpus);

		goto done;
	}

	csa = kmalloc(nr_cpusets() * sizeof(cp), GFP_KERNEL);
	if (!csa)
		goto done;
	csn = 0;

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
		if (cp == &top_cpuset)
			continue;
		/*
		 * Continue traversing beyond @cp iff @cp has some CPUs and
		 * isn't load balancing.  The former is obvious.  The
		 * latter: All child cpusets contain a subset of the
		 * parent's cpus, so just skip them, and then we call
		 * update_domain_attr_tree() to calc relax_domain_level of
		 * the corresponding sched domain.
		 */
		if (!cpumask_empty(cp->cpus_allowed) &&
		    !(is_sched_load_balance(cp) &&
		      cpumask_intersects(cp->cpus_allowed, non_isolated_cpus)))
			continue;

		if (is_sched_load_balance(cp))
			csa[csn++] = cp;

		/* skip @cp's subtree */
		pos_css = css_rightmost_descendant(pos_css);
	}
	rcu_read_unlock();

	for (i = 0; i < csn; i++)
		csa[i]->pn = i;
	ndoms = csn;

restart:
	/* Find the best partition (set of sched domains) */
	for (i = 0; i < csn; i++) {
		struct cpuset *a = csa[i];
		int apn = a->pn;

		for (j = 0; j < csn; j++) {
			struct cpuset *b = csa[j];
			int bpn = b->pn;

			if (apn != bpn && cpusets_overlap(a, b)) {
				for (k = 0; k < csn; k++) {
					struct cpuset *c = csa[k];

					if (c->pn == bpn)
						c->pn = apn;
				}
				ndoms--;	/* one less element */
				goto restart;
			}
		}
	}

	/*
	 * Now we know how many domains to create.
	 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
	 */
	doms = alloc_sched_domains(ndoms);
	if (!doms)
		goto done;

	/*
	 * The rest of the code, including the scheduler, can deal with
	 * dattr==NULL case.  No need to abort if alloc fails.
	 */
	dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);

	for (nslot = 0, i = 0; i < csn; i++) {
		struct cpuset *a = csa[i];
		struct cpumask *dp;
		int apn = a->pn;

		if (apn < 0) {
			/* Skip completed partitions */
			continue;
		}

		dp = doms[nslot];

		if (nslot == ndoms) {
			static int warnings = 10;
			if (warnings) {
				pr_warn("rebuild_sched_domains confused: nslot %d, ndoms %d, csn %d, i %d, apn %d\n",
					nslot, ndoms, csn, i, apn);
				warnings--;
			}
			continue;
		}

		cpumask_clear(dp);
		if (dattr)
			*(dattr + nslot) = SD_ATTR_INIT;
		for (j = i; j < csn; j++) {
			struct cpuset *b = csa[j];

			if (apn == b->pn) {
				cpumask_or(dp, dp, b->effective_cpus);
				cpumask_and(dp, dp, non_isolated_cpus);
				if (dattr)
					update_domain_attr_tree(dattr + nslot, b);

				/* Done with this partition */
				b->pn = -1;
			}
		}
		nslot++;
	}
	BUG_ON(nslot != ndoms);

done:
	free_cpumask_var(non_isolated_cpus);
	kfree(csa);

	/*
	 * Fallback to the default domain if kmalloc() failed.
	 * See comments in partition_sched_domains().
	 */
	if (doms == NULL)
		ndoms = 1;

	*domains    = doms;
	*attributes = dattr;
	return ndoms;
}
| 807 | |
| 808 | /* |
| 809 | * Rebuild scheduler domains. |
| 810 | * |
Tejun Heo | 699140b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 811 | * If the flag 'sched_load_balance' of any cpuset with non-empty |
| 812 | * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset |
| 813 | * which has that flag enabled, or if any cpuset with a non-empty |
| 814 | * 'cpus' is removed, then call this routine to rebuild the |
| 815 | * scheduler's dynamic sched domains. |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 816 | * |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 817 | */ |
Prateek Sood | e9e7b75 | 2017-09-08 13:10:55 +0530 | [diff] [blame] | 818 | static void rebuild_sched_domains_unlocked(void) |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 819 | { |
| 820 | struct sched_domain_attr *attr; |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 821 | cpumask_var_t *doms; |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 822 | int ndoms; |
| 823 | |
Prateek Sood | e9e7b75 | 2017-09-08 13:10:55 +0530 | [diff] [blame] | 824 | cpu_hotplug_mutex_held(); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 825 | lockdep_assert_held(&cpuset_mutex); |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 826 | |
Li Zefan | 5b16c2a | 2013-04-27 06:52:43 -0700 | [diff] [blame] | 827 | /* |
| 828 | * We have raced with CPU hotplug. Don't do anything to avoid |
| 829 | * passing doms with offlined cpu to partition_sched_domains(). |
| 830 | * Anyways, hotplug work item will rebuild sched domains. |
| 831 | */ |
Li Zefan | 8b5f1c5 | 2014-07-09 16:47:50 +0800 | [diff] [blame] | 832 | if (!cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask)) |
Prateek Sood | e9e7b75 | 2017-09-08 13:10:55 +0530 | [diff] [blame] | 833 | return; |
Li Zefan | 5b16c2a | 2013-04-27 06:52:43 -0700 | [diff] [blame] | 834 | |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 835 | /* Generate domain masks and attrs */ |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 836 | ndoms = generate_sched_domains(&doms, &attr); |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 837 | |
| 838 | /* Have scheduler rebuild the domains */ |
| 839 | partition_sched_domains(ndoms, doms, attr); |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 840 | } |
Paul Menage | db7f47c | 2009-04-02 16:57:55 -0700 | [diff] [blame] | 841 | #else /* !CONFIG_SMP */ |
Prateek Sood | e9e7b75 | 2017-09-08 13:10:55 +0530 | [diff] [blame] | 842 | static void rebuild_sched_domains_unlocked(void) |
Paul Menage | db7f47c | 2009-04-02 16:57:55 -0700 | [diff] [blame] | 843 | { |
| 844 | } |
Paul Menage | db7f47c | 2009-04-02 16:57:55 -0700 | [diff] [blame] | 845 | #endif /* CONFIG_SMP */ |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 846 | |
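| | /*
| | * Locked wrapper around rebuild_sched_domains_unlocked(): takes the
| | * CPU hotplug read lock and then cpuset_mutex, in that order, since
| | * the unlocked variant asserts that both are held.
| | */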
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 847 | void rebuild_sched_domains(void) |
| 848 | { |
Prateek Sood | e9e7b75 | 2017-09-08 13:10:55 +0530 | [diff] [blame] | 849 | get_online_cpus(); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 850 | mutex_lock(&cpuset_mutex); |
Prateek Sood | e9e7b75 | 2017-09-08 13:10:55 +0530 | [diff] [blame] | 851 | rebuild_sched_domains_unlocked(); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 852 | mutex_unlock(&cpuset_mutex); |
Prateek Sood | e9e7b75 | 2017-09-08 13:10:55 +0530 | [diff] [blame] | 853 | put_online_cpus(); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 854 | } |
| 855 | |
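| | /*
| | * update_cpus_allowed - apply a cpuset's cpumask to one task.
| | *
| | * Prefer the affinity @p originally requested when it is still a subset
| | * of the cpuset's requested CPUs; otherwise fall back to @new_mask
| | * (typically the cpuset's effective_cpus).
| | */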
Pavankumar Kondeti | 435eea9 | 2019-02-28 10:40:39 +0530 | [diff] [blame] | 856 | static int update_cpus_allowed(struct cpuset *cs, struct task_struct *p, |
| 857 | const struct cpumask *new_mask) |
| 858 | { |
| 859 | int ret; |
| 860 | |
| 861 | if (cpumask_subset(&p->cpus_requested, cs->cpus_requested)) { |
| 862 | ret = set_cpus_allowed_ptr(p, &p->cpus_requested); |
| 863 | if (!ret) |
| 864 | return ret; |
| 865 | } |
| 866 | |
| 867 | return set_cpus_allowed_ptr(p, new_mask); |
| 868 | } |
| 869 | |
Cliff Wickman | 58f4790 | 2008-02-07 00:14:44 -0800 | [diff] [blame] | 870 | /** |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 871 | * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset. |
| 872 | * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed |
| 873 | * |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 874 | * Iterate through each task of @cs updating its cpus_allowed to the |
| 875 | * effective cpuset's. As this function is called with cpuset_mutex held, |
| 876 | * cpuset membership stays stable. |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 877 | */ |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 878 | static void update_tasks_cpumask(struct cpuset *cs) |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 879 | { |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 880 | struct css_task_iter it; |
| 881 | struct task_struct *task; |
| 882 | |
| 883 | css_task_iter_start(&cs->css, &it); |
| 884 | while ((task = css_task_iter_next(&it))) |
Pavankumar Kondeti | 435eea9 | 2019-02-28 10:40:39 +0530 | [diff] [blame] | 885 | update_cpus_allowed(cs, task, cs->effective_cpus); |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 886 | css_task_iter_end(&it); |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 887 | } |
| 888 | |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 889 | /* |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 890 | * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree |
| 891 | * @cs: the cpuset to consider |
| 892 | * @new_cpus: temp variable for calculating new effective_cpus |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 893 | * |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 894 | * When the configured cpumask is changed, the effective cpumasks of this
| 895 | * cpuset and all its descendants need to be updated.
| 896 | *
| 897 | * On the legacy hierarchy, effective_cpus will be the same as cpus_allowed.
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 898 | * Called with cpuset_mutex held.
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 899 | */ |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 900 | static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus) |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 901 | { |
| 902 | struct cpuset *cp; |
Tejun Heo | 492eb21 | 2013-08-08 20:11:25 -0400 | [diff] [blame] | 903 | struct cgroup_subsys_state *pos_css; |
Li Zefan | 8b5f1c5 | 2014-07-09 16:47:50 +0800 | [diff] [blame] | 904 | bool need_rebuild_sched_domains = false; |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 905 | |
| 906 | rcu_read_lock(); |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 907 | cpuset_for_each_descendant_pre(cp, pos_css, cs) { |
| 908 | struct cpuset *parent = parent_cs(cp); |
| 909 | |
| 910 | cpumask_and(new_cpus, cp->cpus_allowed, parent->effective_cpus); |
| 911 | |
Li Zefan | 554b0d1 | 2014-07-09 16:47:41 +0800 | [diff] [blame] | 912 | /* |
| 913 | * If it becomes empty, inherit the effective mask of the |
| 914 | * parent, which is guaranteed to have some CPUs. |
| 915 | */ |
Tejun Heo | 9e10a13 | 2015-09-18 11:56:28 -0400 | [diff] [blame] | 916 | if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && |
| 917 | cpumask_empty(new_cpus)) |
Li Zefan | 554b0d1 | 2014-07-09 16:47:41 +0800 | [diff] [blame] | 918 | cpumask_copy(new_cpus, parent->effective_cpus); |
| 919 | |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 920 | /* Skip the whole subtree if the cpumask remains the same. */ |
| 921 | if (cpumask_equal(new_cpus, cp->effective_cpus)) { |
| 922 | pos_css = css_rightmost_descendant(pos_css); |
| 923 | continue; |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 924 | } |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 925 | |
Tejun Heo | ec903c0 | 2014-05-13 12:11:01 -0400 | [diff] [blame] | 926 | if (!css_tryget_online(&cp->css)) |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 927 | continue; |
| 928 | rcu_read_unlock(); |
| 929 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 930 | spin_lock_irq(&callback_lock); |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 931 | cpumask_copy(cp->effective_cpus, new_cpus); |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 932 | spin_unlock_irq(&callback_lock); |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 933 | |
Tejun Heo | 9e10a13 | 2015-09-18 11:56:28 -0400 | [diff] [blame] | 934 | WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 935 | !cpumask_equal(cp->cpus_allowed, cp->effective_cpus)); |
| 936 | |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 937 | update_tasks_cpumask(cp); |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 938 | |
Li Zefan | 8b5f1c5 | 2014-07-09 16:47:50 +0800 | [diff] [blame] | 939 | /* |
| 940 | * If the effective cpumask of any non-empty cpuset is changed, |
| 941 | * we need to rebuild sched domains. |
| 942 | */ |
| 943 | if (!cpumask_empty(cp->cpus_allowed) && |
| 944 | is_sched_load_balance(cp)) |
| 945 | need_rebuild_sched_domains = true; |
| 946 | |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 947 | rcu_read_lock(); |
| 948 | css_put(&cp->css); |
| 949 | } |
| 950 | rcu_read_unlock(); |
Li Zefan | 8b5f1c5 | 2014-07-09 16:47:50 +0800 | [diff] [blame] | 951 | |
| 952 | if (need_rebuild_sched_domains) |
Prateek Sood | e9e7b75 | 2017-09-08 13:10:55 +0530 | [diff] [blame] | 953 | rebuild_sched_domains_unlocked(); |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 954 | } |
| 955 | |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 956 | /** |
Cliff Wickman | 58f4790 | 2008-02-07 00:14:44 -0800 | [diff] [blame] | 957 | * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it |
| 958 | * @cs: the cpuset to consider |
Fabian Frederick | fc34ac1 | 2014-05-05 19:46:55 +0200 | [diff] [blame] | 959 | * @trialcs: trial cpuset |
Cliff Wickman | 58f4790 | 2008-02-07 00:14:44 -0800 | [diff] [blame] | 960 | * @buf: buffer of cpu numbers written to this cpuset |
| 961 | */ |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 962 | static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, |
| 963 | const char *buf) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 964 | { |
Cliff Wickman | 58f4790 | 2008-02-07 00:14:44 -0800 | [diff] [blame] | 965 | int retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 966 | |
Rusty Russell | 5f054e3 | 2012-03-29 15:38:31 +1030 | [diff] [blame] | 967 | /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */ |
Paul Jackson | 4c4d50f | 2006-08-27 01:23:51 -0700 | [diff] [blame] | 968 | if (cs == &top_cpuset) |
| 969 | return -EACCES; |
| 970 | |
David Rientjes | 6f7f02e | 2007-05-08 00:31:43 -0700 | [diff] [blame] | 971 | /* |
Roman Kiryanov | 4395469 | 2019-06-19 11:11:59 -0700 | [diff] [blame] | 972 | * An empty cpus_requested is ok only if the cpuset has no tasks. |
Paul Jackson | 020958b | 2007-10-18 23:40:21 -0700 | [diff] [blame] | 973 | * Since cpulist_parse() fails on an empty mask, we special case |
| 974 | * that parsing. The validate_change() call ensures that cpusets |
| 975 | * with tasks have cpus. |
David Rientjes | 6f7f02e | 2007-05-08 00:31:43 -0700 | [diff] [blame] | 976 | */ |
Paul Jackson | 020958b | 2007-10-18 23:40:21 -0700 | [diff] [blame] | 977 | if (!*buf) { |
Roman Kiryanov | 4395469 | 2019-06-19 11:11:59 -0700 | [diff] [blame] | 978 | cpumask_clear(trialcs->cpus_requested); |
David Rientjes | 6f7f02e | 2007-05-08 00:31:43 -0700 | [diff] [blame] | 979 | } else { |
Riley Andrews | 266ee74 | 2016-09-06 15:16:25 -0700 | [diff] [blame] | 980 | retval = cpulist_parse(buf, trialcs->cpus_requested); |
David Rientjes | 6f7f02e | 2007-05-08 00:31:43 -0700 | [diff] [blame] | 981 | if (retval < 0) |
| 982 | return retval; |
| 983 | } |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 984 | |
Roman Kiryanov | 4395469 | 2019-06-19 11:11:59 -0700 | [diff] [blame] | 985 | if (!cpumask_subset(trialcs->cpus_requested, cpu_present_mask)) |
| 986 | return -EINVAL; |
| 987 | |
| 988 | cpumask_and(trialcs->cpus_allowed, trialcs->cpus_requested, cpu_active_mask); |
| 989 | |
Paul Menage | 8707d8b | 2007-10-18 23:40:22 -0700 | [diff] [blame] | 990 | /* Nothing to do if the cpus didn't change */ |
Riley Andrews | 266ee74 | 2016-09-06 15:16:25 -0700 | [diff] [blame] | 991 | if (cpumask_equal(cs->cpus_requested, trialcs->cpus_requested)) |
Paul Menage | 8707d8b | 2007-10-18 23:40:22 -0700 | [diff] [blame] | 992 | return 0; |
Cliff Wickman | 58f4790 | 2008-02-07 00:14:44 -0800 | [diff] [blame] | 993 | |
Li Zefan | a73456f | 2013-06-05 17:15:59 +0800 | [diff] [blame] | 994 | retval = validate_change(cs, trialcs); |
| 995 | if (retval < 0) |
| 996 | return retval; |
| 997 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 998 | spin_lock_irq(&callback_lock); |
Li Zefan | 300ed6c | 2009-01-07 18:08:44 -0800 | [diff] [blame] | 999 | cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); |
Riley Andrews | 266ee74 | 2016-09-06 15:16:25 -0700 | [diff] [blame] | 1000 | cpumask_copy(cs->cpus_requested, trialcs->cpus_requested); |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1001 | spin_unlock_irq(&callback_lock); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 1002 | |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1003 | /* use trialcs->cpus_allowed as a temp variable */ |
| 1004 | update_cpumasks_hier(cs, trialcs->cpus_allowed); |
Dinakar Guniguntala | 85d7b94 | 2005-06-25 14:57:34 -0700 | [diff] [blame] | 1005 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1006 | } |
| 1007 | |
Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1008 | /* |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 1009 | * Migrate memory region from one set of nodes to another. This is |
| 1010 | * performed asynchronously as it can be called from process migration path |
| 1011 | * holding locks involved in process management. All mm migrations are |
| 1012 | * performed in the queued order and can be waited for by flushing |
| 1013 | * cpuset_migrate_mm_wq. |
Paul Jackson | e4e364e | 2006-03-31 02:30:52 -0800 | [diff] [blame] | 1014 | */ |
| 1015 | |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 1016 | struct cpuset_migrate_mm_work { |
| 1017 | struct work_struct work; |
| 1018 | struct mm_struct *mm; |
| 1019 | nodemask_t from; |
| 1020 | nodemask_t to; |
| 1021 | }; |
| 1022 | |
| 1023 | static void cpuset_migrate_mm_workfn(struct work_struct *work) |
| 1024 | { |
| 1025 | struct cpuset_migrate_mm_work *mwork = |
| 1026 | container_of(work, struct cpuset_migrate_mm_work, work); |
| 1027 | |
| 1028 | /* on a wq worker, no need to worry about %current's mems_allowed */ |
| 1029 | do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL); |
| 1030 | mmput(mwork->mm); |
| 1031 | kfree(mwork); |
| 1032 | } |
| 1033 | |
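| | /*
| | * Queue an asynchronous migration of @mm's pages from @from to @to.
| | * Consumes the caller's reference on @mm: it is dropped by the work
| | * function, or here if the work item cannot be allocated.
| | */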
Paul Jackson | e4e364e | 2006-03-31 02:30:52 -0800 | [diff] [blame] | 1034 | static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from, |
| 1035 | const nodemask_t *to) |
| 1036 | { |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 1037 | struct cpuset_migrate_mm_work *mwork; |
Paul Jackson | e4e364e | 2006-03-31 02:30:52 -0800 | [diff] [blame] | 1038 | |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 1039 | mwork = kzalloc(sizeof(*mwork), GFP_KERNEL); |
| 1040 | if (mwork) { |
| 1041 | mwork->mm = mm; |
| 1042 | mwork->from = *from; |
| 1043 | mwork->to = *to; |
| 1044 | INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn); |
| 1045 | queue_work(cpuset_migrate_mm_wq, &mwork->work); |
| 1046 | } else { |
| 1047 | mmput(mm); |
| 1048 | } |
| 1049 | } |
Paul Jackson | e4e364e | 2006-03-31 02:30:52 -0800 | [diff] [blame] | 1050 | |
Tejun Heo | 5cf1cac | 2016-04-21 19:06:48 -0400 | [diff] [blame] | 1051 | static void cpuset_post_attach(void) |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 1052 | { |
| 1053 | flush_workqueue(cpuset_migrate_mm_wq); |
Paul Jackson | e4e364e | 2006-03-31 02:30:52 -0800 | [diff] [blame] | 1054 | } |
| 1055 | |
Li Zefan | 3b6766f | 2009-04-02 16:57:51 -0700 | [diff] [blame] | 1056 | /* |
Miao Xie | 58568d2 | 2009-06-16 15:31:49 -0700 | [diff] [blame] | 1057 | * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy |
| 1058 | * @tsk: the task to change |
| 1059 | * @newmems: new nodes that the task will be set |
| 1060 | * |
| 1061 | * In order to avoid seeing no nodes if the old and new nodes are disjoint, |
| 1062 | * we structure updates as setting all new allowed nodes, then clearing newly |
| 1063 | * disallowed ones. |
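| | * For example, when mems go from {0} to {1} the task transiently sees
| | * {0,1} and then {1}; it never observes an empty mems_allowed.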
Miao Xie | 58568d2 | 2009-06-16 15:31:49 -0700 | [diff] [blame] | 1064 | */ |
| 1065 | static void cpuset_change_task_nodemask(struct task_struct *tsk, |
| 1066 | nodemask_t *newmems) |
| 1067 | { |
David Rientjes | b246272 | 2011-12-19 17:11:52 -0800 | [diff] [blame] | 1068 | bool need_loop; |
David Rientjes | 89e8a24 | 2011-11-02 13:38:39 -0700 | [diff] [blame] | 1069 | |
Miao Xie | c0ff745 | 2010-05-24 14:32:08 -0700 | [diff] [blame] | 1070 | task_lock(tsk); |
David Rientjes | b246272 | 2011-12-19 17:11:52 -0800 | [diff] [blame] | 1071 | /* |
| 1072 | * Determine if a loop is necessary if another thread is doing |
Mel Gorman | d26914d | 2014-04-03 14:47:24 -0700 | [diff] [blame] | 1073 | * read_mems_allowed_begin(). If at least one node remains unchanged and |
David Rientjes | b246272 | 2011-12-19 17:11:52 -0800 | [diff] [blame] | 1074 | * tsk does not have a mempolicy, then an empty nodemask will not be |
| 1075 | * possible when mems_allowed is larger than a word. |
| 1076 | */ |
| 1077 | need_loop = task_has_mempolicy(tsk) || |
| 1078 | !nodes_intersects(*newmems, tsk->mems_allowed); |
Mel Gorman | cc9a6c8 | 2012-03-21 16:34:11 -0700 | [diff] [blame] | 1079 | |
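| | /*
| | * The seqcount write side makes read_mems_allowed_begin() readers
| | * spin, so irqs are disabled around it: an interrupt on this CPU
| | * could otherwise enter the allocator's read side and deadlock.
| | */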
Peter Zijlstra | 0fc0287 | 2013-11-26 15:03:41 +0100 | [diff] [blame] | 1080 | if (need_loop) { |
| 1081 | local_irq_disable(); |
Mel Gorman | cc9a6c8 | 2012-03-21 16:34:11 -0700 | [diff] [blame] | 1082 | write_seqcount_begin(&tsk->mems_allowed_seq); |
Peter Zijlstra | 0fc0287 | 2013-11-26 15:03:41 +0100 | [diff] [blame] | 1083 | } |
Mel Gorman | cc9a6c8 | 2012-03-21 16:34:11 -0700 | [diff] [blame] | 1084 | |
Miao Xie | 58568d2 | 2009-06-16 15:31:49 -0700 | [diff] [blame] | 1085 | nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems); |
Miao Xie | c0ff745 | 2010-05-24 14:32:08 -0700 | [diff] [blame] | 1086 | mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1); |
| 1087 | |
Miao Xie | c0ff745 | 2010-05-24 14:32:08 -0700 | [diff] [blame] | 1088 | mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2); |
Miao Xie | 58568d2 | 2009-06-16 15:31:49 -0700 | [diff] [blame] | 1089 | tsk->mems_allowed = *newmems; |
Mel Gorman | cc9a6c8 | 2012-03-21 16:34:11 -0700 | [diff] [blame] | 1090 | |
Peter Zijlstra | 0fc0287 | 2013-11-26 15:03:41 +0100 | [diff] [blame] | 1091 | if (need_loop) { |
Mel Gorman | cc9a6c8 | 2012-03-21 16:34:11 -0700 | [diff] [blame] | 1092 | write_seqcount_end(&tsk->mems_allowed_seq); |
Peter Zijlstra | 0fc0287 | 2013-11-26 15:03:41 +0100 | [diff] [blame] | 1093 | local_irq_enable(); |
| 1094 | } |
Mel Gorman | cc9a6c8 | 2012-03-21 16:34:11 -0700 | [diff] [blame] | 1095 | |
Miao Xie | c0ff745 | 2010-05-24 14:32:08 -0700 | [diff] [blame] | 1096 | task_unlock(tsk); |
Miao Xie | 58568d2 | 2009-06-16 15:31:49 -0700 | [diff] [blame] | 1097 | } |
| 1098 | |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 1099 | static void *cpuset_being_rebound; |
| 1100 | |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1101 | /** |
| 1102 | * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset. |
| 1103 | * @cs: the cpuset in which each task's mems_allowed mask needs to be changed |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1104 | * |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1105 | * Iterate through each task of @cs updating its mems_allowed to the |
| 1106 | * effective cpuset's. As this function is called with cpuset_mutex held, |
| 1107 | * cpuset membership stays stable. |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1108 | */ |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1109 | static void update_tasks_nodemask(struct cpuset *cs) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1110 | { |
Li Zefan | 33ad801 | 2013-06-09 17:15:08 +0800 | [diff] [blame] | 1111 | static nodemask_t newmems; /* protected by cpuset_mutex */ |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1112 | struct css_task_iter it; |
| 1113 | struct task_struct *task; |
Paul Jackson | 59dac16 | 2006-01-08 01:01:52 -0800 | [diff] [blame] | 1114 | |
Lee Schermerhorn | 846a16b | 2008-04-28 02:13:09 -0700 | [diff] [blame] | 1115 | cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ |
Paul Jackson | 4225399 | 2006-01-08 01:01:59 -0800 | [diff] [blame] | 1116 | |
Li Zefan | ae1c802 | 2014-07-09 16:48:32 +0800 | [diff] [blame] | 1117 | guarantee_online_mems(cs, &newmems); |
Li Zefan | 33ad801 | 2013-06-09 17:15:08 +0800 | [diff] [blame] | 1118 | |
Paul Jackson | 4225399 | 2006-01-08 01:01:59 -0800 | [diff] [blame] | 1119 | /* |
Li Zefan | 3b6766f | 2009-04-02 16:57:51 -0700 | [diff] [blame] | 1120 | * The mpol_rebind_mm() call takes mmap_sem, which we couldn't |
| 1121 | * take while holding tasklist_lock. Forks can happen - the |
| 1122 | * mpol_dup() cpuset_being_rebound check will catch such forks, |
| 1123 | * and rebind their vma mempolicies too. Because we still hold |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1124 | * the global cpuset_mutex, we know that no other rebind effort |
Li Zefan | 3b6766f | 2009-04-02 16:57:51 -0700 | [diff] [blame] | 1125 | * will be contending for the global variable cpuset_being_rebound. |
Paul Jackson | 4225399 | 2006-01-08 01:01:59 -0800 | [diff] [blame] | 1126 | * It's ok if we rebind the same mm twice; mpol_rebind_mm() |
Paul Jackson | 04c19fa | 2006-01-08 01:02:00 -0800 | [diff] [blame] | 1127 | * is idempotent. Also migrate pages in each mm to new nodes. |
Paul Jackson | 4225399 | 2006-01-08 01:01:59 -0800 | [diff] [blame] | 1128 | */ |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1129 | css_task_iter_start(&cs->css, &it); |
| 1130 | while ((task = css_task_iter_next(&it))) { |
| 1131 | struct mm_struct *mm; |
| 1132 | bool migrate; |
| 1133 | |
| 1134 | cpuset_change_task_nodemask(task, &newmems); |
| 1135 | |
| 1136 | mm = get_task_mm(task); |
| 1137 | if (!mm) |
| 1138 | continue; |
| 1139 | |
| 1140 | migrate = is_memory_migrate(cs); |
| 1141 | |
| 1142 | mpol_rebind_mm(mm, &cs->mems_allowed); |
| 1143 | if (migrate) |
| 1144 | cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems); |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 1145 | else |
| 1146 | mmput(mm); |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1147 | } |
| 1148 | css_task_iter_end(&it); |
Paul Jackson | 4225399 | 2006-01-08 01:01:59 -0800 | [diff] [blame] | 1149 | |
Li Zefan | 33ad801 | 2013-06-09 17:15:08 +0800 | [diff] [blame] | 1150 | /* |
| 1151 | * All the tasks' nodemasks have been updated, update |
| 1152 | * cs->old_mems_allowed. |
| 1153 | */ |
| 1154 | cs->old_mems_allowed = newmems; |
| 1155 | |
Paul Menage | 2df167a | 2008-02-07 00:14:45 -0800 | [diff] [blame] | 1156 | /* We're done rebinding vmas to this cpuset's new mems_allowed. */ |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 1157 | cpuset_being_rebound = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1158 | } |
| 1159 | |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1160 | /* |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1161 | * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree |
| 1162 | * @cs: the cpuset to consider |
| 1163 | * @new_mems: a temp variable for calculating new effective_mems |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1164 | * |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1165 | * When the configured nodemask is changed, the effective nodemasks of this
| 1166 | * cpuset and all its descendants need to be updated.
| 1167 | *
| 1168 | * On the legacy hierarchy, effective_mems will be the same as mems_allowed.
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1169 | * |
| 1170 | * Called with cpuset_mutex held |
| 1171 | */ |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1172 | static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1173 | { |
| 1174 | struct cpuset *cp; |
Tejun Heo | 492eb21 | 2013-08-08 20:11:25 -0400 | [diff] [blame] | 1175 | struct cgroup_subsys_state *pos_css; |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1176 | |
| 1177 | rcu_read_lock(); |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1178 | cpuset_for_each_descendant_pre(cp, pos_css, cs) { |
| 1179 | struct cpuset *parent = parent_cs(cp); |
| 1180 | |
| 1181 | nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems); |
| 1182 | |
Li Zefan | 554b0d1 | 2014-07-09 16:47:41 +0800 | [diff] [blame] | 1183 | /* |
| 1184 | * If it becomes empty, inherit the effective mask of the |
| 1185 | * parent, which is guaranteed to have some MEMs. |
| 1186 | */ |
Tejun Heo | 9e10a13 | 2015-09-18 11:56:28 -0400 | [diff] [blame] | 1187 | if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && |
| 1188 | nodes_empty(*new_mems)) |
Li Zefan | 554b0d1 | 2014-07-09 16:47:41 +0800 | [diff] [blame] | 1189 | *new_mems = parent->effective_mems; |
| 1190 | |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1191 | /* Skip the whole subtree if the nodemask remains the same. */ |
| 1192 | if (nodes_equal(*new_mems, cp->effective_mems)) { |
| 1193 | pos_css = css_rightmost_descendant(pos_css); |
| 1194 | continue; |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1195 | } |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1196 | |
Tejun Heo | ec903c0 | 2014-05-13 12:11:01 -0400 | [diff] [blame] | 1197 | if (!css_tryget_online(&cp->css)) |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1198 | continue; |
| 1199 | rcu_read_unlock(); |
| 1200 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1201 | spin_lock_irq(&callback_lock); |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1202 | cp->effective_mems = *new_mems; |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1203 | spin_unlock_irq(&callback_lock); |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1204 | |
Tejun Heo | 9e10a13 | 2015-09-18 11:56:28 -0400 | [diff] [blame] | 1205 | WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && |
Li Zefan | a138126 | 2014-07-30 15:07:13 +0800 | [diff] [blame] | 1206 | !nodes_equal(cp->mems_allowed, cp->effective_mems)); |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1207 | |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1208 | update_tasks_nodemask(cp); |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 1209 | |
| 1210 | rcu_read_lock(); |
| 1211 | css_put(&cp->css); |
| 1212 | } |
| 1213 | rcu_read_unlock(); |
| 1214 | } |
| 1215 | |
| 1216 | /* |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1217 | * Handle user request to change the 'mems' memory placement |
| 1218 | * of a cpuset. Needs to validate the request, update the |
Miao Xie | 58568d2 | 2009-06-16 15:31:49 -0700 | [diff] [blame] | 1219 | * cpuset's mems_allowed, and for each task in the cpuset,
| 1220 | * update mems_allowed and rebind each task's mempolicy and any vma
| 1221 | * mempolicies and, if the cpuset is marked 'memory_migrate',
| 1222 | * migrate the task's pages to the new memory.
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1223 | * |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1224 | * Call with cpuset_mutex held. May take callback_lock during call. |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1225 | * Will take tasklist_lock, scan tasklist for tasks in cpuset cs, |
| 1226 | * lock each such task's mm->mmap_sem, scan its vmas and rebind
| 1227 | * their mempolicies to the cpuset's new mems_allowed.
| 1228 | */ |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1229 | static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, |
| 1230 | const char *buf) |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1231 | { |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1232 | int retval; |
| 1233 | |
| 1234 | /* |
Lai Jiangshan | 38d7bee | 2012-12-12 13:51:24 -0800 | [diff] [blame] | 1235 | * top_cpuset.mems_allowed tracks node_states[N_MEMORY];
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1236 | * it's read-only |
| 1237 | */ |
Miao Xie | 53feb29 | 2010-03-23 13:35:35 -0700 | [diff] [blame] | 1238 | if (cs == &top_cpuset) { |
| 1239 | retval = -EACCES; |
| 1240 | goto done; |
| 1241 | } |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1242 | |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1243 | /* |
| 1244 | * An empty mems_allowed is ok iff there are no tasks in the cpuset. |
| 1245 | * Since nodelist_parse() fails on an empty mask, we special case |
| 1246 | * that parsing. The validate_change() call ensures that cpusets |
| 1247 | * with tasks have memory. |
| 1248 | */ |
| 1249 | if (!*buf) { |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1250 | nodes_clear(trialcs->mems_allowed); |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1251 | } else { |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1252 | retval = nodelist_parse(buf, trialcs->mems_allowed); |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1253 | if (retval < 0) |
| 1254 | goto done; |
| 1255 | |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1256 | if (!nodes_subset(trialcs->mems_allowed, |
Li Zefan | 5d8ba82 | 2014-07-09 16:49:12 +0800 | [diff] [blame] | 1257 | top_cpuset.mems_allowed)) { |
| 1258 | retval = -EINVAL; |
Miao Xie | 53feb29 | 2010-03-23 13:35:35 -0700 | [diff] [blame] | 1259 | goto done; |
| 1260 | } |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1261 | } |
Li Zefan | 33ad801 | 2013-06-09 17:15:08 +0800 | [diff] [blame] | 1262 | |
| 1263 | if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) { |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1264 | retval = 0; /* Too easy - nothing to do */ |
| 1265 | goto done; |
| 1266 | } |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1267 | retval = validate_change(cs, trialcs); |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1268 | if (retval < 0) |
| 1269 | goto done; |
| 1270 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1271 | spin_lock_irq(&callback_lock); |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1272 | cs->mems_allowed = trialcs->mems_allowed; |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1273 | spin_unlock_irq(&callback_lock); |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1274 | |
Li Zefan | 734d451 | 2014-07-09 16:47:29 +0800 | [diff] [blame] | 1275 | /* use trialcs->mems_allowed as a temp variable */ |
Alban Crequy | 24ee3cf | 2015-08-06 16:21:05 +0200 | [diff] [blame] | 1276 | update_nodemasks_hier(cs, &trialcs->mems_allowed); |
Miao Xie | 0b2f630 | 2008-07-25 01:47:21 -0700 | [diff] [blame] | 1277 | done: |
| 1278 | return retval; |
| 1279 | } |
| 1280 | |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 1281 | int current_cpuset_is_being_rebound(void) |
| 1282 | { |
Gu Zheng | 391acf9 | 2014-06-25 09:57:18 +0800 | [diff] [blame] | 1283 | int ret; |
| 1284 | |
| 1285 | rcu_read_lock(); |
| 1286 | ret = task_cs(current) == cpuset_being_rebound; |
| 1287 | rcu_read_unlock(); |
| 1288 | |
| 1289 | return ret; |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 1290 | } |
| 1291 | |
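| | /*
| | * A val of -1 selects the default relax level; on SMP, values outside
| | * the range [-1, sched_domain_level_max) are rejected with -EINVAL.
| | */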
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 1292 | static int update_relax_domain_level(struct cpuset *cs, s64 val) |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 1293 | { |
Paul Menage | db7f47c | 2009-04-02 16:57:55 -0700 | [diff] [blame] | 1294 | #ifdef CONFIG_SMP |
Peter Zijlstra | 60495e7 | 2011-04-07 14:10:04 +0200 | [diff] [blame] | 1295 | if (val < -1 || val >= sched_domain_level_max) |
Li Zefan | 30e0e17 | 2008-05-13 10:27:17 +0800 | [diff] [blame] | 1296 | return -EINVAL; |
Paul Menage | db7f47c | 2009-04-02 16:57:55 -0700 | [diff] [blame] | 1297 | #endif |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 1298 | |
| 1299 | if (val != cs->relax_domain_level) { |
| 1300 | cs->relax_domain_level = val; |
Li Zefan | 300ed6c | 2009-01-07 18:08:44 -0800 | [diff] [blame] | 1301 | if (!cpumask_empty(cs->cpus_allowed) && |
| 1302 | is_sched_load_balance(cs)) |
Prateek Sood | e9e7b75 | 2017-09-08 13:10:55 +0530 | [diff] [blame] | 1303 | rebuild_sched_domains_unlocked(); |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 1304 | } |
| 1305 | |
| 1306 | return 0; |
| 1307 | } |
| 1308 | |
Tejun Heo | 72ec702 | 2013-08-08 20:11:26 -0400 | [diff] [blame] | 1309 | /** |
Miao Xie | 950592f | 2009-06-16 15:31:47 -0700 | [diff] [blame] | 1310 | * update_tasks_flags - update the spread flags of tasks in the cpuset. |
| 1311 | * @cs: the cpuset in which each task's spread flags need to be changed
Miao Xie | 950592f | 2009-06-16 15:31:47 -0700 | [diff] [blame] | 1312 | * |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1313 | * Iterate through each task of @cs updating its spread flags. As this |
| 1314 | * function is called with cpuset_mutex held, cpuset membership stays |
| 1315 | * stable. |
Miao Xie | 950592f | 2009-06-16 15:31:47 -0700 | [diff] [blame] | 1316 | */ |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1317 | static void update_tasks_flags(struct cpuset *cs) |
Miao Xie | 950592f | 2009-06-16 15:31:47 -0700 | [diff] [blame] | 1318 | { |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1319 | struct css_task_iter it; |
| 1320 | struct task_struct *task; |
| 1321 | |
| 1322 | css_task_iter_start(&cs->css, &it); |
| 1323 | while ((task = css_task_iter_next(&it))) |
| 1324 | cpuset_update_task_spread_flag(cs, task); |
| 1325 | css_task_iter_end(&it); |
Miao Xie | 950592f | 2009-06-16 15:31:47 -0700 | [diff] [blame] | 1326 | } |
| 1327 | |
| 1328 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1329 | * update_flag - read a 0 or a 1 in a file and update associated flag |
Paul Menage | 7860836 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 1330 | * bit: the bit to update (see cpuset_flagbits_t) |
| 1331 | * cs: the cpuset to update |
| 1332 | * turning_on: whether the flag is being set or cleared |
Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1333 | *
| | * Call with cpuset_mutex held.
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1334 | */ |
| 1335 | |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 1336 | static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, |
| 1337 | int turning_on) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1338 | { |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1339 | struct cpuset *trialcs; |
Rakib Mullick | 40b6a76 | 2008-10-18 20:28:18 -0700 | [diff] [blame] | 1340 | int balance_flag_changed; |
Miao Xie | 950592f | 2009-06-16 15:31:47 -0700 | [diff] [blame] | 1341 | int spread_flag_changed; |
Miao Xie | 950592f | 2009-06-16 15:31:47 -0700 | [diff] [blame] | 1342 | int err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1343 | |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1344 | trialcs = alloc_trial_cpuset(cs); |
| 1345 | if (!trialcs) |
| 1346 | return -ENOMEM; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1347 | |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1348 | if (turning_on) |
| 1349 | set_bit(bit, &trialcs->flags); |
| 1350 | else |
| 1351 | clear_bit(bit, &trialcs->flags); |
| 1352 | |
| 1353 | err = validate_change(cs, trialcs); |
Dinakar Guniguntala | 85d7b94 | 2005-06-25 14:57:34 -0700 | [diff] [blame] | 1354 | if (err < 0) |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1355 | goto out; |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 1356 | |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 1357 | balance_flag_changed = (is_sched_load_balance(cs) != |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1358 | is_sched_load_balance(trialcs)); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 1359 | |
Miao Xie | 950592f | 2009-06-16 15:31:47 -0700 | [diff] [blame] | 1360 | spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) |
| 1361 | || (is_spread_page(cs) != is_spread_page(trialcs))); |
| 1362 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1363 | spin_lock_irq(&callback_lock); |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1364 | cs->flags = trialcs->flags; |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1365 | spin_unlock_irq(&callback_lock); |
Dinakar Guniguntala | 85d7b94 | 2005-06-25 14:57:34 -0700 | [diff] [blame] | 1366 | |
Li Zefan | 300ed6c | 2009-01-07 18:08:44 -0800 | [diff] [blame] | 1367 | if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) |
Prateek Sood | e9e7b75 | 2017-09-08 13:10:55 +0530 | [diff] [blame] | 1368 | rebuild_sched_domains_unlocked(); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 1369 | |
Miao Xie | 950592f | 2009-06-16 15:31:47 -0700 | [diff] [blame] | 1370 | if (spread_flag_changed) |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 1371 | update_tasks_flags(cs); |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1372 | out: |
| 1373 | free_trial_cpuset(trialcs); |
| 1374 | return err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1375 | } |
| 1376 | |
Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 1377 | /* |
Adrian Bunk | 80f7228 | 2006-06-30 18:27:16 +0200 | [diff] [blame] | 1378 | * Frequency meter - How fast is some event occurring? |
Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 1379 | * |
| 1380 | * These routines manage a digitally filtered, constant time based, |
| 1381 | * event frequency meter. There are four routines: |
| 1382 | * fmeter_init() - initialize a frequency meter. |
| 1383 | * fmeter_markevent() - called each time the event happens. |
| 1384 | * fmeter_getrate() - returns the recent rate of such events. |
| 1385 | * fmeter_update() - internal routine used to update fmeter. |
| 1386 | * |
| 1387 | * A common data structure is passed to each of these routines, |
| 1388 | * which is used to keep track of the state required to manage the |
| 1389 | * frequency meter and its digital filter. |
| 1390 | * |
| 1391 | * The filter works on the number of events marked per unit time. |
| 1392 | * The filter is single-pole low-pass recursive (IIR). The time unit |
| 1393 | * is 1 second. Arithmetic is done using 32-bit integers scaled to |
| 1394 | * simulate 3 decimal digits of precision (multiplied by 1000). |
| 1395 | * |
| 1396 | * With an FM_COEF of 933, and a time base of 1 second, the filter |
| 1397 | * has a half-life of 10 seconds, meaning that if the events quit |
| 1398 | * happening, then the rate returned from the fmeter_getrate() |
| 1399 | * will be cut in half each 10 seconds, until it converges to zero. |
| 1400 | * |
| 1401 | * It is not worth doing a real infinitely recursive filter. If more |
| 1402 | * than FM_MAXTICKS ticks have elapsed since the last filter event, |
| 1403 | * just compute FM_MAXTICKS ticks worth, by which point the level |
| 1404 | * will be stable. |
| 1405 | * |
| 1406 | * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid |
| 1407 | * arithmetic overflow in the fmeter_update() routine. |
| 1408 | * |
| 1409 | * Given the simple 32 bit integer arithmetic used, this meter works |
| 1410 | * best for reporting rates between one per millisecond (msec) and |
| 1411 | * one per 32 (approx) seconds. At constant rates faster than one |
| 1412 | * per msec it maxes out at values just under 1,000,000. At constant |
| 1413 | * rates between one per msec, and one per second it will stabilize |
| 1414 | * to a value N*1000, where N is the rate of events per second. |
| 1415 | * At constant rates between one per second and one per 32 seconds, |
| 1416 | * it will be choppy, moving up on the seconds that have an event, |
| 1417 | * and then decaying until the next event. At rates slower than |
| 1418 | * about one in 32 seconds, it decays all the way back to zero between |
| 1419 | * each event. |
| 1420 | */ |
| 1421 | |
| 1422 | #define FM_COEF 933 /* coefficient for half-life of 10 secs */ |
Arnd Bergmann | d2b4365 | 2015-11-25 16:16:55 +0100 | [diff] [blame] | 1423 | #define FM_MAXTICKS ((u32)99) /* useless computing more ticks than this */ |
Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 1424 | #define FM_MAXCNT 1000000 /* limit cnt to avoid overflow */ |
| 1425 | #define FM_SCALE 1000 /* faux fixed point scale */ |
| 1426 | |
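| | /*
| | * Worked example of the arithmetic above: one idle tick decays val to
| | * (933 * val) / 1000, so ten idle ticks scale val by 0.933^10 ~= 0.5,
| | * giving the 10 second half-life. At a steady one event per second,
| | * cnt is FM_SCALE per tick, and val converges on the fixed point
| | * val = (933 * val + 67 * 1000) / 1000 = 1000, i.e. N * 1000 with N = 1.
| | */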
| 1427 | /* Initialize a frequency meter */ |
| 1428 | static void fmeter_init(struct fmeter *fmp) |
| 1429 | { |
| 1430 | fmp->cnt = 0; |
| 1431 | fmp->val = 0; |
| 1432 | fmp->time = 0; |
| 1433 | spin_lock_init(&fmp->lock); |
| 1434 | } |
| 1435 | |
| 1436 | /* Internal meter update - process cnt events and update value */ |
| 1437 | static void fmeter_update(struct fmeter *fmp) |
| 1438 | { |
Arnd Bergmann | d2b4365 | 2015-11-25 16:16:55 +0100 | [diff] [blame] | 1439 | time64_t now; |
| 1440 | u32 ticks; |
| 1441 | |
| 1442 | now = ktime_get_seconds(); |
| 1443 | ticks = now - fmp->time; |
Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 1444 | |
| 1445 | if (ticks == 0) |
| 1446 | return; |
| 1447 | |
| 1448 | ticks = min(FM_MAXTICKS, ticks); |
| 1449 | while (ticks-- > 0) |
| 1450 | fmp->val = (FM_COEF * fmp->val) / FM_SCALE; |
| 1451 | fmp->time = now; |
| 1452 | |
| 1453 | fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE; |
| 1454 | fmp->cnt = 0; |
| 1455 | } |
| 1456 | |
| 1457 | /* Process any previous ticks, then bump cnt by one (times scale). */ |
| 1458 | static void fmeter_markevent(struct fmeter *fmp) |
| 1459 | { |
| 1460 | spin_lock(&fmp->lock); |
| 1461 | fmeter_update(fmp); |
| 1462 | fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE); |
| 1463 | spin_unlock(&fmp->lock); |
| 1464 | } |
| 1465 | |
| 1466 | /* Process any previous ticks, then return current value. */ |
| 1467 | static int fmeter_getrate(struct fmeter *fmp) |
| 1468 | { |
| 1469 | int val; |
| 1470 | |
| 1471 | spin_lock(&fmp->lock); |
| 1472 | fmeter_update(fmp); |
| 1473 | val = fmp->val; |
| 1474 | spin_unlock(&fmp->lock); |
| 1475 | return val; |
| 1476 | } |
| 1477 | |
Tejun Heo | 57fce0a | 2014-02-13 06:58:41 -0500 | [diff] [blame] | 1478 | static struct cpuset *cpuset_attach_old_cs; |
| 1479 | |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1480 | /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */ |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 1481 | static int cpuset_can_attach(struct cgroup_taskset *tset) |
Ben Blum | f780bdb | 2011-05-26 16:25:19 -0700 | [diff] [blame] | 1482 | { |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 1483 | struct cgroup_subsys_state *css; |
| 1484 | struct cpuset *cs; |
Tejun Heo | bb9d97b | 2011-12-12 18:12:21 -0800 | [diff] [blame] | 1485 | struct task_struct *task; |
| 1486 | int ret; |
Ben Blum | f780bdb | 2011-05-26 16:25:19 -0700 | [diff] [blame] | 1487 | |
Tejun Heo | 57fce0a | 2014-02-13 06:58:41 -0500 | [diff] [blame] | 1488 | /* used later by cpuset_attach() */ |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 1489 | cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css)); |
| 1490 | cs = css_cs(css); |
Tejun Heo | 57fce0a | 2014-02-13 06:58:41 -0500 | [diff] [blame] | 1491 | |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1492 | mutex_lock(&cpuset_mutex); |
| 1493 | |
Tejun Heo | aa6ec29 | 2014-07-09 10:08:08 -0400 | [diff] [blame] | 1494 | /* allow moving tasks into an empty cpuset if on default hierarchy */ |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1495 | ret = -ENOSPC; |
Tejun Heo | 9e10a13 | 2015-09-18 11:56:28 -0400 | [diff] [blame] | 1496 | if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && |
Li Zefan | 88fa523 | 2013-06-09 17:16:46 +0800 | [diff] [blame] | 1497 | (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))) |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1498 | goto out_unlock; |
Ben Blum | be367d0 | 2009-09-23 15:56:31 -0700 | [diff] [blame] | 1499 | |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 1500 | cgroup_taskset_for_each(task, css, tset) { |
Juri Lelli | 7f51412 | 2014-09-19 10:22:40 +0100 | [diff] [blame] | 1501 | ret = task_can_attach(task, cs->cpus_allowed); |
| 1502 | if (ret) |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1503 | goto out_unlock; |
| 1504 | ret = security_task_setscheduler(task); |
| 1505 | if (ret) |
| 1506 | goto out_unlock; |
Tejun Heo | bb9d97b | 2011-12-12 18:12:21 -0800 | [diff] [blame] | 1507 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1508 | |
Tejun Heo | 452477f | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1509 | /* |
| 1510 | * Mark attach is in progress. This makes validate_change() fail |
| 1511 | * changes which zero cpus/mems_allowed. |
| 1512 | */ |
| 1513 | cs->attach_in_progress++; |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1514 | ret = 0; |
| 1515 | out_unlock: |
| 1516 | mutex_unlock(&cpuset_mutex); |
| 1517 | return ret; |
Tejun Heo | 4e4c9a1 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1518 | } |
| 1519 | |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 1520 | static void cpuset_cancel_attach(struct cgroup_taskset *tset) |
Tejun Heo | 452477f | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1521 | { |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 1522 | struct cgroup_subsys_state *css; |
| 1523 | struct cpuset *cs; |
| 1524 | |
| 1525 | cgroup_taskset_first(tset, &css); |
| 1526 | cs = css_cs(css); |
| 1527 | |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1528 | mutex_lock(&cpuset_mutex); |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 1529 | css_cs(css)->attach_in_progress--; |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1530 | mutex_unlock(&cpuset_mutex); |
Tejun Heo | 452477f | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1531 | } |
| 1532 | |
Tejun Heo | 4e4c9a1 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1533 | /* |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1534 | * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach() |
Tejun Heo | 4e4c9a1 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1535 | * but we can't allocate it dynamically there. Define it as a global and
| 1536 | * allocate it from cpuset_init().
| 1537 | */ |
| 1538 | static cpumask_var_t cpus_attach; |
| 1539 | |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 1540 | static void cpuset_attach(struct cgroup_taskset *tset) |
Tejun Heo | 4e4c9a1 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1541 | { |
Li Zefan | 67bd2c5 | 2013-06-05 17:15:35 +0800 | [diff] [blame] | 1542 | /* static buf protected by cpuset_mutex */ |
Tejun Heo | 4e4c9a1 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1543 | static nodemask_t cpuset_attach_nodemask_to; |
Tejun Heo | 4e4c9a1 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1544 | struct task_struct *task; |
Tejun Heo | 4530edd | 2015-09-11 15:00:19 -0400 | [diff] [blame] | 1545 | struct task_struct *leader; |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 1546 | struct cgroup_subsys_state *css; |
| 1547 | struct cpuset *cs; |
Tejun Heo | 57fce0a | 2014-02-13 06:58:41 -0500 | [diff] [blame] | 1548 | struct cpuset *oldcs = cpuset_attach_old_cs; |
Tejun Heo | 4e4c9a1 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1549 | |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 1550 | cgroup_taskset_first(tset, &css); |
| 1551 | cs = css_cs(css); |
| 1552 | |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1553 | mutex_lock(&cpuset_mutex); |
| 1554 | |
Tejun Heo | 94196f5 | 2011-12-12 18:12:22 -0800 | [diff] [blame] | 1555 | /* prepare for attach */ |
Ben Blum | f780bdb | 2011-05-26 16:25:19 -0700 | [diff] [blame] | 1556 | if (cs == &top_cpuset) |
| 1557 | cpumask_copy(cpus_attach, cpu_possible_mask); |
| 1558 | else |
Li Zefan | ae1c802 | 2014-07-09 16:48:32 +0800 | [diff] [blame] | 1559 | guarantee_online_cpus(cs, cpus_attach); |
Ben Blum | f780bdb | 2011-05-26 16:25:19 -0700 | [diff] [blame] | 1560 | |
Li Zefan | ae1c802 | 2014-07-09 16:48:32 +0800 | [diff] [blame] | 1561 | guarantee_online_mems(cs, &cpuset_attach_nodemask_to); |
Tejun Heo | 94196f5 | 2011-12-12 18:12:22 -0800 | [diff] [blame] | 1562 | |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 1563 | cgroup_taskset_for_each(task, css, tset) { |
Tejun Heo | bb9d97b | 2011-12-12 18:12:21 -0800 | [diff] [blame] | 1564 | /* |
| 1565 | * can_attach beforehand should guarantee that this doesn't |
| 1566 | * fail. TODO: have a better way to handle failure here |
| 1567 | */ |
Pavankumar Kondeti | 435eea9 | 2019-02-28 10:40:39 +0530 | [diff] [blame] | 1568 | WARN_ON_ONCE(update_cpus_allowed(cs, task, cpus_attach)); |
Tejun Heo | bb9d97b | 2011-12-12 18:12:21 -0800 | [diff] [blame] | 1569 | |
| 1570 | cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to); |
| 1571 | cpuset_update_task_spread_flag(cs, task); |
| 1572 | } |
David Quigley | 22fb52d | 2006-06-23 02:04:00 -0700 | [diff] [blame] | 1573 | |
Ben Blum | f780bdb | 2011-05-26 16:25:19 -0700 | [diff] [blame] | 1574 | /* |
Tejun Heo | 4530edd | 2015-09-11 15:00:19 -0400 | [diff] [blame] | 1575 | * Change mm for all threadgroup leaders. This is expensive and may |
| 1576 | * sleep and should be moved outside migration path proper. |
Ben Blum | f780bdb | 2011-05-26 16:25:19 -0700 | [diff] [blame] | 1577 | */ |
Li Zefan | ae1c802 | 2014-07-09 16:48:32 +0800 | [diff] [blame] | 1578 | cpuset_attach_nodemask_to = cs->effective_mems; |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 1579 | cgroup_taskset_for_each_leader(leader, css, tset) { |
Tejun Heo | 3df9ca0 | 2015-09-11 15:00:18 -0400 | [diff] [blame] | 1580 | struct mm_struct *mm = get_task_mm(leader); |
Li Zefan | f047cec | 2013-06-13 15:11:44 +0800 | [diff] [blame] | 1581 | |
Tejun Heo | 3df9ca0 | 2015-09-11 15:00:18 -0400 | [diff] [blame] | 1582 | if (mm) { |
| 1583 | mpol_rebind_mm(mm, &cpuset_attach_nodemask_to); |
| 1584 | |
| 1585 | /* |
| 1586 | * old_mems_allowed is the same with mems_allowed |
| 1587 | * here, except if this task is being moved |
| 1588 | * automatically due to hotplug. In that case |
| 1589 | * @mems_allowed has been updated and is empty, so |
| 1590 | * @old_mems_allowed is the right nodesets that we |
| 1591 | * migrate mm from. |
| 1592 | */ |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 1593 | if (is_memory_migrate(cs)) |
Tejun Heo | 3df9ca0 | 2015-09-11 15:00:18 -0400 | [diff] [blame] | 1594 | cpuset_migrate_mm(mm, &oldcs->old_mems_allowed, |
| 1595 | &cpuset_attach_nodemask_to); |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 1596 | else |
| 1597 | mmput(mm); |
Li Zefan | f047cec | 2013-06-13 15:11:44 +0800 | [diff] [blame] | 1598 | } |
Paul Jackson | 4225399 | 2006-01-08 01:01:59 -0800 | [diff] [blame] | 1599 | } |
Tejun Heo | 452477f | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1600 | |
Li Zefan | 33ad801 | 2013-06-09 17:15:08 +0800 | [diff] [blame] | 1601 | cs->old_mems_allowed = cpuset_attach_nodemask_to; |
Tejun Heo | 02bb586 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1602 | |
Tejun Heo | 452477f | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1603 | cs->attach_in_progress--; |
Li Zefan | e44193d | 2013-06-09 17:14:22 +0800 | [diff] [blame] | 1604 | if (!cs->attach_in_progress) |
| 1605 | wake_up(&cpuset_attach_wq); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1606 | |
| 1607 | mutex_unlock(&cpuset_mutex); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1608 | } |
| 1609 | |
| 1610 | /* The various types of files and directories in a cpuset file system */ |
| 1611 | |
| 1612 | typedef enum { |
Paul Jackson | 45b07ef | 2006-01-08 01:00:56 -0800 | [diff] [blame] | 1613 | FILE_MEMORY_MIGRATE, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1614 | FILE_CPULIST, |
| 1615 | FILE_MEMLIST, |
Li Zefan | afd1a8b | 2014-07-09 16:49:25 +0800 | [diff] [blame] | 1616 | FILE_EFFECTIVE_CPULIST, |
| 1617 | FILE_EFFECTIVE_MEMLIST, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1618 | FILE_CPU_EXCLUSIVE, |
| 1619 | FILE_MEM_EXCLUSIVE, |
Paul Menage | 7860836 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 1620 | FILE_MEM_HARDWALL, |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 1621 | FILE_SCHED_LOAD_BALANCE, |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 1622 | FILE_SCHED_RELAX_DOMAIN_LEVEL, |
Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 1623 | FILE_MEMORY_PRESSURE_ENABLED, |
| 1624 | FILE_MEMORY_PRESSURE, |
Paul Jackson | 825a46a | 2006-03-24 03:16:03 -0800 | [diff] [blame] | 1625 | FILE_SPREAD_PAGE, |
| 1626 | FILE_SPREAD_SLAB, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1627 | } cpuset_filetype_t; |
| 1628 | |
Tejun Heo | 182446d | 2013-08-08 20:11:24 -0400 | [diff] [blame] | 1629 | static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft, |
| 1630 | u64 val) |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 1631 | { |
Tejun Heo | 182446d | 2013-08-08 20:11:24 -0400 | [diff] [blame] | 1632 | struct cpuset *cs = css_cs(css); |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 1633 | cpuset_filetype_t type = cft->private; |
Li Zefan | a903f08 | 2013-08-13 10:05:59 +0800 | [diff] [blame] | 1634 | int retval = 0; |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 1635 | |
Prateek Sood | e9e7b75 | 2017-09-08 13:10:55 +0530 | [diff] [blame] | 1636 | get_online_cpus(); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1637 | mutex_lock(&cpuset_mutex); |
Li Zefan | a903f08 | 2013-08-13 10:05:59 +0800 | [diff] [blame] | 1638 | if (!is_cpuset_online(cs)) { |
| 1639 | retval = -ENODEV; |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1640 | goto out_unlock; |
Li Zefan | a903f08 | 2013-08-13 10:05:59 +0800 | [diff] [blame] | 1641 | } |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 1642 | |
| 1643 | switch (type) { |
| 1644 | case FILE_CPU_EXCLUSIVE: |
| 1645 | retval = update_flag(CS_CPU_EXCLUSIVE, cs, val); |
| 1646 | break; |
| 1647 | case FILE_MEM_EXCLUSIVE: |
| 1648 | retval = update_flag(CS_MEM_EXCLUSIVE, cs, val); |
| 1649 | break; |
Paul Menage | 7860836 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 1650 | case FILE_MEM_HARDWALL: |
| 1651 | retval = update_flag(CS_MEM_HARDWALL, cs, val); |
| 1652 | break; |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 1653 | case FILE_SCHED_LOAD_BALANCE: |
| 1654 | retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val); |
| 1655 | break; |
| 1656 | case FILE_MEMORY_MIGRATE: |
| 1657 | retval = update_flag(CS_MEMORY_MIGRATE, cs, val); |
| 1658 | break; |
| 1659 | case FILE_MEMORY_PRESSURE_ENABLED: |
| 1660 | cpuset_memory_pressure_enabled = !!val; |
| 1661 | break; |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 1662 | case FILE_SPREAD_PAGE: |
| 1663 | retval = update_flag(CS_SPREAD_PAGE, cs, val); |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 1664 | break; |
| 1665 | case FILE_SPREAD_SLAB: |
| 1666 | retval = update_flag(CS_SPREAD_SLAB, cs, val); |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 1667 | break; |
| 1668 | default: |
| 1669 | retval = -EINVAL; |
| 1670 | break; |
| 1671 | } |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1672 | out_unlock: |
| 1673 | mutex_unlock(&cpuset_mutex); |
Prateek Sood | e9e7b75 | 2017-09-08 13:10:55 +0530 | [diff] [blame] | 1674 | put_online_cpus(); |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 1675 | return retval; |
| 1676 | } |
| 1677 | |
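/*
 * Illustrative only: how a write reaches the handler above. Assuming a
 * cgroup v1 mount of the cpuset controller (the path and the group name
 * "mygrp" are hypothetical):
 *
 *   # echo 1 > /sys/fs/cgroup/cpuset/mygrp/cpuset.mem_hardwall
 *
 * lands in cpuset_write_u64() with cft->private == FILE_MEM_HARDWALL and
 * val == 1, which in turn calls update_flag(CS_MEM_HARDWALL, cs, 1).
 */
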
Tejun Heo | 182446d | 2013-08-08 20:11:24 -0400 | [diff] [blame] | 1678 | static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft, |
| 1679 | s64 val) |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 1680 | { |
Tejun Heo | 182446d | 2013-08-08 20:11:24 -0400 | [diff] [blame] | 1681 | struct cpuset *cs = css_cs(css); |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 1682 | cpuset_filetype_t type = cft->private; |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1683 | int retval = -ENODEV; |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 1684 | |
Prateek Sood | e9e7b75 | 2017-09-08 13:10:55 +0530 | [diff] [blame] | 1685 | get_online_cpus(); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1686 | mutex_lock(&cpuset_mutex); |
| 1687 | if (!is_cpuset_online(cs)) |
| 1688 | goto out_unlock; |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 1689 | |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 1690 | switch (type) { |
| 1691 | case FILE_SCHED_RELAX_DOMAIN_LEVEL: |
| 1692 | retval = update_relax_domain_level(cs, val); |
| 1693 | break; |
| 1694 | default: |
| 1695 | retval = -EINVAL; |
| 1696 | break; |
| 1697 | } |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1698 | out_unlock: |
| 1699 | mutex_unlock(&cpuset_mutex); |
Prateek Sood | e9e7b75 | 2017-09-08 13:10:55 +0530 | [diff] [blame] | 1700 | put_online_cpus(); |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 1701 | return retval; |
| 1702 | } |
| 1703 | |
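/*
 * Illustrative only (same hypothetical mount point as above): the one
 * signed knob handled here is the relax-domain level, e.g.
 *
 *   # echo -1 > /sys/fs/cgroup/cpuset/mygrp/cpuset.sched_relax_domain_level
 *
 * The signed type exists so that -1, the "use the system default" value,
 * can be written; see update_relax_domain_level().
 */
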
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1704 | /* |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 1705 | * Common handling for a write to a "cpus" or "mems" file. |
| 1706 | */ |
Tejun Heo | 451af50 | 2014-05-13 12:16:21 -0400 | [diff] [blame] | 1707 | static ssize_t cpuset_write_resmask(struct kernfs_open_file *of, |
| 1708 | char *buf, size_t nbytes, loff_t off) |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 1709 | { |
Tejun Heo | 451af50 | 2014-05-13 12:16:21 -0400 | [diff] [blame] | 1710 | struct cpuset *cs = css_cs(of_css(of)); |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1711 | struct cpuset *trialcs; |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1712 | int retval = -ENODEV; |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 1713 | |
Tejun Heo | 451af50 | 2014-05-13 12:16:21 -0400 | [diff] [blame] | 1714 | buf = strstrip(buf); |
| 1715 | |
Tejun Heo | 3a5a6d0 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1716 | /* |
| 1717 | * CPU or memory hotunplug may leave @cs w/o any execution |
| 1718 | * resources, in which case the hotplug code asynchronously updates |
| 1719 | * configuration and transfers all tasks to the nearest ancestor |
| 1720 | * which can execute. |
| 1721 | * |
| 1722 | * As writes to "cpus" or "mems" may restore @cs's execution |
| 1723 | * resources, wait for the previously scheduled operations before |
| 1724 | * proceeding, so that we don't end up repeatedly removing tasks added |
| 1725 | * after execution capability is restored. |
Tejun Heo | 76bb5ab | 2014-06-30 15:47:32 -0400 | [diff] [blame] | 1726 | * |
| 1727 | * cpuset_hotplug_work calls back into cgroup core via |
| 1728 | * cgroup_transfer_tasks() and waiting for it from a cgroupfs |
| 1729 | * operation like this one can lead to a deadlock through kernfs |
| 1730 | * active_ref protection. Let's break the protection. Losing the |
| 1731 | * protection is okay as we check whether @cs is online after |
| 1732 | * grabbing cpuset_mutex anyway. This only happens on the legacy |
| 1733 | * hierarchies. |
Tejun Heo | 3a5a6d0 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1734 | */ |
Tejun Heo | 76bb5ab | 2014-06-30 15:47:32 -0400 | [diff] [blame] | 1735 | css_get(&cs->css); |
| 1736 | kernfs_break_active_protection(of->kn); |
Tejun Heo | 3a5a6d0 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1737 | flush_work(&cpuset_hotplug_work); |
| 1738 | |
Prateek Sood | e9e7b75 | 2017-09-08 13:10:55 +0530 | [diff] [blame] | 1739 | get_online_cpus(); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1740 | mutex_lock(&cpuset_mutex); |
| 1741 | if (!is_cpuset_online(cs)) |
| 1742 | goto out_unlock; |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 1743 | |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1744 | trialcs = alloc_trial_cpuset(cs); |
Li Zefan | b75f38d | 2011-03-04 17:36:21 -0800 | [diff] [blame] | 1745 | if (!trialcs) { |
| 1746 | retval = -ENOMEM; |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1747 | goto out_unlock; |
Li Zefan | b75f38d | 2011-03-04 17:36:21 -0800 | [diff] [blame] | 1748 | } |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1749 | |
Tejun Heo | 451af50 | 2014-05-13 12:16:21 -0400 | [diff] [blame] | 1750 | switch (of_cft(of)->private) { |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 1751 | case FILE_CPULIST: |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1752 | retval = update_cpumask(cs, trialcs, buf); |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 1753 | break; |
| 1754 | case FILE_MEMLIST: |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1755 | retval = update_nodemask(cs, trialcs, buf); |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 1756 | break; |
| 1757 | default: |
| 1758 | retval = -EINVAL; |
| 1759 | break; |
| 1760 | } |
Li Zefan | 645fcc9 | 2009-01-07 18:08:43 -0800 | [diff] [blame] | 1761 | |
| 1762 | free_trial_cpuset(trialcs); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 1763 | out_unlock: |
| 1764 | mutex_unlock(&cpuset_mutex); |
Prateek Sood | e9e7b75 | 2017-09-08 13:10:55 +0530 | [diff] [blame] | 1765 | put_online_cpus(); |
Tejun Heo | 76bb5ab | 2014-06-30 15:47:32 -0400 | [diff] [blame] | 1766 | kernfs_unbreak_active_protection(of->kn); |
| 1767 | css_put(&cs->css); |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 1768 | flush_workqueue(cpuset_migrate_mm_wq); |
Tejun Heo | 451af50 | 2014-05-13 12:16:21 -0400 | [diff] [blame] | 1769 | return retval ?: nbytes; |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 1770 | } |
| 1771 | |
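/*
 * Illustrative only: a resmask write such as
 *
 *   # echo 0-3,6 > cpuset.cpus          (path abbreviated)
 *
 * arrives in cpuset_write_resmask() above with of_cft(of)->private ==
 * FILE_CPULIST and buf containing "0-3,6" once strstrip() has trimmed
 * the trailing newline.
 */
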
| 1772 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1773 | * These ascii lists should be read in a single call, by using a user |
| 1774 | * buffer large enough to hold the entire map. If read in smaller |
| 1775 | * chunks, there is no guarantee of atomicity. Since the display format |
| 1776 | * used (a list of ranges of sequential numbers) is variable length, |
| 1777 | * and since these maps can change value dynamically, one could read |
| 1778 | * gibberish by doing partial reads while a list was changing. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1779 | */ |
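/*
 * A minimal userspace sketch of the single-call read recommended above
 * (file name and buffer size are illustrative, not mandated here):
 *
 *   char buf[4096];
 *   int fd = open("cpuset.cpus", O_RDONLY);
 *   ssize_t n = read(fd, buf, sizeof(buf) - 1);   (one read(), whole list)
 *
 *   if (n >= 0)
 *           buf[n] = '\0';
 *   close(fd);
 */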
Tejun Heo | 2da8ca8 | 2013-12-05 12:28:04 -0500 | [diff] [blame] | 1780 | static int cpuset_common_seq_show(struct seq_file *sf, void *v) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1781 | { |
Tejun Heo | 2da8ca8 | 2013-12-05 12:28:04 -0500 | [diff] [blame] | 1782 | struct cpuset *cs = css_cs(seq_css(sf)); |
| 1783 | cpuset_filetype_t type = seq_cft(sf)->private; |
Tejun Heo | 51ffe41 | 2013-12-05 12:28:02 -0500 | [diff] [blame] | 1784 | int ret = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1785 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1786 | spin_lock_irq(&callback_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1787 | |
| 1788 | switch (type) { |
| 1789 | case FILE_CPULIST: |
Riley Andrews | 266ee74 | 2016-09-06 15:16:25 -0700 | [diff] [blame] | 1790 | seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_requested)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1791 | break; |
| 1792 | case FILE_MEMLIST: |
Tejun Heo | e8e6d97 | 2015-02-13 14:37:23 -0800 | [diff] [blame] | 1793 | seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1794 | break; |
Li Zefan | afd1a8b | 2014-07-09 16:49:25 +0800 | [diff] [blame] | 1795 | case FILE_EFFECTIVE_CPULIST: |
Tejun Heo | e8e6d97 | 2015-02-13 14:37:23 -0800 | [diff] [blame] | 1796 | seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus)); |
Li Zefan | afd1a8b | 2014-07-09 16:49:25 +0800 | [diff] [blame] | 1797 | break; |
| 1798 | case FILE_EFFECTIVE_MEMLIST: |
Tejun Heo | e8e6d97 | 2015-02-13 14:37:23 -0800 | [diff] [blame] | 1799 | seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems)); |
Li Zefan | afd1a8b | 2014-07-09 16:49:25 +0800 | [diff] [blame] | 1800 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1801 | default: |
Tejun Heo | 51ffe41 | 2013-12-05 12:28:02 -0500 | [diff] [blame] | 1802 | ret = -EINVAL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1803 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1804 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 1805 | spin_unlock_irq(&callback_lock); |
Tejun Heo | 51ffe41 | 2013-12-05 12:28:02 -0500 | [diff] [blame] | 1806 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1807 | } |
| 1808 | |
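/*
 * The "%*pbl" formats above emit the kernel's range-list notation; e.g.
 * a cpuset spanning CPUs 0-3 and 7 reads back as "0-3,7" (example
 * values).
 */
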
Tejun Heo | 182446d | 2013-08-08 20:11:24 -0400 | [diff] [blame] | 1809 | static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft) |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 1810 | { |
Tejun Heo | 182446d | 2013-08-08 20:11:24 -0400 | [diff] [blame] | 1811 | struct cpuset *cs = css_cs(css); |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 1812 | cpuset_filetype_t type = cft->private; |
| 1813 | switch (type) { |
| 1814 | case FILE_CPU_EXCLUSIVE: |
| 1815 | return is_cpu_exclusive(cs); |
| 1816 | case FILE_MEM_EXCLUSIVE: |
| 1817 | return is_mem_exclusive(cs); |
Paul Menage | 7860836 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 1818 | case FILE_MEM_HARDWALL: |
| 1819 | return is_mem_hardwall(cs); |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 1820 | case FILE_SCHED_LOAD_BALANCE: |
| 1821 | return is_sched_load_balance(cs); |
| 1822 | case FILE_MEMORY_MIGRATE: |
| 1823 | return is_memory_migrate(cs); |
| 1824 | case FILE_MEMORY_PRESSURE_ENABLED: |
| 1825 | return cpuset_memory_pressure_enabled; |
| 1826 | case FILE_MEMORY_PRESSURE: |
| 1827 | return fmeter_getrate(&cs->fmeter); |
| 1828 | case FILE_SPREAD_PAGE: |
| 1829 | return is_spread_page(cs); |
| 1830 | case FILE_SPREAD_SLAB: |
| 1831 | return is_spread_slab(cs); |
| 1832 | default: |
| 1833 | BUG(); |
| 1834 | } |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 1835 | |
| 1836 | /* Unreachable but makes gcc happy */ |
| 1837 | return 0; |
Paul Menage | 700fe1a | 2008-04-29 01:00:00 -0700 | [diff] [blame] | 1838 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1839 | |
Tejun Heo | 182446d | 2013-08-08 20:11:24 -0400 | [diff] [blame] | 1840 | static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft) |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 1841 | { |
Tejun Heo | 182446d | 2013-08-08 20:11:24 -0400 | [diff] [blame] | 1842 | struct cpuset *cs = css_cs(css); |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 1843 | cpuset_filetype_t type = cft->private; |
| 1844 | switch (type) { |
| 1845 | case FILE_SCHED_RELAX_DOMAIN_LEVEL: |
| 1846 | return cs->relax_domain_level; |
| 1847 | default: |
| 1848 | BUG(); |
| 1849 | } |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 1850 | |
| 1851 | /* Unreachable but makes gcc happy */ |
| 1852 | return 0; |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 1853 | } |
| 1854 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1855 | |
| 1856 | /* |
| 1857 | * for the common functions, 'private' gives the type of file |
| 1858 | */ |
| 1859 | |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 1860 | static struct cftype files[] = { |
| 1861 | { |
| 1862 | .name = "cpus", |
Tejun Heo | 2da8ca8 | 2013-12-05 12:28:04 -0500 | [diff] [blame] | 1863 | .seq_show = cpuset_common_seq_show, |
Tejun Heo | 451af50 | 2014-05-13 12:16:21 -0400 | [diff] [blame] | 1864 | .write = cpuset_write_resmask, |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 1865 | .max_write_len = (100U + 6 * NR_CPUS), |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 1866 | .private = FILE_CPULIST, |
| 1867 | }, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1868 | |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 1869 | { |
| 1870 | .name = "mems", |
Tejun Heo | 2da8ca8 | 2013-12-05 12:28:04 -0500 | [diff] [blame] | 1871 | .seq_show = cpuset_common_seq_show, |
Tejun Heo | 451af50 | 2014-05-13 12:16:21 -0400 | [diff] [blame] | 1872 | .write = cpuset_write_resmask, |
Paul Menage | e371239 | 2008-07-25 01:47:02 -0700 | [diff] [blame] | 1873 | .max_write_len = (100U + 6 * MAX_NUMNODES), |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 1874 | .private = FILE_MEMLIST, |
| 1875 | }, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1876 | |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 1877 | { |
Li Zefan | afd1a8b | 2014-07-09 16:49:25 +0800 | [diff] [blame] | 1878 | .name = "effective_cpus", |
| 1879 | .seq_show = cpuset_common_seq_show, |
| 1880 | .private = FILE_EFFECTIVE_CPULIST, |
| 1881 | }, |
| 1882 | |
| 1883 | { |
| 1884 | .name = "effective_mems", |
| 1885 | .seq_show = cpuset_common_seq_show, |
| 1886 | .private = FILE_EFFECTIVE_MEMLIST, |
| 1887 | }, |
| 1888 | |
| 1889 | { |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 1890 | .name = "cpu_exclusive", |
| 1891 | .read_u64 = cpuset_read_u64, |
| 1892 | .write_u64 = cpuset_write_u64, |
| 1893 | .private = FILE_CPU_EXCLUSIVE, |
| 1894 | }, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1895 | |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 1896 | { |
| 1897 | .name = "mem_exclusive", |
| 1898 | .read_u64 = cpuset_read_u64, |
| 1899 | .write_u64 = cpuset_write_u64, |
| 1900 | .private = FILE_MEM_EXCLUSIVE, |
| 1901 | }, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1902 | |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 1903 | { |
Paul Menage | 7860836 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 1904 | .name = "mem_hardwall", |
| 1905 | .read_u64 = cpuset_read_u64, |
| 1906 | .write_u64 = cpuset_write_u64, |
| 1907 | .private = FILE_MEM_HARDWALL, |
| 1908 | }, |
| 1909 | |
| 1910 | { |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 1911 | .name = "sched_load_balance", |
| 1912 | .read_u64 = cpuset_read_u64, |
| 1913 | .write_u64 = cpuset_write_u64, |
| 1914 | .private = FILE_SCHED_LOAD_BALANCE, |
| 1915 | }, |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 1916 | |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 1917 | { |
| 1918 | .name = "sched_relax_domain_level", |
Paul Menage | 5be7a47 | 2008-05-06 20:42:41 -0700 | [diff] [blame] | 1919 | .read_s64 = cpuset_read_s64, |
| 1920 | .write_s64 = cpuset_write_s64, |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 1921 | .private = FILE_SCHED_RELAX_DOMAIN_LEVEL, |
| 1922 | }, |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 1923 | |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 1924 | { |
| 1925 | .name = "memory_migrate", |
| 1926 | .read_u64 = cpuset_read_u64, |
| 1927 | .write_u64 = cpuset_write_u64, |
| 1928 | .private = FILE_MEMORY_MIGRATE, |
| 1929 | }, |
| 1930 | |
| 1931 | { |
| 1932 | .name = "memory_pressure", |
| 1933 | .read_u64 = cpuset_read_u64, |
Waiman Long | 309e4db | 2017-08-24 12:04:29 -0400 | [diff] [blame] | 1934 | .private = FILE_MEMORY_PRESSURE, |
Paul Menage | addf2c7 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 1935 | }, |
| 1936 | |
| 1937 | { |
| 1938 | .name = "memory_spread_page", |
| 1939 | .read_u64 = cpuset_read_u64, |
| 1940 | .write_u64 = cpuset_write_u64, |
| 1941 | .private = FILE_SPREAD_PAGE, |
| 1942 | }, |
| 1943 | |
| 1944 | { |
| 1945 | .name = "memory_spread_slab", |
| 1946 | .read_u64 = cpuset_read_u64, |
| 1947 | .write_u64 = cpuset_write_u64, |
| 1948 | .private = FILE_SPREAD_SLAB, |
| 1949 | }, |
Tejun Heo | 4baf6e3 | 2012-04-01 12:09:55 -0700 | [diff] [blame] | 1950 | |
| 1951 | { |
| 1952 | .name = "memory_pressure_enabled", |
| 1953 | .flags = CFTYPE_ONLY_ON_ROOT, |
| 1954 | .read_u64 = cpuset_read_u64, |
| 1955 | .write_u64 = cpuset_write_u64, |
| 1956 | .private = FILE_MEMORY_PRESSURE_ENABLED, |
| 1957 | }, |
| 1958 | |
| 1959 | { } /* terminate */ |
Paul Jackson | 45b07ef | 2006-01-08 01:00:56 -0800 | [diff] [blame] | 1960 | }; |
| 1961 | |
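/*
 * Each cftype above becomes one control file; e.g. the FILE_CPULIST
 * entry surfaces as "cpuset.cpus" on a v1 hierarchy (or plain "cpus" on
 * a legacy noprefix mount), wired to the cpuset_write_resmask() and
 * cpuset_common_seq_show() handlers defined earlier.
 */
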
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1962 | /* |
Tejun Heo | 92fb974 | 2012-11-19 08:13:38 -0800 | [diff] [blame] | 1963 | * cpuset_css_alloc - allocate a cpuset css |
Li Zefan | c9e5fe6 | 2013-06-14 11:18:27 +0800 | [diff] [blame] | 1964 | * @parent_css: css of the parent cgroup that the new cpuset will be part of |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1965 | */ |
| 1966 | |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 1967 | static struct cgroup_subsys_state * |
| 1968 | cpuset_css_alloc(struct cgroup_subsys_state *parent_css) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1969 | { |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1970 | struct cpuset *cs; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1971 | |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 1972 | if (!parent_css) |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 1973 | return &top_cpuset.css; |
Tejun Heo | 033fa1c | 2012-11-19 08:13:39 -0800 | [diff] [blame] | 1974 | |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1975 | cs = kzalloc(sizeof(*cs), GFP_KERNEL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1976 | if (!cs) |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 1977 | return ERR_PTR(-ENOMEM); |
Li Zefan | e2b9a3d | 2014-07-09 16:47:03 +0800 | [diff] [blame] | 1978 | if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL)) |
| 1979 | goto free_cs; |
Riley Andrews | 266ee74 | 2016-09-06 15:16:25 -0700 | [diff] [blame] | 1980 | if (!alloc_cpumask_var(&cs->cpus_requested, GFP_KERNEL)) |
| 1981 | goto free_allowed; |
Li Zefan | e2b9a3d | 2014-07-09 16:47:03 +0800 | [diff] [blame] | 1982 | if (!alloc_cpumask_var(&cs->effective_cpus, GFP_KERNEL)) |
Riley Andrews | 266ee74 | 2016-09-06 15:16:25 -0700 | [diff] [blame] | 1983 | goto free_requested; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1984 | |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 1985 | set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); |
Li Zefan | 300ed6c | 2009-01-07 18:08:44 -0800 | [diff] [blame] | 1986 | cpumask_clear(cs->cpus_allowed); |
Riley Andrews | 266ee74 | 2016-09-06 15:16:25 -0700 | [diff] [blame] | 1987 | cpumask_clear(cs->cpus_requested); |
Mike Travis | f9a86fc | 2008-04-04 18:11:07 -0700 | [diff] [blame] | 1988 | nodes_clear(cs->mems_allowed); |
Li Zefan | e2b9a3d | 2014-07-09 16:47:03 +0800 | [diff] [blame] | 1989 | cpumask_clear(cs->effective_cpus); |
| 1990 | nodes_clear(cs->effective_mems); |
Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 1991 | fmeter_init(&cs->fmeter); |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 1992 | cs->relax_domain_level = -1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1993 | |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 1994 | return &cs->css; |
Li Zefan | e2b9a3d | 2014-07-09 16:47:03 +0800 | [diff] [blame] | 1995 | |
Riley Andrews | 266ee74 | 2016-09-06 15:16:25 -0700 | [diff] [blame] | 1996 | free_requested: |
| 1997 | free_cpumask_var(cs->cpus_requested); |
| 1998 | free_allowed: |
Li Zefan | e2b9a3d | 2014-07-09 16:47:03 +0800 | [diff] [blame] | 1999 | free_cpumask_var(cs->cpus_allowed); |
| 2000 | free_cs: |
| 2001 | kfree(cs); |
| 2002 | return ERR_PTR(-ENOMEM); |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2003 | } |
| 2004 | |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 2005 | static int cpuset_css_online(struct cgroup_subsys_state *css) |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2006 | { |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 2007 | struct cpuset *cs = css_cs(css); |
Tejun Heo | c431069 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2008 | struct cpuset *parent = parent_cs(cs); |
Tejun Heo | ae8086c | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2009 | struct cpuset *tmp_cs; |
Tejun Heo | 492eb21 | 2013-08-08 20:11:25 -0400 | [diff] [blame] | 2010 | struct cgroup_subsys_state *pos_css; |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2011 | |
| 2012 | if (!parent) |
| 2013 | return 0; |
| 2014 | |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2015 | mutex_lock(&cpuset_mutex); |
| 2016 | |
Tejun Heo | efeb77b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2017 | set_bit(CS_ONLINE, &cs->flags); |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2018 | if (is_spread_page(parent)) |
| 2019 | set_bit(CS_SPREAD_PAGE, &cs->flags); |
| 2020 | if (is_spread_slab(parent)) |
| 2021 | set_bit(CS_SPREAD_SLAB, &cs->flags); |
| 2022 | |
Mel Gorman | 664eedd | 2014-06-04 16:10:08 -0700 | [diff] [blame] | 2023 | cpuset_inc(); |
Tejun Heo | 033fa1c | 2012-11-19 08:13:39 -0800 | [diff] [blame] | 2024 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2025 | spin_lock_irq(&callback_lock); |
Tejun Heo | 9e10a13 | 2015-09-18 11:56:28 -0400 | [diff] [blame] | 2026 | if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) { |
Li Zefan | e2b9a3d | 2014-07-09 16:47:03 +0800 | [diff] [blame] | 2027 | cpumask_copy(cs->effective_cpus, parent->effective_cpus); |
| 2028 | cs->effective_mems = parent->effective_mems; |
| 2029 | } |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2030 | spin_unlock_irq(&callback_lock); |
Li Zefan | e2b9a3d | 2014-07-09 16:47:03 +0800 | [diff] [blame] | 2031 | |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 2032 | if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags)) |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2033 | goto out_unlock; |
Tejun Heo | 033fa1c | 2012-11-19 08:13:39 -0800 | [diff] [blame] | 2034 | |
| 2035 | /* |
| 2036 | * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is |
| 2037 | * set. This flag handling is implemented in cgroup core for |
| 2038 | * historical reasons - the flag may be specified during mount. |
| 2039 | * |
| 2040 | * Currently, if any sibling cpusets have exclusive cpus or mem, we |
| 2041 | * refuse to clone the configuration - thereby refusing the task to |
| 2042 | * be entered, and as a result refusing the sys_unshare() or |
| 2043 | * clone() which initiated it. If this becomes a problem for some |
| 2044 | * users who wish to allow that scenario, then this could be |
| 2045 | * changed to grant parent->cpus_allowed-sibling_cpus_exclusive |
| 2046 | * (and likewise for mems) to the new cgroup. |
| 2047 | */ |
Tejun Heo | ae8086c | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2048 | rcu_read_lock(); |
Tejun Heo | 492eb21 | 2013-08-08 20:11:25 -0400 | [diff] [blame] | 2049 | cpuset_for_each_child(tmp_cs, pos_css, parent) { |
Tejun Heo | ae8086c | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2050 | if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) { |
| 2051 | rcu_read_unlock(); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2052 | goto out_unlock; |
Tejun Heo | ae8086c | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2053 | } |
Tejun Heo | 033fa1c | 2012-11-19 08:13:39 -0800 | [diff] [blame] | 2054 | } |
Tejun Heo | ae8086c | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2055 | rcu_read_unlock(); |
Tejun Heo | 033fa1c | 2012-11-19 08:13:39 -0800 | [diff] [blame] | 2056 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2057 | spin_lock_irq(&callback_lock); |
Tejun Heo | 033fa1c | 2012-11-19 08:13:39 -0800 | [diff] [blame] | 2058 | cs->mems_allowed = parent->mems_allowed; |
Zefan Li | 790317e | 2015-02-13 11:19:49 +0800 | [diff] [blame] | 2059 | cs->effective_mems = parent->mems_allowed; |
Tejun Heo | 033fa1c | 2012-11-19 08:13:39 -0800 | [diff] [blame] | 2060 | cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); |
Riley Andrews | 266ee74 | 2016-09-06 15:16:25 -0700 | [diff] [blame] | 2061 | cpumask_copy(cs->cpus_requested, parent->cpus_requested); |
Zefan Li | 790317e | 2015-02-13 11:19:49 +0800 | [diff] [blame] | 2062 | cpumask_copy(cs->effective_cpus, parent->cpus_allowed); |
Dan Carpenter | cea7446 | 2014-10-27 16:27:02 +0300 | [diff] [blame] | 2063 | spin_unlock_irq(&callback_lock); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2064 | out_unlock: |
| 2065 | mutex_unlock(&cpuset_mutex); |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2066 | return 0; |
| 2067 | } |
| 2068 | |
Zhao Hongjiang | 0b9e696 | 2013-07-27 11:56:53 +0800 | [diff] [blame] | 2069 | /* |
| 2070 | * If the cpuset being removed has its flag 'sched_load_balance' |
| 2071 | * enabled, then simulate turning sched_load_balance off, which |
Prateek Sood | e9e7b75 | 2017-09-08 13:10:55 +0530 | [diff] [blame] | 2072 | * will call rebuild_sched_domains_unlocked(). |
Zhao Hongjiang | 0b9e696 | 2013-07-27 11:56:53 +0800 | [diff] [blame] | 2073 | */ |
| 2074 | |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 2075 | static void cpuset_css_offline(struct cgroup_subsys_state *css) |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2076 | { |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 2077 | struct cpuset *cs = css_cs(css); |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2078 | |
Prateek Sood | e9e7b75 | 2017-09-08 13:10:55 +0530 | [diff] [blame] | 2079 | get_online_cpus(); |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2080 | mutex_lock(&cpuset_mutex); |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2081 | |
| 2082 | if (is_sched_load_balance(cs)) |
| 2083 | update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); |
| 2084 | |
Mel Gorman | 664eedd | 2014-06-04 16:10:08 -0700 | [diff] [blame] | 2085 | cpuset_dec(); |
Tejun Heo | efeb77b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2086 | clear_bit(CS_ONLINE, &cs->flags); |
Tejun Heo | c8f699b | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2087 | |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2088 | mutex_unlock(&cpuset_mutex); |
Prateek Sood | e9e7b75 | 2017-09-08 13:10:55 +0530 | [diff] [blame] | 2089 | put_online_cpus(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2090 | } |
| 2091 | |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 2092 | static void cpuset_css_free(struct cgroup_subsys_state *css) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2093 | { |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 2094 | struct cpuset *cs = css_cs(css); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2095 | |
Li Zefan | e2b9a3d | 2014-07-09 16:47:03 +0800 | [diff] [blame] | 2096 | free_cpumask_var(cs->effective_cpus); |
Li Zefan | 300ed6c | 2009-01-07 18:08:44 -0800 | [diff] [blame] | 2097 | free_cpumask_var(cs->cpus_allowed); |
Riley Andrews | 266ee74 | 2016-09-06 15:16:25 -0700 | [diff] [blame] | 2098 | free_cpumask_var(cs->cpus_requested); |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 2099 | kfree(cs); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2100 | } |
| 2101 | |
Li Zefan | 39bd0d1 | 2014-07-09 16:48:01 +0800 | [diff] [blame] | 2102 | static void cpuset_bind(struct cgroup_subsys_state *root_css) |
| 2103 | { |
| 2104 | mutex_lock(&cpuset_mutex); |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2105 | spin_lock_irq(&callback_lock); |
Li Zefan | 39bd0d1 | 2014-07-09 16:48:01 +0800 | [diff] [blame] | 2106 | |
Tejun Heo | 9e10a13 | 2015-09-18 11:56:28 -0400 | [diff] [blame] | 2107 | if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) { |
Li Zefan | 39bd0d1 | 2014-07-09 16:48:01 +0800 | [diff] [blame] | 2108 | cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask); |
| 2109 | top_cpuset.mems_allowed = node_possible_map; |
| 2110 | } else { |
| 2111 | cpumask_copy(top_cpuset.cpus_allowed, |
| 2112 | top_cpuset.effective_cpus); |
| 2113 | top_cpuset.mems_allowed = top_cpuset.effective_mems; |
| 2114 | } |
| 2115 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2116 | spin_unlock_irq(&callback_lock); |
Li Zefan | 39bd0d1 | 2014-07-09 16:48:01 +0800 | [diff] [blame] | 2117 | mutex_unlock(&cpuset_mutex); |
| 2118 | } |
| 2119 | |
Zefan Li | 06f4e948 | 2016-08-09 11:25:01 +0800 | [diff] [blame] | 2120 | /* |
| 2121 | * Make sure the new task conform to the current state of its parent, |
| 2122 | * which could have been changed by cpuset just after it inherits the |
| 2123 | * state from the parent and before it sits on the cgroup's task list. |
| 2124 | */ |
Wei Yongjun | 8a15b81 | 2016-09-16 13:02:37 +0000 | [diff] [blame] | 2125 | static void cpuset_fork(struct task_struct *task) |
Zefan Li | 06f4e948 | 2016-08-09 11:25:01 +0800 | [diff] [blame] | 2126 | { |
| 2127 | if (task_css_is_root(task, cpuset_cgrp_id)) |
| 2128 | return; |
| 2129 | |
| 2130 | set_cpus_allowed_ptr(task, ¤t->cpus_allowed); |
| 2131 | task->mems_allowed = current->mems_allowed; |
| 2132 | } |
| 2133 | |
Riley Andrews | 84c517b | 2015-06-05 18:59:29 -0700 | [diff] [blame] | 2134 | static int cpuset_allow_attach(struct cgroup_taskset *tset) |
| 2135 | { |
| 2136 | const struct cred *cred = current_cred(), *tcred; |
| 2137 | struct task_struct *task; |
| 2138 | struct cgroup_subsys_state *css; |
| 2139 | |
| 2140 | cgroup_taskset_for_each(task, css, tset) { |
| 2141 | tcred = __task_cred(task); |
| 2142 | |
| 2143 | if ((current != task) && !capable(CAP_SYS_ADMIN) && |
| 2144 | cred->euid.val != tcred->uid.val && cred->euid.val != tcred->suid.val) |
| 2145 | return -EACCES; |
| 2146 | } |
| 2147 | |
| 2148 | return 0; |
| 2149 | } |
| 2150 | |
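/*
 * Sketch of the rule enforced above: a writer may move a task other
 * than itself only if it has CAP_SYS_ADMIN or its effective uid matches
 * the target task's real or saved uid.
 */
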
Tejun Heo | 073219e | 2014-02-08 10:36:58 -0500 | [diff] [blame] | 2151 | struct cgroup_subsys cpuset_cgrp_subsys = { |
Li Zefan | 39bd0d1 | 2014-07-09 16:48:01 +0800 | [diff] [blame] | 2152 | .css_alloc = cpuset_css_alloc, |
| 2153 | .css_online = cpuset_css_online, |
| 2154 | .css_offline = cpuset_css_offline, |
| 2155 | .css_free = cpuset_css_free, |
| 2156 | .can_attach = cpuset_can_attach, |
Riley Andrews | 84c517b | 2015-06-05 18:59:29 -0700 | [diff] [blame] | 2157 | .allow_attach = cpuset_allow_attach, |
Li Zefan | 39bd0d1 | 2014-07-09 16:48:01 +0800 | [diff] [blame] | 2158 | .cancel_attach = cpuset_cancel_attach, |
| 2159 | .attach = cpuset_attach, |
Tejun Heo | 5cf1cac | 2016-04-21 19:06:48 -0400 | [diff] [blame] | 2160 | .post_attach = cpuset_post_attach, |
Li Zefan | 39bd0d1 | 2014-07-09 16:48:01 +0800 | [diff] [blame] | 2161 | .bind = cpuset_bind, |
Zefan Li | 06f4e948 | 2016-08-09 11:25:01 +0800 | [diff] [blame] | 2162 | .fork = cpuset_fork, |
Tejun Heo | 5577964 | 2014-07-15 11:05:09 -0400 | [diff] [blame] | 2163 | .legacy_cftypes = files, |
Tejun Heo | b38e42e | 2016-02-23 10:00:50 -0500 | [diff] [blame] | 2164 | .early_init = true, |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 2165 | }; |
| 2166 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2167 | /** |
| 2168 | * cpuset_init - initialize cpusets at system boot |
| 2169 | * |
| 2170 | * Description: Initialize top_cpuset and the cpuset internal file system. |
| 2171 | **/ |
| 2172 | |
| 2173 | int __init cpuset_init(void) |
| 2174 | { |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 2175 | int err = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2176 | |
Miao Xie | 58568d2 | 2009-06-16 15:31:49 -0700 | [diff] [blame] | 2177 | if (!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL)) |
| 2178 | BUG(); |
Li Zefan | e2b9a3d | 2014-07-09 16:47:03 +0800 | [diff] [blame] | 2179 | if (!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL)) |
| 2180 | BUG(); |
Riley Andrews | 266ee74 | 2016-09-06 15:16:25 -0700 | [diff] [blame] | 2181 | if (!alloc_cpumask_var(&top_cpuset.cpus_requested, GFP_KERNEL)) |
| 2182 | BUG(); |
Miao Xie | 58568d2 | 2009-06-16 15:31:49 -0700 | [diff] [blame] | 2183 | |
Li Zefan | 300ed6c | 2009-01-07 18:08:44 -0800 | [diff] [blame] | 2184 | cpumask_setall(top_cpuset.cpus_allowed); |
Riley Andrews | 266ee74 | 2016-09-06 15:16:25 -0700 | [diff] [blame] | 2185 | cpumask_setall(top_cpuset.cpus_requested); |
Mike Travis | f9a86fc | 2008-04-04 18:11:07 -0700 | [diff] [blame] | 2186 | nodes_setall(top_cpuset.mems_allowed); |
Li Zefan | e2b9a3d | 2014-07-09 16:47:03 +0800 | [diff] [blame] | 2187 | cpumask_setall(top_cpuset.effective_cpus); |
| 2188 | nodes_setall(top_cpuset.effective_mems); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2189 | |
Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 2190 | fmeter_init(&top_cpuset.fmeter); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 2191 | set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags); |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 2192 | top_cpuset.relax_domain_level = -1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2193 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2194 | err = register_filesystem(&cpuset_fs_type); |
| 2195 | if (err < 0) |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 2196 | return err; |
| 2197 | |
Li Zefan | 2341d1b | 2009-01-07 18:08:42 -0800 | [diff] [blame] | 2198 | if (!alloc_cpumask_var(&cpus_attach, GFP_KERNEL)) |
| 2199 | BUG(); |
| 2200 | |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 2201 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2202 | } |
| 2203 | |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 2204 | /* |
Max Krasnyansky | cf41714 | 2008-08-11 14:33:53 -0700 | [diff] [blame] | 2205 | * If CPU and/or memory hotplug handlers, below, unplug any CPUs |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 2206 | * or memory nodes, we need to walk over the cpuset hierarchy, |
| 2207 | * removing that CPU or node from all cpusets. If this removes the |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 2208 | * last CPU or node from a cpuset, then move the tasks in the empty |
| 2209 | * cpuset to its next-highest non-empty parent. |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 2210 | */ |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 2211 | static void remove_tasks_in_empty_cpuset(struct cpuset *cs) |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 2212 | { |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 2213 | struct cpuset *parent; |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 2214 | |
Paul Jackson | c8d9c90 | 2008-02-07 00:14:46 -0800 | [diff] [blame] | 2215 | /* |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 2216 | * Find its next-highest non-empty parent (the top cpuset |
| 2217 | * has online cpus, so it can't be empty). |
| 2218 | */ |
Tejun Heo | c431069 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2219 | parent = parent_cs(cs); |
Li Zefan | 300ed6c | 2009-01-07 18:08:44 -0800 | [diff] [blame] | 2220 | while (cpumask_empty(parent->cpus_allowed) || |
Paul Jackson | b450129 | 2008-02-07 00:14:47 -0800 | [diff] [blame] | 2221 | nodes_empty(parent->mems_allowed)) |
Tejun Heo | c431069 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2222 | parent = parent_cs(parent); |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 2223 | |
Tejun Heo | 8cc9934 | 2013-04-07 09:29:50 -0700 | [diff] [blame] | 2224 | if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) { |
Fabian Frederick | 12d3089 | 2014-05-05 19:49:00 +0200 | [diff] [blame] | 2225 | pr_err("cpuset: failed to transfer tasks out of empty cpuset "); |
Tejun Heo | e61734c | 2014-02-12 09:29:50 -0500 | [diff] [blame] | 2226 | pr_cont_cgroup_name(cs->css.cgroup); |
| 2227 | pr_cont("\n"); |
Tejun Heo | 8cc9934 | 2013-04-07 09:29:50 -0700 | [diff] [blame] | 2228 | } |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 2229 | } |
| 2230 | |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 2231 | static void |
| 2232 | hotplug_update_tasks_legacy(struct cpuset *cs, |
| 2233 | struct cpumask *new_cpus, nodemask_t *new_mems, |
| 2234 | bool cpus_updated, bool mems_updated) |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 2235 | { |
| 2236 | bool is_empty; |
| 2237 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2238 | spin_lock_irq(&callback_lock); |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 2239 | cpumask_copy(cs->cpus_allowed, new_cpus); |
| 2240 | cpumask_copy(cs->effective_cpus, new_cpus); |
| 2241 | cs->mems_allowed = *new_mems; |
| 2242 | cs->effective_mems = *new_mems; |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2243 | spin_unlock_irq(&callback_lock); |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 2244 | |
| 2245 | /* |
| 2246 | * Don't call update_tasks_cpumask() if the cpuset becomes empty, |
| 2247 | * as the tasks will be migrated to an ancestor. |
| 2248 | */ |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 2249 | if (cpus_updated && !cpumask_empty(cs->cpus_allowed)) |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 2250 | update_tasks_cpumask(cs); |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 2251 | if (mems_updated && !nodes_empty(cs->mems_allowed)) |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 2252 | update_tasks_nodemask(cs); |
| 2253 | |
| 2254 | is_empty = cpumask_empty(cs->cpus_allowed) || |
| 2255 | nodes_empty(cs->mems_allowed); |
| 2256 | |
| 2257 | mutex_unlock(&cpuset_mutex); |
| 2258 | |
| 2259 | /* |
| 2260 | * Move tasks to the nearest ancestor with execution resources. |
| 2261 | * This is a full cgroup operation which will also call back into |
| 2262 | * cpuset. Should be done outside any lock. |
| 2263 | */ |
| 2264 | if (is_empty) |
| 2265 | remove_tasks_in_empty_cpuset(cs); |
| 2266 | |
| 2267 | mutex_lock(&cpuset_mutex); |
| 2268 | } |
| 2269 | |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 2270 | static void |
| 2271 | hotplug_update_tasks(struct cpuset *cs, |
| 2272 | struct cpumask *new_cpus, nodemask_t *new_mems, |
| 2273 | bool cpus_updated, bool mems_updated) |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 2274 | { |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 2275 | if (cpumask_empty(new_cpus)) |
| 2276 | cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus); |
| 2277 | if (nodes_empty(*new_mems)) |
| 2278 | *new_mems = parent_cs(cs)->effective_mems; |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 2279 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2280 | spin_lock_irq(&callback_lock); |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 2281 | cpumask_copy(cs->effective_cpus, new_cpus); |
| 2282 | cs->effective_mems = *new_mems; |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2283 | spin_unlock_irq(&callback_lock); |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 2284 | |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 2285 | if (cpus_updated) |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 2286 | update_tasks_cpumask(cs); |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 2287 | if (mems_updated) |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 2288 | update_tasks_nodemask(cs); |
| 2289 | } |
| 2290 | |
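/*
 * Contrast with the legacy variant above: on the default hierarchy an
 * emptied cpuset keeps its tasks and falls back to the parent's
 * effective masks, rather than migrating everything to an ancestor.
 */
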
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2291 | /** |
Li Zefan | 388afd8 | 2013-06-09 17:14:47 +0800 | [diff] [blame] | 2292 | * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2293 | * @cs: cpuset in interest |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 2294 | * |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2295 | * Compare @cs's cpu and mem masks against top_cpuset and if some have gone |
| 2296 | * offline, update @cs accordingly. If @cs ends up with no CPU or memory, |
| 2297 | * all its tasks are moved to the nearest ancestor with both resources. |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 2298 | */ |
Li Zefan | 388afd8 | 2013-06-09 17:14:47 +0800 | [diff] [blame] | 2299 | static void cpuset_hotplug_update_tasks(struct cpuset *cs) |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 2300 | { |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 2301 | static cpumask_t new_cpus; |
| 2302 | static nodemask_t new_mems; |
| 2303 | bool cpus_updated; |
| 2304 | bool mems_updated; |
Li Zefan | e44193d | 2013-06-09 17:14:22 +0800 | [diff] [blame] | 2305 | retry: |
| 2306 | wait_event(cpuset_attach_wq, cs->attach_in_progress == 0); |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 2307 | |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2308 | mutex_lock(&cpuset_mutex); |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 2309 | |
Li Zefan | e44193d | 2013-06-09 17:14:22 +0800 | [diff] [blame] | 2310 | /* |
| 2311 | * We have raced with task attaching. We wait until attaching |
| 2312 | * is finished, so we won't attach a task to an empty cpuset. |
| 2313 | */ |
| 2314 | if (cs->attach_in_progress) { |
| 2315 | mutex_unlock(&cpuset_mutex); |
| 2316 | goto retry; |
| 2317 | } |
| 2318 | |
Riley Andrews | 266ee74 | 2016-09-06 15:16:25 -0700 | [diff] [blame] | 2319 | cpumask_and(&new_cpus, cs->cpus_requested, parent_cs(cs)->effective_cpus); |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 2320 | nodes_and(new_mems, cs->mems_allowed, parent_cs(cs)->effective_mems); |
Paul Jackson | b450129 | 2008-02-07 00:14:47 -0800 | [diff] [blame] | 2321 | |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 2322 | cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus); |
| 2323 | mems_updated = !nodes_equal(new_mems, cs->effective_mems); |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 2324 | |
Tejun Heo | 9e10a13 | 2015-09-18 11:56:28 -0400 | [diff] [blame] | 2325 | if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 2326 | hotplug_update_tasks(cs, &new_cpus, &new_mems, |
| 2327 | cpus_updated, mems_updated); |
Li Zefan | 390a36a | 2014-07-09 16:48:54 +0800 | [diff] [blame] | 2328 | else |
Li Zefan | be4c9dd | 2014-07-09 16:49:04 +0800 | [diff] [blame] | 2329 | hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems, |
| 2330 | cpus_updated, mems_updated); |
Tejun Heo | 8d03394 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2331 | |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2332 | mutex_unlock(&cpuset_mutex); |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2333 | } |
Paul Jackson | b450129 | 2008-02-07 00:14:47 -0800 | [diff] [blame] | 2334 | |
Peter Zijlstra | ba15518 | 2017-09-07 11:13:38 +0200 | [diff] [blame] | 2335 | static bool force_rebuild; |
| 2336 | |
| 2337 | void cpuset_force_rebuild(void) |
| 2338 | { |
| 2339 | force_rebuild = true; |
| 2340 | } |
| 2341 | |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2342 | /** |
Tejun Heo | 3a5a6d0 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2343 | * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2344 | * |
| 2345 | * This function is called after either CPU or memory configuration has |
| 2346 | * changed and updates cpuset accordingly. The top_cpuset is always |
| 2347 | * synchronized to cpu_active_mask and N_MEMORY, which is necessary in |
| 2348 | * order to make cpusets transparent (of no effect) on systems that are |
| 2349 | * actively using CPU hotplug but making no active use of cpusets. |
| 2350 | * |
| 2351 | * Non-root cpusets are only affected by offlining. If any CPUs or memory |
Li Zefan | 388afd8 | 2013-06-09 17:14:47 +0800 | [diff] [blame] | 2352 | * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on |
| 2353 | * all descendants. |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2354 | * |
| 2355 | * Note that CPU offlining during suspend is ignored. We don't modify |
| 2356 | * cpusets across suspend/resume cycles at all. |
| 2357 | */ |
Tejun Heo | 3a5a6d0 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2358 | static void cpuset_hotplug_workfn(struct work_struct *work) |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2359 | { |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 2360 | static cpumask_t new_cpus; |
| 2361 | static nodemask_t new_mems; |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2362 | bool cpus_updated, mems_updated; |
Tejun Heo | 9e10a13 | 2015-09-18 11:56:28 -0400 | [diff] [blame] | 2363 | bool on_dfl = cgroup_subsys_on_dfl(cpuset_cgrp_subsys); |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 2364 | |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2365 | mutex_lock(&cpuset_mutex); |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 2366 | |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2367 | /* fetch the available cpus/mems and find out which changed how */ |
| 2368 | cpumask_copy(&new_cpus, cpu_active_mask); |
| 2369 | new_mems = node_states[N_MEMORY]; |
Cliff Wickman | 956db3c | 2008-02-07 00:14:43 -0800 | [diff] [blame] | 2370 | |
Li Zefan | 7e88291 | 2014-07-09 16:48:42 +0800 | [diff] [blame] | 2371 | cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus); |
| 2372 | mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems); |
Srivatsa S. Bhat | 7ddf96b | 2012-05-24 19:46:55 +0530 | [diff] [blame] | 2373 | |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2374 | /* synchronize cpus_allowed to cpu_active_mask */ |
| 2375 | if (cpus_updated) { |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2376 | spin_lock_irq(&callback_lock); |
Li Zefan | 7e88291 | 2014-07-09 16:48:42 +0800 | [diff] [blame] | 2377 | if (!on_dfl) |
| 2378 | cpumask_copy(top_cpuset.cpus_allowed, &new_cpus); |
Li Zefan | 1344ab9 | 2014-07-09 16:47:16 +0800 | [diff] [blame] | 2379 | cpumask_copy(top_cpuset.effective_cpus, &new_cpus); |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2380 | spin_unlock_irq(&callback_lock); |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2381 | /* we don't mess with cpumasks of tasks in top_cpuset */ |
| 2382 | } |
Srivatsa S. Bhat | 7ddf96b | 2012-05-24 19:46:55 +0530 | [diff] [blame] | 2383 | |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2384 | /* synchronize mems_allowed to N_MEMORY */ |
| 2385 | if (mems_updated) { |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2386 | spin_lock_irq(&callback_lock); |
Li Zefan | 7e88291 | 2014-07-09 16:48:42 +0800 | [diff] [blame] | 2387 | if (!on_dfl) |
| 2388 | top_cpuset.mems_allowed = new_mems; |
Li Zefan | 1344ab9 | 2014-07-09 16:47:16 +0800 | [diff] [blame] | 2389 | top_cpuset.effective_mems = new_mems; |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2390 | spin_unlock_irq(&callback_lock); |
Tejun Heo | d66393e | 2014-02-13 06:58:40 -0500 | [diff] [blame] | 2391 | update_tasks_nodemask(&top_cpuset); |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2392 | } |
| 2393 | |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2394 | mutex_unlock(&cpuset_mutex); |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2395 | |
Li Zefan | 5c5cc62 | 2013-06-09 17:16:29 +0800 | [diff] [blame] | 2396 | /* if cpus or mems changed, we need to propagate to descendants */ |
| 2397 | if (cpus_updated || mems_updated) { |
Lai Jiangshan | 38d7bee | 2012-12-12 13:51:24 -0800 | [diff] [blame] | 2398 | struct cpuset *cs; |
Tejun Heo | 492eb21 | 2013-08-08 20:11:25 -0400 | [diff] [blame] | 2399 | struct cgroup_subsys_state *pos_css; |
Paul Jackson | b450129 | 2008-02-07 00:14:47 -0800 | [diff] [blame] | 2400 | |
Paul Jackson | b450129 | 2008-02-07 00:14:47 -0800 | [diff] [blame] | 2401 | rcu_read_lock(); |
Tejun Heo | 492eb21 | 2013-08-08 20:11:25 -0400 | [diff] [blame] | 2402 | cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { |
Tejun Heo | ec903c0 | 2014-05-13 12:11:01 -0400 | [diff] [blame] | 2403 | if (cs == &top_cpuset || !css_tryget_online(&cs->css)) |
Li Zefan | 388afd8 | 2013-06-09 17:14:47 +0800 | [diff] [blame] | 2404 | continue; |
| 2405 | rcu_read_unlock(); |
| 2406 | |
| 2407 | cpuset_hotplug_update_tasks(cs); |
| 2408 | |
| 2409 | rcu_read_lock(); |
| 2410 | css_put(&cs->css); |
| 2411 | } |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2412 | rcu_read_unlock(); |
| 2413 | } |
Tejun Heo | 8d03394 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2414 | |
Tejun Heo | deb7aa3 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2415 | /* rebuild sched domains if cpus_allowed has changed */ |
Peter Zijlstra | ba15518 | 2017-09-07 11:13:38 +0200 | [diff] [blame] | 2416 | if (cpus_updated || force_rebuild) { |
| 2417 | force_rebuild = false; |
Li Zhong | e0e80a0 | 2013-04-27 06:52:43 -0700 | [diff] [blame] | 2418 | rebuild_sched_domains(); |
Peter Zijlstra | ba15518 | 2017-09-07 11:13:38 +0200 | [diff] [blame] | 2419 | } |
Paul Jackson | b1aac8b | 2006-09-29 02:01:17 -0700 | [diff] [blame] | 2420 | } |
| 2421 | |
Srivatsa S. Bhat | 7ddf96b | 2012-05-24 19:46:55 +0530 | [diff] [blame] | 2422 | void cpuset_update_active_cpus(bool cpu_online) |
Paul Jackson | 4c4d50f | 2006-08-27 01:23:51 -0700 | [diff] [blame] | 2423 | { |
Tejun Heo | 3a5a6d0 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2424 | /* |
| 2425 | * We're inside the cpu hotplug critical region which usually nests |
| 2426 | * inside cgroup synchronization. Bounce actual hotplug processing |
| 2427 | * to a work item to avoid reverse locking order. |
| 2428 | * |
| 2429 | * We still need to do partition_sched_domains() synchronously; |
| 2430 | * otherwise, the scheduler will get confused and put tasks to the |
| 2431 | * dead CPU. Fall back to the default single domain. |
| 2432 | * cpuset_hotplug_workfn() will rebuild it as necessary. |
| 2433 | */ |
| 2434 | partition_sched_domains(1, NULL, NULL); |
| 2435 | schedule_work(&cpuset_hotplug_work); |
Paul Jackson | 4c4d50f | 2006-08-27 01:23:51 -0700 | [diff] [blame] | 2436 | } |
Paul Jackson | 4c4d50f | 2006-08-27 01:23:51 -0700 | [diff] [blame] | 2437 | |
Peter Zijlstra | ba15518 | 2017-09-07 11:13:38 +0200 | [diff] [blame] | 2438 | void cpuset_wait_for_hotplug(void) |
| 2439 | { |
| 2440 | flush_work(&cpuset_hotplug_work); |
| 2441 | } |
| 2442 | |
Paul Jackson | 38837fc | 2006-09-29 02:01:16 -0700 | [diff] [blame] | 2443 | /* |
Lai Jiangshan | 38d7bee | 2012-12-12 13:51:24 -0800 | [diff] [blame] | 2444 | * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY]. |
| 2445 | * Call this routine anytime after node_states[N_MEMORY] changes. |
Srivatsa S. Bhat | a1cd2b1 | 2012-05-24 19:47:03 +0530 | [diff] [blame] | 2446 | * See cpuset_update_active_cpus() for CPU hotplug handling. |
Paul Jackson | 38837fc | 2006-09-29 02:01:16 -0700 | [diff] [blame] | 2447 | */ |
Miao Xie | f481891 | 2008-11-19 15:36:30 -0800 | [diff] [blame] | 2448 | static int cpuset_track_online_nodes(struct notifier_block *self, |
| 2449 | unsigned long action, void *arg) |
Paul Jackson | 38837fc | 2006-09-29 02:01:16 -0700 | [diff] [blame] | 2450 | { |
Tejun Heo | 3a5a6d0 | 2013-01-07 08:51:07 -0800 | [diff] [blame] | 2451 | schedule_work(&cpuset_hotplug_work); |
Miao Xie | f481891 | 2008-11-19 15:36:30 -0800 | [diff] [blame] | 2452 | return NOTIFY_OK; |
Paul Jackson | 38837fc | 2006-09-29 02:01:16 -0700 | [diff] [blame] | 2453 | } |
Andrew Morton | d8f10cb | 2013-04-29 15:08:08 -0700 | [diff] [blame] | 2454 | |
| 2455 | static struct notifier_block cpuset_track_online_nodes_nb = { |
| 2456 | .notifier_call = cpuset_track_online_nodes, |
| 2457 | .priority = 10, /* ??! */ |
| 2458 | }; |
Paul Jackson | 38837fc | 2006-09-29 02:01:16 -0700 | [diff] [blame] | 2459 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2460 | /** |
| 2461 | * cpuset_init_smp - initialize cpus_allowed |
| 2462 | * |
| 2463 | * Description: Finish top cpuset after cpu, node maps are initialized |
Andrew Morton | d8f10cb | 2013-04-29 15:08:08 -0700 | [diff] [blame] | 2464 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2465 | void __init cpuset_init_smp(void) |
| 2466 | { |
Peter Zijlstra | 6ad4c18 | 2009-11-25 13:31:39 +0100 | [diff] [blame] | 2467 | cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask); |
Lai Jiangshan | 38d7bee | 2012-12-12 13:51:24 -0800 | [diff] [blame] | 2468 | top_cpuset.mems_allowed = node_states[N_MEMORY]; |
Li Zefan | 33ad801 | 2013-06-09 17:15:08 +0800 | [diff] [blame] | 2469 | top_cpuset.old_mems_allowed = top_cpuset.mems_allowed; |
Paul Jackson | 4c4d50f | 2006-08-27 01:23:51 -0700 | [diff] [blame] | 2470 | |
Li Zefan | e2b9a3d | 2014-07-09 16:47:03 +0800 | [diff] [blame] | 2471 | cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask); |
| 2472 | top_cpuset.effective_mems = node_states[N_MEMORY]; |
| 2473 | |
Andrew Morton | d8f10cb | 2013-04-29 15:08:08 -0700 | [diff] [blame] | 2474 | register_hotmemory_notifier(&cpuset_track_online_nodes_nb); |
Tejun Heo | e93ad19 | 2016-01-19 12:18:41 -0500 | [diff] [blame] | 2475 | |
| 2476 | cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0); |
| 2477 | BUG_ON(!cpuset_migrate_mm_wq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2478 | } |
| 2479 | |
| 2480 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2481 | * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset. |
| 2482 | * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed. |
Li Zefan | 6af866a | 2009-01-07 18:08:45 -0800 | [diff] [blame] | 2483 | * @pmask: pointer to struct cpumask variable to receive cpus_allowed set. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2484 | * |
Li Zefan | 300ed6c | 2009-01-07 18:08:44 -0800 | [diff] [blame] | 2485 | * Description: Returns the cpumask_var_t cpus_allowed of the cpuset |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2486 | * attached to the specified @tsk. Guaranteed to return some non-empty |
Rusty Russell | 5f054e3 | 2012-03-29 15:38:31 +1030 | [diff] [blame] | 2487 | * subset of cpu_online_mask, even if this means going outside the |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2488 | * task's cpuset. |
| 2489 | */ |
| 2490 | |
Li Zefan | 6af866a | 2009-01-07 18:08:45 -0800 | [diff] [blame] | 2491 | void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2492 | { |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2493 | unsigned long flags; |
| 2494 | |
| 2495 | spin_lock_irqsave(&callback_lock, flags); |
Li Zefan | b8dadcb | 2014-03-03 17:28:36 -0500 | [diff] [blame] | 2496 | rcu_read_lock(); |
Li Zefan | ae1c802 | 2014-07-09 16:48:32 +0800 | [diff] [blame] | 2497 | guarantee_online_cpus(task_cs(tsk), pmask); |
Li Zefan | b8dadcb | 2014-03-03 17:28:36 -0500 | [diff] [blame] | 2498 | rcu_read_unlock(); |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2499 | spin_unlock_irqrestore(&callback_lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2500 | } |
| 2501 | |
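/*
 * Illustrative sketch only (not part of this file's API): one way a
 * hypothetical in-kernel caller could use cpuset_cpus_allowed(). The
 * helper name example_show_allowed_cpus() is invented for this example.
 */
static void __maybe_unused example_show_allowed_cpus(struct task_struct *tsk)
{
	cpumask_var_t mask;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return;

	/* Fills @mask; guaranteed a non-empty subset of cpu_online_mask. */
	cpuset_cpus_allowed(tsk, mask);
	pr_debug("pid %d may use CPUs %*pbl\n",
		 task_pid_nr(tsk), cpumask_pr_args(mask));
	free_cpumask_var(mask);
}
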
Peter Zijlstra | 2baab4e | 2012-03-20 15:57:01 +0100 | [diff] [blame] | 2502 | void cpuset_cpus_allowed_fallback(struct task_struct *tsk) |
Oleg Nesterov | 9084bb8 | 2010-03-15 10:10:27 +0100 | [diff] [blame] | 2503 | { |
Oleg Nesterov | 9084bb8 | 2010-03-15 10:10:27 +0100 | [diff] [blame] | 2504 | rcu_read_lock(); |
Li Zefan | ae1c802 | 2014-07-09 16:48:32 +0800 | [diff] [blame] | 2505 | do_set_cpus_allowed(tsk, task_cs(tsk)->effective_cpus); |
Oleg Nesterov | 9084bb8 | 2010-03-15 10:10:27 +0100 | [diff] [blame] | 2506 | rcu_read_unlock(); |
| 2507 | |
| 2508 | /* |
| 2509 | * We own tsk->cpus_allowed; nobody can change it under us. |
| 2510 | * |
| 2511 | * But we used cs and cs->cpus_allowed locklessly and thus can |
| 2512 | * race with cgroup_attach_task() or update_cpumask() and get |
| 2513 | * the wrong tsk->cpus_allowed. However, both cases imply the |
| 2514 | * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr() |
| 2515 | * which takes task_rq_lock(). |
| 2516 | * |
| 2517 | * If we are called after it dropped the lock we must see all |
| 2518 | * changes in task_cs()->cpus_allowed. Otherwise we can temporarily |
| 2519 | * set any mask even if it is not right from task_cs() pov, |
| 2520 | * the pending set_cpus_allowed_ptr() will fix things. |
Peter Zijlstra | 2baab4e | 2012-03-20 15:57:01 +0100 | [diff] [blame] | 2521 | * |
| 2522 | * select_fallback_rq() will fix things up and set cpu_possible_mask |
| 2523 | * if required. |
Oleg Nesterov | 9084bb8 | 2010-03-15 10:10:27 +0100 | [diff] [blame] | 2524 | */ |
Oleg Nesterov | 9084bb8 | 2010-03-15 10:10:27 +0100 | [diff] [blame] | 2525 | } |
| 2526 | |
Rasmus Villemoes | 8f4ab07 | 2015-02-12 15:00:16 -0800 | [diff] [blame] | 2527 | void __init cpuset_init_current_mems_allowed(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2528 | { |
Mike Travis | f9a86fc | 2008-04-04 18:11:07 -0700 | [diff] [blame] | 2529 | nodes_setall(current->mems_allowed); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2530 | } |
| 2531 | |
Randy Dunlap | d9fd8a6 | 2005-07-27 11:45:11 -0700 | [diff] [blame] | 2532 | /** |
Paul Jackson | 909d75a | 2006-01-08 01:01:55 -0800 | [diff] [blame] | 2533 | * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset. |
| 2534 | * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed. |
| 2535 | * |
| 2536 | * Description: Returns the nodemask_t mems_allowed of the cpuset |
| 2537 | * attached to the specified @tsk. Guaranteed to return some non-empty |
Lai Jiangshan | 38d7bee | 2012-12-12 13:51:24 -0800 | [diff] [blame] | 2538 | * subset of node_states[N_MEMORY], even if this means going outside the |
Paul Jackson | 909d75a | 2006-01-08 01:01:55 -0800 | [diff] [blame] | 2539 | * task's cpuset. |
| 2540 | */ |
| 2541 | |
| 2542 | nodemask_t cpuset_mems_allowed(struct task_struct *tsk) |
| 2543 | { |
| 2544 | nodemask_t mask; |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2545 | unsigned long flags; |
Paul Jackson | 909d75a | 2006-01-08 01:01:55 -0800 | [diff] [blame] | 2546 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2547 | spin_lock_irqsave(&callback_lock, flags); |
Li Zefan | b8dadcb | 2014-03-03 17:28:36 -0500 | [diff] [blame] | 2548 | rcu_read_lock(); |
Li Zefan | ae1c802 | 2014-07-09 16:48:32 +0800 | [diff] [blame] | 2549 | guarantee_online_mems(task_cs(tsk), &mask); |
Li Zefan | b8dadcb | 2014-03-03 17:28:36 -0500 | [diff] [blame] | 2550 | rcu_read_unlock(); |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2551 | spin_unlock_irqrestore(&callback_lock, flags); |
Paul Jackson | 909d75a | 2006-01-08 01:01:55 -0800 | [diff] [blame] | 2552 | |
| 2553 | return mask; |
| 2554 | } |
| 2555 | |
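/*
 * Illustrative sketch only: a hypothetical caller checking whether a
 * given node is usable by @tsk's cpuset. example_task_may_use_node()
 * is an invented name, not a kernel symbol.
 */
static bool __maybe_unused example_task_may_use_node(struct task_struct *tsk,
						     int nid)
{
	nodemask_t allowed = cpuset_mems_allowed(tsk);

	return node_isset(nid, allowed);
}
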
| 2556 | /** |
Mel Gorman | 19770b3 | 2008-04-28 02:12:18 -0700 | [diff] [blame] | 2557 | * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed |
| 2558 | * @nodemask: the nodemask to be checked |
Randy Dunlap | d9fd8a6 | 2005-07-27 11:45:11 -0700 | [diff] [blame] | 2559 | * |
Mel Gorman | 19770b3 | 2008-04-28 02:12:18 -0700 | [diff] [blame] | 2560 | * Are any of the nodes in the nodemask allowed in current->mems_allowed? |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2561 | */ |
Mel Gorman | 19770b3 | 2008-04-28 02:12:18 -0700 | [diff] [blame] | 2562 | int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2563 | { |
Mel Gorman | 19770b3 | 2008-04-28 02:12:18 -0700 | [diff] [blame] | 2564 | return nodes_intersects(*nodemask, current->mems_allowed); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2565 | } |
| 2566 | |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 2567 | /* |
Paul Menage | 7860836 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2568 | * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or |
| 2569 | * mem_hardwall ancestor to the specified cpuset. Call holding |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2570 | * callback_lock. If no ancestor is mem_exclusive or mem_hardwall |
Paul Menage | 7860836 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2571 | * (an unusual configuration), then returns the root cpuset. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2572 | */ |
Tejun Heo | c9710d8 | 2013-08-08 20:11:22 -0400 | [diff] [blame] | 2573 | static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2574 | { |
Tejun Heo | c431069 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2575 | while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs)) |
| 2576 | cs = parent_cs(cs); |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 2577 | return cs; |
| 2578 | } |
| 2579 | |
| 2580 | /** |
Vladimir Davydov | 344736f | 2014-10-20 15:50:30 +0400 | [diff] [blame] | 2581 | * __cpuset_node_allowed - Can we allocate on a memory node? |
David Rientjes | a1bc5a4 | 2009-04-02 16:57:54 -0700 | [diff] [blame] | 2582 | * @node: is this an allowed node? |
Paul Jackson | 02a0e53 | 2006-12-13 00:34:25 -0800 | [diff] [blame] | 2583 | * @gfp_mask: memory allocation flags |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 2584 | * |
David Rientjes | 6e276d2 | 2015-04-14 15:47:01 -0700 | [diff] [blame] | 2585 | * If we're in interrupt, yes, we can always allocate. If @node is set in |
| 2586 | * current's mems_allowed, yes. If it's not a __GFP_HARDWALL request and this |
| 2587 | * node is set in the nearest hardwalled cpuset ancestor to current's cpuset, |
| 2588 | * yes. If current has access to memory reserves due to TIF_MEMDIE, yes. |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 2589 | * Otherwise, no. |
| 2590 | * |
| 2591 | * GFP_USER allocations are marked with the __GFP_HARDWALL bit, |
David Rientjes | c596d9f | 2007-05-06 14:49:32 -0700 | [diff] [blame] | 2592 | * and do not allow allocations outside the current task's cpuset |
| 2593 | * unless the task has been OOM killed and is marked TIF_MEMDIE. |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 2594 | * GFP_KERNEL allocations are not so marked, so can escape to the |
Paul Menage | 7860836 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2595 | * nearest enclosing hardwalled ancestor cpuset. |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 2596 | * |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2597 | * Scanning up parent cpusets requires callback_lock. The |
Paul Jackson | 02a0e53 | 2006-12-13 00:34:25 -0800 | [diff] [blame] | 2598 | * __alloc_pages() routine only calls here with __GFP_HARDWALL bit |
| 2599 | * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the |
| 2600 | * current task's mems_allowed came up empty on the first pass over |
| 2601 | * the zonelist. So only GFP_KERNEL allocations, if all nodes in the |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2602 | * cpuset are short of memory, might require taking the callback_lock. |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 2603 | * |
Paul Jackson | 36be57f | 2006-05-20 15:00:10 -0700 | [diff] [blame] | 2604 | * The first call here from mm/page_alloc:get_page_from_freelist() |
Paul Jackson | 02a0e53 | 2006-12-13 00:34:25 -0800 | [diff] [blame] | 2605 | * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets, |
| 2606 | * so no allocation on a node outside the cpuset is allowed (unless |
| 2607 | * in interrupt, of course). |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 2608 | * |
Paul Jackson | 36be57f | 2006-05-20 15:00:10 -0700 | [diff] [blame] | 2609 | * The second pass through get_page_from_freelist() doesn't even call |
| 2610 | * here for GFP_ATOMIC calls. For those calls, the __alloc_pages() |
| 2611 | * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set |
| 2612 | * in alloc_flags. That logic and the checks below have the combined |
| 2613 | * effect that: |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 2614 | * in_interrupt - any node ok (current task context irrelevant) |
| 2615 | * GFP_ATOMIC - any node ok |
David Rientjes | c596d9f | 2007-05-06 14:49:32 -0700 | [diff] [blame] | 2616 | * TIF_MEMDIE - any node ok |
Paul Menage | 7860836 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2617 | * GFP_KERNEL - any node in enclosing hardwalled cpuset ok |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 2618 | * GFP_USER - only nodes in the current task's mems_allowed ok. |
Paul Jackson | 02a0e53 | 2006-12-13 00:34:25 -0800 | [diff] [blame] | 2619 | */ |
Vlastimil Babka | 002f290 | 2016-05-19 17:14:30 -0700 | [diff] [blame] | 2620 | bool __cpuset_node_allowed(int node, gfp_t gfp_mask) |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 2621 | { |
Tejun Heo | c9710d8 | 2013-08-08 20:11:22 -0400 | [diff] [blame] | 2622 | struct cpuset *cs; /* current cpuset ancestors */ |
Paul Jackson | 29afd49 | 2006-03-24 03:16:12 -0800 | [diff] [blame] | 2623 | bool allowed; /* is allocation on this node allowed? */ |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2624 | unsigned long flags; |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 2625 | |
David Rientjes | 6e276d2 | 2015-04-14 15:47:01 -0700 | [diff] [blame] | 2626 | if (in_interrupt()) |
Vlastimil Babka | 002f290 | 2016-05-19 17:14:30 -0700 | [diff] [blame] | 2627 | return true; |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 2628 | if (node_isset(node, current->mems_allowed)) |
Vlastimil Babka | 002f290 | 2016-05-19 17:14:30 -0700 | [diff] [blame] | 2629 | return true; |
David Rientjes | c596d9f | 2007-05-06 14:49:32 -0700 | [diff] [blame] | 2630 | /* |
| 2631 | * Allow tasks that have access to memory reserves because they have |
| 2632 | * been OOM killed to get memory anywhere. |
| 2633 | */ |
| 2634 | if (unlikely(test_thread_flag(TIF_MEMDIE))) |
Vlastimil Babka | 002f290 | 2016-05-19 17:14:30 -0700 | [diff] [blame] | 2635 | return true; |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 2636 | if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */ |
Vlastimil Babka | 002f290 | 2016-05-19 17:14:30 -0700 | [diff] [blame] | 2637 | return false; |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 2638 | |
Bob Picco | 5563e77 | 2005-11-13 16:06:35 -0800 | [diff] [blame] | 2639 | if (current->flags & PF_EXITING) /* Let dying task have memory */ |
Vlastimil Babka | 002f290 | 2016-05-19 17:14:30 -0700 | [diff] [blame] | 2640 | return true; |
Bob Picco | 5563e77 | 2005-11-13 16:06:35 -0800 | [diff] [blame] | 2641 | |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 2642 | /* Not hardwall and node outside mems_allowed: scan up cpusets */ |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2643 | spin_lock_irqsave(&callback_lock, flags); |
Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 2644 | |
Li Zefan | b8dadcb | 2014-03-03 17:28:36 -0500 | [diff] [blame] | 2645 | rcu_read_lock(); |
Paul Menage | 7860836 | 2008-04-29 01:00:26 -0700 | [diff] [blame] | 2646 | cs = nearest_hardwall_ancestor(task_cs(current)); |
Li Zefan | 99afb0f | 2014-02-27 18:19:36 +0800 | [diff] [blame] | 2647 | allowed = node_isset(node, cs->mems_allowed); |
Li Zefan | b8dadcb | 2014-03-03 17:28:36 -0500 | [diff] [blame] | 2648 | rcu_read_unlock(); |
Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 2649 | |
Vladimir Davydov | 8447a0f | 2014-10-20 15:50:29 +0400 | [diff] [blame] | 2650 | spin_unlock_irqrestore(&callback_lock, flags); |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 2651 | return allowed; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2652 | } |
| 2653 | |
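/*
 * Illustrative sketch only: how an allocator-style path might consult
 * __cpuset_node_allowed() before falling back. example_pick_node() is
 * an invented name; the real callers are in mm/page_alloc.c.
 */
static int __maybe_unused example_pick_node(int preferred_nid, gfp_t gfp_mask)
{
	/*
	 * GFP_USER (__GFP_HARDWALL) is confined to current's cpuset;
	 * GFP_KERNEL may escape to the nearest hardwalled ancestor.
	 */
	if (__cpuset_node_allowed(preferred_nid, gfp_mask))
		return preferred_nid;

	return numa_node_id();	/* fall back to the local node */
}
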
Paul Jackson | ef08e3b | 2005-09-06 15:18:13 -0700 | [diff] [blame] | 2654 | /** |
Jack Steiner | 6adef3e | 2010-05-26 14:42:49 -0700 | [diff] [blame] | 2655 | * cpuset_mem_spread_node() - On which node to begin search for a file page |
| 2656 | * cpuset_slab_spread_node() - On which node to begin search for a slab page |
Paul Jackson | 825a46a | 2006-03-24 03:16:03 -0800 | [diff] [blame] | 2657 | * |
| 2658 | * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for |
| 2659 | * tasks in a cpuset with is_spread_page or is_spread_slab set), |
| 2660 | * and if the memory allocation used cpuset_mem_spread_node() |
| 2661 | * to determine on which node to start looking, as it will for |
| 2662 | * certain page cache or slab cache pages such as used for file |
| 2663 | * system buffers and inode caches, then instead of starting the |
| 2664 | * search for a free page on the local node, spread the starting |
| 2665 | * node around the task's mems_allowed nodes. |
| 2666 | * |
| 2667 | * We don't have to worry about the returned node being offline |
| 2668 | * because "it can't happen", and even if it did, it would be ok. |
| 2669 | * |
| 2670 | * The routines calling guarantee_online_mems() are careful to |
| 2671 | * only set nodes in task->mems_allowed that are online. So it |
| 2672 | * should not be possible for the following code to return an |
| 2673 | * offline node. But if it did, that would be ok, as this routine |
| 2674 | * is not returning the node where the allocation must be, only |
| 2675 | * the node where the search should start. The zonelist passed to |
| 2676 | * __alloc_pages() will include all nodes. If the slab allocator |
| 2677 | * is passed an offline node, it will fall back to the local node. |
| 2678 | * See kmem_cache_alloc_node(). |
| 2679 | */ |
| 2680 | |
Jack Steiner | 6adef3e | 2010-05-26 14:42:49 -0700 | [diff] [blame] | 2681 | static int cpuset_spread_node(int *rotor) |
Paul Jackson | 825a46a | 2006-03-24 03:16:03 -0800 | [diff] [blame] | 2682 | { |
Andrew Morton | 0edaf86 | 2016-05-19 17:10:58 -0700 | [diff] [blame] | 2683 | return *rotor = next_node_in(*rotor, current->mems_allowed); |
Paul Jackson | 825a46a | 2006-03-24 03:16:03 -0800 | [diff] [blame] | 2684 | } |
Jack Steiner | 6adef3e | 2010-05-26 14:42:49 -0700 | [diff] [blame] | 2685 | |
| 2686 | int cpuset_mem_spread_node(void) |
| 2687 | { |
Michal Hocko | 778d3b0 | 2011-07-26 16:08:30 -0700 | [diff] [blame] | 2688 | if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE) |
| 2689 | current->cpuset_mem_spread_rotor = |
| 2690 | node_random(¤t->mems_allowed); |
| 2691 | |
Jack Steiner | 6adef3e | 2010-05-26 14:42:49 -0700 | [diff] [blame] | 2692 | return cpuset_spread_node(¤t->cpuset_mem_spread_rotor); |
| 2693 | } |
| 2694 | |
| 2695 | int cpuset_slab_spread_node(void) |
| 2696 | { |
Michal Hocko | 778d3b0 | 2011-07-26 16:08:30 -0700 | [diff] [blame] | 2697 | if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE) |
| 2698 | current->cpuset_slab_spread_rotor = |
| 2699 | node_random(¤t->mems_allowed); |
| 2700 | |
Jack Steiner | 6adef3e | 2010-05-26 14:42:49 -0700 | [diff] [blame] | 2701 | return cpuset_spread_node(¤t->cpuset_slab_spread_rotor); |
| 2702 | } |
| 2703 | |
Paul Jackson | 825a46a | 2006-03-24 03:16:03 -0800 | [diff] [blame] | 2704 | EXPORT_SYMBOL_GPL(cpuset_mem_spread_node); |
| 2705 | |
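/*
 * Illustrative sketch only: a page-cache-style allocation that starts
 * its search on the node returned by cpuset_mem_spread_node(). The
 * helper example_alloc_spread_page() is invented for this example.
 */
static struct page *__maybe_unused example_alloc_spread_page(gfp_t gfp_mask)
{
	int nid = cpuset_mem_spread_node();	/* rotates over mems_allowed */

	/* An offline nid is tolerated; the allocator falls back locally. */
	return alloc_pages_node(nid, gfp_mask, 0);
}
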
| 2706 | /** |
David Rientjes | bbe373f | 2007-10-16 23:25:58 -0700 | [diff] [blame] | 2707 | * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's? |
| 2708 | * @tsk1: pointer to task_struct of some task. |
| 2709 | * @tsk2: pointer to task_struct of some other task. |
Paul Jackson | ef08e3b | 2005-09-06 15:18:13 -0700 | [diff] [blame] | 2710 | * |
David Rientjes | bbe373f | 2007-10-16 23:25:58 -0700 | [diff] [blame] | 2711 | * Description: Return true if @tsk1's mems_allowed intersects the |
| 2712 | * mems_allowed of @tsk2. Used by the OOM killer to determine if |
| 2713 | * one task's memory usage might impact the memory available |
| 2714 | * to the other. |
Paul Jackson | ef08e3b | 2005-09-06 15:18:13 -0700 | [diff] [blame] | 2715 | */ |
| 2716 | |
David Rientjes | bbe373f | 2007-10-16 23:25:58 -0700 | [diff] [blame] | 2717 | int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, |
| 2718 | const struct task_struct *tsk2) |
Paul Jackson | ef08e3b | 2005-09-06 15:18:13 -0700 | [diff] [blame] | 2719 | { |
David Rientjes | bbe373f | 2007-10-16 23:25:58 -0700 | [diff] [blame] | 2720 | return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed); |
Paul Jackson | ef08e3b | 2005-09-06 15:18:13 -0700 | [diff] [blame] | 2721 | } |
| 2722 | |
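/*
 * Illustrative sketch only: the kind of overlap test the OOM killer
 * performs; example_oom_overlap() is an invented wrapper name.
 */
static int __maybe_unused example_oom_overlap(struct task_struct *victim,
					      struct task_struct *p)
{
	/* Killing @victim only helps @p if their memory nodes overlap. */
	return cpuset_mems_allowed_intersects(victim, p);
}
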
David Rientjes | 75aa199 | 2009-01-06 14:39:01 -0800 | [diff] [blame] | 2723 | /** |
David Rientjes | da39da3 | 2015-11-05 18:48:05 -0800 | [diff] [blame] | 2724 | * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed |
David Rientjes | 75aa199 | 2009-01-06 14:39:01 -0800 | [diff] [blame] | 2725 | * |
David Rientjes | da39da3 | 2015-11-05 18:48:05 -0800 | [diff] [blame] | 2726 | * Description: Prints current's name, cpuset name, and cached copy of its |
Li Zefan | b8dadcb | 2014-03-03 17:28:36 -0500 | [diff] [blame] | 2727 | * mems_allowed to the kernel log. |
David Rientjes | 75aa199 | 2009-01-06 14:39:01 -0800 | [diff] [blame] | 2728 | */ |
David Rientjes | da39da3 | 2015-11-05 18:48:05 -0800 | [diff] [blame] | 2729 | void cpuset_print_current_mems_allowed(void) |
David Rientjes | 75aa199 | 2009-01-06 14:39:01 -0800 | [diff] [blame] | 2730 | { |
Li Zefan | b8dadcb | 2014-03-03 17:28:36 -0500 | [diff] [blame] | 2731 | struct cgroup *cgrp; |
David Rientjes | 75aa199 | 2009-01-06 14:39:01 -0800 | [diff] [blame] | 2732 | |
Li Zefan | b8dadcb | 2014-03-03 17:28:36 -0500 | [diff] [blame] | 2733 | rcu_read_lock(); |
Li Zefan | 63f43f5 | 2013-01-25 16:08:01 +0800 | [diff] [blame] | 2734 | |
David Rientjes | da39da3 | 2015-11-05 18:48:05 -0800 | [diff] [blame] | 2735 | cgrp = task_cs(current)->css.cgroup; |
| 2736 | pr_info("%s cpuset=", current->comm); |
Tejun Heo | e61734c | 2014-02-12 09:29:50 -0500 | [diff] [blame] | 2737 | pr_cont_cgroup_name(cgrp); |
David Rientjes | da39da3 | 2015-11-05 18:48:05 -0800 | [diff] [blame] | 2738 | pr_cont(" mems_allowed=%*pbl\n", |
| 2739 | nodemask_pr_args(¤t->mems_allowed)); |
Li Zefan | f440d98 | 2013-03-01 15:02:15 +0800 | [diff] [blame] | 2740 | |
Li Zefan | cfb5966 | 2013-03-12 10:28:39 +0800 | [diff] [blame] | 2741 | rcu_read_unlock(); |
David Rientjes | 75aa199 | 2009-01-06 14:39:01 -0800 | [diff] [blame] | 2742 | } |
| 2743 | |
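/*
 * Example output of cpuset_print_current_mems_allowed() (illustrative
 * values, assuming a task named "bash" in cpuset /top with nodes 0-1):
 *
 *	bash cpuset=/top mems_allowed=0-1
 */
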
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2744 | /* |
Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 2745 | * Collection of memory_pressure is suppressed unless |
| 2746 | * this flag is enabled by writing "1" to the special |
| 2747 | * cpuset file 'memory_pressure_enabled' in the root cpuset. |
| 2748 | */ |
| 2749 | |
Paul Jackson | c5b2aff | 2006-01-08 01:01:51 -0800 | [diff] [blame] | 2750 | int cpuset_memory_pressure_enabled __read_mostly; |
Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 2751 | |
| 2752 | /** |
| 2753 | * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims. |
| 2754 | * |
| 2755 | * Keep a running average of the rate of synchronous (direct) |
| 2756 | * page reclaim efforts initiated by tasks in each cpuset. |
| 2757 | * |
| 2758 | * This represents the rate at which some task in the cpuset |
| 2759 | * ran low on memory on all nodes it was allowed to use, and |
| 2760 | * had to enter the kernel's page reclaim code in an effort to |
| 2761 | * create more free memory by tossing clean pages or swapping |
| 2762 | * or writing dirty pages. |
| 2763 | * |
| 2764 | * Display to user space in the per-cpuset read-only file |
| 2765 | * "memory_pressure". Value displayed is an integer |
| 2766 | * representing the recent rate of entry into the synchronous |
| 2767 | * (direct) page reclaim by any task attached to the cpuset. |
| 2768 | */ |
| 2769 | |
| 2770 | void __cpuset_memory_pressure_bump(void) |
| 2771 | { |
Li Zefan | b8dadcb | 2014-03-03 17:28:36 -0500 | [diff] [blame] | 2772 | rcu_read_lock(); |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 2773 | fmeter_markevent(&task_cs(current)->fmeter); |
Li Zefan | b8dadcb | 2014-03-03 17:28:36 -0500 | [diff] [blame] | 2774 | rcu_read_unlock(); |
Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 2775 | } |
| 2776 | |
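/*
 * Illustrative userspace usage (the paths assume the legacy cpuset
 * hierarchy is mounted at /dev/cpuset and a child cpuset "mycpuset"
 * exists; adjust for the local setup):
 *
 *	echo 1 > /dev/cpuset/memory_pressure_enabled
 *	cat /dev/cpuset/mycpuset/memory_pressure
 */
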
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 2777 | #ifdef CONFIG_PROC_PID_CPUSET |
Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 2778 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2779 | * proc_cpuset_show() |
| 2780 | * - Print task's cpuset path into seq_file. |
| 2781 | * - Used for /proc/<pid>/cpuset. |
Paul Jackson | 053199e | 2005-10-30 15:02:30 -0800 | [diff] [blame] | 2782 | * - No need to task_lock(tsk) on this tsk->cpuset reference, as it |
| 2783 | * doesn't really matter if tsk->cpuset changes after we read it, |
Tejun Heo | 5d21cc2 | 2013-01-07 08:51:08 -0800 | [diff] [blame] | 2784 | * and we take cpuset_mutex, keeping cpuset_attach() from changing it |
Paul Menage | 2df167a | 2008-02-07 00:14:45 -0800 | [diff] [blame] | 2785 | * anyway. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2786 | */ |
Zefan Li | 52de477 | 2014-09-18 16:03:36 +0800 | [diff] [blame] | 2787 | int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns, |
| 2788 | struct pid *pid, struct task_struct *tsk) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2789 | { |
Tejun Heo | 4c737b4 | 2016-08-10 11:23:44 -0400 | [diff] [blame] | 2790 | char *buf; |
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 2791 | struct cgroup_subsys_state *css; |
Eric W. Biederman | 99f8955 | 2006-06-26 00:25:55 -0700 | [diff] [blame] | 2792 | int retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2793 | |
Eric W. Biederman | 99f8955 | 2006-06-26 00:25:55 -0700 | [diff] [blame] | 2794 | retval = -ENOMEM; |
Tejun Heo | e61734c | 2014-02-12 09:29:50 -0500 | [diff] [blame] | 2795 | buf = kmalloc(PATH_MAX, GFP_KERNEL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2796 | if (!buf) |
Eric W. Biederman | 99f8955 | 2006-06-26 00:25:55 -0700 | [diff] [blame] | 2797 | goto out; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2798 | |
Aditya Kali | a79a908 | 2016-01-29 02:54:06 -0600 | [diff] [blame] | 2799 | css = task_get_css(tsk, cpuset_cgrp_id); |
Tejun Heo | 4c737b4 | 2016-08-10 11:23:44 -0400 | [diff] [blame] | 2800 | retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX, |
| 2801 | current->nsproxy->cgroup_ns); |
Aditya Kali | a79a908 | 2016-01-29 02:54:06 -0600 | [diff] [blame] | 2802 | css_put(css); |
Tejun Heo | 4c737b4 | 2016-08-10 11:23:44 -0400 | [diff] [blame] | 2803 | if (retval >= PATH_MAX) |
Tejun Heo | 679a5e3 | 2016-09-29 11:58:36 +0200 | [diff] [blame] | 2804 | retval = -ENAMETOOLONG; |
| 2805 | if (retval < 0) |
Zefan Li | 52de477 | 2014-09-18 16:03:36 +0800 | [diff] [blame] | 2806 | goto out_free; |
Tejun Heo | 4c737b4 | 2016-08-10 11:23:44 -0400 | [diff] [blame] | 2807 | seq_puts(m, buf); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2808 | seq_putc(m, '\n'); |
Tejun Heo | e61734c | 2014-02-12 09:29:50 -0500 | [diff] [blame] | 2809 | retval = 0; |
Eric W. Biederman | 99f8955 | 2006-06-26 00:25:55 -0700 | [diff] [blame] | 2810 | out_free: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2811 | kfree(buf); |
Eric W. Biederman | 99f8955 | 2006-06-26 00:25:55 -0700 | [diff] [blame] | 2812 | out: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2813 | return retval; |
| 2814 | } |
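
/*
 * Example (illustrative): reading the resulting file from userspace,
 * for a task sitting in the root cpuset:
 *
 *	$ cat /proc/self/cpuset
 *	/
 */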
Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 2815 | #endif /* CONFIG_PROC_PID_CPUSET */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2816 | |
Heiko Carstens | d01d482 | 2009-09-21 11:06:27 +0200 | [diff] [blame] | 2817 | /* Display task mems_allowed in /proc/<pid>/status file. */ |
Eric W. Biederman | df5f831 | 2008-02-08 04:18:33 -0800 | [diff] [blame] | 2818 | void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2819 | { |
Tejun Heo | e8e6d97 | 2015-02-13 14:37:23 -0800 | [diff] [blame] | 2820 | seq_printf(m, "Mems_allowed:\t%*pb\n", |
| 2821 | nodemask_pr_args(&task->mems_allowed)); |
| 2822 | seq_printf(m, "Mems_allowed_list:\t%*pbl\n", |
| 2823 | nodemask_pr_args(&task->mems_allowed)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2824 | } |
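
/*
 * Example /proc/<pid>/status output (illustrative, for a task allowed
 * nodes 0-1; the width of the hex mask varies with MAX_NUMNODES):
 *
 *	Mems_allowed:	00000003
 *	Mems_allowed_list:	0-1
 */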