blob: 832004935ca76fd52a5aad04560ab8ee9b25d29e [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * kernel/cpuset.c
3 *
4 * Processor and Memory placement constraints for sets of tasks.
5 *
6 * Copyright (C) 2003 BULL SA.
Paul Jackson029190c2007-10-18 23:40:20 -07007 * Copyright (C) 2004-2007 Silicon Graphics, Inc.
Paul Menage8793d852007-10-18 23:39:39 -07008 * Copyright (C) 2006 Google, Inc
Linus Torvalds1da177e2005-04-16 15:20:36 -07009 *
10 * Portions derived from Patrick Mochel's sysfs code.
11 * sysfs is Copyright (c) 2001-3 Patrick Mochel
Linus Torvalds1da177e2005-04-16 15:20:36 -070012 *
Paul Jackson825a46a2006-03-24 03:16:03 -080013 * 2003-10-10 Written by Simon Derr.
Linus Torvalds1da177e2005-04-16 15:20:36 -070014 * 2003-10-22 Updates by Stephen Hemminger.
Paul Jackson825a46a2006-03-24 03:16:03 -080015 * 2004 May-July Rework by Paul Jackson.
Paul Menage8793d852007-10-18 23:39:39 -070016 * 2006 Rework by Paul Menage to use generic cgroups
Linus Torvalds1da177e2005-04-16 15:20:36 -070017 *
18 * This file is subject to the terms and conditions of the GNU General Public
19 * License. See the file COPYING in the main directory of the Linux
20 * distribution for more details.
21 */
22
Linus Torvalds1da177e2005-04-16 15:20:36 -070023#include <linux/cpu.h>
24#include <linux/cpumask.h>
25#include <linux/cpuset.h>
26#include <linux/err.h>
27#include <linux/errno.h>
28#include <linux/file.h>
29#include <linux/fs.h>
30#include <linux/init.h>
31#include <linux/interrupt.h>
32#include <linux/kernel.h>
33#include <linux/kmod.h>
34#include <linux/list.h>
Paul Jackson68860ec2005-10-30 15:02:36 -080035#include <linux/mempolicy.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070036#include <linux/mm.h>
37#include <linux/module.h>
38#include <linux/mount.h>
39#include <linux/namei.h>
40#include <linux/pagemap.h>
41#include <linux/proc_fs.h>
Paul Jackson6b9c2602006-01-08 01:02:02 -080042#include <linux/rcupdate.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070043#include <linux/sched.h>
44#include <linux/seq_file.h>
David Quigley22fb52d2006-06-23 02:04:00 -070045#include <linux/security.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include <linux/slab.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070047#include <linux/spinlock.h>
48#include <linux/stat.h>
49#include <linux/string.h>
50#include <linux/time.h>
51#include <linux/backing-dev.h>
52#include <linux/sort.h>
53
54#include <asm/uaccess.h>
55#include <asm/atomic.h>
Ingo Molnar3d3f26a2006-03-23 03:00:18 -080056#include <linux/mutex.h>
Paul Jackson029190c2007-10-18 23:40:20 -070057#include <linux/kfifo.h>
Cliff Wickman956db3c2008-02-07 00:14:43 -080058#include <linux/workqueue.h>
59#include <linux/cgroup.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070060
Paul Jackson202f72d2006-01-08 01:01:57 -080061/*
62 * Tracks how many cpusets are currently defined in system.
63 * When there is only one cpuset (the root cpuset) we can
64 * short circuit some hooks.
65 */
Paul Jackson7edc5962006-01-08 01:02:03 -080066int number_of_cpusets __read_mostly;
Paul Jackson202f72d2006-01-08 01:01:57 -080067
Paul Menage2df167a2008-02-07 00:14:45 -080068/* Forward declare cgroup structures */
Paul Menage8793d852007-10-18 23:39:39 -070069struct cgroup_subsys cpuset_subsys;
70struct cpuset;
71
Paul Jackson3e0d98b2006-01-08 01:01:49 -080072/* See "Frequency meter" comments, below. */
73
74struct fmeter {
75 int cnt; /* unprocessed events count */
76 int val; /* most recent output value */
77 time_t time; /* clock (secs) when val computed */
78 spinlock_t lock; /* guards read or write of above */
79};
80
Linus Torvalds1da177e2005-04-16 15:20:36 -070081struct cpuset {
Paul Menage8793d852007-10-18 23:39:39 -070082 struct cgroup_subsys_state css;
83
Linus Torvalds1da177e2005-04-16 15:20:36 -070084 unsigned long flags; /* "unsigned long" so bitops work */
85 cpumask_t cpus_allowed; /* CPUs allowed to tasks in cpuset */
86 nodemask_t mems_allowed; /* Memory Nodes allowed to tasks */
87
Linus Torvalds1da177e2005-04-16 15:20:36 -070088 struct cpuset *parent; /* my parent */
Linus Torvalds1da177e2005-04-16 15:20:36 -070089
90 /*
91 * Copy of global cpuset_mems_generation as of the most
92 * recent time this cpuset changed its mems_allowed.
93 */
Paul Jackson3e0d98b2006-01-08 01:01:49 -080094 int mems_generation;
95
96 struct fmeter fmeter; /* memory_pressure filter */
Paul Jackson029190c2007-10-18 23:40:20 -070097
98 /* partition number for rebuild_sched_domains() */
99 int pn;
Cliff Wickman956db3c2008-02-07 00:14:43 -0800100
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +0900101 /* for custom sched domain */
102 int relax_domain_level;
103
Cliff Wickman956db3c2008-02-07 00:14:43 -0800104 /* used for walking a cpuset heirarchy */
105 struct list_head stack_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700106};
107
Paul Menage8793d852007-10-18 23:39:39 -0700108/* Retrieve the cpuset for a cgroup */
109static inline struct cpuset *cgroup_cs(struct cgroup *cont)
110{
111 return container_of(cgroup_subsys_state(cont, cpuset_subsys_id),
112 struct cpuset, css);
113}
114
115/* Retrieve the cpuset for a task */
116static inline struct cpuset *task_cs(struct task_struct *task)
117{
118 return container_of(task_subsys_state(task, cpuset_subsys_id),
119 struct cpuset, css);
120}
Cliff Wickman956db3c2008-02-07 00:14:43 -0800121struct cpuset_hotplug_scanner {
122 struct cgroup_scanner scan;
123 struct cgroup *to;
124};
Paul Menage8793d852007-10-18 23:39:39 -0700125
Linus Torvalds1da177e2005-04-16 15:20:36 -0700126/* bits in struct cpuset flags field */
127typedef enum {
128 CS_CPU_EXCLUSIVE,
129 CS_MEM_EXCLUSIVE,
Paul Jackson45b07ef2006-01-08 01:00:56 -0800130 CS_MEMORY_MIGRATE,
Paul Jackson029190c2007-10-18 23:40:20 -0700131 CS_SCHED_LOAD_BALANCE,
Paul Jackson825a46a2006-03-24 03:16:03 -0800132 CS_SPREAD_PAGE,
133 CS_SPREAD_SLAB,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700134} cpuset_flagbits_t;
135
136/* convenient tests for these bits */
137static inline int is_cpu_exclusive(const struct cpuset *cs)
138{
Paul Jackson7b5b9ef2006-03-24 03:16:00 -0800139 return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700140}
141
142static inline int is_mem_exclusive(const struct cpuset *cs)
143{
Paul Jackson7b5b9ef2006-03-24 03:16:00 -0800144 return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700145}
146
Paul Jackson029190c2007-10-18 23:40:20 -0700147static inline int is_sched_load_balance(const struct cpuset *cs)
148{
149 return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
150}
151
Paul Jackson45b07ef2006-01-08 01:00:56 -0800152static inline int is_memory_migrate(const struct cpuset *cs)
153{
Paul Jackson7b5b9ef2006-03-24 03:16:00 -0800154 return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
Paul Jackson45b07ef2006-01-08 01:00:56 -0800155}
156
Paul Jackson825a46a2006-03-24 03:16:03 -0800157static inline int is_spread_page(const struct cpuset *cs)
158{
159 return test_bit(CS_SPREAD_PAGE, &cs->flags);
160}
161
162static inline int is_spread_slab(const struct cpuset *cs)
163{
164 return test_bit(CS_SPREAD_SLAB, &cs->flags);
165}
166
Linus Torvalds1da177e2005-04-16 15:20:36 -0700167/*
Paul Jackson151a4422006-03-24 03:16:11 -0800168 * Increment this integer everytime any cpuset changes its
Linus Torvalds1da177e2005-04-16 15:20:36 -0700169 * mems_allowed value. Users of cpusets can track this generation
170 * number, and avoid having to lock and reload mems_allowed unless
171 * the cpuset they're using changes generation.
172 *
Paul Menage2df167a2008-02-07 00:14:45 -0800173 * A single, global generation is needed because cpuset_attach_task() could
Linus Torvalds1da177e2005-04-16 15:20:36 -0700174 * reattach a task to a different cpuset, which must not have its
175 * generation numbers aliased with those of that tasks previous cpuset.
176 *
177 * Generations are needed for mems_allowed because one task cannot
Paul Menage2df167a2008-02-07 00:14:45 -0800178 * modify another's memory placement. So we must enable every task,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700179 * on every visit to __alloc_pages(), to efficiently check whether
180 * its current->cpuset->mems_allowed has changed, requiring an update
181 * of its current->mems_allowed.
Paul Jackson151a4422006-03-24 03:16:11 -0800182 *
Paul Menage2df167a2008-02-07 00:14:45 -0800183 * Since writes to cpuset_mems_generation are guarded by the cgroup lock
Paul Jackson151a4422006-03-24 03:16:11 -0800184 * there is no need to mark it atomic.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700185 */
Paul Jackson151a4422006-03-24 03:16:11 -0800186static int cpuset_mems_generation;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700187
188static struct cpuset top_cpuset = {
189 .flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)),
190 .cpus_allowed = CPU_MASK_ALL,
191 .mems_allowed = NODE_MASK_ALL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700192};
193
Linus Torvalds1da177e2005-04-16 15:20:36 -0700194/*
Paul Menage2df167a2008-02-07 00:14:45 -0800195 * There are two global mutexes guarding cpuset structures. The first
196 * is the main control groups cgroup_mutex, accessed via
197 * cgroup_lock()/cgroup_unlock(). The second is the cpuset-specific
198 * callback_mutex, below. They can nest. It is ok to first take
199 * cgroup_mutex, then nest callback_mutex. We also require taking
200 * task_lock() when dereferencing a task's cpuset pointer. See "The
201 * task_lock() exception", at the end of this comment.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700202 *
Ingo Molnar3d3f26a2006-03-23 03:00:18 -0800203 * A task must hold both mutexes to modify cpusets. If a task
Paul Menage2df167a2008-02-07 00:14:45 -0800204 * holds cgroup_mutex, then it blocks others wanting that mutex,
Ingo Molnar3d3f26a2006-03-23 03:00:18 -0800205 * ensuring that it is the only task able to also acquire callback_mutex
Paul Jackson053199e2005-10-30 15:02:30 -0800206 * and be able to modify cpusets. It can perform various checks on
207 * the cpuset structure first, knowing nothing will change. It can
Paul Menage2df167a2008-02-07 00:14:45 -0800208 * also allocate memory while just holding cgroup_mutex. While it is
Paul Jackson053199e2005-10-30 15:02:30 -0800209 * performing these checks, various callback routines can briefly
Ingo Molnar3d3f26a2006-03-23 03:00:18 -0800210 * acquire callback_mutex to query cpusets. Once it is ready to make
211 * the changes, it takes callback_mutex, blocking everyone else.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700212 *
Paul Jackson053199e2005-10-30 15:02:30 -0800213 * Calls to the kernel memory allocator can not be made while holding
Ingo Molnar3d3f26a2006-03-23 03:00:18 -0800214 * callback_mutex, as that would risk double tripping on callback_mutex
Paul Jackson053199e2005-10-30 15:02:30 -0800215 * from one of the callbacks into the cpuset code from within
216 * __alloc_pages().
Linus Torvalds1da177e2005-04-16 15:20:36 -0700217 *
Ingo Molnar3d3f26a2006-03-23 03:00:18 -0800218 * If a task is only holding callback_mutex, then it has read-only
Paul Jackson053199e2005-10-30 15:02:30 -0800219 * access to cpusets.
220 *
221 * The task_struct fields mems_allowed and mems_generation may only
222 * be accessed in the context of that task, so require no locks.
223 *
Paul Jackson053199e2005-10-30 15:02:30 -0800224 * The cpuset_common_file_write handler for operations that modify
Paul Menage2df167a2008-02-07 00:14:45 -0800225 * the cpuset hierarchy holds cgroup_mutex across the entire operation,
Paul Jackson053199e2005-10-30 15:02:30 -0800226 * single threading all such cpuset modifications across the system.
227 *
Ingo Molnar3d3f26a2006-03-23 03:00:18 -0800228 * The cpuset_common_file_read() handlers only hold callback_mutex across
Paul Jackson053199e2005-10-30 15:02:30 -0800229 * small pieces of code, such as when reading out possibly multi-word
230 * cpumasks and nodemasks.
231 *
Paul Menage2df167a2008-02-07 00:14:45 -0800232 * Accessing a task's cpuset should be done in accordance with the
233 * guidelines for accessing subsystem state in kernel/cgroup.c
Linus Torvalds1da177e2005-04-16 15:20:36 -0700234 */
235
Ingo Molnar3d3f26a2006-03-23 03:00:18 -0800236static DEFINE_MUTEX(callback_mutex);
Paul Jackson4247bdc2005-09-10 00:26:06 -0700237
Paul Menage8793d852007-10-18 23:39:39 -0700238/* This is ugly, but preserves the userspace API for existing cpuset
239 * users. If someone tries to mount the "cpuset" filesystem, we
240 * silently switch it to mount "cgroup" instead */
David Howells454e2392006-06-23 02:02:57 -0700241static int cpuset_get_sb(struct file_system_type *fs_type,
242 int flags, const char *unused_dev_name,
243 void *data, struct vfsmount *mnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700244{
Paul Menage8793d852007-10-18 23:39:39 -0700245 struct file_system_type *cgroup_fs = get_fs_type("cgroup");
246 int ret = -ENODEV;
247 if (cgroup_fs) {
248 char mountopts[] =
249 "cpuset,noprefix,"
250 "release_agent=/sbin/cpuset_release_agent";
251 ret = cgroup_fs->get_sb(cgroup_fs, flags,
252 unused_dev_name, mountopts, mnt);
253 put_filesystem(cgroup_fs);
254 }
255 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700256}
257
258static struct file_system_type cpuset_fs_type = {
259 .name = "cpuset",
260 .get_sb = cpuset_get_sb,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700261};
262
Linus Torvalds1da177e2005-04-16 15:20:36 -0700263/*
264 * Return in *pmask the portion of a cpusets's cpus_allowed that
265 * are online. If none are online, walk up the cpuset hierarchy
266 * until we find one that does have some online cpus. If we get
267 * all the way to the top and still haven't found any online cpus,
268 * return cpu_online_map. Or if passed a NULL cs from an exit'ing
269 * task, return cpu_online_map.
270 *
271 * One way or another, we guarantee to return some non-empty subset
272 * of cpu_online_map.
273 *
Ingo Molnar3d3f26a2006-03-23 03:00:18 -0800274 * Call with callback_mutex held.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700275 */
276
277static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask)
278{
279 while (cs && !cpus_intersects(cs->cpus_allowed, cpu_online_map))
280 cs = cs->parent;
281 if (cs)
282 cpus_and(*pmask, cs->cpus_allowed, cpu_online_map);
283 else
284 *pmask = cpu_online_map;
285 BUG_ON(!cpus_intersects(*pmask, cpu_online_map));
286}
287
288/*
289 * Return in *pmask the portion of a cpusets's mems_allowed that
Christoph Lameter0e1e7c72007-10-16 01:25:38 -0700290 * are online, with memory. If none are online with memory, walk
291 * up the cpuset hierarchy until we find one that does have some
292 * online mems. If we get all the way to the top and still haven't
293 * found any online mems, return node_states[N_HIGH_MEMORY].
Linus Torvalds1da177e2005-04-16 15:20:36 -0700294 *
295 * One way or another, we guarantee to return some non-empty subset
Christoph Lameter0e1e7c72007-10-16 01:25:38 -0700296 * of node_states[N_HIGH_MEMORY].
Linus Torvalds1da177e2005-04-16 15:20:36 -0700297 *
Ingo Molnar3d3f26a2006-03-23 03:00:18 -0800298 * Call with callback_mutex held.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700299 */
300
301static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
302{
Christoph Lameter0e1e7c72007-10-16 01:25:38 -0700303 while (cs && !nodes_intersects(cs->mems_allowed,
304 node_states[N_HIGH_MEMORY]))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700305 cs = cs->parent;
306 if (cs)
Christoph Lameter0e1e7c72007-10-16 01:25:38 -0700307 nodes_and(*pmask, cs->mems_allowed,
308 node_states[N_HIGH_MEMORY]);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700309 else
Christoph Lameter0e1e7c72007-10-16 01:25:38 -0700310 *pmask = node_states[N_HIGH_MEMORY];
311 BUG_ON(!nodes_intersects(*pmask, node_states[N_HIGH_MEMORY]));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700312}
313
Paul Jacksoncf2a473c2006-01-08 01:01:54 -0800314/**
315 * cpuset_update_task_memory_state - update task memory placement
Linus Torvalds1da177e2005-04-16 15:20:36 -0700316 *
Paul Jacksoncf2a473c2006-01-08 01:01:54 -0800317 * If the current tasks cpusets mems_allowed changed behind our
318 * backs, update current->mems_allowed, mems_generation and task NUMA
319 * mempolicy to the new value.
320 *
321 * Task mempolicy is updated by rebinding it relative to the
322 * current->cpuset if a task has its memory placement changed.
323 * Do not call this routine if in_interrupt().
324 *
Paul Jackson4a01c8d2006-03-31 02:30:50 -0800325 * Call without callback_mutex or task_lock() held. May be
Paul Menage2df167a2008-02-07 00:14:45 -0800326 * called with or without cgroup_mutex held. Thanks in part to
327 * 'the_top_cpuset_hack', the task's cpuset pointer will never
David Rientjes41f7f602008-03-04 23:32:38 -0800328 * be NULL. This routine also might acquire callback_mutex during
329 * call.
Paul Jackson5aa15b52005-10-30 15:02:28 -0800330 *
Paul Jackson6b9c2602006-01-08 01:02:02 -0800331 * Reading current->cpuset->mems_generation doesn't need task_lock
332 * to guard the current->cpuset derefence, because it is guarded
Paul Menage2df167a2008-02-07 00:14:45 -0800333 * from concurrent freeing of current->cpuset using RCU.
Paul Jackson6b9c2602006-01-08 01:02:02 -0800334 *
335 * The rcu_dereference() is technically probably not needed,
336 * as I don't actually mind if I see a new cpuset pointer but
337 * an old value of mems_generation. However this really only
338 * matters on alpha systems using cpusets heavily. If I dropped
339 * that rcu_dereference(), it would save them a memory barrier.
340 * For all other arch's, rcu_dereference is a no-op anyway, and for
341 * alpha systems not using cpusets, another planned optimization,
342 * avoiding the rcu critical section for tasks in the root cpuset
343 * which is statically allocated, so can't vanish, will make this
344 * irrelevant. Better to use RCU as intended, than to engage in
345 * some cute trick to save a memory barrier that is impossible to
346 * test, for alpha systems using cpusets heavily, which might not
347 * even exist.
Paul Jackson053199e2005-10-30 15:02:30 -0800348 *
349 * This routine is needed to update the per-task mems_allowed data,
350 * within the tasks context, when it is trying to allocate memory
351 * (in various mm/mempolicy.c routines) and notices that some other
352 * task has been modifying its cpuset.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700353 */
354
Randy Dunlapfe85a992006-02-03 03:04:23 -0800355void cpuset_update_task_memory_state(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700356{
Paul Jackson053199e2005-10-30 15:02:30 -0800357 int my_cpusets_mem_gen;
Paul Jacksoncf2a473c2006-01-08 01:01:54 -0800358 struct task_struct *tsk = current;
Paul Jackson6b9c2602006-01-08 01:02:02 -0800359 struct cpuset *cs;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700360
Paul Menage8793d852007-10-18 23:39:39 -0700361 if (task_cs(tsk) == &top_cpuset) {
Paul Jackson03a285f2006-01-08 01:02:04 -0800362 /* Don't need rcu for top_cpuset. It's never freed. */
363 my_cpusets_mem_gen = top_cpuset.mems_generation;
364 } else {
365 rcu_read_lock();
Paul Menage8793d852007-10-18 23:39:39 -0700366 my_cpusets_mem_gen = task_cs(current)->mems_generation;
Paul Jackson03a285f2006-01-08 01:02:04 -0800367 rcu_read_unlock();
368 }
Paul Jacksoncf2a473c2006-01-08 01:01:54 -0800369
370 if (my_cpusets_mem_gen != tsk->cpuset_mems_generation) {
Ingo Molnar3d3f26a2006-03-23 03:00:18 -0800371 mutex_lock(&callback_mutex);
Paul Jacksoncf2a473c2006-01-08 01:01:54 -0800372 task_lock(tsk);
Paul Menage8793d852007-10-18 23:39:39 -0700373 cs = task_cs(tsk); /* Maybe changed when task not locked */
Paul Jacksoncf2a473c2006-01-08 01:01:54 -0800374 guarantee_online_mems(cs, &tsk->mems_allowed);
375 tsk->cpuset_mems_generation = cs->mems_generation;
Paul Jackson825a46a2006-03-24 03:16:03 -0800376 if (is_spread_page(cs))
377 tsk->flags |= PF_SPREAD_PAGE;
378 else
379 tsk->flags &= ~PF_SPREAD_PAGE;
380 if (is_spread_slab(cs))
381 tsk->flags |= PF_SPREAD_SLAB;
382 else
383 tsk->flags &= ~PF_SPREAD_SLAB;
Paul Jacksoncf2a473c2006-01-08 01:01:54 -0800384 task_unlock(tsk);
Ingo Molnar3d3f26a2006-03-23 03:00:18 -0800385 mutex_unlock(&callback_mutex);
Paul Jackson74cb2152006-01-08 01:01:56 -0800386 mpol_rebind_task(tsk, &tsk->mems_allowed);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700387 }
388}
389
390/*
391 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
392 *
393 * One cpuset is a subset of another if all its allowed CPUs and
394 * Memory Nodes are a subset of the other, and its exclusive flags
Paul Menage2df167a2008-02-07 00:14:45 -0800395 * are only set if the other's are set. Call holding cgroup_mutex.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700396 */
397
398static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
399{
400 return cpus_subset(p->cpus_allowed, q->cpus_allowed) &&
401 nodes_subset(p->mems_allowed, q->mems_allowed) &&
402 is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
403 is_mem_exclusive(p) <= is_mem_exclusive(q);
404}
405
406/*
407 * validate_change() - Used to validate that any proposed cpuset change
408 * follows the structural rules for cpusets.
409 *
410 * If we replaced the flag and mask values of the current cpuset
411 * (cur) with those values in the trial cpuset (trial), would
412 * our various subset and exclusive rules still be valid? Presumes
Paul Menage2df167a2008-02-07 00:14:45 -0800413 * cgroup_mutex held.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700414 *
415 * 'cur' is the address of an actual, in-use cpuset. Operations
416 * such as list traversal that depend on the actual address of the
417 * cpuset in the list must use cur below, not trial.
418 *
419 * 'trial' is the address of bulk structure copy of cur, with
420 * perhaps one or more of the fields cpus_allowed, mems_allowed,
421 * or flags changed to new, trial values.
422 *
423 * Return 0 if valid, -errno if not.
424 */
425
426static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
427{
Paul Menage8793d852007-10-18 23:39:39 -0700428 struct cgroup *cont;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700429 struct cpuset *c, *par;
430
431 /* Each of our child cpusets must be a subset of us */
Paul Menage8793d852007-10-18 23:39:39 -0700432 list_for_each_entry(cont, &cur->css.cgroup->children, sibling) {
433 if (!is_cpuset_subset(cgroup_cs(cont), trial))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700434 return -EBUSY;
435 }
436
437 /* Remaining checks don't apply to root cpuset */
Paul Jackson69604062006-12-06 20:36:15 -0800438 if (cur == &top_cpuset)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700439 return 0;
440
Paul Jackson69604062006-12-06 20:36:15 -0800441 par = cur->parent;
442
Linus Torvalds1da177e2005-04-16 15:20:36 -0700443 /* We must be a subset of our parent cpuset */
444 if (!is_cpuset_subset(trial, par))
445 return -EACCES;
446
Paul Menage2df167a2008-02-07 00:14:45 -0800447 /*
448 * If either I or some sibling (!= me) is exclusive, we can't
449 * overlap
450 */
Paul Menage8793d852007-10-18 23:39:39 -0700451 list_for_each_entry(cont, &par->css.cgroup->children, sibling) {
452 c = cgroup_cs(cont);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700453 if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
454 c != cur &&
455 cpus_intersects(trial->cpus_allowed, c->cpus_allowed))
456 return -EINVAL;
457 if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
458 c != cur &&
459 nodes_intersects(trial->mems_allowed, c->mems_allowed))
460 return -EINVAL;
461 }
462
Paul Jackson020958b2007-10-18 23:40:21 -0700463 /* Cpusets with tasks can't have empty cpus_allowed or mems_allowed */
464 if (cgroup_task_count(cur->css.cgroup)) {
465 if (cpus_empty(trial->cpus_allowed) ||
466 nodes_empty(trial->mems_allowed)) {
467 return -ENOSPC;
468 }
469 }
470
Linus Torvalds1da177e2005-04-16 15:20:36 -0700471 return 0;
472}
473
Dinakar Guniguntala85d7b942005-06-25 14:57:34 -0700474/*
Paul Jackson029190c2007-10-18 23:40:20 -0700475 * Helper routine for rebuild_sched_domains().
476 * Do cpusets a, b have overlapping cpus_allowed masks?
477 */
478
479static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
480{
481 return cpus_intersects(a->cpus_allowed, b->cpus_allowed);
482}
483
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +0900484static void
485update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
486{
487 if (!dattr)
488 return;
489 if (dattr->relax_domain_level < c->relax_domain_level)
490 dattr->relax_domain_level = c->relax_domain_level;
491 return;
492}
493
Paul Jackson029190c2007-10-18 23:40:20 -0700494/*
495 * rebuild_sched_domains()
496 *
497 * If the flag 'sched_load_balance' of any cpuset with non-empty
498 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
499 * which has that flag enabled, or if any cpuset with a non-empty
500 * 'cpus' is removed, then call this routine to rebuild the
501 * scheduler's dynamic sched domains.
502 *
503 * This routine builds a partial partition of the systems CPUs
504 * (the set of non-overlappping cpumask_t's in the array 'part'
505 * below), and passes that partial partition to the kernel/sched.c
506 * partition_sched_domains() routine, which will rebuild the
507 * schedulers load balancing domains (sched domains) as specified
508 * by that partial partition. A 'partial partition' is a set of
509 * non-overlapping subsets whose union is a subset of that set.
510 *
511 * See "What is sched_load_balance" in Documentation/cpusets.txt
512 * for a background explanation of this.
513 *
514 * Does not return errors, on the theory that the callers of this
515 * routine would rather not worry about failures to rebuild sched
516 * domains when operating in the severe memory shortage situations
517 * that could cause allocation failures below.
518 *
519 * Call with cgroup_mutex held. May take callback_mutex during
520 * call due to the kfifo_alloc() and kmalloc() calls. May nest
Gautham R Shenoy86ef5c92008-01-25 21:08:02 +0100521 * a call to the get_online_cpus()/put_online_cpus() pair.
Paul Jackson029190c2007-10-18 23:40:20 -0700522 * Must not be called holding callback_mutex, because we must not
Gautham R Shenoy86ef5c92008-01-25 21:08:02 +0100523 * call get_online_cpus() while holding callback_mutex. Elsewhere
524 * the kernel nests callback_mutex inside get_online_cpus() calls.
Paul Jackson029190c2007-10-18 23:40:20 -0700525 * So the reverse nesting would risk an ABBA deadlock.
526 *
527 * The three key local variables below are:
528 * q - a kfifo queue of cpuset pointers, used to implement a
529 * top-down scan of all cpusets. This scan loads a pointer
530 * to each cpuset marked is_sched_load_balance into the
531 * array 'csa'. For our purposes, rebuilding the schedulers
532 * sched domains, we can ignore !is_sched_load_balance cpusets.
533 * csa - (for CpuSet Array) Array of pointers to all the cpusets
534 * that need to be load balanced, for convenient iterative
535 * access by the subsequent code that finds the best partition,
536 * i.e the set of domains (subsets) of CPUs such that the
537 * cpus_allowed of every cpuset marked is_sched_load_balance
538 * is a subset of one of these domains, while there are as
539 * many such domains as possible, each as small as possible.
540 * doms - Conversion of 'csa' to an array of cpumasks, for passing to
541 * the kernel/sched.c routine partition_sched_domains() in a
542 * convenient format, that can be easily compared to the prior
543 * value to determine what partition elements (sched domains)
544 * were changed (added or removed.)
545 *
546 * Finding the best partition (set of domains):
547 * The triple nested loops below over i, j, k scan over the
548 * load balanced cpusets (using the array of cpuset pointers in
549 * csa[]) looking for pairs of cpusets that have overlapping
550 * cpus_allowed, but which don't have the same 'pn' partition
551 * number and gives them in the same partition number. It keeps
552 * looping on the 'restart' label until it can no longer find
553 * any such pairs.
554 *
555 * The union of the cpus_allowed masks from the set of
556 * all cpusets having the same 'pn' value then form the one
557 * element of the partition (one sched domain) to be passed to
558 * partition_sched_domains().
559 */
560
561static void rebuild_sched_domains(void)
562{
563 struct kfifo *q; /* queue of cpusets to be scanned */
564 struct cpuset *cp; /* scans q */
565 struct cpuset **csa; /* array of all cpuset ptrs */
566 int csn; /* how many cpuset ptrs in csa so far */
567 int i, j, k; /* indices for partition finding loops */
568 cpumask_t *doms; /* resulting partition; i.e. sched domains */
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +0900569 struct sched_domain_attr *dattr; /* attributes for custom domains */
Paul Jackson029190c2007-10-18 23:40:20 -0700570 int ndoms; /* number of sched domains in result */
571 int nslot; /* next empty doms[] cpumask_t slot */
572
573 q = NULL;
574 csa = NULL;
575 doms = NULL;
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +0900576 dattr = NULL;
Paul Jackson029190c2007-10-18 23:40:20 -0700577
578 /* Special case for the 99% of systems with one, full, sched domain */
579 if (is_sched_load_balance(&top_cpuset)) {
580 ndoms = 1;
581 doms = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
582 if (!doms)
583 goto rebuild;
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +0900584 dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
585 if (dattr) {
586 *dattr = SD_ATTR_INIT;
587 update_domain_attr(dattr, &top_cpuset);
588 }
Paul Jackson029190c2007-10-18 23:40:20 -0700589 *doms = top_cpuset.cpus_allowed;
590 goto rebuild;
591 }
592
593 q = kfifo_alloc(number_of_cpusets * sizeof(cp), GFP_KERNEL, NULL);
594 if (IS_ERR(q))
595 goto done;
596 csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL);
597 if (!csa)
598 goto done;
599 csn = 0;
600
601 cp = &top_cpuset;
602 __kfifo_put(q, (void *)&cp, sizeof(cp));
603 while (__kfifo_get(q, (void *)&cp, sizeof(cp))) {
604 struct cgroup *cont;
605 struct cpuset *child; /* scans child cpusets of cp */
606 if (is_sched_load_balance(cp))
607 csa[csn++] = cp;
608 list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
609 child = cgroup_cs(cont);
610 __kfifo_put(q, (void *)&child, sizeof(cp));
611 }
612 }
613
614 for (i = 0; i < csn; i++)
615 csa[i]->pn = i;
616 ndoms = csn;
617
618restart:
619 /* Find the best partition (set of sched domains) */
620 for (i = 0; i < csn; i++) {
621 struct cpuset *a = csa[i];
622 int apn = a->pn;
623
624 for (j = 0; j < csn; j++) {
625 struct cpuset *b = csa[j];
626 int bpn = b->pn;
627
628 if (apn != bpn && cpusets_overlap(a, b)) {
629 for (k = 0; k < csn; k++) {
630 struct cpuset *c = csa[k];
631
632 if (c->pn == bpn)
633 c->pn = apn;
634 }
635 ndoms--; /* one less element */
636 goto restart;
637 }
638 }
639 }
640
641 /* Convert <csn, csa> to <ndoms, doms> */
642 doms = kmalloc(ndoms * sizeof(cpumask_t), GFP_KERNEL);
643 if (!doms)
644 goto rebuild;
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +0900645 dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);
Paul Jackson029190c2007-10-18 23:40:20 -0700646
647 for (nslot = 0, i = 0; i < csn; i++) {
648 struct cpuset *a = csa[i];
649 int apn = a->pn;
650
651 if (apn >= 0) {
652 cpumask_t *dp = doms + nslot;
653
654 if (nslot == ndoms) {
655 static int warnings = 10;
656 if (warnings) {
657 printk(KERN_WARNING
658 "rebuild_sched_domains confused:"
659 " nslot %d, ndoms %d, csn %d, i %d,"
660 " apn %d\n",
661 nslot, ndoms, csn, i, apn);
662 warnings--;
663 }
664 continue;
665 }
666
667 cpus_clear(*dp);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +0900668 if (dattr)
669 *(dattr + nslot) = SD_ATTR_INIT;
Paul Jackson029190c2007-10-18 23:40:20 -0700670 for (j = i; j < csn; j++) {
671 struct cpuset *b = csa[j];
672
673 if (apn == b->pn) {
674 cpus_or(*dp, *dp, b->cpus_allowed);
675 b->pn = -1;
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +0900676 update_domain_attr(dattr, b);
Paul Jackson029190c2007-10-18 23:40:20 -0700677 }
678 }
679 nslot++;
680 }
681 }
682 BUG_ON(nslot != ndoms);
683
684rebuild:
685 /* Have scheduler rebuild sched domains */
Gautham R Shenoy86ef5c92008-01-25 21:08:02 +0100686 get_online_cpus();
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +0900687 partition_sched_domains(ndoms, doms, dattr);
Gautham R Shenoy86ef5c92008-01-25 21:08:02 +0100688 put_online_cpus();
Paul Jackson029190c2007-10-18 23:40:20 -0700689
690done:
691 if (q && !IS_ERR(q))
692 kfifo_free(q);
693 kfree(csa);
694 /* Don't kfree(doms) -- partition_sched_domains() does that. */
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +0900695 /* Don't kfree(dattr) -- partition_sched_domains() does that. */
Paul Jackson029190c2007-10-18 23:40:20 -0700696}
697
Paul Menage8707d8b2007-10-18 23:40:22 -0700698static inline int started_after_time(struct task_struct *t1,
699 struct timespec *time,
700 struct task_struct *t2)
701{
702 int start_diff = timespec_compare(&t1->start_time, time);
703 if (start_diff > 0) {
704 return 1;
705 } else if (start_diff < 0) {
706 return 0;
707 } else {
708 /*
709 * Arbitrarily, if two processes started at the same
710 * time, we'll say that the lower pointer value
711 * started first. Note that t2 may have exited by now
712 * so this may not be a valid pointer any longer, but
713 * that's fine - it still serves to distinguish
714 * between two tasks started (effectively)
715 * simultaneously.
716 */
717 return t1 > t2;
718 }
719}
720
721static inline int started_after(void *p1, void *p2)
722{
723 struct task_struct *t1 = p1;
724 struct task_struct *t2 = p2;
725 return started_after_time(t1, &t2->start_time, t2);
726}
727
Cliff Wickman58f47902008-02-07 00:14:44 -0800728/**
729 * cpuset_test_cpumask - test a task's cpus_allowed versus its cpuset's
730 * @tsk: task to test
731 * @scan: struct cgroup_scanner contained in its struct cpuset_hotplug_scanner
732 *
Paul Menage2df167a2008-02-07 00:14:45 -0800733 * Call with cgroup_mutex held. May take callback_mutex during call.
Cliff Wickman58f47902008-02-07 00:14:44 -0800734 * Called for each task in a cgroup by cgroup_scan_tasks().
735 * Return nonzero if this tasks's cpus_allowed mask should be changed (in other
736 * words, if its mask is not equal to its cpuset's mask).
Paul Jackson053199e2005-10-30 15:02:30 -0800737 */
Cliff Wickman58f47902008-02-07 00:14:44 -0800738int cpuset_test_cpumask(struct task_struct *tsk, struct cgroup_scanner *scan)
739{
740 return !cpus_equal(tsk->cpus_allowed,
741 (cgroup_cs(scan->cg))->cpus_allowed);
742}
Paul Jackson053199e2005-10-30 15:02:30 -0800743
Cliff Wickman58f47902008-02-07 00:14:44 -0800744/**
745 * cpuset_change_cpumask - make a task's cpus_allowed the same as its cpuset's
746 * @tsk: task to test
747 * @scan: struct cgroup_scanner containing the cgroup of the task
748 *
749 * Called by cgroup_scan_tasks() for each task in a cgroup whose
750 * cpus_allowed mask needs to be changed.
751 *
752 * We don't need to re-check for the cgroup/cpuset membership, since we're
753 * holding cgroup_lock() at this point.
754 */
755void cpuset_change_cpumask(struct task_struct *tsk, struct cgroup_scanner *scan)
756{
Mike Travisf9a86fc2008-04-04 18:11:07 -0700757 set_cpus_allowed_ptr(tsk, &((cgroup_cs(scan->cg))->cpus_allowed));
Cliff Wickman58f47902008-02-07 00:14:44 -0800758}
759
760/**
761 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
762 * @cs: the cpuset to consider
763 * @buf: buffer of cpu numbers written to this cpuset
764 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700765static int update_cpumask(struct cpuset *cs, char *buf)
766{
767 struct cpuset trialcs;
Cliff Wickman58f47902008-02-07 00:14:44 -0800768 struct cgroup_scanner scan;
Paul Menage8707d8b2007-10-18 23:40:22 -0700769 struct ptr_heap heap;
Cliff Wickman58f47902008-02-07 00:14:44 -0800770 int retval;
771 int is_load_balanced;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700772
Paul Jackson4c4d50f2006-08-27 01:23:51 -0700773 /* top_cpuset.cpus_allowed tracks cpu_online_map; it's read-only */
774 if (cs == &top_cpuset)
775 return -EACCES;
776
Linus Torvalds1da177e2005-04-16 15:20:36 -0700777 trialcs = *cs;
David Rientjes6f7f02e2007-05-08 00:31:43 -0700778
779 /*
Paul Jacksonc8d9c902008-02-07 00:14:46 -0800780 * An empty cpus_allowed is ok only if the cpuset has no tasks.
Paul Jackson020958b2007-10-18 23:40:21 -0700781 * Since cpulist_parse() fails on an empty mask, we special case
782 * that parsing. The validate_change() call ensures that cpusets
783 * with tasks have cpus.
David Rientjes6f7f02e2007-05-08 00:31:43 -0700784 */
Paul Jackson020958b2007-10-18 23:40:21 -0700785 buf = strstrip(buf);
786 if (!*buf) {
David Rientjes6f7f02e2007-05-08 00:31:43 -0700787 cpus_clear(trialcs.cpus_allowed);
788 } else {
789 retval = cpulist_parse(buf, trialcs.cpus_allowed);
790 if (retval < 0)
791 return retval;
792 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700793 cpus_and(trialcs.cpus_allowed, trialcs.cpus_allowed, cpu_online_map);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700794 retval = validate_change(cs, &trialcs);
Dinakar Guniguntala85d7b942005-06-25 14:57:34 -0700795 if (retval < 0)
796 return retval;
Paul Jackson029190c2007-10-18 23:40:20 -0700797
Paul Menage8707d8b2007-10-18 23:40:22 -0700798 /* Nothing to do if the cpus didn't change */
799 if (cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed))
800 return 0;
Cliff Wickman58f47902008-02-07 00:14:44 -0800801
Paul Menage8707d8b2007-10-18 23:40:22 -0700802 retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, &started_after);
803 if (retval)
804 return retval;
805
Paul Jackson029190c2007-10-18 23:40:20 -0700806 is_load_balanced = is_sched_load_balance(&trialcs);
807
Ingo Molnar3d3f26a2006-03-23 03:00:18 -0800808 mutex_lock(&callback_mutex);
Dinakar Guniguntala85d7b942005-06-25 14:57:34 -0700809 cs->cpus_allowed = trialcs.cpus_allowed;
Ingo Molnar3d3f26a2006-03-23 03:00:18 -0800810 mutex_unlock(&callback_mutex);
Paul Jackson029190c2007-10-18 23:40:20 -0700811
Paul Menage8707d8b2007-10-18 23:40:22 -0700812 /*
813 * Scan tasks in the cpuset, and update the cpumasks of any
Cliff Wickman58f47902008-02-07 00:14:44 -0800814 * that need an update.
Paul Menage8707d8b2007-10-18 23:40:22 -0700815 */
Cliff Wickman58f47902008-02-07 00:14:44 -0800816 scan.cg = cs->css.cgroup;
817 scan.test_task = cpuset_test_cpumask;
818 scan.process_task = cpuset_change_cpumask;
819 scan.heap = &heap;
820 cgroup_scan_tasks(&scan);
Paul Menage8707d8b2007-10-18 23:40:22 -0700821 heap_free(&heap);
Cliff Wickman58f47902008-02-07 00:14:44 -0800822
Paul Menage8707d8b2007-10-18 23:40:22 -0700823 if (is_load_balanced)
Paul Jackson029190c2007-10-18 23:40:20 -0700824 rebuild_sched_domains();
Dinakar Guniguntala85d7b942005-06-25 14:57:34 -0700825 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700826}
827
Paul Jackson053199e2005-10-30 15:02:30 -0800828/*
Paul Jacksone4e364e2006-03-31 02:30:52 -0800829 * cpuset_migrate_mm
830 *
831 * Migrate memory region from one set of nodes to another.
832 *
833 * Temporarilly set tasks mems_allowed to target nodes of migration,
834 * so that the migration code can allocate pages on these nodes.
835 *
Paul Menage2df167a2008-02-07 00:14:45 -0800836 * Call holding cgroup_mutex, so current's cpuset won't change
Paul Jacksonc8d9c902008-02-07 00:14:46 -0800837 * during this call, as manage_mutex holds off any cpuset_attach()
Paul Jacksone4e364e2006-03-31 02:30:52 -0800838 * calls. Therefore we don't need to take task_lock around the
839 * call to guarantee_online_mems(), as we know no one is changing
Paul Menage2df167a2008-02-07 00:14:45 -0800840 * our task's cpuset.
Paul Jacksone4e364e2006-03-31 02:30:52 -0800841 *
842 * Hold callback_mutex around the two modifications of our tasks
843 * mems_allowed to synchronize with cpuset_mems_allowed().
844 *
845 * While the mm_struct we are migrating is typically from some
846 * other task, the task_struct mems_allowed that we are hacking
847 * is for our current task, which must allocate new pages for that
848 * migrating memory region.
849 *
850 * We call cpuset_update_task_memory_state() before hacking
851 * our tasks mems_allowed, so that we are assured of being in
852 * sync with our tasks cpuset, and in particular, callbacks to
853 * cpuset_update_task_memory_state() from nested page allocations
854 * won't see any mismatch of our cpuset and task mems_generation
855 * values, so won't overwrite our hacked tasks mems_allowed
856 * nodemask.
857 */
858
859static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
860 const nodemask_t *to)
861{
862 struct task_struct *tsk = current;
863
864 cpuset_update_task_memory_state();
865
866 mutex_lock(&callback_mutex);
867 tsk->mems_allowed = *to;
868 mutex_unlock(&callback_mutex);
869
870 do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
871
872 mutex_lock(&callback_mutex);
Paul Menage8793d852007-10-18 23:39:39 -0700873 guarantee_online_mems(task_cs(tsk),&tsk->mems_allowed);
Paul Jacksone4e364e2006-03-31 02:30:52 -0800874 mutex_unlock(&callback_mutex);
875}
876
877/*
Paul Jackson42253992006-01-08 01:01:59 -0800878 * Handle user request to change the 'mems' memory placement
879 * of a cpuset. Needs to validate the request, update the
880 * cpusets mems_allowed and mems_generation, and for each
Paul Jackson04c19fa2006-01-08 01:02:00 -0800881 * task in the cpuset, rebind any vma mempolicies and if
882 * the cpuset is marked 'memory_migrate', migrate the tasks
883 * pages to the new memory.
Paul Jackson42253992006-01-08 01:01:59 -0800884 *
Paul Menage2df167a2008-02-07 00:14:45 -0800885 * Call with cgroup_mutex held. May take callback_mutex during call.
Paul Jackson42253992006-01-08 01:01:59 -0800886 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
887 * lock each such tasks mm->mmap_sem, scan its vma's and rebind
888 * their mempolicies to the cpusets new mems_allowed.
Paul Jackson053199e2005-10-30 15:02:30 -0800889 */
890
Paul Menage8793d852007-10-18 23:39:39 -0700891static void *cpuset_being_rebound;
892
Linus Torvalds1da177e2005-04-16 15:20:36 -0700893static int update_nodemask(struct cpuset *cs, char *buf)
894{
895 struct cpuset trialcs;
Paul Jackson04c19fa2006-01-08 01:02:00 -0800896 nodemask_t oldmem;
Paul Menage8793d852007-10-18 23:39:39 -0700897 struct task_struct *p;
Paul Jackson42253992006-01-08 01:01:59 -0800898 struct mm_struct **mmarray;
899 int i, n, ntasks;
Paul Jackson04c19fa2006-01-08 01:02:00 -0800900 int migrate;
Paul Jackson42253992006-01-08 01:01:59 -0800901 int fudge;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700902 int retval;
Paul Menage8793d852007-10-18 23:39:39 -0700903 struct cgroup_iter it;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700904
Christoph Lameter0e1e7c72007-10-16 01:25:38 -0700905 /*
906 * top_cpuset.mems_allowed tracks node_stats[N_HIGH_MEMORY];
907 * it's read-only
908 */
Paul Jackson38837fc2006-09-29 02:01:16 -0700909 if (cs == &top_cpuset)
910 return -EACCES;
911
Linus Torvalds1da177e2005-04-16 15:20:36 -0700912 trialcs = *cs;
David Rientjes6f7f02e2007-05-08 00:31:43 -0700913
914 /*
Paul Jackson020958b2007-10-18 23:40:21 -0700915 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
916 * Since nodelist_parse() fails on an empty mask, we special case
917 * that parsing. The validate_change() call ensures that cpusets
918 * with tasks have memory.
David Rientjes6f7f02e2007-05-08 00:31:43 -0700919 */
Paul Jackson020958b2007-10-18 23:40:21 -0700920 buf = strstrip(buf);
921 if (!*buf) {
David Rientjes6f7f02e2007-05-08 00:31:43 -0700922 nodes_clear(trialcs.mems_allowed);
923 } else {
924 retval = nodelist_parse(buf, trialcs.mems_allowed);
925 if (retval < 0)
926 goto done;
927 }
Christoph Lameter0e1e7c72007-10-16 01:25:38 -0700928 nodes_and(trialcs.mems_allowed, trialcs.mems_allowed,
929 node_states[N_HIGH_MEMORY]);
Paul Jackson04c19fa2006-01-08 01:02:00 -0800930 oldmem = cs->mems_allowed;
931 if (nodes_equal(oldmem, trialcs.mems_allowed)) {
932 retval = 0; /* Too easy - nothing to do */
933 goto done;
934 }
Paul Jackson59dac162006-01-08 01:01:52 -0800935 retval = validate_change(cs, &trialcs);
936 if (retval < 0)
937 goto done;
938
Ingo Molnar3d3f26a2006-03-23 03:00:18 -0800939 mutex_lock(&callback_mutex);
Paul Jackson59dac162006-01-08 01:01:52 -0800940 cs->mems_allowed = trialcs.mems_allowed;
Paul Jackson151a4422006-03-24 03:16:11 -0800941 cs->mems_generation = cpuset_mems_generation++;
Ingo Molnar3d3f26a2006-03-23 03:00:18 -0800942 mutex_unlock(&callback_mutex);
Paul Jackson59dac162006-01-08 01:01:52 -0800943
Lee Schermerhorn846a16b2008-04-28 02:13:09 -0700944 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */
Paul Jackson42253992006-01-08 01:01:59 -0800945
946 fudge = 10; /* spare mmarray[] slots */
947 fudge += cpus_weight(cs->cpus_allowed); /* imagine one fork-bomb/cpu */
948 retval = -ENOMEM;
949
950 /*
951 * Allocate mmarray[] to hold mm reference for each task
952 * in cpuset cs. Can't kmalloc GFP_KERNEL while holding
953 * tasklist_lock. We could use GFP_ATOMIC, but with a
954 * few more lines of code, we can retry until we get a big
955 * enough mmarray[] w/o using GFP_ATOMIC.
956 */
957 while (1) {
Paul Menage8793d852007-10-18 23:39:39 -0700958 ntasks = cgroup_task_count(cs->css.cgroup); /* guess */
Paul Jackson42253992006-01-08 01:01:59 -0800959 ntasks += fudge;
960 mmarray = kmalloc(ntasks * sizeof(*mmarray), GFP_KERNEL);
961 if (!mmarray)
962 goto done;
Paul Menagec2aef332007-07-15 23:40:11 -0700963 read_lock(&tasklist_lock); /* block fork */
Paul Menage8793d852007-10-18 23:39:39 -0700964 if (cgroup_task_count(cs->css.cgroup) <= ntasks)
Paul Jackson42253992006-01-08 01:01:59 -0800965 break; /* got enough */
Paul Menagec2aef332007-07-15 23:40:11 -0700966 read_unlock(&tasklist_lock); /* try again */
Paul Jackson42253992006-01-08 01:01:59 -0800967 kfree(mmarray);
968 }
969
970 n = 0;
971
972 /* Load up mmarray[] with mm reference for each task in cpuset. */
Paul Menage8793d852007-10-18 23:39:39 -0700973 cgroup_iter_start(cs->css.cgroup, &it);
974 while ((p = cgroup_iter_next(cs->css.cgroup, &it))) {
Paul Jackson42253992006-01-08 01:01:59 -0800975 struct mm_struct *mm;
976
977 if (n >= ntasks) {
978 printk(KERN_WARNING
979 "Cpuset mempolicy rebind incomplete.\n");
Paul Menage8793d852007-10-18 23:39:39 -0700980 break;
Paul Jackson42253992006-01-08 01:01:59 -0800981 }
Paul Jackson42253992006-01-08 01:01:59 -0800982 mm = get_task_mm(p);
983 if (!mm)
984 continue;
985 mmarray[n++] = mm;
Paul Menage8793d852007-10-18 23:39:39 -0700986 }
987 cgroup_iter_end(cs->css.cgroup, &it);
Paul Menagec2aef332007-07-15 23:40:11 -0700988 read_unlock(&tasklist_lock);
Paul Jackson42253992006-01-08 01:01:59 -0800989
990 /*
991 * Now that we've dropped the tasklist spinlock, we can
992 * rebind the vma mempolicies of each mm in mmarray[] to their
993 * new cpuset, and release that mm. The mpol_rebind_mm()
994 * call takes mmap_sem, which we couldn't take while holding
Lee Schermerhorn846a16b2008-04-28 02:13:09 -0700995 * tasklist_lock. Forks can happen again now - the mpol_dup()
Paul Jackson42253992006-01-08 01:01:59 -0800996 * cpuset_being_rebound check will catch such forks, and rebind
997 * their vma mempolicies too. Because we still hold the global
Paul Menage2df167a2008-02-07 00:14:45 -0800998 * cgroup_mutex, we know that no other rebind effort will
Paul Jackson42253992006-01-08 01:01:59 -0800999 * be contending for the global variable cpuset_being_rebound.
1000 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
Paul Jackson04c19fa2006-01-08 01:02:00 -08001001 * is idempotent. Also migrate pages in each mm to new nodes.
Paul Jackson42253992006-01-08 01:01:59 -08001002 */
Paul Jackson04c19fa2006-01-08 01:02:00 -08001003 migrate = is_memory_migrate(cs);
Paul Jackson42253992006-01-08 01:01:59 -08001004 for (i = 0; i < n; i++) {
1005 struct mm_struct *mm = mmarray[i];
1006
1007 mpol_rebind_mm(mm, &cs->mems_allowed);
Paul Jacksone4e364e2006-03-31 02:30:52 -08001008 if (migrate)
1009 cpuset_migrate_mm(mm, &oldmem, &cs->mems_allowed);
Paul Jackson42253992006-01-08 01:01:59 -08001010 mmput(mm);
1011 }
1012
Paul Menage2df167a2008-02-07 00:14:45 -08001013 /* We're done rebinding vmas to this cpuset's new mems_allowed. */
Paul Jackson42253992006-01-08 01:01:59 -08001014 kfree(mmarray);
Paul Menage8793d852007-10-18 23:39:39 -07001015 cpuset_being_rebound = NULL;
Paul Jackson42253992006-01-08 01:01:59 -08001016 retval = 0;
Paul Jackson59dac162006-01-08 01:01:52 -08001017done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001018 return retval;
1019}
1020
Paul Menage8793d852007-10-18 23:39:39 -07001021int current_cpuset_is_being_rebound(void)
1022{
1023 return task_cs(current) == cpuset_being_rebound;
1024}
1025
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09001026static int update_relax_domain_level(struct cpuset *cs, char *buf)
1027{
1028 int val = simple_strtol(buf, NULL, 10);
1029
1030 if (val < 0)
1031 val = -1;
1032
1033 if (val != cs->relax_domain_level) {
1034 cs->relax_domain_level = val;
1035 rebuild_sched_domains();
1036 }
1037
1038 return 0;
1039}
1040
Paul Jackson3e0d98b2006-01-08 01:01:49 -08001041/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001042 * update_flag - read a 0 or a 1 in a file and update associated flag
1043 * bit: the bit to update (CS_CPU_EXCLUSIVE, CS_MEM_EXCLUSIVE,
Paul Jackson029190c2007-10-18 23:40:20 -07001044 * CS_SCHED_LOAD_BALANCE,
Paul Jackson825a46a2006-03-24 03:16:03 -08001045 * CS_NOTIFY_ON_RELEASE, CS_MEMORY_MIGRATE,
1046 * CS_SPREAD_PAGE, CS_SPREAD_SLAB)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001047 * cs: the cpuset to update
1048 * buf: the buffer where we read the 0 or 1
Paul Jackson053199e2005-10-30 15:02:30 -08001049 *
Paul Menage2df167a2008-02-07 00:14:45 -08001050 * Call with cgroup_mutex held.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001051 */
1052
Paul Menage700fe1a2008-04-29 01:00:00 -07001053static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1054 int turning_on)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001055{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001056 struct cpuset trialcs;
Paul Jackson607717a2007-10-16 01:27:43 -07001057 int err;
Paul Jackson029190c2007-10-18 23:40:20 -07001058 int cpus_nonempty, balance_flag_changed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001059
Linus Torvalds1da177e2005-04-16 15:20:36 -07001060 trialcs = *cs;
1061 if (turning_on)
1062 set_bit(bit, &trialcs.flags);
1063 else
1064 clear_bit(bit, &trialcs.flags);
1065
1066 err = validate_change(cs, &trialcs);
Dinakar Guniguntala85d7b942005-06-25 14:57:34 -07001067 if (err < 0)
1068 return err;
Paul Jackson029190c2007-10-18 23:40:20 -07001069
1070 cpus_nonempty = !cpus_empty(trialcs.cpus_allowed);
1071 balance_flag_changed = (is_sched_load_balance(cs) !=
1072 is_sched_load_balance(&trialcs));
1073
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08001074 mutex_lock(&callback_mutex);
Paul Jackson69604062006-12-06 20:36:15 -08001075 cs->flags = trialcs.flags;
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08001076 mutex_unlock(&callback_mutex);
Dinakar Guniguntala85d7b942005-06-25 14:57:34 -07001077
Paul Jackson029190c2007-10-18 23:40:20 -07001078 if (cpus_nonempty && balance_flag_changed)
1079 rebuild_sched_domains();
1080
Dinakar Guniguntala85d7b942005-06-25 14:57:34 -07001081 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001082}
1083
Paul Jackson053199e2005-10-30 15:02:30 -08001084/*
Adrian Bunk80f72282006-06-30 18:27:16 +02001085 * Frequency meter - How fast is some event occurring?
Paul Jackson3e0d98b2006-01-08 01:01:49 -08001086 *
1087 * These routines manage a digitally filtered, constant time based,
1088 * event frequency meter. There are four routines:
1089 * fmeter_init() - initialize a frequency meter.
1090 * fmeter_markevent() - called each time the event happens.
1091 * fmeter_getrate() - returns the recent rate of such events.
1092 * fmeter_update() - internal routine used to update fmeter.
1093 *
1094 * A common data structure is passed to each of these routines,
1095 * which is used to keep track of the state required to manage the
1096 * frequency meter and its digital filter.
1097 *
1098 * The filter works on the number of events marked per unit time.
1099 * The filter is single-pole low-pass recursive (IIR). The time unit
1100 * is 1 second. Arithmetic is done using 32-bit integers scaled to
1101 * simulate 3 decimal digits of precision (multiplied by 1000).
1102 *
1103 * With an FM_COEF of 933, and a time base of 1 second, the filter
1104 * has a half-life of 10 seconds, meaning that if the events quit
1105 * happening, then the rate returned from the fmeter_getrate()
1106 * will be cut in half each 10 seconds, until it converges to zero.
1107 *
1108 * It is not worth doing a real infinitely recursive filter. If more
1109 * than FM_MAXTICKS ticks have elapsed since the last filter event,
1110 * just compute FM_MAXTICKS ticks worth, by which point the level
1111 * will be stable.
1112 *
1113 * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
1114 * arithmetic overflow in the fmeter_update() routine.
1115 *
1116 * Given the simple 32 bit integer arithmetic used, this meter works
1117 * best for reporting rates between one per millisecond (msec) and
1118 * one per 32 (approx) seconds. At constant rates faster than one
1119 * per msec it maxes out at values just under 1,000,000. At constant
1120 * rates between one per msec, and one per second it will stabilize
1121 * to a value N*1000, where N is the rate of events per second.
1122 * At constant rates between one per second and one per 32 seconds,
1123 * it will be choppy, moving up on the seconds that have an event,
1124 * and then decaying until the next event. At rates slower than
1125 * about one in 32 seconds, it decays all the way back to zero between
1126 * each event.
1127 */
1128
1129#define FM_COEF 933 /* coefficient for half-life of 10 secs */
1130#define FM_MAXTICKS ((time_t)99) /* useless computing more ticks than this */
1131#define FM_MAXCNT 1000000 /* limit cnt to avoid overflow */
1132#define FM_SCALE 1000 /* faux fixed point scale */
1133
1134/* Initialize a frequency meter */
1135static void fmeter_init(struct fmeter *fmp)
1136{
1137 fmp->cnt = 0;
1138 fmp->val = 0;
1139 fmp->time = 0;
1140 spin_lock_init(&fmp->lock);
1141}
1142
1143/* Internal meter update - process cnt events and update value */
1144static void fmeter_update(struct fmeter *fmp)
1145{
1146 time_t now = get_seconds();
1147 time_t ticks = now - fmp->time;
1148
1149 if (ticks == 0)
1150 return;
1151
1152 ticks = min(FM_MAXTICKS, ticks);
1153 while (ticks-- > 0)
1154 fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
1155 fmp->time = now;
1156
1157 fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
1158 fmp->cnt = 0;
1159}
1160
1161/* Process any previous ticks, then bump cnt by one (times scale). */
1162static void fmeter_markevent(struct fmeter *fmp)
1163{
1164 spin_lock(&fmp->lock);
1165 fmeter_update(fmp);
1166 fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
1167 spin_unlock(&fmp->lock);
1168}
1169
1170/* Process any previous ticks, then return current value. */
1171static int fmeter_getrate(struct fmeter *fmp)
1172{
1173 int val;
1174
1175 spin_lock(&fmp->lock);
1176 fmeter_update(fmp);
1177 val = fmp->val;
1178 spin_unlock(&fmp->lock);
1179 return val;
1180}
1181
Paul Menage2df167a2008-02-07 00:14:45 -08001182/* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */
Paul Menage8793d852007-10-18 23:39:39 -07001183static int cpuset_can_attach(struct cgroup_subsys *ss,
1184 struct cgroup *cont, struct task_struct *tsk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001185{
Paul Menage8793d852007-10-18 23:39:39 -07001186 struct cpuset *cs = cgroup_cs(cont);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001187
Linus Torvalds1da177e2005-04-16 15:20:36 -07001188 if (cpus_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
1189 return -ENOSPC;
1190
Paul Menage8793d852007-10-18 23:39:39 -07001191 return security_task_setscheduler(tsk, 0, NULL);
1192}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001193
Paul Menage8793d852007-10-18 23:39:39 -07001194static void cpuset_attach(struct cgroup_subsys *ss,
1195 struct cgroup *cont, struct cgroup *oldcont,
1196 struct task_struct *tsk)
1197{
1198 cpumask_t cpus;
1199 nodemask_t from, to;
1200 struct mm_struct *mm;
1201 struct cpuset *cs = cgroup_cs(cont);
1202 struct cpuset *oldcs = cgroup_cs(oldcont);
David Quigley22fb52d2006-06-23 02:04:00 -07001203
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08001204 mutex_lock(&callback_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001205 guarantee_online_cpus(cs, &cpus);
Mike Travisf9a86fc2008-04-04 18:11:07 -07001206 set_cpus_allowed_ptr(tsk, &cpus);
Paul Menage8793d852007-10-18 23:39:39 -07001207 mutex_unlock(&callback_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001208
Paul Jackson45b07ef2006-01-08 01:00:56 -08001209 from = oldcs->mems_allowed;
1210 to = cs->mems_allowed;
Paul Jackson42253992006-01-08 01:01:59 -08001211 mm = get_task_mm(tsk);
1212 if (mm) {
1213 mpol_rebind_mm(mm, &to);
Paul Jackson2741a552006-03-31 02:30:51 -08001214 if (is_memory_migrate(cs))
Paul Jacksone4e364e2006-03-31 02:30:52 -08001215 cpuset_migrate_mm(mm, &from, &to);
Paul Jackson42253992006-01-08 01:01:59 -08001216 mmput(mm);
1217 }
1218
Linus Torvalds1da177e2005-04-16 15:20:36 -07001219}
1220
1221/* The various types of files and directories in a cpuset file system */
1222
1223typedef enum {
Paul Jackson45b07ef2006-01-08 01:00:56 -08001224 FILE_MEMORY_MIGRATE,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001225 FILE_CPULIST,
1226 FILE_MEMLIST,
1227 FILE_CPU_EXCLUSIVE,
1228 FILE_MEM_EXCLUSIVE,
Paul Jackson029190c2007-10-18 23:40:20 -07001229 FILE_SCHED_LOAD_BALANCE,
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09001230 FILE_SCHED_RELAX_DOMAIN_LEVEL,
Paul Jackson3e0d98b2006-01-08 01:01:49 -08001231 FILE_MEMORY_PRESSURE_ENABLED,
1232 FILE_MEMORY_PRESSURE,
Paul Jackson825a46a2006-03-24 03:16:03 -08001233 FILE_SPREAD_PAGE,
1234 FILE_SPREAD_SLAB,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001235} cpuset_filetype_t;
1236
Paul Menage8793d852007-10-18 23:39:39 -07001237static ssize_t cpuset_common_file_write(struct cgroup *cont,
1238 struct cftype *cft,
1239 struct file *file,
Paul Menaged3ed11c2006-12-06 20:41:37 -08001240 const char __user *userbuf,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001241 size_t nbytes, loff_t *unused_ppos)
1242{
Paul Menage8793d852007-10-18 23:39:39 -07001243 struct cpuset *cs = cgroup_cs(cont);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001244 cpuset_filetype_t type = cft->private;
1245 char *buffer;
1246 int retval = 0;
1247
1248 /* Crude upper limit on largest legitimate cpulist user might write. */
Paul Jackson029190c2007-10-18 23:40:20 -07001249 if (nbytes > 100U + 6 * max(NR_CPUS, MAX_NUMNODES))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001250 return -E2BIG;
1251
1252 /* +1 for nul-terminator */
Harvey Harrisonb331d252008-04-28 14:13:19 -07001253 buffer = kmalloc(nbytes + 1, GFP_KERNEL);
1254 if (!buffer)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001255 return -ENOMEM;
1256
1257 if (copy_from_user(buffer, userbuf, nbytes)) {
1258 retval = -EFAULT;
1259 goto out1;
1260 }
1261 buffer[nbytes] = 0; /* nul-terminate */
1262
Paul Menage8793d852007-10-18 23:39:39 -07001263 cgroup_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001264
Paul Menage8793d852007-10-18 23:39:39 -07001265 if (cgroup_is_removed(cont)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001266 retval = -ENODEV;
1267 goto out2;
1268 }
1269
1270 switch (type) {
1271 case FILE_CPULIST:
1272 retval = update_cpumask(cs, buffer);
1273 break;
1274 case FILE_MEMLIST:
1275 retval = update_nodemask(cs, buffer);
1276 break;
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09001277 case FILE_SCHED_RELAX_DOMAIN_LEVEL:
1278 retval = update_relax_domain_level(cs, buffer);
1279 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001280 default:
1281 retval = -EINVAL;
1282 goto out2;
1283 }
1284
1285 if (retval == 0)
1286 retval = nbytes;
1287out2:
Paul Menage8793d852007-10-18 23:39:39 -07001288 cgroup_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001289out1:
1290 kfree(buffer);
1291 return retval;
1292}
1293
Paul Menage700fe1a2008-04-29 01:00:00 -07001294static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
1295{
1296 int retval = 0;
1297 struct cpuset *cs = cgroup_cs(cgrp);
1298 cpuset_filetype_t type = cft->private;
1299
1300 cgroup_lock();
1301
1302 if (cgroup_is_removed(cgrp)) {
1303 cgroup_unlock();
1304 return -ENODEV;
1305 }
1306
1307 switch (type) {
1308 case FILE_CPU_EXCLUSIVE:
1309 retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
1310 break;
1311 case FILE_MEM_EXCLUSIVE:
1312 retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
1313 break;
1314 case FILE_SCHED_LOAD_BALANCE:
1315 retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
1316 break;
1317 case FILE_MEMORY_MIGRATE:
1318 retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
1319 break;
1320 case FILE_MEMORY_PRESSURE_ENABLED:
1321 cpuset_memory_pressure_enabled = !!val;
1322 break;
1323 case FILE_MEMORY_PRESSURE:
1324 retval = -EACCES;
1325 break;
1326 case FILE_SPREAD_PAGE:
1327 retval = update_flag(CS_SPREAD_PAGE, cs, val);
1328 cs->mems_generation = cpuset_mems_generation++;
1329 break;
1330 case FILE_SPREAD_SLAB:
1331 retval = update_flag(CS_SPREAD_SLAB, cs, val);
1332 cs->mems_generation = cpuset_mems_generation++;
1333 break;
1334 default:
1335 retval = -EINVAL;
1336 break;
1337 }
1338 cgroup_unlock();
1339 return retval;
1340}
1341
Linus Torvalds1da177e2005-04-16 15:20:36 -07001342/*
1343 * These ascii lists should be read in a single call, by using a user
1344 * buffer large enough to hold the entire map. If read in smaller
1345 * chunks, there is no guarantee of atomicity. Since the display format
1346 * used, list of ranges of sequential numbers, is variable length,
1347 * and since these maps can change value dynamically, one could read
1348 * gibberish by doing partial reads while a list was changing.
1349 * A single large read to a buffer that crosses a page boundary is
1350 * ok, because the result being copied to user land is not recomputed
1351 * across a page fault.
1352 */
1353
1354static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
1355{
1356 cpumask_t mask;
1357
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08001358 mutex_lock(&callback_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001359 mask = cs->cpus_allowed;
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08001360 mutex_unlock(&callback_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001361
1362 return cpulist_scnprintf(page, PAGE_SIZE, mask);
1363}
1364
1365static int cpuset_sprintf_memlist(char *page, struct cpuset *cs)
1366{
1367 nodemask_t mask;
1368
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08001369 mutex_lock(&callback_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001370 mask = cs->mems_allowed;
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08001371 mutex_unlock(&callback_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001372
1373 return nodelist_scnprintf(page, PAGE_SIZE, mask);
1374}
1375
Paul Menage8793d852007-10-18 23:39:39 -07001376static ssize_t cpuset_common_file_read(struct cgroup *cont,
1377 struct cftype *cft,
1378 struct file *file,
1379 char __user *buf,
1380 size_t nbytes, loff_t *ppos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381{
Paul Menage8793d852007-10-18 23:39:39 -07001382 struct cpuset *cs = cgroup_cs(cont);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001383 cpuset_filetype_t type = cft->private;
1384 char *page;
1385 ssize_t retval = 0;
1386 char *s;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001387
Mel Gormane12ba742007-10-16 01:25:52 -07001388 if (!(page = (char *)__get_free_page(GFP_TEMPORARY)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001389 return -ENOMEM;
1390
1391 s = page;
1392
1393 switch (type) {
1394 case FILE_CPULIST:
1395 s += cpuset_sprintf_cpulist(s, cs);
1396 break;
1397 case FILE_MEMLIST:
1398 s += cpuset_sprintf_memlist(s, cs);
1399 break;
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09001400 case FILE_SCHED_RELAX_DOMAIN_LEVEL:
1401 s += sprintf(s, "%d", cs->relax_domain_level);
1402 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001403 default:
1404 retval = -EINVAL;
1405 goto out;
1406 }
1407 *s++ = '\n';
Linus Torvalds1da177e2005-04-16 15:20:36 -07001408
Al Viroeacaa1f2005-09-30 03:26:43 +01001409 retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001410out:
1411 free_page((unsigned long)page);
1412 return retval;
1413}
1414
Paul Menage700fe1a2008-04-29 01:00:00 -07001415static u64 cpuset_read_u64(struct cgroup *cont, struct cftype *cft)
1416{
1417 struct cpuset *cs = cgroup_cs(cont);
1418 cpuset_filetype_t type = cft->private;
1419 switch (type) {
1420 case FILE_CPU_EXCLUSIVE:
1421 return is_cpu_exclusive(cs);
1422 case FILE_MEM_EXCLUSIVE:
1423 return is_mem_exclusive(cs);
1424 case FILE_SCHED_LOAD_BALANCE:
1425 return is_sched_load_balance(cs);
1426 case FILE_MEMORY_MIGRATE:
1427 return is_memory_migrate(cs);
1428 case FILE_MEMORY_PRESSURE_ENABLED:
1429 return cpuset_memory_pressure_enabled;
1430 case FILE_MEMORY_PRESSURE:
1431 return fmeter_getrate(&cs->fmeter);
1432 case FILE_SPREAD_PAGE:
1433 return is_spread_page(cs);
1434 case FILE_SPREAD_SLAB:
1435 return is_spread_slab(cs);
1436 default:
1437 BUG();
1438 }
1439}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001440
Linus Torvalds1da177e2005-04-16 15:20:36 -07001441
1442/*
1443 * for the common functions, 'private' gives the type of file
1444 */
1445
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446static struct cftype cft_cpus = {
1447 .name = "cpus",
Paul Menage8793d852007-10-18 23:39:39 -07001448 .read = cpuset_common_file_read,
1449 .write = cpuset_common_file_write,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450 .private = FILE_CPULIST,
1451};
1452
1453static struct cftype cft_mems = {
1454 .name = "mems",
Paul Menage8793d852007-10-18 23:39:39 -07001455 .read = cpuset_common_file_read,
1456 .write = cpuset_common_file_write,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457 .private = FILE_MEMLIST,
1458};
1459
1460static struct cftype cft_cpu_exclusive = {
1461 .name = "cpu_exclusive",
Paul Menage700fe1a2008-04-29 01:00:00 -07001462 .read_u64 = cpuset_read_u64,
1463 .write_u64 = cpuset_write_u64,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001464 .private = FILE_CPU_EXCLUSIVE,
1465};
1466
1467static struct cftype cft_mem_exclusive = {
1468 .name = "mem_exclusive",
Paul Menage700fe1a2008-04-29 01:00:00 -07001469 .read_u64 = cpuset_read_u64,
1470 .write_u64 = cpuset_write_u64,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001471 .private = FILE_MEM_EXCLUSIVE,
1472};
1473
Paul Jackson029190c2007-10-18 23:40:20 -07001474static struct cftype cft_sched_load_balance = {
1475 .name = "sched_load_balance",
Paul Menage700fe1a2008-04-29 01:00:00 -07001476 .read_u64 = cpuset_read_u64,
1477 .write_u64 = cpuset_write_u64,
Paul Jackson029190c2007-10-18 23:40:20 -07001478 .private = FILE_SCHED_LOAD_BALANCE,
1479};
1480
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09001481static struct cftype cft_sched_relax_domain_level = {
1482 .name = "sched_relax_domain_level",
1483 .read = cpuset_common_file_read,
1484 .write = cpuset_common_file_write,
1485 .private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
1486};
1487
Paul Jackson45b07ef2006-01-08 01:00:56 -08001488static struct cftype cft_memory_migrate = {
1489 .name = "memory_migrate",
Paul Menage700fe1a2008-04-29 01:00:00 -07001490 .read_u64 = cpuset_read_u64,
1491 .write_u64 = cpuset_write_u64,
Paul Jackson45b07ef2006-01-08 01:00:56 -08001492 .private = FILE_MEMORY_MIGRATE,
1493};
1494
Paul Jackson3e0d98b2006-01-08 01:01:49 -08001495static struct cftype cft_memory_pressure_enabled = {
1496 .name = "memory_pressure_enabled",
Paul Menage700fe1a2008-04-29 01:00:00 -07001497 .read_u64 = cpuset_read_u64,
1498 .write_u64 = cpuset_write_u64,
Paul Jackson3e0d98b2006-01-08 01:01:49 -08001499 .private = FILE_MEMORY_PRESSURE_ENABLED,
1500};
1501
1502static struct cftype cft_memory_pressure = {
1503 .name = "memory_pressure",
Paul Menage700fe1a2008-04-29 01:00:00 -07001504 .read_u64 = cpuset_read_u64,
1505 .write_u64 = cpuset_write_u64,
Paul Jackson3e0d98b2006-01-08 01:01:49 -08001506 .private = FILE_MEMORY_PRESSURE,
1507};
1508
Paul Jackson825a46a2006-03-24 03:16:03 -08001509static struct cftype cft_spread_page = {
1510 .name = "memory_spread_page",
Paul Menage700fe1a2008-04-29 01:00:00 -07001511 .read_u64 = cpuset_read_u64,
1512 .write_u64 = cpuset_write_u64,
Paul Jackson825a46a2006-03-24 03:16:03 -08001513 .private = FILE_SPREAD_PAGE,
1514};
1515
1516static struct cftype cft_spread_slab = {
1517 .name = "memory_spread_slab",
Paul Menage700fe1a2008-04-29 01:00:00 -07001518 .read_u64 = cpuset_read_u64,
1519 .write_u64 = cpuset_write_u64,
Paul Jackson825a46a2006-03-24 03:16:03 -08001520 .private = FILE_SPREAD_SLAB,
1521};
1522
Paul Menage8793d852007-10-18 23:39:39 -07001523static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524{
1525 int err;
1526
Paul Menage8793d852007-10-18 23:39:39 -07001527 if ((err = cgroup_add_file(cont, ss, &cft_cpus)) < 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528 return err;
Paul Menage8793d852007-10-18 23:39:39 -07001529 if ((err = cgroup_add_file(cont, ss, &cft_mems)) < 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530 return err;
Paul Menage8793d852007-10-18 23:39:39 -07001531 if ((err = cgroup_add_file(cont, ss, &cft_cpu_exclusive)) < 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001532 return err;
Paul Menage8793d852007-10-18 23:39:39 -07001533 if ((err = cgroup_add_file(cont, ss, &cft_mem_exclusive)) < 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534 return err;
Paul Menage8793d852007-10-18 23:39:39 -07001535 if ((err = cgroup_add_file(cont, ss, &cft_memory_migrate)) < 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001536 return err;
Paul Jackson029190c2007-10-18 23:40:20 -07001537 if ((err = cgroup_add_file(cont, ss, &cft_sched_load_balance)) < 0)
1538 return err;
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09001539 if ((err = cgroup_add_file(cont, ss,
1540 &cft_sched_relax_domain_level)) < 0)
1541 return err;
Paul Menage8793d852007-10-18 23:39:39 -07001542 if ((err = cgroup_add_file(cont, ss, &cft_memory_pressure)) < 0)
Paul Jackson45b07ef2006-01-08 01:00:56 -08001543 return err;
Paul Menage8793d852007-10-18 23:39:39 -07001544 if ((err = cgroup_add_file(cont, ss, &cft_spread_page)) < 0)
Paul Jackson3e0d98b2006-01-08 01:01:49 -08001545 return err;
Paul Menage8793d852007-10-18 23:39:39 -07001546 if ((err = cgroup_add_file(cont, ss, &cft_spread_slab)) < 0)
Paul Jackson825a46a2006-03-24 03:16:03 -08001547 return err;
Paul Menage8793d852007-10-18 23:39:39 -07001548 /* memory_pressure_enabled is in root cpuset only */
1549 if (err == 0 && !cont->parent)
1550 err = cgroup_add_file(cont, ss,
1551 &cft_memory_pressure_enabled);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001552 return 0;
1553}
1554
1555/*
Paul Menage8793d852007-10-18 23:39:39 -07001556 * post_clone() is called at the end of cgroup_clone().
1557 * 'cgroup' was just created automatically as a result of
1558 * a cgroup_clone(), and the current task is about to
1559 * be moved into 'cgroup'.
1560 *
1561 * Currently we refuse to set up the cgroup - thereby
1562 * refusing the task to be entered, and as a result refusing
1563 * the sys_unshare() or clone() which initiated it - if any
1564 * sibling cpusets have exclusive cpus or mem.
1565 *
1566 * If this becomes a problem for some users who wish to
1567 * allow that scenario, then cpuset_post_clone() could be
1568 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
Paul Menage2df167a2008-02-07 00:14:45 -08001569 * (and likewise for mems) to the new cgroup. Called with cgroup_mutex
1570 * held.
Paul Menage8793d852007-10-18 23:39:39 -07001571 */
1572static void cpuset_post_clone(struct cgroup_subsys *ss,
1573 struct cgroup *cgroup)
1574{
1575 struct cgroup *parent, *child;
1576 struct cpuset *cs, *parent_cs;
1577
1578 parent = cgroup->parent;
1579 list_for_each_entry(child, &parent->children, sibling) {
1580 cs = cgroup_cs(child);
1581 if (is_mem_exclusive(cs) || is_cpu_exclusive(cs))
1582 return;
1583 }
1584 cs = cgroup_cs(cgroup);
1585 parent_cs = cgroup_cs(parent);
1586
1587 cs->mems_allowed = parent_cs->mems_allowed;
1588 cs->cpus_allowed = parent_cs->cpus_allowed;
1589 return;
1590}
1591
1592/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001593 * cpuset_create - create a cpuset
Paul Menage2df167a2008-02-07 00:14:45 -08001594 * ss: cpuset cgroup subsystem
1595 * cont: control group that the new cpuset will be part of
Linus Torvalds1da177e2005-04-16 15:20:36 -07001596 */
1597
Paul Menage8793d852007-10-18 23:39:39 -07001598static struct cgroup_subsys_state *cpuset_create(
1599 struct cgroup_subsys *ss,
1600 struct cgroup *cont)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601{
1602 struct cpuset *cs;
Paul Menage8793d852007-10-18 23:39:39 -07001603 struct cpuset *parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001604
Paul Menage8793d852007-10-18 23:39:39 -07001605 if (!cont->parent) {
1606 /* This is early initialization for the top cgroup */
1607 top_cpuset.mems_generation = cpuset_mems_generation++;
1608 return &top_cpuset.css;
1609 }
1610 parent = cgroup_cs(cont->parent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001611 cs = kmalloc(sizeof(*cs), GFP_KERNEL);
1612 if (!cs)
Paul Menage8793d852007-10-18 23:39:39 -07001613 return ERR_PTR(-ENOMEM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001614
Paul Jacksoncf2a473c2006-01-08 01:01:54 -08001615 cpuset_update_task_memory_state();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616 cs->flags = 0;
Paul Jackson825a46a2006-03-24 03:16:03 -08001617 if (is_spread_page(parent))
1618 set_bit(CS_SPREAD_PAGE, &cs->flags);
1619 if (is_spread_slab(parent))
1620 set_bit(CS_SPREAD_SLAB, &cs->flags);
Paul Jackson029190c2007-10-18 23:40:20 -07001621 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
Mike Travisf9a86fc2008-04-04 18:11:07 -07001622 cpus_clear(cs->cpus_allowed);
1623 nodes_clear(cs->mems_allowed);
Paul Jackson151a4422006-03-24 03:16:11 -08001624 cs->mems_generation = cpuset_mems_generation++;
Paul Jackson3e0d98b2006-01-08 01:01:49 -08001625 fmeter_init(&cs->fmeter);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09001626 cs->relax_domain_level = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001627
1628 cs->parent = parent;
Paul Jackson202f72d2006-01-08 01:01:57 -08001629 number_of_cpusets++;
Paul Menage8793d852007-10-18 23:39:39 -07001630 return &cs->css ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631}
1632
Paul Jackson029190c2007-10-18 23:40:20 -07001633/*
1634 * Locking note on the strange update_flag() call below:
1635 *
1636 * If the cpuset being removed has its flag 'sched_load_balance'
1637 * enabled, then simulate turning sched_load_balance off, which
Gautham R Shenoy86ef5c92008-01-25 21:08:02 +01001638 * will call rebuild_sched_domains(). The get_online_cpus()
Paul Jackson029190c2007-10-18 23:40:20 -07001639 * call in rebuild_sched_domains() must not be made while holding
1640 * callback_mutex. Elsewhere the kernel nests callback_mutex inside
Gautham R Shenoy86ef5c92008-01-25 21:08:02 +01001641 * get_online_cpus() calls. So the reverse nesting would risk an
Paul Jackson029190c2007-10-18 23:40:20 -07001642 * ABBA deadlock.
1643 */
1644
Paul Menage8793d852007-10-18 23:39:39 -07001645static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001646{
Paul Menage8793d852007-10-18 23:39:39 -07001647 struct cpuset *cs = cgroup_cs(cont);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648
Paul Jacksoncf2a473c2006-01-08 01:01:54 -08001649 cpuset_update_task_memory_state();
Paul Jackson029190c2007-10-18 23:40:20 -07001650
1651 if (is_sched_load_balance(cs))
Paul Menage700fe1a2008-04-29 01:00:00 -07001652 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
Paul Jackson029190c2007-10-18 23:40:20 -07001653
Paul Jackson202f72d2006-01-08 01:01:57 -08001654 number_of_cpusets--;
Paul Menage8793d852007-10-18 23:39:39 -07001655 kfree(cs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001656}
1657
Paul Menage8793d852007-10-18 23:39:39 -07001658struct cgroup_subsys cpuset_subsys = {
1659 .name = "cpuset",
1660 .create = cpuset_create,
1661 .destroy = cpuset_destroy,
1662 .can_attach = cpuset_can_attach,
1663 .attach = cpuset_attach,
1664 .populate = cpuset_populate,
1665 .post_clone = cpuset_post_clone,
1666 .subsys_id = cpuset_subsys_id,
1667 .early_init = 1,
1668};
1669
Paul Jacksonc417f022006-01-08 01:02:01 -08001670/*
1671 * cpuset_init_early - just enough so that the calls to
1672 * cpuset_update_task_memory_state() in early init code
1673 * are harmless.
1674 */
1675
1676int __init cpuset_init_early(void)
1677{
Paul Menage8793d852007-10-18 23:39:39 -07001678 top_cpuset.mems_generation = cpuset_mems_generation++;
Paul Jacksonc417f022006-01-08 01:02:01 -08001679 return 0;
1680}
1681
Paul Menage8793d852007-10-18 23:39:39 -07001682
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683/**
1684 * cpuset_init - initialize cpusets at system boot
1685 *
1686 * Description: Initialize top_cpuset and the cpuset internal file system,
1687 **/
1688
1689int __init cpuset_init(void)
1690{
Paul Menage8793d852007-10-18 23:39:39 -07001691 int err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692
Mike Travisf9a86fc2008-04-04 18:11:07 -07001693 cpus_setall(top_cpuset.cpus_allowed);
1694 nodes_setall(top_cpuset.mems_allowed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001695
Paul Jackson3e0d98b2006-01-08 01:01:49 -08001696 fmeter_init(&top_cpuset.fmeter);
Paul Jackson151a4422006-03-24 03:16:11 -08001697 top_cpuset.mems_generation = cpuset_mems_generation++;
Paul Jackson029190c2007-10-18 23:40:20 -07001698 set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09001699 top_cpuset.relax_domain_level = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001700
Linus Torvalds1da177e2005-04-16 15:20:36 -07001701 err = register_filesystem(&cpuset_fs_type);
1702 if (err < 0)
Paul Menage8793d852007-10-18 23:39:39 -07001703 return err;
1704
Paul Jackson202f72d2006-01-08 01:01:57 -08001705 number_of_cpusets = 1;
Paul Menage8793d852007-10-18 23:39:39 -07001706 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707}
1708
Cliff Wickman956db3c2008-02-07 00:14:43 -08001709/**
1710 * cpuset_do_move_task - move a given task to another cpuset
1711 * @tsk: pointer to task_struct the task to move
1712 * @scan: struct cgroup_scanner contained in its struct cpuset_hotplug_scanner
1713 *
1714 * Called by cgroup_scan_tasks() for each task in a cgroup.
1715 * Return nonzero to stop the walk through the tasks.
1716 */
1717void cpuset_do_move_task(struct task_struct *tsk, struct cgroup_scanner *scan)
1718{
1719 struct cpuset_hotplug_scanner *chsp;
1720
1721 chsp = container_of(scan, struct cpuset_hotplug_scanner, scan);
1722 cgroup_attach_task(chsp->to, tsk);
1723}
1724
1725/**
1726 * move_member_tasks_to_cpuset - move tasks from one cpuset to another
1727 * @from: cpuset in which the tasks currently reside
1728 * @to: cpuset to which the tasks will be moved
1729 *
Paul Jacksonc8d9c902008-02-07 00:14:46 -08001730 * Called with cgroup_mutex held
1731 * callback_mutex must not be held, as cpuset_attach() will take it.
Cliff Wickman956db3c2008-02-07 00:14:43 -08001732 *
1733 * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
1734 * calling callback functions for each.
1735 */
1736static void move_member_tasks_to_cpuset(struct cpuset *from, struct cpuset *to)
1737{
1738 struct cpuset_hotplug_scanner scan;
1739
1740 scan.scan.cg = from->css.cgroup;
1741 scan.scan.test_task = NULL; /* select all tasks in cgroup */
1742 scan.scan.process_task = cpuset_do_move_task;
1743 scan.scan.heap = NULL;
1744 scan.to = to->css.cgroup;
1745
1746 if (cgroup_scan_tasks((struct cgroup_scanner *)&scan))
1747 printk(KERN_ERR "move_member_tasks_to_cpuset: "
1748 "cgroup_scan_tasks failed\n");
1749}
1750
Paul Jacksonb1aac8b2006-09-29 02:01:17 -07001751/*
1752 * If common_cpu_mem_hotplug_unplug(), below, unplugs any CPUs
1753 * or memory nodes, we need to walk over the cpuset hierarchy,
1754 * removing that CPU or node from all cpusets. If this removes the
Cliff Wickman956db3c2008-02-07 00:14:43 -08001755 * last CPU or node from a cpuset, then move the tasks in the empty
1756 * cpuset to its next-highest non-empty parent.
Paul Jacksonb1aac8b2006-09-29 02:01:17 -07001757 *
Paul Jacksonc8d9c902008-02-07 00:14:46 -08001758 * Called with cgroup_mutex held
1759 * callback_mutex must not be held, as cpuset_attach() will take it.
Paul Jacksonb1aac8b2006-09-29 02:01:17 -07001760 */
Cliff Wickman956db3c2008-02-07 00:14:43 -08001761static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
Paul Jacksonb1aac8b2006-09-29 02:01:17 -07001762{
Cliff Wickman956db3c2008-02-07 00:14:43 -08001763 struct cpuset *parent;
Paul Jacksonb1aac8b2006-09-29 02:01:17 -07001764
Paul Jacksonc8d9c902008-02-07 00:14:46 -08001765 /*
1766 * The cgroup's css_sets list is in use if there are tasks
1767 * in the cpuset; the list is empty if there are none;
1768 * the cs->css.refcnt seems always 0.
1769 */
Cliff Wickman956db3c2008-02-07 00:14:43 -08001770 if (list_empty(&cs->css.cgroup->css_sets))
1771 return;
1772
1773 /*
1774 * Find its next-highest non-empty parent, (top cpuset
1775 * has online cpus, so can't be empty).
1776 */
1777 parent = cs->parent;
Paul Jacksonb4501292008-02-07 00:14:47 -08001778 while (cpus_empty(parent->cpus_allowed) ||
1779 nodes_empty(parent->mems_allowed))
Cliff Wickman956db3c2008-02-07 00:14:43 -08001780 parent = parent->parent;
Cliff Wickman956db3c2008-02-07 00:14:43 -08001781
1782 move_member_tasks_to_cpuset(cs, parent);
1783}
1784
1785/*
1786 * Walk the specified cpuset subtree and look for empty cpusets.
1787 * The tasks of such cpuset must be moved to a parent cpuset.
1788 *
Paul Menage2df167a2008-02-07 00:14:45 -08001789 * Called with cgroup_mutex held. We take callback_mutex to modify
Cliff Wickman956db3c2008-02-07 00:14:43 -08001790 * cpus_allowed and mems_allowed.
1791 *
1792 * This walk processes the tree from top to bottom, completing one layer
1793 * before dropping down to the next. It always processes a node before
1794 * any of its children.
1795 *
1796 * For now, since we lack memory hot unplug, we'll never see a cpuset
1797 * that has tasks along with an empty 'mems'. But if we did see such
1798 * a cpuset, we'd handle it just like we do if its 'cpus' was empty.
1799 */
1800static void scan_for_empty_cpusets(const struct cpuset *root)
1801{
1802 struct cpuset *cp; /* scans cpusets being updated */
1803 struct cpuset *child; /* scans child cpusets of cp */
1804 struct list_head queue;
1805 struct cgroup *cont;
1806
1807 INIT_LIST_HEAD(&queue);
1808
1809 list_add_tail((struct list_head *)&root->stack_list, &queue);
1810
Cliff Wickman956db3c2008-02-07 00:14:43 -08001811 while (!list_empty(&queue)) {
1812 cp = container_of(queue.next, struct cpuset, stack_list);
1813 list_del(queue.next);
1814 list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
1815 child = cgroup_cs(cont);
1816 list_add_tail(&child->stack_list, &queue);
1817 }
1818 cont = cp->css.cgroup;
Paul Jacksonb4501292008-02-07 00:14:47 -08001819
1820 /* Continue past cpusets with all cpus, mems online */
1821 if (cpus_subset(cp->cpus_allowed, cpu_online_map) &&
1822 nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY]))
1823 continue;
1824
Cliff Wickman956db3c2008-02-07 00:14:43 -08001825 /* Remove offline cpus and mems from this cpuset. */
Paul Jacksonb4501292008-02-07 00:14:47 -08001826 mutex_lock(&callback_mutex);
Cliff Wickman956db3c2008-02-07 00:14:43 -08001827 cpus_and(cp->cpus_allowed, cp->cpus_allowed, cpu_online_map);
1828 nodes_and(cp->mems_allowed, cp->mems_allowed,
1829 node_states[N_HIGH_MEMORY]);
Paul Jacksonb4501292008-02-07 00:14:47 -08001830 mutex_unlock(&callback_mutex);
1831
1832 /* Move tasks from the empty cpuset to a parent */
Paul Jacksonc8d9c902008-02-07 00:14:46 -08001833 if (cpus_empty(cp->cpus_allowed) ||
Paul Jacksonb4501292008-02-07 00:14:47 -08001834 nodes_empty(cp->mems_allowed))
Cliff Wickman956db3c2008-02-07 00:14:43 -08001835 remove_tasks_in_empty_cpuset(cp);
Cliff Wickman956db3c2008-02-07 00:14:43 -08001836 }
Paul Jacksonb1aac8b2006-09-29 02:01:17 -07001837}
1838
1839/*
1840 * The cpus_allowed and mems_allowed nodemasks in the top_cpuset track
Christoph Lameter0e1e7c72007-10-16 01:25:38 -07001841 * cpu_online_map and node_states[N_HIGH_MEMORY]. Force the top cpuset to
Cliff Wickman956db3c2008-02-07 00:14:43 -08001842 * track what's online after any CPU or memory node hotplug or unplug event.
Paul Jacksonb1aac8b2006-09-29 02:01:17 -07001843 *
1844 * Since there are two callers of this routine, one for CPU hotplug
1845 * events and one for memory node hotplug events, we could have coded
1846 * two separate routines here. We code it as a single common routine
1847 * in order to minimize text size.
1848 */
1849
1850static void common_cpu_mem_hotplug_unplug(void)
1851{
Paul Menage8793d852007-10-18 23:39:39 -07001852 cgroup_lock();
Paul Jacksonb1aac8b2006-09-29 02:01:17 -07001853
Paul Jacksonb1aac8b2006-09-29 02:01:17 -07001854 top_cpuset.cpus_allowed = cpu_online_map;
Christoph Lameter0e1e7c72007-10-16 01:25:38 -07001855 top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
Cliff Wickman956db3c2008-02-07 00:14:43 -08001856 scan_for_empty_cpusets(&top_cpuset);
Paul Jacksonb1aac8b2006-09-29 02:01:17 -07001857
Paul Menage8793d852007-10-18 23:39:39 -07001858 cgroup_unlock();
Paul Jacksonb1aac8b2006-09-29 02:01:17 -07001859}
Paul Jacksonb1aac8b2006-09-29 02:01:17 -07001860
Paul Jackson4c4d50f2006-08-27 01:23:51 -07001861/*
1862 * The top_cpuset tracks what CPUs and Memory Nodes are online,
1863 * period. This is necessary in order to make cpusets transparent
1864 * (of no affect) on systems that are actively using CPU hotplug
1865 * but making no active use of cpusets.
1866 *
Paul Jackson38837fc2006-09-29 02:01:16 -07001867 * This routine ensures that top_cpuset.cpus_allowed tracks
1868 * cpu_online_map on each CPU hotplug (cpuhp) event.
Paul Jackson4c4d50f2006-08-27 01:23:51 -07001869 */
1870
Paul Jackson029190c2007-10-18 23:40:20 -07001871static int cpuset_handle_cpuhp(struct notifier_block *unused_nb,
1872 unsigned long phase, void *unused_cpu)
Paul Jackson4c4d50f2006-08-27 01:23:51 -07001873{
Avi Kivityac076752007-05-24 12:33:15 +03001874 if (phase == CPU_DYING || phase == CPU_DYING_FROZEN)
1875 return NOTIFY_DONE;
1876
Paul Jacksonb1aac8b2006-09-29 02:01:17 -07001877 common_cpu_mem_hotplug_unplug();
Paul Jackson4c4d50f2006-08-27 01:23:51 -07001878 return 0;
1879}
Paul Jackson4c4d50f2006-08-27 01:23:51 -07001880
Paul Jacksonb1aac8b2006-09-29 02:01:17 -07001881#ifdef CONFIG_MEMORY_HOTPLUG
Paul Jackson38837fc2006-09-29 02:01:16 -07001882/*
Christoph Lameter0e1e7c72007-10-16 01:25:38 -07001883 * Keep top_cpuset.mems_allowed tracking node_states[N_HIGH_MEMORY].
1884 * Call this routine anytime after you change
1885 * node_states[N_HIGH_MEMORY].
Paul Jackson38837fc2006-09-29 02:01:16 -07001886 * See also the previous routine cpuset_handle_cpuhp().
1887 */
1888
Al Viro1af98922006-10-10 22:48:57 +01001889void cpuset_track_online_nodes(void)
Paul Jackson38837fc2006-09-29 02:01:16 -07001890{
Paul Jacksonb1aac8b2006-09-29 02:01:17 -07001891 common_cpu_mem_hotplug_unplug();
Paul Jackson38837fc2006-09-29 02:01:16 -07001892}
1893#endif
1894
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895/**
1896 * cpuset_init_smp - initialize cpus_allowed
1897 *
1898 * Description: Finish top cpuset after cpu, node maps are initialized
1899 **/
1900
1901void __init cpuset_init_smp(void)
1902{
1903 top_cpuset.cpus_allowed = cpu_online_map;
Christoph Lameter0e1e7c72007-10-16 01:25:38 -07001904 top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
Paul Jackson4c4d50f2006-08-27 01:23:51 -07001905
1906 hotcpu_notifier(cpuset_handle_cpuhp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907}
1908
1909/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001910
Linus Torvalds1da177e2005-04-16 15:20:36 -07001911 * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset.
1912 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
Mike Travisf9a86fc2008-04-04 18:11:07 -07001913 * @pmask: pointer to cpumask_t variable to receive cpus_allowed set.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914 *
1915 * Description: Returns the cpumask_t cpus_allowed of the cpuset
1916 * attached to the specified @tsk. Guaranteed to return some non-empty
1917 * subset of cpu_online_map, even if this means going outside the
1918 * tasks cpuset.
1919 **/
1920
Mike Travisf9a86fc2008-04-04 18:11:07 -07001921void cpuset_cpus_allowed(struct task_struct *tsk, cpumask_t *pmask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922{
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08001923 mutex_lock(&callback_mutex);
Mike Travisf9a86fc2008-04-04 18:11:07 -07001924 cpuset_cpus_allowed_locked(tsk, pmask);
Cliff Wickman470fd642007-10-18 23:40:46 -07001925 mutex_unlock(&callback_mutex);
Cliff Wickman470fd642007-10-18 23:40:46 -07001926}
1927
1928/**
1929 * cpuset_cpus_allowed_locked - return cpus_allowed mask from a tasks cpuset.
Paul Menage2df167a2008-02-07 00:14:45 -08001930 * Must be called with callback_mutex held.
Cliff Wickman470fd642007-10-18 23:40:46 -07001931 **/
Mike Travisf9a86fc2008-04-04 18:11:07 -07001932void cpuset_cpus_allowed_locked(struct task_struct *tsk, cpumask_t *pmask)
Cliff Wickman470fd642007-10-18 23:40:46 -07001933{
Paul Jackson909d75a2006-01-08 01:01:55 -08001934 task_lock(tsk);
Mike Travisf9a86fc2008-04-04 18:11:07 -07001935 guarantee_online_cpus(task_cs(tsk), pmask);
Paul Jackson909d75a2006-01-08 01:01:55 -08001936 task_unlock(tsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001937}
1938
1939void cpuset_init_current_mems_allowed(void)
1940{
Mike Travisf9a86fc2008-04-04 18:11:07 -07001941 nodes_setall(current->mems_allowed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942}
1943
Randy Dunlapd9fd8a62005-07-27 11:45:11 -07001944/**
Paul Jackson909d75a2006-01-08 01:01:55 -08001945 * cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset.
1946 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
1947 *
1948 * Description: Returns the nodemask_t mems_allowed of the cpuset
1949 * attached to the specified @tsk. Guaranteed to return some non-empty
Christoph Lameter0e1e7c72007-10-16 01:25:38 -07001950 * subset of node_states[N_HIGH_MEMORY], even if this means going outside the
Paul Jackson909d75a2006-01-08 01:01:55 -08001951 * tasks cpuset.
1952 **/
1953
1954nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
1955{
1956 nodemask_t mask;
1957
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08001958 mutex_lock(&callback_mutex);
Paul Jackson909d75a2006-01-08 01:01:55 -08001959 task_lock(tsk);
Paul Menage8793d852007-10-18 23:39:39 -07001960 guarantee_online_mems(task_cs(tsk), &mask);
Paul Jackson909d75a2006-01-08 01:01:55 -08001961 task_unlock(tsk);
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08001962 mutex_unlock(&callback_mutex);
Paul Jackson909d75a2006-01-08 01:01:55 -08001963
1964 return mask;
1965}
1966
1967/**
Mel Gorman19770b32008-04-28 02:12:18 -07001968 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. curremt mems_allowed
1969 * @nodemask: the nodemask to be checked
Randy Dunlapd9fd8a62005-07-27 11:45:11 -07001970 *
Mel Gorman19770b32008-04-28 02:12:18 -07001971 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972 */
Mel Gorman19770b32008-04-28 02:12:18 -07001973int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974{
Mel Gorman19770b32008-04-28 02:12:18 -07001975 return nodes_intersects(*nodemask, current->mems_allowed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976}
1977
Paul Jackson9bf22292005-09-06 15:18:12 -07001978/*
1979 * nearest_exclusive_ancestor() - Returns the nearest mem_exclusive
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08001980 * ancestor to the specified cpuset. Call holding callback_mutex.
Paul Jackson9bf22292005-09-06 15:18:12 -07001981 * If no ancestor is mem_exclusive (an unusual configuration), then
1982 * returns the root cpuset.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983 */
Paul Jackson9bf22292005-09-06 15:18:12 -07001984static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001985{
Paul Jackson9bf22292005-09-06 15:18:12 -07001986 while (!is_mem_exclusive(cs) && cs->parent)
1987 cs = cs->parent;
1988 return cs;
1989}
1990
1991/**
Paul Jackson02a0e532006-12-13 00:34:25 -08001992 * cpuset_zone_allowed_softwall - Can we allocate on zone z's memory node?
Paul Jackson9bf22292005-09-06 15:18:12 -07001993 * @z: is this zone on an allowed node?
Paul Jackson02a0e532006-12-13 00:34:25 -08001994 * @gfp_mask: memory allocation flags
Paul Jackson9bf22292005-09-06 15:18:12 -07001995 *
Paul Jackson02a0e532006-12-13 00:34:25 -08001996 * If we're in interrupt, yes, we can always allocate. If
1997 * __GFP_THISNODE is set, yes, we can always allocate. If zone
Paul Jackson9bf22292005-09-06 15:18:12 -07001998 * z's node is in our tasks mems_allowed, yes. If it's not a
1999 * __GFP_HARDWALL request and this zone's nodes is in the nearest
2000 * mem_exclusive cpuset ancestor to this tasks cpuset, yes.
David Rientjesc596d9f2007-05-06 14:49:32 -07002001 * If the task has been OOM killed and has access to memory reserves
2002 * as specified by the TIF_MEMDIE flag, yes.
Paul Jackson9bf22292005-09-06 15:18:12 -07002003 * Otherwise, no.
2004 *
Paul Jackson02a0e532006-12-13 00:34:25 -08002005 * If __GFP_HARDWALL is set, cpuset_zone_allowed_softwall()
2006 * reduces to cpuset_zone_allowed_hardwall(). Otherwise,
2007 * cpuset_zone_allowed_softwall() might sleep, and might allow a zone
2008 * from an enclosing cpuset.
2009 *
2010 * cpuset_zone_allowed_hardwall() only handles the simpler case of
2011 * hardwall cpusets, and never sleeps.
2012 *
2013 * The __GFP_THISNODE placement logic is really handled elsewhere,
2014 * by forcibly using a zonelist starting at a specified node, and by
2015 * (in get_page_from_freelist()) refusing to consider the zones for
2016 * any node on the zonelist except the first. By the time any such
2017 * calls get to this routine, we should just shut up and say 'yes'.
2018 *
Paul Jackson9bf22292005-09-06 15:18:12 -07002019 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
David Rientjesc596d9f2007-05-06 14:49:32 -07002020 * and do not allow allocations outside the current tasks cpuset
2021 * unless the task has been OOM killed as is marked TIF_MEMDIE.
Paul Jackson9bf22292005-09-06 15:18:12 -07002022 * GFP_KERNEL allocations are not so marked, so can escape to the
Paul Jackson02a0e532006-12-13 00:34:25 -08002023 * nearest enclosing mem_exclusive ancestor cpuset.
Paul Jackson9bf22292005-09-06 15:18:12 -07002024 *
Paul Jackson02a0e532006-12-13 00:34:25 -08002025 * Scanning up parent cpusets requires callback_mutex. The
2026 * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
2027 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
2028 * current tasks mems_allowed came up empty on the first pass over
2029 * the zonelist. So only GFP_KERNEL allocations, if all nodes in the
2030 * cpuset are short of memory, might require taking the callback_mutex
2031 * mutex.
Paul Jackson9bf22292005-09-06 15:18:12 -07002032 *
Paul Jackson36be57f2006-05-20 15:00:10 -07002033 * The first call here from mm/page_alloc:get_page_from_freelist()
Paul Jackson02a0e532006-12-13 00:34:25 -08002034 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
2035 * so no allocation on a node outside the cpuset is allowed (unless
2036 * in interrupt, of course).
Paul Jackson9bf22292005-09-06 15:18:12 -07002037 *
Paul Jackson36be57f2006-05-20 15:00:10 -07002038 * The second pass through get_page_from_freelist() doesn't even call
2039 * here for GFP_ATOMIC calls. For those calls, the __alloc_pages()
2040 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
2041 * in alloc_flags. That logic and the checks below have the combined
2042 * affect that:
Paul Jackson9bf22292005-09-06 15:18:12 -07002043 * in_interrupt - any node ok (current task context irrelevant)
2044 * GFP_ATOMIC - any node ok
David Rientjesc596d9f2007-05-06 14:49:32 -07002045 * TIF_MEMDIE - any node ok
Paul Jackson9bf22292005-09-06 15:18:12 -07002046 * GFP_KERNEL - any node in enclosing mem_exclusive cpuset ok
2047 * GFP_USER - only nodes in current tasks mems allowed ok.
Paul Jackson36be57f2006-05-20 15:00:10 -07002048 *
2049 * Rule:
Paul Jackson02a0e532006-12-13 00:34:25 -08002050 * Don't call cpuset_zone_allowed_softwall if you can't sleep, unless you
Paul Jackson36be57f2006-05-20 15:00:10 -07002051 * pass in the __GFP_HARDWALL flag set in gfp_flag, which disables
2052 * the code that might scan up ancestor cpusets and sleep.
Paul Jackson02a0e532006-12-13 00:34:25 -08002053 */
Paul Jackson9bf22292005-09-06 15:18:12 -07002054
Paul Jackson02a0e532006-12-13 00:34:25 -08002055int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
Paul Jackson9bf22292005-09-06 15:18:12 -07002056{
2057 int node; /* node that zone z is on */
2058 const struct cpuset *cs; /* current cpuset ancestors */
Paul Jackson29afd492006-03-24 03:16:12 -08002059 int allowed; /* is allocation in zone z allowed? */
Paul Jackson9bf22292005-09-06 15:18:12 -07002060
Christoph Lameter9b819d22006-09-25 23:31:40 -07002061 if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
Paul Jackson9bf22292005-09-06 15:18:12 -07002062 return 1;
Christoph Lameter89fa3022006-09-25 23:31:55 -07002063 node = zone_to_nid(z);
Paul Jackson92d1dbd2006-05-20 15:00:11 -07002064 might_sleep_if(!(gfp_mask & __GFP_HARDWALL));
Paul Jackson9bf22292005-09-06 15:18:12 -07002065 if (node_isset(node, current->mems_allowed))
2066 return 1;
David Rientjesc596d9f2007-05-06 14:49:32 -07002067 /*
2068 * Allow tasks that have access to memory reserves because they have
2069 * been OOM killed to get memory anywhere.
2070 */
2071 if (unlikely(test_thread_flag(TIF_MEMDIE)))
2072 return 1;
Paul Jackson9bf22292005-09-06 15:18:12 -07002073 if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */
2074 return 0;
2075
Bob Picco5563e772005-11-13 16:06:35 -08002076 if (current->flags & PF_EXITING) /* Let dying task have memory */
2077 return 1;
2078
Paul Jackson9bf22292005-09-06 15:18:12 -07002079 /* Not hardwall and node outside mems_allowed: scan up cpusets */
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08002080 mutex_lock(&callback_mutex);
Paul Jackson053199e2005-10-30 15:02:30 -08002081
Paul Jackson053199e2005-10-30 15:02:30 -08002082 task_lock(current);
Paul Menage8793d852007-10-18 23:39:39 -07002083 cs = nearest_exclusive_ancestor(task_cs(current));
Paul Jackson053199e2005-10-30 15:02:30 -08002084 task_unlock(current);
2085
Paul Jackson9bf22292005-09-06 15:18:12 -07002086 allowed = node_isset(node, cs->mems_allowed);
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08002087 mutex_unlock(&callback_mutex);
Paul Jackson9bf22292005-09-06 15:18:12 -07002088 return allowed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002089}
2090
Paul Jackson02a0e532006-12-13 00:34:25 -08002091/*
2092 * cpuset_zone_allowed_hardwall - Can we allocate on zone z's memory node?
2093 * @z: is this zone on an allowed node?
2094 * @gfp_mask: memory allocation flags
2095 *
2096 * If we're in interrupt, yes, we can always allocate.
2097 * If __GFP_THISNODE is set, yes, we can always allocate. If zone
David Rientjesc596d9f2007-05-06 14:49:32 -07002098 * z's node is in our tasks mems_allowed, yes. If the task has been
2099 * OOM killed and has access to memory reserves as specified by the
2100 * TIF_MEMDIE flag, yes. Otherwise, no.
Paul Jackson02a0e532006-12-13 00:34:25 -08002101 *
2102 * The __GFP_THISNODE placement logic is really handled elsewhere,
2103 * by forcibly using a zonelist starting at a specified node, and by
2104 * (in get_page_from_freelist()) refusing to consider the zones for
2105 * any node on the zonelist except the first. By the time any such
2106 * calls get to this routine, we should just shut up and say 'yes'.
2107 *
2108 * Unlike the cpuset_zone_allowed_softwall() variant, above,
2109 * this variant requires that the zone be in the current tasks
2110 * mems_allowed or that we're in interrupt. It does not scan up the
2111 * cpuset hierarchy for the nearest enclosing mem_exclusive cpuset.
2112 * It never sleeps.
2113 */
2114
2115int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
2116{
2117 int node; /* node that zone z is on */
2118
2119 if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
2120 return 1;
2121 node = zone_to_nid(z);
2122 if (node_isset(node, current->mems_allowed))
2123 return 1;
Daniel Walkerdedf8b72007-10-18 03:06:04 -07002124 /*
2125 * Allow tasks that have access to memory reserves because they have
2126 * been OOM killed to get memory anywhere.
2127 */
2128 if (unlikely(test_thread_flag(TIF_MEMDIE)))
2129 return 1;
Paul Jackson02a0e532006-12-13 00:34:25 -08002130 return 0;
2131}
2132
Paul Jacksonef08e3b2005-09-06 15:18:13 -07002133/**
Paul Jackson505970b2006-01-14 13:21:06 -08002134 * cpuset_lock - lock out any changes to cpuset structures
2135 *
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08002136 * The out of memory (oom) code needs to mutex_lock cpusets
Paul Jackson505970b2006-01-14 13:21:06 -08002137 * from being changed while it scans the tasklist looking for a
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08002138 * task in an overlapping cpuset. Expose callback_mutex via this
Paul Jackson505970b2006-01-14 13:21:06 -08002139 * cpuset_lock() routine, so the oom code can lock it, before
2140 * locking the task list. The tasklist_lock is a spinlock, so
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08002141 * must be taken inside callback_mutex.
Paul Jackson505970b2006-01-14 13:21:06 -08002142 */
2143
2144void cpuset_lock(void)
2145{
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08002146 mutex_lock(&callback_mutex);
Paul Jackson505970b2006-01-14 13:21:06 -08002147}
2148
2149/**
2150 * cpuset_unlock - release lock on cpuset changes
2151 *
2152 * Undo the lock taken in a previous cpuset_lock() call.
2153 */
2154
2155void cpuset_unlock(void)
2156{
Ingo Molnar3d3f26a2006-03-23 03:00:18 -08002157 mutex_unlock(&callback_mutex);
Paul Jackson505970b2006-01-14 13:21:06 -08002158}
2159
2160/**
Paul Jackson825a46a2006-03-24 03:16:03 -08002161 * cpuset_mem_spread_node() - On which node to begin search for a page
2162 *
2163 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
2164 * tasks in a cpuset with is_spread_page or is_spread_slab set),
2165 * and if the memory allocation used cpuset_mem_spread_node()
2166 * to determine on which node to start looking, as it will for
2167 * certain page cache or slab cache pages such as used for file
2168 * system buffers and inode caches, then instead of starting on the
2169 * local node to look for a free page, rather spread the starting
2170 * node around the tasks mems_allowed nodes.
2171 *
2172 * We don't have to worry about the returned node being offline
2173 * because "it can't happen", and even if it did, it would be ok.
2174 *
2175 * The routines calling guarantee_online_mems() are careful to
2176 * only set nodes in task->mems_allowed that are online. So it
2177 * should not be possible for the following code to return an
2178 * offline node. But if it did, that would be ok, as this routine
2179 * is not returning the node where the allocation must be, only
2180 * the node where the search should start. The zonelist passed to
2181 * __alloc_pages() will include all nodes. If the slab allocator
2182 * is passed an offline node, it will fall back to the local node.
2183 * See kmem_cache_alloc_node().
2184 */
2185
2186int cpuset_mem_spread_node(void)
2187{
2188 int node;
2189
2190 node = next_node(current->cpuset_mem_spread_rotor, current->mems_allowed);
2191 if (node == MAX_NUMNODES)
2192 node = first_node(current->mems_allowed);
2193 current->cpuset_mem_spread_rotor = node;
2194 return node;
2195}
2196EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
2197
2198/**
David Rientjesbbe373f2007-10-16 23:25:58 -07002199 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
2200 * @tsk1: pointer to task_struct of some task.
2201 * @tsk2: pointer to task_struct of some other task.
Paul Jacksonef08e3b2005-09-06 15:18:13 -07002202 *
David Rientjesbbe373f2007-10-16 23:25:58 -07002203 * Description: Return true if @tsk1's mems_allowed intersects the
2204 * mems_allowed of @tsk2. Used by the OOM killer to determine if
2205 * one of the task's memory usage might impact the memory available
2206 * to the other.
Paul Jacksonef08e3b2005-09-06 15:18:13 -07002207 **/
2208
David Rientjesbbe373f2007-10-16 23:25:58 -07002209int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
2210 const struct task_struct *tsk2)
Paul Jacksonef08e3b2005-09-06 15:18:13 -07002211{
David Rientjesbbe373f2007-10-16 23:25:58 -07002212 return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
Paul Jacksonef08e3b2005-09-06 15:18:13 -07002213}
2214
Linus Torvalds1da177e2005-04-16 15:20:36 -07002215/*
Paul Jackson3e0d98b2006-01-08 01:01:49 -08002216 * Collection of memory_pressure is suppressed unless
2217 * this flag is enabled by writing "1" to the special
2218 * cpuset file 'memory_pressure_enabled' in the root cpuset.
2219 */
2220
Paul Jacksonc5b2aff82006-01-08 01:01:51 -08002221int cpuset_memory_pressure_enabled __read_mostly;
Paul Jackson3e0d98b2006-01-08 01:01:49 -08002222
2223/**
2224 * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
2225 *
2226 * Keep a running average of the rate of synchronous (direct)
2227 * page reclaim efforts initiated by tasks in each cpuset.
2228 *
2229 * This represents the rate at which some task in the cpuset
2230 * ran low on memory on all nodes it was allowed to use, and
2231 * had to enter the kernels page reclaim code in an effort to
2232 * create more free memory by tossing clean pages or swapping
2233 * or writing dirty pages.
2234 *
2235 * Display to user space in the per-cpuset read-only file
2236 * "memory_pressure". Value displayed is an integer
2237 * representing the recent rate of entry into the synchronous
2238 * (direct) page reclaim by any task attached to the cpuset.
2239 **/
2240
2241void __cpuset_memory_pressure_bump(void)
2242{
Paul Jackson3e0d98b2006-01-08 01:01:49 -08002243 task_lock(current);
Paul Menage8793d852007-10-18 23:39:39 -07002244 fmeter_markevent(&task_cs(current)->fmeter);
Paul Jackson3e0d98b2006-01-08 01:01:49 -08002245 task_unlock(current);
2246}
2247
Paul Menage8793d852007-10-18 23:39:39 -07002248#ifdef CONFIG_PROC_PID_CPUSET
Paul Jackson3e0d98b2006-01-08 01:01:49 -08002249/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002250 * proc_cpuset_show()
2251 * - Print tasks cpuset path into seq_file.
2252 * - Used for /proc/<pid>/cpuset.
Paul Jackson053199e2005-10-30 15:02:30 -08002253 * - No need to task_lock(tsk) on this tsk->cpuset reference, as it
2254 * doesn't really matter if tsk->cpuset changes after we read it,
Paul Jacksonc8d9c902008-02-07 00:14:46 -08002255 * and we take cgroup_mutex, keeping cpuset_attach() from changing it
Paul Menage2df167a2008-02-07 00:14:45 -08002256 * anyway.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002257 */
Paul Jackson029190c2007-10-18 23:40:20 -07002258static int proc_cpuset_show(struct seq_file *m, void *unused_v)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002259{
Eric W. Biederman13b41b02006-06-26 00:25:56 -07002260 struct pid *pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002261 struct task_struct *tsk;
2262 char *buf;
Paul Menage8793d852007-10-18 23:39:39 -07002263 struct cgroup_subsys_state *css;
Eric W. Biederman99f89552006-06-26 00:25:55 -07002264 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002265
Eric W. Biederman99f89552006-06-26 00:25:55 -07002266 retval = -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002267 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
2268 if (!buf)
Eric W. Biederman99f89552006-06-26 00:25:55 -07002269 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002270
Eric W. Biederman99f89552006-06-26 00:25:55 -07002271 retval = -ESRCH;
Eric W. Biederman13b41b02006-06-26 00:25:56 -07002272 pid = m->private;
2273 tsk = get_pid_task(pid, PIDTYPE_PID);
Eric W. Biederman99f89552006-06-26 00:25:55 -07002274 if (!tsk)
2275 goto out_free;
2276
2277 retval = -EINVAL;
Paul Menage8793d852007-10-18 23:39:39 -07002278 cgroup_lock();
2279 css = task_subsys_state(tsk, cpuset_subsys_id);
2280 retval = cgroup_path(css->cgroup, buf, PAGE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002281 if (retval < 0)
Eric W. Biederman99f89552006-06-26 00:25:55 -07002282 goto out_unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002283 seq_puts(m, buf);
2284 seq_putc(m, '\n');
Eric W. Biederman99f89552006-06-26 00:25:55 -07002285out_unlock:
Paul Menage8793d852007-10-18 23:39:39 -07002286 cgroup_unlock();
Eric W. Biederman99f89552006-06-26 00:25:55 -07002287 put_task_struct(tsk);
2288out_free:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002289 kfree(buf);
Eric W. Biederman99f89552006-06-26 00:25:55 -07002290out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002291 return retval;
2292}
2293
2294static int cpuset_open(struct inode *inode, struct file *file)
2295{
Eric W. Biederman13b41b02006-06-26 00:25:56 -07002296 struct pid *pid = PROC_I(inode)->pid;
2297 return single_open(file, proc_cpuset_show, pid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002298}
2299
Arjan van de Ven9a321442007-02-12 00:55:35 -08002300const struct file_operations proc_cpuset_operations = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002301 .open = cpuset_open,
2302 .read = seq_read,
2303 .llseek = seq_lseek,
2304 .release = single_release,
2305};
Paul Menage8793d852007-10-18 23:39:39 -07002306#endif /* CONFIG_PROC_PID_CPUSET */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002307
2308/* Display task cpus_allowed, mems_allowed in /proc/<pid>/status file. */
Eric W. Biedermandf5f8312008-02-08 04:18:33 -08002309void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002310{
Eric W. Biedermandf5f8312008-02-08 04:18:33 -08002311 seq_printf(m, "Cpus_allowed:\t");
2312 m->count += cpumask_scnprintf(m->buf + m->count, m->size - m->count,
2313 task->cpus_allowed);
2314 seq_printf(m, "\n");
Mike Travis39106dc2008-04-08 11:43:03 -07002315 seq_printf(m, "Cpus_allowed_list:\t");
2316 m->count += cpulist_scnprintf(m->buf + m->count, m->size - m->count,
2317 task->cpus_allowed);
2318 seq_printf(m, "\n");
Eric W. Biedermandf5f8312008-02-08 04:18:33 -08002319 seq_printf(m, "Mems_allowed:\t");
2320 m->count += nodemask_scnprintf(m->buf + m->count, m->size - m->count,
2321 task->mems_allowed);
2322 seq_printf(m, "\n");
Mike Travis39106dc2008-04-08 11:43:03 -07002323 seq_printf(m, "Mems_allowed_list:\t");
2324 m->count += nodelist_scnprintf(m->buf + m->count, m->size - m->count,
2325 task->mems_allowed);
2326 seq_printf(m, "\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002327}