blob: 04c20de66afc2136e5bf3251b4b1780e30a1b4be [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001/* SPDX-License-Identifier: GPL-2.0 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002#ifndef _LINUX_CPUSET_H
3#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */
11
12#include <linux/sched.h>
Ingo Molnar105ab3d2017-02-01 16:36:40 +010013#include <linux/sched/topology.h>
Ingo Molnarf719ff9b2017-02-06 10:57:33 +010014#include <linux/sched/task.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070015#include <linux/cpumask.h>
16#include <linux/nodemask.h>
David Rientjesa1bc5a42009-04-02 16:57:54 -070017#include <linux/mm.h>
Mel Gorman664eedd2014-06-04 16:10:08 -070018#include <linux/jump_label.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070019
20#ifdef CONFIG_CPUSETS
21
/*
 * Static branch rewrites can happen in an arbitrary order for a given
 * key. In code paths where we need to loop with read_mems_allowed_begin() and
 * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
 * to ensure that begin() always gets rewritten before retry() in the
 * disabled -> enabled transition. If not, then if local irqs are disabled
 * around the loop, we can deadlock since retry() would always be
 * comparing the latest value of the mems_allowed seqcount against 0 as
 * begin() still would see cpusets_enabled() as false. The enabled -> disabled
 * transition should happen in reverse order for the same reasons (want to stop
 * looking at real value of mems_allowed.sequence in retry() first).
 */
34extern struct static_key_false cpusets_pre_enable_key;
Vlastimil Babka002f2902016-05-19 17:14:30 -070035extern struct static_key_false cpusets_enabled_key;
/* Fast test of whether any cpuset is in use; compiles to a static branch,
 * so the disabled case costs essentially nothing on hot paths. */
static inline bool cpusets_enabled(void)
{
	return static_branch_unlikely(&cpusets_enabled_key);
}
40
/*
 * Account one more active cpuset. The *_cpuslocked variants indicate the
 * caller already holds the CPU hotplug read lock.
 *
 * Order matters: pre_enable must flip before enabled so that
 * read_mems_allowed_begin() starts returning the live seqcount before
 * read_mems_allowed_retry() starts comparing against it -- see the
 * ordering comment above cpusets_pre_enable_key.
 */
static inline void cpuset_inc(void)
{
	static_branch_inc_cpuslocked(&cpusets_pre_enable_key);
	static_branch_inc_cpuslocked(&cpusets_enabled_key);
}
46
/*
 * Drop one active cpuset. Keys are decremented in the reverse order of
 * cpuset_inc(): enabled first, then pre_enable, so retry() stops looking
 * at the real seqcount before begin() stops publishing it -- see the
 * ordering comment above cpusets_pre_enable_key.
 */
static inline void cpuset_dec(void)
{
	static_branch_dec_cpuslocked(&cpusets_enabled_key);
	static_branch_dec_cpuslocked(&cpusets_pre_enable_key);
}
Paul Jackson202f72d2006-01-08 01:01:57 -080052
Linus Torvalds1da177e2005-04-16 15:20:36 -070053extern int cpuset_init(void);
54extern void cpuset_init_smp(void);
Peter Zijlstra50e76632017-09-07 11:13:38 +020055extern void cpuset_force_rebuild(void);
Rakib Mullick30e03ac2017-04-09 07:36:14 +060056extern void cpuset_update_active_cpus(void);
Peter Zijlstra50e76632017-09-07 11:13:38 +020057extern void cpuset_wait_for_hotplug(void);
Juri Lelli710da3c2019-07-19 16:00:00 +020058extern void cpuset_read_lock(void);
59extern void cpuset_read_unlock(void);
Li Zefan6af866a2009-01-07 18:08:45 -080060extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
Peter Zijlstra2baab4e2012-03-20 15:57:01 +010061extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
Paul Jackson909d75a2006-01-08 01:01:55 -080062extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
Paul Jackson9276b1bc2006-12-06 20:31:48 -080063#define cpuset_current_mems_allowed (current->mems_allowed)
Linus Torvalds1da177e2005-04-16 15:20:36 -070064void cpuset_init_current_mems_allowed(void);
Mel Gorman19770b32008-04-28 02:12:18 -070065int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
Paul Jackson202f72d2006-01-08 01:01:57 -080066
Vlastimil Babka002f2902016-05-19 17:14:30 -070067extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);
Paul Jackson02a0e532006-12-13 00:34:25 -080068
Vlastimil Babka002f2902016-05-19 17:14:30 -070069static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
Paul Jackson202f72d2006-01-08 01:01:57 -080070{
Vlastimil Babka002f2902016-05-19 17:14:30 -070071 if (cpusets_enabled())
72 return __cpuset_node_allowed(node, gfp_mask);
73 return true;
Paul Jackson02a0e532006-12-13 00:34:25 -080074}
75
Vlastimil Babka002f2902016-05-19 17:14:30 -070076static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
Paul Jackson02a0e532006-12-13 00:34:25 -080077{
Vlastimil Babka002f2902016-05-19 17:14:30 -070078 return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
79}
80
81static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
82{
83 if (cpusets_enabled())
84 return __cpuset_zone_allowed(z, gfp_mask);
85 return true;
Paul Jackson202f72d2006-01-08 01:01:57 -080086}
87
David Rientjesbbe373f2007-10-16 23:25:58 -070088extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
89 const struct task_struct *tsk2);
Paul Jackson3e0d98b2006-01-08 01:01:49 -080090
91#define cpuset_memory_pressure_bump() \
92 do { \
93 if (cpuset_memory_pressure_enabled) \
94 __cpuset_memory_pressure_bump(); \
95 } while (0)
96extern int cpuset_memory_pressure_enabled;
97extern void __cpuset_memory_pressure_bump(void);
98
Eric W. Biedermandf5f8312008-02-08 04:18:33 -080099extern void cpuset_task_status_allowed(struct seq_file *m,
100 struct task_struct *task);
Zefan Li52de4772014-09-18 16:03:36 +0800101extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
102 struct pid *pid, struct task_struct *tsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700103
Paul Jackson825a46a2006-03-24 03:16:03 -0800104extern int cpuset_mem_spread_node(void);
Jack Steiner6adef3e2010-05-26 14:42:49 -0700105extern int cpuset_slab_spread_node(void);
Paul Jackson825a46a2006-03-24 03:16:03 -0800106
/* Return the current task's page-spread flag (see task_spread_page()). */
static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}
111
/* Return the current task's slab-spread flag (see task_spread_slab()). */
static inline int cpuset_do_slab_mem_spread(void)
{
	return task_spread_slab(current);
}
116
Yaowei Bai77ef80c2018-02-06 15:41:24 -0800117extern bool current_cpuset_is_being_rebound(void);
Paul Menage8793d852007-10-18 23:39:39 -0700118
Max Krasnyanskye761b772008-07-15 04:43:49 -0700119extern void rebuild_sched_domains(void);
120
David Rientjesda39da32015-11-05 18:48:05 -0800121extern void cpuset_print_current_mems_allowed(void);
David Rientjes75aa1992009-01-06 14:39:01 -0800122
/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed such as during page allocation. mems_allowed can be updated in
 * parallel and depending on the new value an operation can fail potentially
 * causing process failure. A retry loop with read_mems_allowed_begin and
 * read_mems_allowed_retry prevents these artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	/*
	 * Deliberately tests cpusets_pre_enable_key, not cpusets_enabled_key:
	 * during the disabled -> enabled transition begin() must observe the
	 * live seqcount before retry() does, or an irqs-off begin/retry loop
	 * could spin forever comparing against the stub value 0 (see the
	 * ordering comment above cpusets_pre_enable_key).
	 */
	if (!static_branch_unlikely(&cpusets_pre_enable_key))
		return 0;

	return read_seqcount_begin(&current->mems_allowed_seq);
}
137
/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	/*
	 * Tests cpusets_enabled_key -- the key that is enabled after and
	 * disabled before cpusets_pre_enable_key -- so that a begin()/retry()
	 * pair never compares a live seqcount against the stub value 0 (see
	 * the ordering comment above cpusets_pre_enable_key).
	 */
	if (!static_branch_unlikely(&cpusets_enabled_key))
		return false;

	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
151
/*
 * Publish a new mems_allowed mask for the current task.  The update is
 * made inside a seqcount write section under task_lock(); irqs are
 * disabled across the write section, presumably so a mems_allowed reader
 * running in interrupt context on this CPU cannot spin against a write
 * in progress (standard seqcount-writer discipline -- verify against
 * the seqlock documentation).
 */
static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}
164
Linus Torvalds1da177e2005-04-16 15:20:36 -0700165#else /* !CONFIG_CPUSETS */
166
/* !CONFIG_CPUSETS: cpusets can never be active. */
static inline bool cpusets_enabled(void) { return false; }
168
/* !CONFIG_CPUSETS: nothing to initialize; report success. */
static inline int cpuset_init(void) { return 0; }
/* !CONFIG_CPUSETS: no-op. */
static inline void cpuset_init_smp(void) {}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700171
/* !CONFIG_CPUSETS: no cpuset-driven sched-domain rebuild to force. */
static inline void cpuset_force_rebuild(void) { }
173
/*
 * !CONFIG_CPUSETS: on a CPU hotplug event, rebuild the single default
 * sched-domain partition spanning all CPUs (partition_sched_domains(1, ...)).
 */
static inline void cpuset_update_active_cpus(void)
{
	partition_sched_domains(1, NULL, NULL);
}
178
/* !CONFIG_CPUSETS: no hotplug work to wait for. */
static inline void cpuset_wait_for_hotplug(void) { }
180
/* !CONFIG_CPUSETS: no cpuset lock exists; no-op. */
static inline void cpuset_read_lock(void) { }
/* !CONFIG_CPUSETS: no cpuset lock exists; no-op. */
static inline void cpuset_read_unlock(void) { }
183
/* !CONFIG_CPUSETS: with no cpusets, every task may use any possible CPU. */
static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}
189
/* !CONFIG_CPUSETS: no cpuset-based fallback mask to apply. */
static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}
193
/* !CONFIG_CPUSETS: with no cpusets, every task may use any possible node. */
static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}
198
Lai Jiangshan38d7bee2012-12-12 13:51:24 -0800199#define cpuset_current_mems_allowed (node_states[N_MEMORY])
/* !CONFIG_CPUSETS: no-op. */
static inline void cpuset_init_current_mems_allowed(void) {}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700201
/* !CONFIG_CPUSETS: any nodemask is considered valid. */
static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}
206
/* !CONFIG_CPUSETS: allocation on any node is always allowed. */
static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	return true;
}
211
/* !CONFIG_CPUSETS: allocation from any zone is always allowed. */
static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}
216
/* !CONFIG_CPUSETS: allocation from any zone is always allowed. */
static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}
221
/* !CONFIG_CPUSETS: all tasks share the same mems_allowed, so always true. */
static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}
227
/* !CONFIG_CPUSETS: no memory-pressure accounting; no-op. */
static inline void cpuset_memory_pressure_bump(void) {}
229
/* !CONFIG_CPUSETS: nothing cpuset-related to show in task status. */
static inline void cpuset_task_status_allowed(struct seq_file *m,
					      struct task_struct *task)
{
}
234
/* !CONFIG_CPUSETS: no memory spreading; always node 0. */
static inline int cpuset_mem_spread_node(void)
{
	return 0;
}
239
/* !CONFIG_CPUSETS: no slab spreading; always node 0. */
static inline int cpuset_slab_spread_node(void)
{
	return 0;
}
244
/* !CONFIG_CPUSETS: page spreading is never enabled. */
static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}
249
/* !CONFIG_CPUSETS: slab spreading is never enabled. */
static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}
254
/* !CONFIG_CPUSETS: there is no cpuset to rebind. */
static inline bool current_cpuset_is_being_rebound(void)
{
	return false;
}
259
/*
 * !CONFIG_CPUSETS: rebuild the single default sched-domain partition
 * spanning all CPUs (partition_sched_domains(1, ...)).
 */
static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}
264
/* !CONFIG_CPUSETS: nothing to print; no-op. */
static inline void cpuset_print_current_mems_allowed(void)
{
}
268
/* !CONFIG_CPUSETS: mems_allowed is not tracked per task; no-op. */
static inline void set_mems_allowed(nodemask_t nodemask)
{
}
272
/* !CONFIG_CPUSETS: mems_allowed never changes; any seq value works. */
static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}
277
/* !CONFIG_CPUSETS: mems_allowed never changes; a retry is never needed. */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}
282
Linus Torvalds1da177e2005-04-16 15:20:36 -0700283#endif /* !CONFIG_CPUSETS */
284
285#endif /* _LINUX_CPUSET_H */