// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/numa.h>
#include <linux/sched/isolation.h>

/**
 * cpumask_next - get the next cpu in a cpumask
 * @n: the cpu prior to the place to search (ie. return will be > @n)
 * @srcp: the cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus set.
 */
unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
}
EXPORT_SYMBOL(cpumask_next);

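/*
 * Illustrative sketch (not part of this file's API): a typical caller walks
 * a mask by feeding the previous return value back into cpumask_next(),
 * starting from -1. cpu_online_mask, nr_cpu_ids and pr_info() are standard
 * kernel symbols; the loop below is only an assumed usage example.
 *
 *	int cpu = -1;
 *
 *	while ((cpu = cpumask_next(cpu, cpu_online_mask)) < nr_cpu_ids)
 *		pr_info("cpu %d is online\n", cpu);
 */
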
/**
 * cpumask_next_and - get the next cpu in *src1p & *src2p
 * @n: the cpu prior to the place to search (ie. return will be > @n)
 * @src1p: the first cpumask pointer
 * @src2p: the second cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus set in both.
 */
int cpumask_next_and(int n, const struct cpumask *src1p,
		     const struct cpumask *src2p)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
		nr_cpumask_bits, n + 1);
}
EXPORT_SYMBOL(cpumask_next_and);

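/*
 * Illustrative sketch (not part of this file's API): walking the
 * intersection of two masks, e.g. the online CPUs against the housekeeping
 * set, without materialising a temporary cpumask. Only an assumed usage
 * example; the choice of masks is arbitrary.
 *
 *	int cpu = -1;
 *
 *	while ((cpu = cpumask_next_and(cpu, cpu_online_mask,
 *			housekeeping_cpumask(HK_FLAG_DOMAIN))) < nr_cpu_ids)
 *		pr_info("usable cpu: %d\n", cpu);
 */
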
/**
 * cpumask_any_but - return a "random" cpu in a cpumask, but not this one.
 * @mask: the cpumask to search
 * @cpu: the cpu to ignore.
 *
 * Often used to find any cpu but smp_processor_id() in a mask.
 * Returns >= nr_cpu_ids if no cpus set.
 */
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
{
	unsigned int i;

	cpumask_check(cpu);
	for_each_cpu(i, mask)
		if (i != cpu)
			break;
	return i;
}
EXPORT_SYMBOL(cpumask_any_but);

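/*
 * Illustrative sketch (not part of this file's API): picking a peer CPU to
 * hand work to, excluding the current one. Assumed usage example only; the
 * my_work item is hypothetical, while queue_work_on(), system_wq and
 * smp_processor_id() are standard kernel symbols.
 *
 *	unsigned int target = cpumask_any_but(cpu_online_mask,
 *					      smp_processor_id());
 *
 *	if (target < nr_cpu_ids)
 *		queue_work_on(target, system_wq, &my_work);
 */
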
/**
 * cpumask_next_wrap - helper to implement for_each_cpu_wrap
 * @n: the cpu prior to the place to search
 * @mask: the cpumask pointer
 * @start: the start point of the iteration
 * @wrap: assume @n crossing @start terminates the iteration
 *
 * Returns >= nr_cpu_ids on completion
 *
 * Note: the @wrap argument is required for the start condition when
 * we cannot assume @start is set in @mask.
 */
int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
	int next;

again:
	next = cpumask_next(n, mask);

	if (wrap && n < start && next >= start) {
		return nr_cpumask_bits;

	} else if (next >= nr_cpumask_bits) {
		wrap = true;
		n = -1;
		goto again;
	}

	return next;
}
EXPORT_SYMBOL(cpumask_next_wrap);

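/*
 * Illustrative sketch (not part of this file's API): cpumask_next_wrap() is
 * the engine behind for_each_cpu_wrap(), which scans a whole mask starting
 * at an arbitrary CPU and wrapping around the end. The starting point below
 * is only an assumed example.
 *
 *	unsigned int cpu, start = smp_processor_id();
 *
 *	for_each_cpu_wrap(cpu, cpu_online_mask, start)
 *		pr_info("visiting cpu %d\n", cpu);
 */
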
/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/**
 * alloc_cpumask_var_node - allocate a struct cpumask on a given node
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 * @node: memory node to allocate from, or NUMA_NO_NODE
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>).
 * Returns TRUE if memory allocation succeeded, FALSE otherwise.
 *
 * In addition, mask will be NULL if this fails.  Note that gcc is
 * usually smart enough to know that mask can never be NULL if
 * CONFIG_CPUMASK_OFFSTACK=n, so does code elimination in that case
 * too.
 */
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	*mask = kmalloc_node(cpumask_size(), flags, node);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (!*mask) {
		printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
		dump_stack();
	}
#endif

	return *mask != NULL;
}
EXPORT_SYMBOL(alloc_cpumask_var_node);

bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
}
EXPORT_SYMBOL(zalloc_cpumask_var_node);

/**
 * alloc_cpumask_var - allocate a struct cpumask
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>).
 *
 * See alloc_cpumask_var_node.
 */
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
}
EXPORT_SYMBOL(alloc_cpumask_var);

bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var(mask, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(zalloc_cpumask_var);

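/*
 * Illustrative sketch (not part of this file's API): the usual pattern is
 * to allocate a scratch mask, fill it, and free it on every exit path; the
 * zeroing variant spares an explicit cpumask_clear(). Everything except the
 * cpumask calls below is an assumed example.
 *
 *	cpumask_var_t scratch;
 *
 *	if (!zalloc_cpumask_var(&scratch, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	cpumask_and(scratch, cpu_online_mask, cpu_present_mask);
 *	... use scratch ...
 *	free_cpumask_var(scratch);
 */
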
Mike Travisec26b802008-12-19 16:56:52 +1030157/**
158 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
159 * @mask: pointer to cpumask_var_t where the cpumask is returned
160 *
161 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
Li Zefane9690a62008-12-31 16:45:50 +0800162 * a nop (in <linux/cpumask.h>).
Mike Travisec26b802008-12-19 16:56:52 +1030163 * Either returns an allocated (zero-filled) cpumask, or causes the
164 * system to panic.
165 */
Rusty Russell2d3854a2008-11-05 13:39:10 +1100166void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
167{
Mike Rapoport7e1c4e22018-10-30 15:09:57 -0700168 *mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES);
Mike Rapoport8a7f97b2019-03-11 23:30:31 -0700169 if (!*mask)
170 panic("%s: Failed to allocate %u bytes\n", __func__,
171 cpumask_size());
Rusty Russell2d3854a2008-11-05 13:39:10 +1100172}
173
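/*
 * Illustrative sketch (not part of this file's API): the bootmem variant is
 * meant for __init code that runs before the slab allocator is available.
 * The mask name and setup function are assumptions for the example only.
 *
 *	static cpumask_var_t boot_scratch_mask;
 *
 *	static int __init my_early_setup(void)
 *	{
 *		alloc_bootmem_cpumask_var(&boot_scratch_mask);
 *		return 0;
 *	}
 */
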
/**
 * free_cpumask_var - frees memory allocated for a struct cpumask.
 * @mask: cpumask to free
 *
 * This is safe on a NULL mask.
 */
void free_cpumask_var(cpumask_var_t mask)
{
	kfree(mask);
}
EXPORT_SYMBOL(free_cpumask_var);

/**
 * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
 * @mask: cpumask to free
 */
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
	memblock_free_early(__pa(mask), cpumask_size());
}
#endif

/**
 * cpumask_local_spread - select the i'th cpu with local NUMA CPUs first
 * @i: index number
 * @node: local numa_node
 *
 * This function selects an online, housekeeping CPU according to a NUMA
 * aware policy; local cpus are returned first, followed by non-local ones,
 * then it wraps around.
 *
 * It's not very efficient, but useful for setup.
 */
unsigned int cpumask_local_spread(unsigned int i, int node)
{
	int cpu, hk_flags;
	const struct cpumask *mask;

	hk_flags = HK_FLAG_DOMAIN | HK_FLAG_MANAGED_IRQ;
	mask = housekeeping_cpumask(hk_flags);
	/* Wrap: we always want a cpu. */
	i %= cpumask_weight(mask);

	if (node == NUMA_NO_NODE) {
		for_each_cpu(cpu, mask) {
			if (i-- == 0)
				return cpu;
		}
	} else {
		/* NUMA first. */
		for_each_cpu_and(cpu, cpumask_of_node(node), mask) {
			if (i-- == 0)
				return cpu;
		}

		for_each_cpu(cpu, mask) {
			/* Skip NUMA nodes, done above. */
			if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
				continue;

			if (i-- == 0)
				return cpu;
		}
	}
	BUG();
}
EXPORT_SYMBOL(cpumask_local_spread);

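/*
 * Illustrative sketch (not part of this file's API): spreading per-queue
 * IRQ affinities of a multi-queue device across CPUs near its NUMA node.
 * pdev, nr_queues and queue_irq[] are assumptions for the example only;
 * dev_to_node(), cpumask_of() and irq_set_affinity_hint() are standard
 * kernel symbols.
 *
 *	int q, cpu, node = dev_to_node(&pdev->dev);
 *
 *	for (q = 0; q < nr_queues; q++) {
 *		cpu = cpumask_local_spread(q, node);
 *		irq_set_affinity_hint(queue_irq[q], cpumask_of(cpu));
 *	}
 */
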
static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);

/**
 * cpumask_any_and_distribute - return an arbitrary cpu within src1p & src2p.
 * @src1p: the first cpumask pointer
 * @src2p: the second cpumask pointer
 *
 * Iterated calls using the same src1p and src2p will be distributed within
 * their intersection.
 *
 * Returns >= nr_cpu_ids if the intersection is empty.
 */
int cpumask_any_and_distribute(const struct cpumask *src1p,
			       const struct cpumask *src2p)
{
	int next, prev;

	/* NOTE: our first selection will skip 0. */
	prev = __this_cpu_read(distribute_cpu_mask_prev);

	next = cpumask_next_and(prev, src1p, src2p);
	if (next >= nr_cpu_ids)
		next = cpumask_first_and(src1p, src2p);

	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_and_distribute);
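
/*
 * Illustrative sketch (not part of this file's API): distributing repeated
 * placement decisions across the allowed set instead of always landing on
 * the first CPU of the intersection. The task pointer @p is an assumption
 * for the example only; cpus_ptr and cpu_online_mask are standard kernel
 * symbols.
 *
 *	int cpu = cpumask_any_and_distribute(p->cpus_ptr, cpu_online_mask);
 *
 *	if (cpu < nr_cpu_ids)
 *		... place the work on @cpu ...
 */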