blob: 028e8dd82b4425db7582c9dca713a1f77b86b4fe [file] [log] [blame]
Thomas Gleixner46aeb7e2019-05-28 10:10:27 -07001// SPDX-License-Identifier: GPL-2.0-only
Linus Torvalds1da177e2005-04-16 15:20:36 -07002/*
3 * Simple NUMA memory policy for the Linux kernel.
4 *
5 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
Christoph Lameter8bccd852005-10-29 18:16:59 -07006 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 *
8 * NUMA policy allows the user to give hints in which node(s) memory should
9 * be allocated.
10 *
11 * Support four policies per VMA and per process:
12 *
13 * The VMA policy has priority over the process policy for a page fault.
14 *
15 * interleave Allocate memory interleaved over a set of nodes,
16 * with normal fallback if it fails.
17 * For VMA based allocations this interleaves based on the
18 * offset into the backing object or offset into the mapping
19 * for anonymous memory. For process policy an process counter
20 * is used.
Christoph Lameter8bccd852005-10-29 18:16:59 -070021 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 * bind Only allocate memory on a specific set of nodes,
23 * no fallback.
Christoph Lameter8bccd852005-10-29 18:16:59 -070024 * FIXME: memory is allocated starting with the first node
25 * to the last. It would be better if bind would truly restrict
26 * the allocation to memory nodes instead
27 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070028 * preferred Try a specific node first before normal fallback.
David Rientjes00ef2d22013-02-22 16:35:36 -080029 * As a special case NUMA_NO_NODE here means do the allocation
Linus Torvalds1da177e2005-04-16 15:20:36 -070030 * on the local CPU. This is normally identical to default,
31 * but useful to set in a VMA when you have a non default
32 * process policy.
Christoph Lameter8bccd852005-10-29 18:16:59 -070033 *
Dave Hansenb27abac2021-09-02 15:00:06 -070034 * preferred many Try a set of nodes first before normal fallback. This is
35 * similar to preferred without the special case.
36 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070037 * default Allocate on the local node first, or when on a VMA
38 * use the process policy. This is what Linux always did
39 * in a NUMA aware kernel and still does by, ahem, default.
40 *
41 * The process policy is applied for most non interrupt memory allocations
42 * in that process' context. Interrupts ignore the policies and always
43 * try to allocate on the local CPU. The VMA policy is only applied for memory
44 * allocations for a VMA in the VM.
45 *
46 * Currently there are a few corner cases in swapping where the policy
47 * is not applied, but the majority should be handled. When process policy
48 * is used it is not remembered over swap outs/swap ins.
49 *
50 * Only the highest zone in the zone hierarchy gets policied. Allocations
51 * requesting a lower zone just use default policy. This implies that
52 * on systems with highmem kernel lowmem allocation don't get policied.
53 * Same with GFP_DMA allocations.
54 *
55 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
56 * all users and remembered even when nobody has memory mapped.
57 */
58
59/* Notebook:
60 fix mmap readahead to honour policy and enable policy for any page cache
61 object
62 statistics for bigpages
63 global policy for page cache? currently it uses process policy. Requires
64 first item above.
65 handle mremap for shared memory (currently ignored for the policy)
66 grows down?
67 make bind policy root only? It can trigger oom much faster and the
68 kernel is not always grateful with that.
Linus Torvalds1da177e2005-04-16 15:20:36 -070069*/
70
Mitchel Humpherysb1de0d12014-06-06 14:38:30 -070071#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
72
Linus Torvalds1da177e2005-04-16 15:20:36 -070073#include <linux/mempolicy.h>
Christoph Hellwiga5201102019-08-28 16:19:53 +020074#include <linux/pagewalk.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070075#include <linux/highmem.h>
76#include <linux/hugetlb.h>
77#include <linux/kernel.h>
78#include <linux/sched.h>
Ingo Molnar6e84f312017-02-08 18:51:29 +010079#include <linux/sched/mm.h>
Ingo Molnar6a3827d2017-02-08 18:51:31 +010080#include <linux/sched/numa_balancing.h>
Ingo Molnarf719ff9b2017-02-06 10:57:33 +010081#include <linux/sched/task.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070082#include <linux/nodemask.h>
83#include <linux/cpuset.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070084#include <linux/slab.h>
85#include <linux/string.h>
Paul Gortmakerb95f1b312011-10-16 02:01:52 -040086#include <linux/export.h>
Pavel Emelyanovb4888932007-10-18 23:40:14 -070087#include <linux/nsproxy.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070088#include <linux/interrupt.h>
89#include <linux/init.h>
90#include <linux/compat.h>
Otto Ebeling31367462017-11-15 17:38:14 -080091#include <linux/ptrace.h>
Christoph Lameterdc9aa5b2006-01-08 01:00:50 -080092#include <linux/swap.h>
Christoph Lameter1a75a6c2006-01-08 01:01:02 -080093#include <linux/seq_file.h>
94#include <linux/proc_fs.h>
Christoph Lameterb20a3502006-03-22 00:09:12 -080095#include <linux/migrate.h>
Hugh Dickins62b61f62009-12-14 17:59:33 -080096#include <linux/ksm.h>
Christoph Lameter95a402c2006-06-23 02:03:53 -070097#include <linux/rmap.h>
David Quigley86c3a762006-06-23 02:04:02 -070098#include <linux/security.h>
Adrian Bunkdbcb0f12007-10-16 01:26:26 -070099#include <linux/syscalls.h>
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -0700100#include <linux/ctype.h>
KOSAKI Motohiro6d9c2852009-12-14 17:58:11 -0800101#include <linux/mm_inline.h>
Lee Schermerhornb24f53a2012-10-25 14:16:32 +0200102#include <linux/mmu_notifier.h>
Mitchel Humpherysb1de0d12014-06-06 14:38:30 -0700103#include <linux/printk.h>
Naoya Horiguchic8633792017-09-08 16:11:08 -0700104#include <linux/swapops.h>
Christoph Lameterdc9aa5b2006-01-08 01:00:50 -0800105
Linus Torvalds1da177e2005-04-16 15:20:36 -0700106#include <asm/tlbflush.h>
Linus Torvalds7c0f6ba2016-12-24 11:46:01 -0800107#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700108
Nick Piggin62695a82008-10-18 20:26:09 -0700109#include "internal.h"
110
Christoph Lameter38e35862006-01-08 01:01:01 -0800111/* Internal flags */
Christoph Lameterdc9aa5b2006-01-08 01:00:50 -0800112#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */
Christoph Lameter38e35862006-01-08 01:01:01 -0800113#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
Christoph Lameterdc9aa5b2006-01-08 01:00:50 -0800114
Pekka Enbergfcc234f2006-03-22 00:08:13 -0800115static struct kmem_cache *policy_cache;
116static struct kmem_cache *sn_cache;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700117
Linus Torvalds1da177e2005-04-16 15:20:36 -0700118/* Highest zone. An specific allocation for a zone below that is not
119 policied. */
Christoph Lameter62672762007-02-10 01:43:07 -0800120enum zone_type policy_zone = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700121
Lee Schermerhornbea904d2008-04-28 02:13:18 -0700122/*
123 * run-time system-wide default policy => local allocation
124 */
H Hartley Sweetene754d792011-10-31 17:09:23 -0700125static struct mempolicy default_policy = {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700126 .refcnt = ATOMIC_INIT(1), /* never free it */
Feng Tang7858d7b2021-06-30 18:51:00 -0700127 .mode = MPOL_LOCAL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700128};
129
Mel Gorman5606e382012-11-02 18:19:13 +0000130static struct mempolicy preferred_node_policy[MAX_NUMNODES];
131
Dan Williamsb2ca9162020-02-16 12:00:48 -0800132/**
133 * numa_map_to_online_node - Find closest online node
Krzysztof Kozlowskif6e92f42020-08-11 18:31:13 -0700134 * @node: Node id to start the search
Dan Williamsb2ca9162020-02-16 12:00:48 -0800135 *
136 * Lookup the next closest node by distance if @nid is not online.
Randy Dunlapdad5b022022-01-14 14:08:24 -0800137 *
138 * Return: this @node if it is online, otherwise the closest node by distance
Dan Williamsb2ca9162020-02-16 12:00:48 -0800139 */
140int numa_map_to_online_node(int node)
141{
Dan Williams4fcbe962020-02-16 12:00:53 -0800142 int min_dist = INT_MAX, dist, n, min_node;
Dan Williamsb2ca9162020-02-16 12:00:48 -0800143
Dan Williams4fcbe962020-02-16 12:00:53 -0800144 if (node == NUMA_NO_NODE || node_online(node))
145 return node;
Dan Williamsb2ca9162020-02-16 12:00:48 -0800146
147 min_node = node;
Dan Williams4fcbe962020-02-16 12:00:53 -0800148 for_each_online_node(n) {
149 dist = node_distance(node, n);
150 if (dist < min_dist) {
151 min_dist = dist;
152 min_node = n;
Dan Williamsb2ca9162020-02-16 12:00:48 -0800153 }
154 }
155
156 return min_node;
157}
158EXPORT_SYMBOL_GPL(numa_map_to_online_node);
159
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -0700160struct mempolicy *get_task_policy(struct task_struct *p)
Mel Gorman5606e382012-11-02 18:19:13 +0000161{
162 struct mempolicy *pol = p->mempolicy;
Oleg Nesterovf15ca782014-10-09 15:27:43 -0700163 int node;
Mel Gorman5606e382012-11-02 18:19:13 +0000164
Oleg Nesterovf15ca782014-10-09 15:27:43 -0700165 if (pol)
166 return pol;
Mel Gorman5606e382012-11-02 18:19:13 +0000167
Oleg Nesterovf15ca782014-10-09 15:27:43 -0700168 node = numa_node_id();
169 if (node != NUMA_NO_NODE) {
170 pol = &preferred_node_policy[node];
171 /* preferred_node_policy is not initialised early in boot */
172 if (pol->mode)
173 return pol;
Mel Gorman5606e382012-11-02 18:19:13 +0000174 }
175
Oleg Nesterovf15ca782014-10-09 15:27:43 -0700176 return &default_policy;
Mel Gorman5606e382012-11-02 18:19:13 +0000177}
178
David Rientjes37012942008-04-28 02:12:33 -0700179static const struct mempolicy_operations {
180 int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
Vlastimil Babka213980c2017-07-06 15:40:06 -0700181 void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
David Rientjes37012942008-04-28 02:12:33 -0700182} mpol_ops[MPOL_MAX];
183
David Rientjesf5b087b2008-04-28 02:12:27 -0700184static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
185{
Bob Liu6d556292010-05-24 14:31:59 -0700186 return pol->flags & MPOL_MODE_FLAGS;
David Rientjes4c50bc02008-04-28 02:12:30 -0700187}
188
189static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
190 const nodemask_t *rel)
191{
192 nodemask_t tmp;
193 nodes_fold(tmp, *orig, nodes_weight(*rel));
194 nodes_onto(*ret, tmp, *rel);
David Rientjesf5b087b2008-04-28 02:12:27 -0700195}
196
Feng Tangbe897d42021-09-02 15:00:19 -0700197static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
David Rientjes37012942008-04-28 02:12:33 -0700198{
199 if (nodes_empty(*nodes))
200 return -EINVAL;
Ben Widawsky269fbe72021-06-30 18:51:10 -0700201 pol->nodes = *nodes;
David Rientjes37012942008-04-28 02:12:33 -0700202 return 0;
203}
204
205static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
206{
Feng Tang7858d7b2021-06-30 18:51:00 -0700207 if (nodes_empty(*nodes))
208 return -EINVAL;
Ben Widawsky269fbe72021-06-30 18:51:10 -0700209
210 nodes_clear(pol->nodes);
211 node_set(first_node(*nodes), pol->nodes);
David Rientjes37012942008-04-28 02:12:33 -0700212 return 0;
213}
214
Miao Xie58568d22009-06-16 15:31:49 -0700215/*
216 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
217 * any, for the new policy. mpol_new() has already validated the nodes
Feng Tang7858d7b2021-06-30 18:51:00 -0700218 * parameter with respect to the policy mode and flags.
Miao Xie58568d22009-06-16 15:31:49 -0700219 *
220 * Must be called holding task's alloc_lock to protect task's mems_allowed
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -0700221 * and mempolicy. May also be called holding the mmap_lock for write.
Miao Xie58568d22009-06-16 15:31:49 -0700222 */
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -0700223static int mpol_set_nodemask(struct mempolicy *pol,
224 const nodemask_t *nodes, struct nodemask_scratch *nsc)
Miao Xie58568d22009-06-16 15:31:49 -0700225{
Miao Xie58568d22009-06-16 15:31:49 -0700226 int ret;
227
Feng Tang7858d7b2021-06-30 18:51:00 -0700228 /*
229 * Default (pol==NULL) resp. local memory policies are not a
230 * subject of any remapping. They also do not need any special
231 * constructor.
232 */
233 if (!pol || pol->mode == MPOL_LOCAL)
Miao Xie58568d22009-06-16 15:31:49 -0700234 return 0;
Feng Tang7858d7b2021-06-30 18:51:00 -0700235
Lai Jiangshan01f13bd2012-12-12 13:51:33 -0800236 /* Check N_MEMORY */
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -0700237 nodes_and(nsc->mask1,
Lai Jiangshan01f13bd2012-12-12 13:51:33 -0800238 cpuset_current_mems_allowed, node_states[N_MEMORY]);
Miao Xie58568d22009-06-16 15:31:49 -0700239
240 VM_BUG_ON(!nodes);
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -0700241
Feng Tang7858d7b2021-06-30 18:51:00 -0700242 if (pol->flags & MPOL_F_RELATIVE_NODES)
243 mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -0700244 else
Feng Tang7858d7b2021-06-30 18:51:00 -0700245 nodes_and(nsc->mask2, *nodes, nsc->mask1);
246
247 if (mpol_store_user_nodemask(pol))
248 pol->w.user_nodemask = *nodes;
249 else
250 pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;
251
252 ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
Miao Xie58568d22009-06-16 15:31:49 -0700253 return ret;
254}
255
256/*
257 * This function just creates a new policy, does some check and simple
258 * initialization. You must invoke mpol_set_nodemask() to set nodes.
259 */
David Rientjes028fec42008-04-28 02:12:25 -0700260static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
261 nodemask_t *nodes)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700262{
263 struct mempolicy *policy;
264
David Rientjes028fec42008-04-28 02:12:25 -0700265 pr_debug("setting mode %d flags %d nodes[0] %lx\n",
David Rientjes00ef2d22013-02-22 16:35:36 -0800266 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
Paul Mundt140d5a42007-07-15 23:38:16 -0700267
David Rientjes3e1f06452008-04-28 02:12:34 -0700268 if (mode == MPOL_DEFAULT) {
269 if (nodes && !nodes_empty(*nodes))
David Rientjes37012942008-04-28 02:12:33 -0700270 return ERR_PTR(-EINVAL);
Lee Schermerhornd3a71032012-10-25 14:16:29 +0200271 return NULL;
David Rientjes37012942008-04-28 02:12:33 -0700272 }
David Rientjes3e1f06452008-04-28 02:12:34 -0700273 VM_BUG_ON(!nodes);
274
275 /*
276 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
277 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
278 * All other modes require a valid pointer to a non-empty nodemask.
279 */
280 if (mode == MPOL_PREFERRED) {
281 if (nodes_empty(*nodes)) {
282 if (((flags & MPOL_F_STATIC_NODES) ||
283 (flags & MPOL_F_RELATIVE_NODES)))
284 return ERR_PTR(-EINVAL);
Feng Tang7858d7b2021-06-30 18:51:00 -0700285
286 mode = MPOL_LOCAL;
David Rientjes3e1f06452008-04-28 02:12:34 -0700287 }
Peter Zijlstra479e2802012-10-25 14:16:28 +0200288 } else if (mode == MPOL_LOCAL) {
Piotr Kwapulinski8d303e42016-12-12 16:42:49 -0800289 if (!nodes_empty(*nodes) ||
290 (flags & MPOL_F_STATIC_NODES) ||
291 (flags & MPOL_F_RELATIVE_NODES))
Peter Zijlstra479e2802012-10-25 14:16:28 +0200292 return ERR_PTR(-EINVAL);
David Rientjes3e1f06452008-04-28 02:12:34 -0700293 } else if (nodes_empty(*nodes))
294 return ERR_PTR(-EINVAL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700295 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
296 if (!policy)
297 return ERR_PTR(-ENOMEM);
298 atomic_set(&policy->refcnt, 1);
Lee Schermerhorn45c47452008-04-28 02:13:12 -0700299 policy->mode = mode;
David Rientjes3e1f06452008-04-28 02:12:34 -0700300 policy->flags = flags;
Aneesh Kumar K.Vc6018b42022-01-14 14:08:17 -0800301 policy->home_node = NUMA_NO_NODE;
David Rientjesf5b087b2008-04-28 02:12:27 -0700302
David Rientjes37012942008-04-28 02:12:33 -0700303 return policy;
304}
305
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -0700306/* Slow path of a mpol destructor. */
307void __mpol_put(struct mempolicy *p)
308{
309 if (!atomic_dec_and_test(&p->refcnt))
310 return;
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -0700311 kmem_cache_free(policy_cache, p);
312}
313
Vlastimil Babka213980c2017-07-06 15:40:06 -0700314static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
David Rientjes37012942008-04-28 02:12:33 -0700315{
316}
317
Vlastimil Babka213980c2017-07-06 15:40:06 -0700318static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
David Rientjes37012942008-04-28 02:12:33 -0700319{
320 nodemask_t tmp;
321
322 if (pol->flags & MPOL_F_STATIC_NODES)
323 nodes_and(tmp, pol->w.user_nodemask, *nodes);
324 else if (pol->flags & MPOL_F_RELATIVE_NODES)
325 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
326 else {
Ben Widawsky269fbe72021-06-30 18:51:10 -0700327 nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
Vlastimil Babka213980c2017-07-06 15:40:06 -0700328 *nodes);
zhong jiang29b190f2019-06-28 12:06:43 -0700329 pol->w.cpuset_mems_allowed = *nodes;
David Rientjes37012942008-04-28 02:12:33 -0700330 }
331
Miao Xie708c1bb2010-05-24 14:32:07 -0700332 if (nodes_empty(tmp))
333 tmp = *nodes;
334
Ben Widawsky269fbe72021-06-30 18:51:10 -0700335 pol->nodes = tmp;
David Rientjes37012942008-04-28 02:12:33 -0700336}
337
338static void mpol_rebind_preferred(struct mempolicy *pol,
Vlastimil Babka213980c2017-07-06 15:40:06 -0700339 const nodemask_t *nodes)
David Rientjes37012942008-04-28 02:12:33 -0700340{
Feng Tang7858d7b2021-06-30 18:51:00 -0700341 pol->w.cpuset_mems_allowed = *nodes;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700342}
343
Miao Xie708c1bb2010-05-24 14:32:07 -0700344/*
345 * mpol_rebind_policy - Migrate a policy to a different set of nodes
346 *
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -0700347 * Per-vma policies are protected by mmap_lock. Allocations using per-task
Vlastimil Babka213980c2017-07-06 15:40:06 -0700348 * policies are protected by task->mems_allowed_seq to prevent a premature
349 * OOM/allocation failure due to parallel nodemask modification.
Miao Xie708c1bb2010-05-24 14:32:07 -0700350 */
Vlastimil Babka213980c2017-07-06 15:40:06 -0700351static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
David Rientjes1d0d2682008-04-28 02:12:32 -0700352{
David Rientjes1d0d2682008-04-28 02:12:32 -0700353 if (!pol)
354 return;
Feng Tang7858d7b2021-06-30 18:51:00 -0700355 if (!mpol_store_user_nodemask(pol) &&
David Rientjes1d0d2682008-04-28 02:12:32 -0700356 nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
357 return;
Miao Xie708c1bb2010-05-24 14:32:07 -0700358
Vlastimil Babka213980c2017-07-06 15:40:06 -0700359 mpol_ops[pol->mode].rebind(pol, newmask);
David Rientjes1d0d2682008-04-28 02:12:32 -0700360}
361
362/*
363 * Wrapper for mpol_rebind_policy() that just requires task
364 * pointer, and updates task mempolicy.
Miao Xie58568d22009-06-16 15:31:49 -0700365 *
366 * Called with task's alloc_lock held.
David Rientjes1d0d2682008-04-28 02:12:32 -0700367 */
368
Vlastimil Babka213980c2017-07-06 15:40:06 -0700369void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
David Rientjes1d0d2682008-04-28 02:12:32 -0700370{
Vlastimil Babka213980c2017-07-06 15:40:06 -0700371 mpol_rebind_policy(tsk->mempolicy, new);
David Rientjes1d0d2682008-04-28 02:12:32 -0700372}
373
374/*
375 * Rebind each vma in mm to new nodemask.
376 *
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -0700377 * Call holding a reference to mm. Takes mm->mmap_lock during call.
David Rientjes1d0d2682008-04-28 02:12:32 -0700378 */
379
380void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
381{
382 struct vm_area_struct *vma;
383
Michel Lespinassed8ed45c2020-06-08 21:33:25 -0700384 mmap_write_lock(mm);
David Rientjes1d0d2682008-04-28 02:12:32 -0700385 for (vma = mm->mmap; vma; vma = vma->vm_next)
Vlastimil Babka213980c2017-07-06 15:40:06 -0700386 mpol_rebind_policy(vma->vm_policy, new);
Michel Lespinassed8ed45c2020-06-08 21:33:25 -0700387 mmap_write_unlock(mm);
David Rientjes1d0d2682008-04-28 02:12:32 -0700388}
389
David Rientjes37012942008-04-28 02:12:33 -0700390static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
391 [MPOL_DEFAULT] = {
392 .rebind = mpol_rebind_default,
393 },
394 [MPOL_INTERLEAVE] = {
Feng Tangbe897d42021-09-02 15:00:19 -0700395 .create = mpol_new_nodemask,
David Rientjes37012942008-04-28 02:12:33 -0700396 .rebind = mpol_rebind_nodemask,
397 },
398 [MPOL_PREFERRED] = {
399 .create = mpol_new_preferred,
400 .rebind = mpol_rebind_preferred,
401 },
402 [MPOL_BIND] = {
Feng Tangbe897d42021-09-02 15:00:19 -0700403 .create = mpol_new_nodemask,
David Rientjes37012942008-04-28 02:12:33 -0700404 .rebind = mpol_rebind_nodemask,
405 },
Feng Tang7858d7b2021-06-30 18:51:00 -0700406 [MPOL_LOCAL] = {
407 .rebind = mpol_rebind_default,
408 },
Dave Hansenb27abac2021-09-02 15:00:06 -0700409 [MPOL_PREFERRED_MANY] = {
Feng Tangbe897d42021-09-02 15:00:19 -0700410 .create = mpol_new_nodemask,
Dave Hansenb27abac2021-09-02 15:00:06 -0700411 .rebind = mpol_rebind_preferred,
412 },
David Rientjes37012942008-04-28 02:12:33 -0700413};
414
Yang Shia53190a2019-08-13 15:37:18 -0700415static int migrate_page_add(struct page *page, struct list_head *pagelist,
Christoph Lameterfc301282006-01-18 17:42:29 -0800416 unsigned long flags);
Christoph Lameter1a75a6c2006-01-08 01:01:02 -0800417
Naoya Horiguchi6f4576e2015-02-11 15:28:03 -0800418struct queue_pages {
419 struct list_head *pagelist;
420 unsigned long flags;
421 nodemask_t *nmask;
Li Xinhaif18da662019-11-30 17:56:18 -0800422 unsigned long start;
423 unsigned long end;
424 struct vm_area_struct *first;
Naoya Horiguchi6f4576e2015-02-11 15:28:03 -0800425};
426
Naoya Horiguchi98094942013-09-11 14:22:14 -0700427/*
Naoya Horiguchi88aaa2a2017-09-08 16:10:42 -0700428 * Check if the page's nid is in qp->nmask.
429 *
430 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
431 * in the invert of qp->nmask.
432 */
433static inline bool queue_pages_required(struct page *page,
434 struct queue_pages *qp)
435{
436 int nid = page_to_nid(page);
437 unsigned long flags = qp->flags;
438
439 return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
440}
441
Yang Shia7f40cf2019-03-28 20:43:55 -0700442/*
Yang Shid8835442019-08-13 15:37:15 -0700443 * queue_pages_pmd() has four possible return values:
Yang Shie5947d22021-06-30 18:51:07 -0700444 * 0 - pages are placed on the right node or queued successfully, or
445 * special page is met, i.e. huge zero page.
Yang Shid8835442019-08-13 15:37:15 -0700446 * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
447 * specified.
448 * 2 - THP was split.
449 * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an
450 * existing page was already on a node that does not follow the
451 * policy.
Yang Shia7f40cf2019-03-28 20:43:55 -0700452 */
Naoya Horiguchic8633792017-09-08 16:11:08 -0700453static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
454 unsigned long end, struct mm_walk *walk)
Jules Irenge959a7e12020-04-06 20:08:12 -0700455 __releases(ptl)
Naoya Horiguchic8633792017-09-08 16:11:08 -0700456{
457 int ret = 0;
458 struct page *page;
459 struct queue_pages *qp = walk->private;
460 unsigned long flags;
461
462 if (unlikely(is_pmd_migration_entry(*pmd))) {
Yang Shia7f40cf2019-03-28 20:43:55 -0700463 ret = -EIO;
Naoya Horiguchic8633792017-09-08 16:11:08 -0700464 goto unlock;
465 }
466 page = pmd_page(*pmd);
467 if (is_huge_zero_page(page)) {
468 spin_unlock(ptl);
Yang Shie5947d22021-06-30 18:51:07 -0700469 walk->action = ACTION_CONTINUE;
Naoya Horiguchic8633792017-09-08 16:11:08 -0700470 goto out;
471 }
Yang Shid8835442019-08-13 15:37:15 -0700472 if (!queue_pages_required(page, qp))
Naoya Horiguchic8633792017-09-08 16:11:08 -0700473 goto unlock;
Naoya Horiguchic8633792017-09-08 16:11:08 -0700474
Naoya Horiguchic8633792017-09-08 16:11:08 -0700475 flags = qp->flags;
476 /* go to thp migration */
Yang Shia7f40cf2019-03-28 20:43:55 -0700477 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
Yang Shia53190a2019-08-13 15:37:18 -0700478 if (!vma_migratable(walk->vma) ||
479 migrate_page_add(page, qp->pagelist, flags)) {
Yang Shid8835442019-08-13 15:37:15 -0700480 ret = 1;
Yang Shia7f40cf2019-03-28 20:43:55 -0700481 goto unlock;
482 }
Yang Shia7f40cf2019-03-28 20:43:55 -0700483 } else
484 ret = -EIO;
Naoya Horiguchic8633792017-09-08 16:11:08 -0700485unlock:
486 spin_unlock(ptl);
487out:
488 return ret;
489}
490
Naoya Horiguchi88aaa2a2017-09-08 16:10:42 -0700491/*
Naoya Horiguchi98094942013-09-11 14:22:14 -0700492 * Scan through pages checking if pages follow certain conditions,
493 * and move them to the pagelist if they do.
Yang Shid8835442019-08-13 15:37:15 -0700494 *
495 * queue_pages_pte_range() has three possible return values:
Yang Shie5947d22021-06-30 18:51:07 -0700496 * 0 - pages are placed on the right node or queued successfully, or
497 * special page is met, i.e. zero page.
Yang Shid8835442019-08-13 15:37:15 -0700498 * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
499 * specified.
500 * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
501 * on a node that does not follow the policy.
Naoya Horiguchi98094942013-09-11 14:22:14 -0700502 */
Naoya Horiguchi6f4576e2015-02-11 15:28:03 -0800503static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
504 unsigned long end, struct mm_walk *walk)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700505{
Naoya Horiguchi6f4576e2015-02-11 15:28:03 -0800506 struct vm_area_struct *vma = walk->vma;
507 struct page *page;
508 struct queue_pages *qp = walk->private;
509 unsigned long flags = qp->flags;
Naoya Horiguchic8633792017-09-08 16:11:08 -0700510 int ret;
Yang Shid8835442019-08-13 15:37:15 -0700511 bool has_unmovable = false;
Shijie Luo3f088422020-11-01 17:07:40 -0800512 pte_t *pte, *mapped_pte;
Hugh Dickins705e87c2005-10-29 18:16:27 -0700513 spinlock_t *ptl;
Hugh Dickins941150a2005-06-21 17:15:06 -0700514
Naoya Horiguchic8633792017-09-08 16:11:08 -0700515 ptl = pmd_trans_huge_lock(pmd, vma);
516 if (ptl) {
517 ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
Yang Shid8835442019-08-13 15:37:15 -0700518 if (ret != 2)
Yang Shia7f40cf2019-03-28 20:43:55 -0700519 return ret;
Kirill A. Shutemov248db922016-01-15 16:54:14 -0800520 }
Yang Shid8835442019-08-13 15:37:15 -0700521 /* THP was split, fall through to pte walk */
Hugh Dickins91612e02005-06-21 17:15:07 -0700522
Naoya Horiguchi337d9ab2016-07-26 15:24:03 -0700523 if (pmd_trans_unstable(pmd))
524 return 0;
Michal Hocko94723aa2018-04-10 16:30:07 -0700525
Shijie Luo3f088422020-11-01 17:07:40 -0800526 mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
Naoya Horiguchi6f4576e2015-02-11 15:28:03 -0800527 for (; addr != end; pte++, addr += PAGE_SIZE) {
Hugh Dickins91612e02005-06-21 17:15:07 -0700528 if (!pte_present(*pte))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700529 continue;
Linus Torvalds6aab3412005-11-28 14:34:23 -0800530 page = vm_normal_page(vma, addr, *pte);
531 if (!page)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700532 continue;
Nick Piggin053837f2006-01-18 17:42:27 -0800533 /*
Hugh Dickins62b61f62009-12-14 17:59:33 -0800534 * vm_normal_page() filters out zero pages, but there might
535 * still be PageReserved pages to skip, perhaps in a VDSO.
Nick Piggin053837f2006-01-18 17:42:27 -0800536 */
Hugh Dickinsb79bc0a2013-02-22 16:35:13 -0800537 if (PageReserved(page))
Christoph Lameterf4598c82006-01-12 01:05:20 -0800538 continue;
Naoya Horiguchi88aaa2a2017-09-08 16:10:42 -0700539 if (!queue_pages_required(page, qp))
Christoph Lameter38e35862006-01-08 01:01:01 -0800540 continue;
Yang Shia7f40cf2019-03-28 20:43:55 -0700541 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
Yang Shid8835442019-08-13 15:37:15 -0700542 /* MPOL_MF_STRICT must be specified if we get here */
543 if (!vma_migratable(vma)) {
544 has_unmovable = true;
Yang Shia7f40cf2019-03-28 20:43:55 -0700545 break;
Yang Shid8835442019-08-13 15:37:15 -0700546 }
Yang Shia53190a2019-08-13 15:37:18 -0700547
548 /*
549 * Do not abort immediately since there may be
550 * temporary off LRU pages in the range. Still
551 * need migrate other LRU pages.
552 */
553 if (migrate_page_add(page, qp->pagelist, flags))
554 has_unmovable = true;
Yang Shia7f40cf2019-03-28 20:43:55 -0700555 } else
556 break;
Naoya Horiguchi6f4576e2015-02-11 15:28:03 -0800557 }
Shijie Luo3f088422020-11-01 17:07:40 -0800558 pte_unmap_unlock(mapped_pte, ptl);
Naoya Horiguchi6f4576e2015-02-11 15:28:03 -0800559 cond_resched();
Yang Shid8835442019-08-13 15:37:15 -0700560
561 if (has_unmovable)
562 return 1;
563
Yang Shia7f40cf2019-03-28 20:43:55 -0700564 return addr != end ? -EIO : 0;
Hugh Dickins91612e02005-06-21 17:15:07 -0700565}
566
Naoya Horiguchi6f4576e2015-02-11 15:28:03 -0800567static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
568 unsigned long addr, unsigned long end,
569 struct mm_walk *walk)
Naoya Horiguchie2d8cf42013-09-11 14:22:03 -0700570{
Li Xinhaidcf17632020-04-01 21:10:48 -0700571 int ret = 0;
Naoya Horiguchie2d8cf42013-09-11 14:22:03 -0700572#ifdef CONFIG_HUGETLB_PAGE
Naoya Horiguchi6f4576e2015-02-11 15:28:03 -0800573 struct queue_pages *qp = walk->private;
Li Xinhaidcf17632020-04-01 21:10:48 -0700574 unsigned long flags = (qp->flags & MPOL_MF_VALID);
Naoya Horiguchie2d8cf42013-09-11 14:22:03 -0700575 struct page *page;
Kirill A. Shutemovcb900f42013-11-14 14:31:02 -0800576 spinlock_t *ptl;
Naoya Horiguchid4c54912014-06-06 10:00:01 -0400577 pte_t entry;
Naoya Horiguchie2d8cf42013-09-11 14:22:03 -0700578
Naoya Horiguchi6f4576e2015-02-11 15:28:03 -0800579 ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
580 entry = huge_ptep_get(pte);
Naoya Horiguchid4c54912014-06-06 10:00:01 -0400581 if (!pte_present(entry))
582 goto unlock;
583 page = pte_page(entry);
Naoya Horiguchi88aaa2a2017-09-08 16:10:42 -0700584 if (!queue_pages_required(page, qp))
Naoya Horiguchie2d8cf42013-09-11 14:22:03 -0700585 goto unlock;
Li Xinhaidcf17632020-04-01 21:10:48 -0700586
587 if (flags == MPOL_MF_STRICT) {
588 /*
589 * STRICT alone means only detecting misplaced page and no
590 * need to further check other vma.
591 */
592 ret = -EIO;
593 goto unlock;
594 }
595
596 if (!vma_migratable(walk->vma)) {
597 /*
598 * Must be STRICT with MOVE*, otherwise .test_walk() have
599 * stopped walking current vma.
600 * Detecting misplaced page but allow migrating pages which
601 * have been queued.
602 */
603 ret = 1;
604 goto unlock;
605 }
606
Naoya Horiguchie2d8cf42013-09-11 14:22:03 -0700607 /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
608 if (flags & (MPOL_MF_MOVE_ALL) ||
Li Xinhaidcf17632020-04-01 21:10:48 -0700609 (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) {
610 if (!isolate_huge_page(page, qp->pagelist) &&
611 (flags & MPOL_MF_STRICT))
612 /*
613 * Failed to isolate page but allow migrating pages
614 * which have been queued.
615 */
616 ret = 1;
617 }
Naoya Horiguchie2d8cf42013-09-11 14:22:03 -0700618unlock:
Kirill A. Shutemovcb900f42013-11-14 14:31:02 -0800619 spin_unlock(ptl);
Naoya Horiguchie2d8cf42013-09-11 14:22:03 -0700620#else
621 BUG();
622#endif
Li Xinhaidcf17632020-04-01 21:10:48 -0700623 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700624}
625
Aneesh Kumar K.V58772312013-12-06 00:08:22 +0530626#ifdef CONFIG_NUMA_BALANCING
Lee Schermerhornb24f53a2012-10-25 14:16:32 +0200627/*
Mel Gorman4b10e7d2012-10-25 14:16:32 +0200628 * This is used to mark a range of virtual addresses to be inaccessible.
629 * These are later cleared by a NUMA hinting fault. Depending on these
630 * faults, pages may be migrated for better NUMA placement.
631 *
632 * This is assuming that NUMA faults are handled using PROT_NONE. If
633 * an architecture makes a different choice, it will need further
634 * changes to the core.
Lee Schermerhornb24f53a2012-10-25 14:16:32 +0200635 */
Mel Gorman4b10e7d2012-10-25 14:16:32 +0200636unsigned long change_prot_numa(struct vm_area_struct *vma,
637 unsigned long addr, unsigned long end)
Lee Schermerhornb24f53a2012-10-25 14:16:32 +0200638{
Mel Gorman4b10e7d2012-10-25 14:16:32 +0200639 int nr_updated;
Lee Schermerhornb24f53a2012-10-25 14:16:32 +0200640
Peter Xu58705442020-04-06 20:05:45 -0700641 nr_updated = change_protection(vma, addr, end, PAGE_NONE, MM_CP_PROT_NUMA);
Mel Gorman03c5a6e2012-11-02 14:52:48 +0000642 if (nr_updated)
643 count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
Lee Schermerhornb24f53a2012-10-25 14:16:32 +0200644
Mel Gorman4b10e7d2012-10-25 14:16:32 +0200645 return nr_updated;
Lee Schermerhornb24f53a2012-10-25 14:16:32 +0200646}
647#else
648static unsigned long change_prot_numa(struct vm_area_struct *vma,
649 unsigned long addr, unsigned long end)
650{
651 return 0;
652}
Aneesh Kumar K.V58772312013-12-06 00:08:22 +0530653#endif /* CONFIG_NUMA_BALANCING */
Lee Schermerhornb24f53a2012-10-25 14:16:32 +0200654
Naoya Horiguchi6f4576e2015-02-11 15:28:03 -0800655static int queue_pages_test_walk(unsigned long start, unsigned long end,
656 struct mm_walk *walk)
657{
658 struct vm_area_struct *vma = walk->vma;
659 struct queue_pages *qp = walk->private;
660 unsigned long endvma = vma->vm_end;
661 unsigned long flags = qp->flags;
662
Li Xinhaia18b3ac22019-11-30 17:56:15 -0800663 /* range check first */
Miaohe Lince331352021-02-24 12:09:47 -0800664 VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);
Li Xinhaif18da662019-11-30 17:56:18 -0800665
666 if (!qp->first) {
667 qp->first = vma;
668 if (!(flags & MPOL_MF_DISCONTIG_OK) &&
669 (qp->start < vma->vm_start))
670 /* hole at head side of range */
Li Xinhaia18b3ac22019-11-30 17:56:15 -0800671 return -EFAULT;
672 }
Li Xinhaif18da662019-11-30 17:56:18 -0800673 if (!(flags & MPOL_MF_DISCONTIG_OK) &&
674 ((vma->vm_end < qp->end) &&
675 (!vma->vm_next || vma->vm_end < vma->vm_next->vm_start)))
676 /* hole at middle or tail of range */
677 return -EFAULT;
Li Xinhaia18b3ac22019-11-30 17:56:15 -0800678
Yang Shia7f40cf2019-03-28 20:43:55 -0700679 /*
680 * Need check MPOL_MF_STRICT to return -EIO if possible
681 * regardless of vma_migratable
682 */
683 if (!vma_migratable(vma) &&
684 !(flags & MPOL_MF_STRICT))
Naoya Horiguchi48684a62015-02-11 15:28:06 -0800685 return 1;
686
Naoya Horiguchi6f4576e2015-02-11 15:28:03 -0800687 if (endvma > end)
688 endvma = end;
Naoya Horiguchi6f4576e2015-02-11 15:28:03 -0800689
Naoya Horiguchi6f4576e2015-02-11 15:28:03 -0800690 if (flags & MPOL_MF_LAZY) {
691 /* Similar to task_numa_work, skip inaccessible VMAs */
Anshuman Khandual3122e802020-04-06 20:03:47 -0700692 if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
Liang Chen4355c012016-03-15 14:56:42 -0700693 !(vma->vm_flags & VM_MIXEDMAP))
Naoya Horiguchi6f4576e2015-02-11 15:28:03 -0800694 change_prot_numa(vma, start, endvma);
695 return 1;
696 }
697
Kirill A. Shutemov77bf45e2016-02-05 15:36:33 -0800698 /* queue pages from current vma */
Yang Shia7f40cf2019-03-28 20:43:55 -0700699 if (flags & MPOL_MF_VALID)
Naoya Horiguchi6f4576e2015-02-11 15:28:03 -0800700 return 0;
701 return 1;
702}
703
Christoph Hellwig7b86ac32019-08-28 16:19:54 +0200704static const struct mm_walk_ops queue_pages_walk_ops = {
705 .hugetlb_entry = queue_pages_hugetlb,
706 .pmd_entry = queue_pages_pte_range,
707 .test_walk = queue_pages_test_walk,
708};
709
Christoph Lameterdc9aa5b2006-01-08 01:00:50 -0800710/*
Naoya Horiguchi98094942013-09-11 14:22:14 -0700711 * Walk through page tables and collect pages to be migrated.
712 *
713 * If pages found in a given range are on a set of nodes (determined by
714 * @nodes and @flags,) it's isolated and queued to the pagelist which is
Yang Shid8835442019-08-13 15:37:15 -0700715 * passed via @private.
716 *
717 * queue_pages_range() has three possible return values:
718 * 1 - there is unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
719 * specified.
720 * 0 - queue pages successfully or no misplaced page.
Yang Shia85dfc32019-11-15 17:34:33 -0800721 * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
722 * memory range specified by nodemask and maxnode points outside
723 * your accessible address space (-EFAULT)
Christoph Lameterdc9aa5b2006-01-08 01:00:50 -0800724 */
Hugh Dickinsd05f0cdcb2014-06-23 13:22:07 -0700725static int
Naoya Horiguchi98094942013-09-11 14:22:14 -0700726queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
Naoya Horiguchi6f4576e2015-02-11 15:28:03 -0800727 nodemask_t *nodes, unsigned long flags,
728 struct list_head *pagelist)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700729{
Li Xinhaif18da662019-11-30 17:56:18 -0800730 int err;
Naoya Horiguchi6f4576e2015-02-11 15:28:03 -0800731 struct queue_pages qp = {
732 .pagelist = pagelist,
733 .flags = flags,
734 .nmask = nodes,
Li Xinhaif18da662019-11-30 17:56:18 -0800735 .start = start,
736 .end = end,
737 .first = NULL,
Naoya Horiguchi6f4576e2015-02-11 15:28:03 -0800738 };
Linus Torvalds1da177e2005-04-16 15:20:36 -0700739
Li Xinhaif18da662019-11-30 17:56:18 -0800740 err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);
741
742 if (!qp.first)
743 /* whole range in hole */
744 err = -EFAULT;
745
746 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700747}
748
KOSAKI Motohiro869833f2012-10-08 16:29:16 -0700749/*
750 * Apply policy to a single VMA
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -0700751 * This must be called with the mmap_lock held for writing.
KOSAKI Motohiro869833f2012-10-08 16:29:16 -0700752 */
753static int vma_replace_policy(struct vm_area_struct *vma,
754 struct mempolicy *pol)
KOSAKI Motohiro8d34694c12012-10-08 16:29:14 -0700755{
KOSAKI Motohiro869833f2012-10-08 16:29:16 -0700756 int err;
757 struct mempolicy *old;
758 struct mempolicy *new;
KOSAKI Motohiro8d34694c12012-10-08 16:29:14 -0700759
760 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
761 vma->vm_start, vma->vm_end, vma->vm_pgoff,
762 vma->vm_ops, vma->vm_file,
763 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
764
KOSAKI Motohiro869833f2012-10-08 16:29:16 -0700765 new = mpol_dup(pol);
766 if (IS_ERR(new))
767 return PTR_ERR(new);
768
769 if (vma->vm_ops && vma->vm_ops->set_policy) {
KOSAKI Motohiro8d34694c12012-10-08 16:29:14 -0700770 err = vma->vm_ops->set_policy(vma, new);
KOSAKI Motohiro869833f2012-10-08 16:29:16 -0700771 if (err)
772 goto err_out;
KOSAKI Motohiro8d34694c12012-10-08 16:29:14 -0700773 }
KOSAKI Motohiro869833f2012-10-08 16:29:16 -0700774
775 old = vma->vm_policy;
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -0700776 vma->vm_policy = new; /* protected by mmap_lock */
KOSAKI Motohiro869833f2012-10-08 16:29:16 -0700777 mpol_put(old);
778
779 return 0;
780 err_out:
781 mpol_put(new);
KOSAKI Motohiro8d34694c12012-10-08 16:29:14 -0700782 return err;
783}
784
Linus Torvalds1da177e2005-04-16 15:20:36 -0700785/* Step 2: apply policy to a range and do splits. */
KOSAKI Motohiro9d8cebd2010-03-05 13:41:57 -0800786static int mbind_range(struct mm_struct *mm, unsigned long start,
787 unsigned long end, struct mempolicy *new_pol)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700788{
789 struct vm_area_struct *next;
KOSAKI Motohiro9d8cebd2010-03-05 13:41:57 -0800790 struct vm_area_struct *prev;
791 struct vm_area_struct *vma;
792 int err = 0;
KOSAKI Motohiroe26a5112011-12-28 15:57:11 -0800793 pgoff_t pgoff;
KOSAKI Motohiro9d8cebd2010-03-05 13:41:57 -0800794 unsigned long vmstart;
795 unsigned long vmend;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700796
Linus Torvalds097d5912012-03-06 18:23:36 -0800797 vma = find_vma(mm, start);
Li Xinhaif18da662019-11-30 17:56:18 -0800798 VM_BUG_ON(!vma);
KOSAKI Motohiro9d8cebd2010-03-05 13:41:57 -0800799
Linus Torvalds097d5912012-03-06 18:23:36 -0800800 prev = vma->vm_prev;
KOSAKI Motohiroe26a5112011-12-28 15:57:11 -0800801 if (start > vma->vm_start)
802 prev = vma;
803
KOSAKI Motohiro9d8cebd2010-03-05 13:41:57 -0800804 for (; vma && vma->vm_start < end; prev = vma, vma = next) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700805 next = vma->vm_next;
KOSAKI Motohiro9d8cebd2010-03-05 13:41:57 -0800806 vmstart = max(start, vma->vm_start);
807 vmend = min(end, vma->vm_end);
808
KOSAKI Motohiroe26a5112011-12-28 15:57:11 -0800809 if (mpol_equal(vma_policy(vma), new_pol))
810 continue;
811
812 pgoff = vma->vm_pgoff +
813 ((vmstart - vma->vm_start) >> PAGE_SHIFT);
KOSAKI Motohiro9d8cebd2010-03-05 13:41:57 -0800814 prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
Andrea Arcangeli19a809a2015-09-04 15:46:24 -0700815 vma->anon_vma, vma->vm_file, pgoff,
Colin Cross9a100642022-01-14 14:05:59 -0800816 new_pol, vma->vm_userfaultfd_ctx,
817 vma_anon_name(vma));
KOSAKI Motohiro9d8cebd2010-03-05 13:41:57 -0800818 if (prev) {
819 vma = prev;
820 next = vma->vm_next;
Oleg Nesterov3964acd2013-07-31 13:53:28 -0700821 if (mpol_equal(vma_policy(vma), new_pol))
822 continue;
823 /* vma_merge() joined vma && vma->next, case 8 */
824 goto replace;
KOSAKI Motohiro9d8cebd2010-03-05 13:41:57 -0800825 }
826 if (vma->vm_start != vmstart) {
827 err = split_vma(vma->vm_mm, vma, vmstart, 1);
828 if (err)
829 goto out;
830 }
831 if (vma->vm_end != vmend) {
832 err = split_vma(vma->vm_mm, vma, vmend, 0);
833 if (err)
834 goto out;
835 }
Oleg Nesterov3964acd2013-07-31 13:53:28 -0700836 replace:
KOSAKI Motohiro869833f2012-10-08 16:29:16 -0700837 err = vma_replace_policy(vma, new_pol);
KOSAKI Motohiro8d34694c12012-10-08 16:29:14 -0700838 if (err)
839 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700840 }
KOSAKI Motohiro9d8cebd2010-03-05 13:41:57 -0800841
842 out:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700843 return err;
844}
845
Linus Torvalds1da177e2005-04-16 15:20:36 -0700846/* Set the process memory policy */
David Rientjes028fec42008-04-28 02:12:25 -0700847static long do_set_mempolicy(unsigned short mode, unsigned short flags,
848 nodemask_t *nodes)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700849{
Miao Xie58568d22009-06-16 15:31:49 -0700850 struct mempolicy *new, *old;
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -0700851 NODEMASK_SCRATCH(scratch);
Miao Xie58568d22009-06-16 15:31:49 -0700852 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700853
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -0700854 if (!scratch)
855 return -ENOMEM;
Lee Schermerhornf4e53d92008-04-28 02:13:10 -0700856
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -0700857 new = mpol_new(mode, flags, nodes);
858 if (IS_ERR(new)) {
859 ret = PTR_ERR(new);
860 goto out;
861 }
Oleg Nesterov2c7c3a72014-10-09 15:27:55 -0700862
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -0700863 ret = mpol_set_nodemask(new, nodes, scratch);
Miao Xie58568d22009-06-16 15:31:49 -0700864 if (ret) {
Miao Xie58568d22009-06-16 15:31:49 -0700865 mpol_put(new);
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -0700866 goto out;
Miao Xie58568d22009-06-16 15:31:49 -0700867 }
Wei Yang78b132e2020-10-13 16:57:08 -0700868 task_lock(current);
Miao Xie58568d22009-06-16 15:31:49 -0700869 old = current->mempolicy;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700870 current->mempolicy = new;
Vlastimil Babka45816682017-07-06 15:39:59 -0700871 if (new && new->mode == MPOL_INTERLEAVE)
872 current->il_prev = MAX_NUMNODES-1;
Miao Xie58568d22009-06-16 15:31:49 -0700873 task_unlock(current);
Miao Xie58568d22009-06-16 15:31:49 -0700874 mpol_put(old);
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -0700875 ret = 0;
876out:
877 NODEMASK_SCRATCH_FREE(scratch);
878 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700879}
880
Lee Schermerhornbea904d2008-04-28 02:13:18 -0700881/*
882 * Return nodemask for policy for get_mempolicy() query
Miao Xie58568d22009-06-16 15:31:49 -0700883 *
884 * Called with task's alloc_lock held
Lee Schermerhornbea904d2008-04-28 02:13:18 -0700885 */
886static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700887{
Andi Kleendfcd3c02005-10-29 18:15:48 -0700888 nodes_clear(*nodes);
Lee Schermerhornbea904d2008-04-28 02:13:18 -0700889 if (p == &default_policy)
890 return;
891
Lee Schermerhorn45c47452008-04-28 02:13:12 -0700892 switch (p->mode) {
Mel Gorman19770b32008-04-28 02:12:18 -0700893 case MPOL_BIND:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700894 case MPOL_INTERLEAVE:
Ben Widawsky269fbe72021-06-30 18:51:10 -0700895 case MPOL_PREFERRED:
Dave Hansenb27abac2021-09-02 15:00:06 -0700896 case MPOL_PREFERRED_MANY:
Ben Widawsky269fbe72021-06-30 18:51:10 -0700897 *nodes = p->nodes;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700898 break;
Feng Tang7858d7b2021-06-30 18:51:00 -0700899 case MPOL_LOCAL:
900 /* return empty node mask for local allocation */
901 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700902 default:
903 BUG();
904 }
905}
906
Andrea Arcangeli3b9aadf2018-10-26 15:05:16 -0700907static int lookup_node(struct mm_struct *mm, unsigned long addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700908{
Peter Xuba841072020-04-07 21:40:09 -0400909 struct page *p = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700910 int err;
911
Andrea Arcangeli3b9aadf2018-10-26 15:05:16 -0700912 int locked = 1;
913 err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
Michal Hocko2d3a36a2020-06-03 16:03:25 -0700914 if (err > 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700915 err = page_to_nid(p);
916 put_page(p);
917 }
Andrea Arcangeli3b9aadf2018-10-26 15:05:16 -0700918 if (locked)
Michel Lespinassed8ed45c2020-06-08 21:33:25 -0700919 mmap_read_unlock(mm);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700920 return err;
921}
922
Linus Torvalds1da177e2005-04-16 15:20:36 -0700923/* Retrieve NUMA policy */
Adrian Bunkdbcb0f12007-10-16 01:26:26 -0700924static long do_get_mempolicy(int *policy, nodemask_t *nmask,
925 unsigned long addr, unsigned long flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700926{
Christoph Lameter8bccd852005-10-29 18:16:59 -0700927 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700928 struct mm_struct *mm = current->mm;
929 struct vm_area_struct *vma = NULL;
Andrea Arcangeli3b9aadf2018-10-26 15:05:16 -0700930 struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700931
Lee Schermerhorn754af6f2007-10-16 01:24:51 -0700932 if (flags &
933 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700934 return -EINVAL;
Lee Schermerhorn754af6f2007-10-16 01:24:51 -0700935
936 if (flags & MPOL_F_MEMS_ALLOWED) {
937 if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
938 return -EINVAL;
939 *policy = 0; /* just so it's initialized */
Miao Xie58568d22009-06-16 15:31:49 -0700940 task_lock(current);
Lee Schermerhorn754af6f2007-10-16 01:24:51 -0700941 *nmask = cpuset_current_mems_allowed;
Miao Xie58568d22009-06-16 15:31:49 -0700942 task_unlock(current);
Lee Schermerhorn754af6f2007-10-16 01:24:51 -0700943 return 0;
944 }
945
Linus Torvalds1da177e2005-04-16 15:20:36 -0700946 if (flags & MPOL_F_ADDR) {
Lee Schermerhornbea904d2008-04-28 02:13:18 -0700947 /*
948 * Do NOT fall back to task policy if the
949 * vma/shared policy at addr is NULL. We
950 * want to return MPOL_DEFAULT in this case.
951 */
Michel Lespinassed8ed45c2020-06-08 21:33:25 -0700952 mmap_read_lock(mm);
Liam Howlett33e35752021-06-28 19:39:53 -0700953 vma = vma_lookup(mm, addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700954 if (!vma) {
Michel Lespinassed8ed45c2020-06-08 21:33:25 -0700955 mmap_read_unlock(mm);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700956 return -EFAULT;
957 }
958 if (vma->vm_ops && vma->vm_ops->get_policy)
959 pol = vma->vm_ops->get_policy(vma, addr);
960 else
961 pol = vma->vm_policy;
962 } else if (addr)
963 return -EINVAL;
964
965 if (!pol)
Lee Schermerhornbea904d2008-04-28 02:13:18 -0700966 pol = &default_policy; /* indicates default behavior */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700967
968 if (flags & MPOL_F_NODE) {
969 if (flags & MPOL_F_ADDR) {
Andrea Arcangeli3b9aadf2018-10-26 15:05:16 -0700970 /*
971 * Take a refcount on the mpol, lookup_node()
Lu Jialinbaf2f902021-05-06 18:06:50 -0700972 * will drop the mmap_lock, so after calling
Andrea Arcangeli3b9aadf2018-10-26 15:05:16 -0700973 * lookup_node() only "pol" remains valid, "vma"
974 * is stale.
975 */
976 pol_refcount = pol;
977 vma = NULL;
978 mpol_get(pol);
979 err = lookup_node(mm, addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700980 if (err < 0)
981 goto out;
Christoph Lameter8bccd852005-10-29 18:16:59 -0700982 *policy = err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700983 } else if (pol == current->mempolicy &&
Lee Schermerhorn45c47452008-04-28 02:13:12 -0700984 pol->mode == MPOL_INTERLEAVE) {
Ben Widawsky269fbe72021-06-30 18:51:10 -0700985 *policy = next_node_in(current->il_prev, pol->nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700986 } else {
987 err = -EINVAL;
988 goto out;
989 }
Lee Schermerhornbea904d2008-04-28 02:13:18 -0700990 } else {
991 *policy = pol == &default_policy ? MPOL_DEFAULT :
992 pol->mode;
David Rientjesd79df632008-07-04 12:24:13 -0700993 /*
994 * Internal mempolicy flags must be masked off before exposing
995 * the policy to userspace.
996 */
997 *policy |= (pol->flags & MPOL_MODE_FLAGS);
Lee Schermerhornbea904d2008-04-28 02:13:18 -0700998 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700999
Linus Torvalds1da177e2005-04-16 15:20:36 -07001000 err = 0;
Miao Xie58568d22009-06-16 15:31:49 -07001001 if (nmask) {
Lee Schermerhornc6b6ef82010-03-23 13:35:41 -07001002 if (mpol_store_user_nodemask(pol)) {
1003 *nmask = pol->w.user_nodemask;
1004 } else {
1005 task_lock(current);
1006 get_policy_nodemask(pol, nmask);
1007 task_unlock(current);
1008 }
Miao Xie58568d22009-06-16 15:31:49 -07001009 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001010
1011 out:
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001012 mpol_cond_put(pol);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001013 if (vma)
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001014 mmap_read_unlock(mm);
Andrea Arcangeli3b9aadf2018-10-26 15:05:16 -07001015 if (pol_refcount)
1016 mpol_put(pol_refcount);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001017 return err;
1018}
1019
Christoph Lameterb20a3502006-03-22 00:09:12 -08001020#ifdef CONFIG_MIGRATION
Christoph Lameter8bccd852005-10-29 18:16:59 -07001021/*
Naoya Horiguchic8633792017-09-08 16:11:08 -07001022 * page migration, thp tail pages can be passed.
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001023 */
Yang Shia53190a2019-08-13 15:37:18 -07001024static int migrate_page_add(struct page *page, struct list_head *pagelist,
Christoph Lameterfc301282006-01-18 17:42:29 -08001025 unsigned long flags)
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001026{
Naoya Horiguchic8633792017-09-08 16:11:08 -07001027 struct page *head = compound_head(page);
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001028 /*
Christoph Lameterfc301282006-01-18 17:42:29 -08001029 * Avoid migrating a page that is shared with others.
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001030 */
Naoya Horiguchic8633792017-09-08 16:11:08 -07001031 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
1032 if (!isolate_lru_page(head)) {
1033 list_add_tail(&head->lru, pagelist);
1034 mod_node_page_state(page_pgdat(head),
Huang Ying9de4f222020-04-06 20:04:41 -07001035 NR_ISOLATED_ANON + page_is_file_lru(head),
Matthew Wilcox (Oracle)6c357842020-08-14 17:30:37 -07001036 thp_nr_pages(head));
Yang Shia53190a2019-08-13 15:37:18 -07001037 } else if (flags & MPOL_MF_STRICT) {
1038 /*
1039 * Non-movable page may reach here. And, there may be
1040 * temporary off LRU pages or non-LRU movable pages.
1041 * Treat them as unmovable pages since they can't be
1042 * isolated, so they can't be moved at the moment. It
1043 * should return -EIO for this case too.
1044 */
1045 return -EIO;
Nick Piggin62695a82008-10-18 20:26:09 -07001046 }
1047 }
Yang Shia53190a2019-08-13 15:37:18 -07001048
1049 return 0;
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001050}
1051
1052/*
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001053 * Migrate pages from one node to a target node.
1054 * Returns error or the number of pages not migrated.
1055 */
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07001056static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1057 int flags)
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001058{
1059 nodemask_t nmask;
1060 LIST_HEAD(pagelist);
1061 int err = 0;
Joonsoo Kima0976312020-08-11 18:37:28 -07001062 struct migration_target_control mtc = {
1063 .nid = dest,
1064 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1065 };
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001066
1067 nodes_clear(nmask);
1068 node_set(source, nmask);
1069
Minchan Kim08270802012-10-08 16:33:38 -07001070 /*
1071 * This does not "check" the range but isolates all pages that
1072 * need migration. Between passing in the full user address
1073 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
1074 */
1075 VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
Naoya Horiguchi98094942013-09-11 14:22:14 -07001076 queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001077 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
1078
Minchan Kimcf608ac2010-10-26 14:21:29 -07001079 if (!list_empty(&pagelist)) {
Joonsoo Kima0976312020-08-11 18:37:28 -07001080 err = migrate_pages(&pagelist, alloc_migration_target, NULL,
Yang Shi5ac95882021-09-02 14:59:13 -07001081 (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
Minchan Kimcf608ac2010-10-26 14:21:29 -07001082 if (err)
Naoya Horiguchie2d8cf42013-09-11 14:22:03 -07001083 putback_movable_pages(&pagelist);
Minchan Kimcf608ac2010-10-26 14:21:29 -07001084 }
Christoph Lameter95a402c2006-06-23 02:03:53 -07001085
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001086 return err;
1087}
1088
1089/*
1090 * Move pages between the two nodesets so as to preserve the physical
1091 * layout as much as possible.
Christoph Lameter39743882006-01-08 01:00:51 -08001092 *
1093 * Returns the number of page that could not be moved.
1094 */
Andrew Morton0ce72d42012-05-29 15:06:24 -07001095int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1096 const nodemask_t *to, int flags)
Christoph Lameter39743882006-01-08 01:00:51 -08001097{
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001098 int busy = 0;
Jan Stancekf555bef2021-01-12 15:49:21 -08001099 int err = 0;
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001100 nodemask_t tmp;
Christoph Lameter39743882006-01-08 01:00:51 -08001101
Minchan Kim361a2a22021-05-04 18:36:57 -07001102 lru_cache_disable();
Christoph Lameter0aedadf2008-11-06 12:53:30 -08001103
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001104 mmap_read_lock(mm);
Christoph Lameter39743882006-01-08 01:00:51 -08001105
KOSAKI Motohiroda0aa132010-03-05 13:41:59 -08001106 /*
1107 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
1108 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
1109 * bit in 'tmp', and return that <source, dest> pair for migration.
1110 * The pair of nodemasks 'to' and 'from' define the map.
1111 *
1112 * If no pair of bits is found that way, fallback to picking some
1113 * pair of 'source' and 'dest' bits that are not the same. If the
1114 * 'source' and 'dest' bits are the same, this represents a node
1115 * that will be migrating to itself, so no pages need move.
1116 *
1117 * If no bits are left in 'tmp', or if all remaining bits left
1118 * in 'tmp' correspond to the same bit in 'to', return false
1119 * (nothing left to migrate).
1120 *
1121 * This lets us pick a pair of nodes to migrate between, such that
1122 * if possible the dest node is not already occupied by some other
1123 * source node, minimizing the risk of overloading the memory on a
1124 * node that would happen if we migrated incoming memory to a node
1125 * before migrating outgoing memory source that same node.
1126 *
1127 * A single scan of tmp is sufficient. As we go, we remember the
1128 * most recent <s, d> pair that moved (s != d). If we find a pair
1129 * that not only moved, but what's better, moved to an empty slot
1130 * (d is not set in tmp), then we break out then, with that pair.
Justin P. Mattockae0e47f2011-03-01 15:06:02 +01001131 * Otherwise when we finish scanning from_tmp, we at least have the
KOSAKI Motohiroda0aa132010-03-05 13:41:59 -08001132 * most recent <s, d> pair that moved. If we get all the way through
1133 * the scan of tmp without finding any node that moved, much less
1134 * moved to an empty node, then there is nothing left worth migrating.
1135 */
Christoph Lameterd4984712006-01-08 01:00:55 -08001136
Andrew Morton0ce72d42012-05-29 15:06:24 -07001137 tmp = *from;
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001138 while (!nodes_empty(tmp)) {
Zhiyuan Dai68d68ff2021-05-04 18:40:12 -07001139 int s, d;
Jianguo Wub76ac7e2013-11-12 15:07:39 -08001140 int source = NUMA_NO_NODE;
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001141 int dest = 0;
1142
1143 for_each_node_mask(s, tmp) {
Larry Woodman4a5b18c2012-05-29 15:06:24 -07001144
1145 /*
1146 * do_migrate_pages() tries to maintain the relative
1147 * node relationship of the pages established between
1148 * threads and memory areas.
1149 *
1150 * However if the number of source nodes is not equal to
1151 * the number of destination nodes we can not preserve
1152 * this node relative relationship. In that case, skip
1153 * copying memory from a node that is in the destination
1154 * mask.
1155 *
1156 * Example: [2,3,4] -> [3,4,5] moves everything.
1157 * [0-7] - > [3,4,5] moves only 0,1,2,6,7.
1158 */
1159
Andrew Morton0ce72d42012-05-29 15:06:24 -07001160 if ((nodes_weight(*from) != nodes_weight(*to)) &&
1161 (node_isset(s, *to)))
Larry Woodman4a5b18c2012-05-29 15:06:24 -07001162 continue;
1163
Andrew Morton0ce72d42012-05-29 15:06:24 -07001164 d = node_remap(s, *from, *to);
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001165 if (s == d)
1166 continue;
1167
1168 source = s; /* Node moved. Memorize */
1169 dest = d;
1170
1171 /* dest not in remaining from nodes? */
1172 if (!node_isset(dest, tmp))
1173 break;
1174 }
Jianguo Wub76ac7e2013-11-12 15:07:39 -08001175 if (source == NUMA_NO_NODE)
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001176 break;
1177
1178 node_clear(source, tmp);
1179 err = migrate_to_node(mm, source, dest, flags);
1180 if (err > 0)
1181 busy += err;
1182 if (err < 0)
1183 break;
Christoph Lameter39743882006-01-08 01:00:51 -08001184 }
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001185 mmap_read_unlock(mm);
Minchan Kimd479960e2021-05-04 18:36:54 -07001186
Minchan Kim361a2a22021-05-04 18:36:57 -07001187 lru_cache_enable();
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001188 if (err < 0)
1189 return err;
1190 return busy;
Christoph Lameterb20a3502006-03-22 00:09:12 -08001191
Christoph Lameter39743882006-01-08 01:00:51 -08001192}
1193
Lee Schermerhorn3ad33b242007-11-14 16:59:10 -08001194/*
1195 * Allocate a new page for page migration based on vma policy.
Hugh Dickinsd05f0cdcb2014-06-23 13:22:07 -07001196 * Start by assuming the page is mapped by the same vma as contains @start.
Lee Schermerhorn3ad33b242007-11-14 16:59:10 -08001197 * Search forward from there, if not. N.B., this assumes that the
1198 * list of pages handed to migrate_pages()--which is how we get here--
1199 * is in virtual address order.
1200 */
Michal Hocko666feb22018-04-10 16:30:03 -07001201static struct page *new_page(struct page *page, unsigned long start)
Christoph Lameter95a402c2006-06-23 02:03:53 -07001202{
Hugh Dickinsd05f0cdcb2014-06-23 13:22:07 -07001203 struct vm_area_struct *vma;
Kees Cook3f649ab2020-06-03 13:09:38 -07001204 unsigned long address;
Christoph Lameter95a402c2006-06-23 02:03:53 -07001205
Hugh Dickinsd05f0cdcb2014-06-23 13:22:07 -07001206 vma = find_vma(current->mm, start);
Lee Schermerhorn3ad33b242007-11-14 16:59:10 -08001207 while (vma) {
1208 address = page_address_in_vma(page, vma);
1209 if (address != -EFAULT)
1210 break;
1211 vma = vma->vm_next;
1212 }
1213
Wanpeng Li11c731e2013-12-18 17:08:56 -08001214 if (PageHuge(page)) {
Michal Hocko389c8172018-01-31 16:21:03 -08001215 return alloc_huge_page_vma(page_hstate(compound_head(page)),
1216 vma, address);
Michal Hocko94723aa2018-04-10 16:30:07 -07001217 } else if (PageTransHuge(page)) {
Naoya Horiguchic8633792017-09-08 16:11:08 -07001218 struct page *thp;
1219
David Rientjes19deb762019-09-04 12:54:20 -07001220 thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
1221 HPAGE_PMD_ORDER);
Naoya Horiguchic8633792017-09-08 16:11:08 -07001222 if (!thp)
1223 return NULL;
1224 prep_transhuge_page(thp);
1225 return thp;
Wanpeng Li11c731e2013-12-18 17:08:56 -08001226 }
1227 /*
1228 * if !vma, alloc_page_vma() will use task or system default policy
1229 */
Michal Hocko0f556852017-07-12 14:36:58 -07001230 return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
1231 vma, address);
Christoph Lameter95a402c2006-06-23 02:03:53 -07001232}
Christoph Lameterb20a3502006-03-22 00:09:12 -08001233#else
1234
Yang Shia53190a2019-08-13 15:37:18 -07001235static int migrate_page_add(struct page *page, struct list_head *pagelist,
Christoph Lameterb20a3502006-03-22 00:09:12 -08001236 unsigned long flags)
1237{
Yang Shia53190a2019-08-13 15:37:18 -07001238 return -EIO;
Christoph Lameterb20a3502006-03-22 00:09:12 -08001239}
1240
Andrew Morton0ce72d42012-05-29 15:06:24 -07001241int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1242 const nodemask_t *to, int flags)
Christoph Lameterb20a3502006-03-22 00:09:12 -08001243{
1244 return -ENOSYS;
1245}
Christoph Lameter95a402c2006-06-23 02:03:53 -07001246
Michal Hocko666feb22018-04-10 16:30:03 -07001247static struct page *new_page(struct page *page, unsigned long start)
Christoph Lameter95a402c2006-06-23 02:03:53 -07001248{
1249 return NULL;
1250}
Christoph Lameterb20a3502006-03-22 00:09:12 -08001251#endif
1252
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07001253static long do_mbind(unsigned long start, unsigned long len,
David Rientjes028fec42008-04-28 02:12:25 -07001254 unsigned short mode, unsigned short mode_flags,
1255 nodemask_t *nmask, unsigned long flags)
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001256{
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001257 struct mm_struct *mm = current->mm;
1258 struct mempolicy *new;
1259 unsigned long end;
1260 int err;
Yang Shid8835442019-08-13 15:37:15 -07001261 int ret;
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001262 LIST_HEAD(pagelist);
1263
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001264 if (flags & ~(unsigned long)MPOL_MF_VALID)
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001265 return -EINVAL;
Christoph Lameter74c00242006-03-14 19:50:21 -08001266 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001267 return -EPERM;
1268
1269 if (start & ~PAGE_MASK)
1270 return -EINVAL;
1271
1272 if (mode == MPOL_DEFAULT)
1273 flags &= ~MPOL_MF_STRICT;
1274
1275 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1276 end = start + len;
1277
1278 if (end < start)
1279 return -EINVAL;
1280 if (end == start)
1281 return 0;
1282
David Rientjes028fec42008-04-28 02:12:25 -07001283 new = mpol_new(mode, mode_flags, nmask);
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001284 if (IS_ERR(new))
1285 return PTR_ERR(new);
1286
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001287 if (flags & MPOL_MF_LAZY)
1288 new->flags |= MPOL_F_MOF;
1289
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001290 /*
1291 * If we are using the default policy then operation
1292 * on discontinuous address spaces is okay after all
1293 */
1294 if (!new)
1295 flags |= MPOL_MF_DISCONTIG_OK;
1296
David Rientjes028fec42008-04-28 02:12:25 -07001297 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1298 start, start + len, mode, mode_flags,
David Rientjes00ef2d22013-02-22 16:35:36 -08001299 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001300
Christoph Lameter0aedadf2008-11-06 12:53:30 -08001301 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1302
Minchan Kim361a2a22021-05-04 18:36:57 -07001303 lru_cache_disable();
Christoph Lameter0aedadf2008-11-06 12:53:30 -08001304 }
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07001305 {
1306 NODEMASK_SCRATCH(scratch);
1307 if (scratch) {
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001308 mmap_write_lock(mm);
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07001309 err = mpol_set_nodemask(new, nmask, scratch);
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07001310 if (err)
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001311 mmap_write_unlock(mm);
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07001312 } else
1313 err = -ENOMEM;
1314 NODEMASK_SCRATCH_FREE(scratch);
1315 }
KOSAKI Motohirob05ca732009-10-26 16:49:59 -07001316 if (err)
1317 goto mpol_out;
1318
Yang Shid8835442019-08-13 15:37:15 -07001319 ret = queue_pages_range(mm, start, end, nmask,
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001320 flags | MPOL_MF_INVERT, &pagelist);
Yang Shid8835442019-08-13 15:37:15 -07001321
1322 if (ret < 0) {
Yang Shia85dfc32019-11-15 17:34:33 -08001323 err = ret;
Yang Shid8835442019-08-13 15:37:15 -07001324 goto up_out;
1325 }
1326
1327 err = mbind_range(mm, start, end, new);
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001328
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001329 if (!err) {
1330 int nr_failed = 0;
1331
Minchan Kimcf608ac2010-10-26 14:21:29 -07001332 if (!list_empty(&pagelist)) {
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001333 WARN_ON_ONCE(flags & MPOL_MF_LAZY);
Hugh Dickinsd05f0cdcb2014-06-23 13:22:07 -07001334 nr_failed = migrate_pages(&pagelist, new_page, NULL,
Yang Shi5ac95882021-09-02 14:59:13 -07001335 start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL);
Minchan Kimcf608ac2010-10-26 14:21:29 -07001336 if (nr_failed)
Naoya Horiguchi74060e42013-09-11 14:22:06 -07001337 putback_movable_pages(&pagelist);
Minchan Kimcf608ac2010-10-26 14:21:29 -07001338 }
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001339
Yang Shid8835442019-08-13 15:37:15 -07001340 if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001341 err = -EIO;
Yang Shia85dfc32019-11-15 17:34:33 -08001342 } else {
Yang Shid8835442019-08-13 15:37:15 -07001343up_out:
Yang Shia85dfc32019-11-15 17:34:33 -08001344 if (!list_empty(&pagelist))
1345 putback_movable_pages(&pagelist);
1346 }
1347
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001348 mmap_write_unlock(mm);
Yang Shid8835442019-08-13 15:37:15 -07001349mpol_out:
Lee Schermerhornf0be3d32008-04-28 02:13:08 -07001350 mpol_put(new);
Minchan Kimd479960e2021-05-04 18:36:54 -07001351 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
Minchan Kim361a2a22021-05-04 18:36:57 -07001352 lru_cache_enable();
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001353 return err;
1354}
1355
Christoph Lameter39743882006-01-08 01:00:51 -08001356/*
Christoph Lameter8bccd852005-10-29 18:16:59 -07001357 * User space interface with variable sized bitmaps for nodelists.
1358 */
Arnd Bergmanne1302422021-09-08 15:18:21 -07001359static int get_bitmap(unsigned long *mask, const unsigned long __user *nmask,
1360 unsigned long maxnode)
1361{
1362 unsigned long nlongs = BITS_TO_LONGS(maxnode);
1363 int ret;
1364
1365 if (in_compat_syscall())
1366 ret = compat_get_bitmap(mask,
1367 (const compat_ulong_t __user *)nmask,
1368 maxnode);
1369 else
1370 ret = copy_from_user(mask, nmask,
1371 nlongs * sizeof(unsigned long));
1372
1373 if (ret)
1374 return -EFAULT;
1375
1376 if (maxnode % BITS_PER_LONG)
1377 mask[nlongs - 1] &= (1UL << (maxnode % BITS_PER_LONG)) - 1;
1378
1379 return 0;
1380}
Christoph Lameter8bccd852005-10-29 18:16:59 -07001381
1382/* Copy a node mask from user space. */
Christoph Lameter39743882006-01-08 01:00:51 -08001383static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
Christoph Lameter8bccd852005-10-29 18:16:59 -07001384 unsigned long maxnode)
1385{
Christoph Lameter8bccd852005-10-29 18:16:59 -07001386 --maxnode;
1387 nodes_clear(*nodes);
1388 if (maxnode == 0 || !nmask)
1389 return 0;
Andi Kleena9c930b2006-02-20 18:27:59 -08001390 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
Chris Wright636f13c2006-02-17 13:59:36 -08001391 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001392
Yisheng Xie56521e72018-01-31 16:16:11 -08001393 /*
1394 * When the user specified more nodes than supported just check
Arnd Bergmanne1302422021-09-08 15:18:21 -07001395 * if the non supported part is all zero, one word at a time,
1396 * starting at the end.
Yisheng Xie56521e72018-01-31 16:16:11 -08001397 */
Arnd Bergmanne1302422021-09-08 15:18:21 -07001398 while (maxnode > MAX_NUMNODES) {
1399 unsigned long bits = min_t(unsigned long, maxnode, BITS_PER_LONG);
1400 unsigned long t;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001401
Arnd Bergmanne1302422021-09-08 15:18:21 -07001402 if (get_bitmap(&t, &nmask[maxnode / BITS_PER_LONG], bits))
Yisheng Xie56521e72018-01-31 16:16:11 -08001403 return -EFAULT;
Arnd Bergmanne1302422021-09-08 15:18:21 -07001404
1405 if (maxnode - bits >= MAX_NUMNODES) {
1406 maxnode -= bits;
1407 } else {
1408 maxnode = MAX_NUMNODES;
1409 t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
1410 }
1411 if (t)
Yisheng Xie56521e72018-01-31 16:16:11 -08001412 return -EINVAL;
1413 }
1414
Arnd Bergmanne1302422021-09-08 15:18:21 -07001415 return get_bitmap(nodes_addr(*nodes), nmask, maxnode);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001416}
1417
1418/* Copy a kernel node mask to user space */
1419static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1420 nodemask_t *nodes)
1421{
1422 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
Ralph Campbell050c17f2019-02-20 22:18:58 -08001423 unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
Arnd Bergmanne1302422021-09-08 15:18:21 -07001424 bool compat = in_compat_syscall();
1425
1426 if (compat)
1427 nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001428
1429 if (copy > nbytes) {
1430 if (copy > PAGE_SIZE)
1431 return -EINVAL;
1432 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1433 return -EFAULT;
1434 copy = nbytes;
Arnd Bergmanne1302422021-09-08 15:18:21 -07001435 maxnode = nr_node_ids;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001436 }
Arnd Bergmanne1302422021-09-08 15:18:21 -07001437
1438 if (compat)
1439 return compat_put_bitmap((compat_ulong_t __user *)mask,
1440 nodes_addr(*nodes), maxnode);
1441
Christoph Lameter8bccd852005-10-29 18:16:59 -07001442 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1443}
1444
Feng Tang95837922021-06-30 18:51:03 -07001445/* Basic parameter sanity check used by both mbind() and set_mempolicy() */
1446static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
1447{
1448 *flags = *mode & MPOL_MODE_FLAGS;
1449 *mode &= ~MPOL_MODE_FLAGS;
Dave Hansenb27abac2021-09-02 15:00:06 -07001450
Ben Widawskya38a59f2021-09-02 15:00:16 -07001451 if ((unsigned int)(*mode) >= MPOL_MAX)
Feng Tang95837922021-06-30 18:51:03 -07001452 return -EINVAL;
1453 if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
1454 return -EINVAL;
Eric Dumazet6d2aec92021-10-18 15:15:49 -07001455 if (*flags & MPOL_F_NUMA_BALANCING) {
1456 if (*mode != MPOL_BIND)
1457 return -EINVAL;
1458 *flags |= (MPOL_F_MOF | MPOL_F_MORON);
1459 }
Feng Tang95837922021-06-30 18:51:03 -07001460 return 0;
1461}
1462
Dominik Brodowskie7dc9ad62018-03-17 16:12:22 +01001463static long kernel_mbind(unsigned long start, unsigned long len,
1464 unsigned long mode, const unsigned long __user *nmask,
1465 unsigned long maxnode, unsigned int flags)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001466{
David Rientjes028fec42008-04-28 02:12:25 -07001467 unsigned short mode_flags;
Feng Tang95837922021-06-30 18:51:03 -07001468 nodemask_t nodes;
1469 int lmode = mode;
1470 int err;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001471
Andrey Konovalov057d33892019-09-25 16:48:30 -07001472 start = untagged_addr(start);
Feng Tang95837922021-06-30 18:51:03 -07001473 err = sanitize_mpol_flags(&lmode, &mode_flags);
1474 if (err)
1475 return err;
1476
Christoph Lameter8bccd852005-10-29 18:16:59 -07001477 err = get_nodes(&nodes, nmask, maxnode);
1478 if (err)
1479 return err;
Feng Tang95837922021-06-30 18:51:03 -07001480
1481 return do_mbind(start, len, lmode, mode_flags, &nodes, flags);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001482}
1483
Aneesh Kumar K.Vc6018b42022-01-14 14:08:17 -08001484SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, len,
1485 unsigned long, home_node, unsigned long, flags)
1486{
1487 struct mm_struct *mm = current->mm;
1488 struct vm_area_struct *vma;
1489 struct mempolicy *new;
1490 unsigned long vmstart;
1491 unsigned long vmend;
1492 unsigned long end;
1493 int err = -ENOENT;
1494
1495 start = untagged_addr(start);
1496 if (start & ~PAGE_MASK)
1497 return -EINVAL;
1498 /*
1499 * flags is used for future extension if any.
1500 */
1501 if (flags != 0)
1502 return -EINVAL;
1503
1504 /*
1505 * Check home_node is online to avoid accessing uninitialized
1506 * NODE_DATA.
1507 */
1508 if (home_node >= MAX_NUMNODES || !node_online(home_node))
1509 return -EINVAL;
1510
1511 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1512 end = start + len;
1513
1514 if (end < start)
1515 return -EINVAL;
1516 if (end == start)
1517 return 0;
1518 mmap_write_lock(mm);
1519 vma = find_vma(mm, start);
1520 for (; vma && vma->vm_start < end; vma = vma->vm_next) {
1521
1522 vmstart = max(start, vma->vm_start);
1523 vmend = min(end, vma->vm_end);
1524 new = mpol_dup(vma_policy(vma));
1525 if (IS_ERR(new)) {
1526 err = PTR_ERR(new);
1527 break;
1528 }
1529 /*
1530 * Only update home node if there is an existing vma policy
1531 */
1532 if (!new)
1533 continue;
1534
1535 /*
1536 * If any vma in the range got policy other than MPOL_BIND
1537 * or MPOL_PREFERRED_MANY we return error. We don't reset
1538 * the home node for vmas we already updated before.
1539 */
1540 if (new->mode != MPOL_BIND && new->mode != MPOL_PREFERRED_MANY) {
1541 err = -EOPNOTSUPP;
1542 break;
1543 }
1544
1545 new->home_node = home_node;
1546 err = mbind_range(mm, vmstart, vmend, new);
1547 mpol_put(new);
1548 if (err)
1549 break;
1550 }
1551 mmap_write_unlock(mm);
1552 return err;
1553}
1554
Dominik Brodowskie7dc9ad62018-03-17 16:12:22 +01001555SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1556 unsigned long, mode, const unsigned long __user *, nmask,
1557 unsigned long, maxnode, unsigned int, flags)
1558{
1559 return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1560}
1561
Christoph Lameter8bccd852005-10-29 18:16:59 -07001562/* Set the process memory policy */
Dominik Brodowskiaf03c4a2018-03-17 16:20:01 +01001563static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1564 unsigned long maxnode)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001565{
Feng Tang95837922021-06-30 18:51:03 -07001566 unsigned short mode_flags;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001567 nodemask_t nodes;
Feng Tang95837922021-06-30 18:51:03 -07001568 int lmode = mode;
1569 int err;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001570
Feng Tang95837922021-06-30 18:51:03 -07001571 err = sanitize_mpol_flags(&lmode, &mode_flags);
1572 if (err)
1573 return err;
1574
Christoph Lameter8bccd852005-10-29 18:16:59 -07001575 err = get_nodes(&nodes, nmask, maxnode);
1576 if (err)
1577 return err;
Feng Tang95837922021-06-30 18:51:03 -07001578
1579 return do_set_mempolicy(lmode, mode_flags, &nodes);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001580}
1581
Dominik Brodowskiaf03c4a2018-03-17 16:20:01 +01001582SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1583 unsigned long, maxnode)
1584{
1585 return kernel_set_mempolicy(mode, nmask, maxnode);
1586}
1587
Dominik Brodowskib6e9b0b2018-03-17 16:00:25 +01001588static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1589 const unsigned long __user *old_nodes,
1590 const unsigned long __user *new_nodes)
Christoph Lameter39743882006-01-08 01:00:51 -08001591{
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001592 struct mm_struct *mm = NULL;
Christoph Lameter39743882006-01-08 01:00:51 -08001593 struct task_struct *task;
Christoph Lameter39743882006-01-08 01:00:51 -08001594 nodemask_t task_nodes;
1595 int err;
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001596 nodemask_t *old;
1597 nodemask_t *new;
1598 NODEMASK_SCRATCH(scratch);
Christoph Lameter39743882006-01-08 01:00:51 -08001599
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001600 if (!scratch)
1601 return -ENOMEM;
Christoph Lameter39743882006-01-08 01:00:51 -08001602
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001603 old = &scratch->mask1;
1604 new = &scratch->mask2;
1605
1606 err = get_nodes(old, old_nodes, maxnode);
Christoph Lameter39743882006-01-08 01:00:51 -08001607 if (err)
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001608 goto out;
1609
1610 err = get_nodes(new, new_nodes, maxnode);
1611 if (err)
1612 goto out;
Christoph Lameter39743882006-01-08 01:00:51 -08001613
1614 /* Find the mm_struct */
Zeng Zhaoming55cfaa32010-12-02 14:31:13 -08001615 rcu_read_lock();
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -07001616 task = pid ? find_task_by_vpid(pid) : current;
Christoph Lameter39743882006-01-08 01:00:51 -08001617 if (!task) {
Zeng Zhaoming55cfaa32010-12-02 14:31:13 -08001618 rcu_read_unlock();
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001619 err = -ESRCH;
1620 goto out;
Christoph Lameter39743882006-01-08 01:00:51 -08001621 }
Christoph Lameter3268c632012-03-21 16:34:06 -07001622 get_task_struct(task);
Christoph Lameter39743882006-01-08 01:00:51 -08001623
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001624 err = -EINVAL;
Christoph Lameter39743882006-01-08 01:00:51 -08001625
1626 /*
Otto Ebeling31367462017-11-15 17:38:14 -08001627 * Check if this process has the right to modify the specified process.
1628 * Use the regular "ptrace_may_access()" checks.
Christoph Lameter39743882006-01-08 01:00:51 -08001629 */
Otto Ebeling31367462017-11-15 17:38:14 -08001630 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
David Howellsc69e8d92008-11-14 10:39:19 +11001631 rcu_read_unlock();
Christoph Lameter39743882006-01-08 01:00:51 -08001632 err = -EPERM;
Christoph Lameter3268c632012-03-21 16:34:06 -07001633 goto out_put;
Christoph Lameter39743882006-01-08 01:00:51 -08001634 }
David Howellsc69e8d92008-11-14 10:39:19 +11001635 rcu_read_unlock();
Christoph Lameter39743882006-01-08 01:00:51 -08001636
1637 task_nodes = cpuset_mems_allowed(task);
1638 /* Is the user allowed to access the target nodes? */
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001639 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
Christoph Lameter39743882006-01-08 01:00:51 -08001640 err = -EPERM;
Christoph Lameter3268c632012-03-21 16:34:06 -07001641 goto out_put;
Christoph Lameter39743882006-01-08 01:00:51 -08001642 }
1643
Yisheng Xie0486a382018-01-31 16:16:15 -08001644 task_nodes = cpuset_mems_allowed(current);
1645 nodes_and(*new, *new, task_nodes);
1646 if (nodes_empty(*new))
Christoph Lameter3268c632012-03-21 16:34:06 -07001647 goto out_put;
Yisheng Xie0486a382018-01-31 16:16:15 -08001648
David Quigley86c3a762006-06-23 02:04:02 -07001649 err = security_task_movememory(task);
1650 if (err)
Christoph Lameter3268c632012-03-21 16:34:06 -07001651 goto out_put;
David Quigley86c3a762006-06-23 02:04:02 -07001652
Christoph Lameter3268c632012-03-21 16:34:06 -07001653 mm = get_task_mm(task);
1654 put_task_struct(task);
Sasha Levinf2a9ef82012-04-25 16:01:52 -07001655
1656 if (!mm) {
Christoph Lameter3268c632012-03-21 16:34:06 -07001657 err = -EINVAL;
Sasha Levinf2a9ef82012-04-25 16:01:52 -07001658 goto out;
1659 }
1660
1661 err = do_migrate_pages(mm, old, new,
1662 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
Christoph Lameter3268c632012-03-21 16:34:06 -07001663
1664 mmput(mm);
1665out:
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001666 NODEMASK_SCRATCH_FREE(scratch);
1667
Christoph Lameter39743882006-01-08 01:00:51 -08001668 return err;
Christoph Lameter3268c632012-03-21 16:34:06 -07001669
1670out_put:
1671 put_task_struct(task);
1672 goto out;
1673
Christoph Lameter39743882006-01-08 01:00:51 -08001674}
1675
Dominik Brodowskib6e9b0b2018-03-17 16:00:25 +01001676SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1677 const unsigned long __user *, old_nodes,
1678 const unsigned long __user *, new_nodes)
1679{
1680 return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1681}
1682
Christoph Lameter39743882006-01-08 01:00:51 -08001683
Christoph Lameter8bccd852005-10-29 18:16:59 -07001684/* Retrieve NUMA policy */
Dominik Brodowskiaf03c4a2018-03-17 16:20:01 +01001685static int kernel_get_mempolicy(int __user *policy,
1686 unsigned long __user *nmask,
1687 unsigned long maxnode,
1688 unsigned long addr,
1689 unsigned long flags)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001690{
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07001691 int err;
Kees Cook3f649ab2020-06-03 13:09:38 -07001692 int pval;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001693 nodemask_t nodes;
1694
Ralph Campbell050c17f2019-02-20 22:18:58 -08001695 if (nmask != NULL && maxnode < nr_node_ids)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001696 return -EINVAL;
1697
Wenchao Hao4605f052020-08-11 18:31:16 -07001698 addr = untagged_addr(addr);
1699
Christoph Lameter8bccd852005-10-29 18:16:59 -07001700 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1701
1702 if (err)
1703 return err;
1704
1705 if (policy && put_user(pval, policy))
1706 return -EFAULT;
1707
1708 if (nmask)
1709 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1710
1711 return err;
1712}
1713
Dominik Brodowskiaf03c4a2018-03-17 16:20:01 +01001714SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1715 unsigned long __user *, nmask, unsigned long, maxnode,
1716 unsigned long, addr, unsigned long, flags)
1717{
1718 return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1719}
1720
Li Xinhai20ca87f2020-04-01 21:10:52 -07001721bool vma_migratable(struct vm_area_struct *vma)
1722{
1723 if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1724 return false;
1725
1726 /*
1727 * DAX device mappings require predictable access latency, so avoid
1728 * incurring periodic faults.
1729 */
1730 if (vma_is_dax(vma))
1731 return false;
1732
1733 if (is_vm_hugetlb_page(vma) &&
1734 !hugepage_migration_supported(hstate_vma(vma)))
1735 return false;
1736
1737 /*
1738 * Migration allocates pages in the highest zone. If we cannot
1739 * do so then migration (at least from node to node) is not
1740 * possible.
1741 */
1742 if (vma->vm_file &&
1743 gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
1744 < policy_zone)
1745 return false;
1746 return true;
1747}
1748
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001749struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1750 unsigned long addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751{
Oleg Nesterov8d902742014-10-09 15:27:45 -07001752 struct mempolicy *pol = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001753
1754 if (vma) {
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001755 if (vma->vm_ops && vma->vm_ops->get_policy) {
Oleg Nesterov8d902742014-10-09 15:27:45 -07001756 pol = vma->vm_ops->get_policy(vma, addr);
Mel Gorman00442ad2012-10-08 16:29:20 -07001757 } else if (vma->vm_policy) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001758 pol = vma->vm_policy;
Mel Gorman00442ad2012-10-08 16:29:20 -07001759
1760 /*
1761 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1762 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1763 * count on these policies which will be dropped by
1764 * mpol_cond_put() later
1765 */
1766 if (mpol_needs_cond_ref(pol))
1767 mpol_get(pol);
1768 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001769 }
Oleg Nesterovf15ca782014-10-09 15:27:43 -07001770
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001771 return pol;
1772}
1773
1774/*
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001775 * get_vma_policy(@vma, @addr)
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001776 * @vma: virtual memory area whose policy is sought
1777 * @addr: address in @vma for shared policy lookup
1778 *
1779 * Returns effective policy for a VMA at specified address.
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001780 * Falls back to current->mempolicy or system default policy, as necessary.
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001781 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1782 * count--added by the get_policy() vm_op, as appropriate--to protect against
1783 * freeing by another task. It is the caller's responsibility to free the
1784 * extra reference for shared policies.
1785 */
David Rientjesac79f782019-09-04 12:54:18 -07001786static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001787 unsigned long addr)
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001788{
1789 struct mempolicy *pol = __get_vma_policy(vma, addr);
1790
Oleg Nesterov8d902742014-10-09 15:27:45 -07001791 if (!pol)
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001792 pol = get_task_policy(current);
Oleg Nesterov8d902742014-10-09 15:27:45 -07001793
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794 return pol;
1795}
1796
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001797bool vma_policy_mof(struct vm_area_struct *vma)
Mel Gormanfc3147242013-10-07 11:29:09 +01001798{
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001799 struct mempolicy *pol;
Oleg Nesterovf15ca782014-10-09 15:27:43 -07001800
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001801 if (vma->vm_ops && vma->vm_ops->get_policy) {
1802 bool ret = false;
Mel Gormanfc3147242013-10-07 11:29:09 +01001803
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001804 pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1805 if (pol && (pol->flags & MPOL_F_MOF))
1806 ret = true;
1807 mpol_cond_put(pol);
Mel Gormanfc3147242013-10-07 11:29:09 +01001808
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001809 return ret;
Mel Gormanfc3147242013-10-07 11:29:09 +01001810 }
1811
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001812 pol = vma->vm_policy;
Oleg Nesterov8d902742014-10-09 15:27:45 -07001813 if (!pol)
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001814 pol = get_task_policy(current);
Oleg Nesterov8d902742014-10-09 15:27:45 -07001815
Mel Gormanfc3147242013-10-07 11:29:09 +01001816 return pol->flags & MPOL_F_MOF;
1817}
1818
Lai Jiangshand3eb1572013-02-22 16:33:22 -08001819static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1820{
1821 enum zone_type dynamic_policy_zone = policy_zone;
1822
1823 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1824
1825 /*
Ben Widawsky269fbe72021-06-30 18:51:10 -07001826 * if policy->nodes has movable memory only,
Lai Jiangshand3eb1572013-02-22 16:33:22 -08001827 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1828 *
Ben Widawsky269fbe72021-06-30 18:51:10 -07001829 * policy->nodes is intersect with node_states[N_MEMORY].
Ingo Molnarf0953a12021-05-06 18:06:47 -07001830 * so if the following test fails, it implies
Ben Widawsky269fbe72021-06-30 18:51:10 -07001831 * policy->nodes has movable memory only.
Lai Jiangshand3eb1572013-02-22 16:33:22 -08001832 */
Ben Widawsky269fbe72021-06-30 18:51:10 -07001833 if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY]))
Lai Jiangshand3eb1572013-02-22 16:33:22 -08001834 dynamic_policy_zone = ZONE_MOVABLE;
1835
1836 return zone >= dynamic_policy_zone;
1837}
1838
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001839/*
1840 * Return a nodemask representing a mempolicy for filtering nodes for
1841 * page allocation
1842 */
Muchun Song8ca39e62020-08-11 18:30:32 -07001843nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
Mel Gorman19770b32008-04-28 02:12:18 -07001844{
Dave Hansenb27abac2021-09-02 15:00:06 -07001845 int mode = policy->mode;
1846
Mel Gorman19770b32008-04-28 02:12:18 -07001847 /* Lower zones don't get a nodemask applied for MPOL_BIND */
Dave Hansenb27abac2021-09-02 15:00:06 -07001848 if (unlikely(mode == MPOL_BIND) &&
1849 apply_policy_zone(policy, gfp_zone(gfp)) &&
1850 cpuset_nodemask_valid_mems_allowed(&policy->nodes))
1851 return &policy->nodes;
1852
1853 if (mode == MPOL_PREFERRED_MANY)
Ben Widawsky269fbe72021-06-30 18:51:10 -07001854 return &policy->nodes;
Mel Gorman19770b32008-04-28 02:12:18 -07001855
1856 return NULL;
1857}
1858
Dave Hansenb27abac2021-09-02 15:00:06 -07001859/*
1860 * Return the preferred node id for 'prefer' mempolicy, and return
1861 * the given id for all other policies.
1862 *
1863 * policy_node() is always coupled with policy_nodemask(), which
1864 * secures the nodemask limit for 'bind' and 'prefer-many' policy.
1865 */
Wei Yangf8fd5252020-10-13 16:57:11 -07001866static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001867{
Feng Tang7858d7b2021-06-30 18:51:00 -07001868 if (policy->mode == MPOL_PREFERRED) {
Ben Widawsky269fbe72021-06-30 18:51:10 -07001869 nd = first_node(policy->nodes);
Feng Tang7858d7b2021-06-30 18:51:00 -07001870 } else {
Mel Gorman19770b32008-04-28 02:12:18 -07001871 /*
Michal Hocko6d840952016-12-12 16:42:23 -08001872 * __GFP_THISNODE shouldn't even be used with the bind policy
1873 * because we might easily break the expectation to stay on the
1874 * requested node and not break the policy.
Mel Gorman19770b32008-04-28 02:12:18 -07001875 */
Michal Hocko6d840952016-12-12 16:42:23 -08001876 WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877 }
Michal Hocko6d840952016-12-12 16:42:23 -08001878
Aneesh Kumar K.Vc6018b42022-01-14 14:08:17 -08001879 if ((policy->mode == MPOL_BIND ||
1880 policy->mode == MPOL_PREFERRED_MANY) &&
1881 policy->home_node != NUMA_NO_NODE)
1882 return policy->home_node;
1883
Vlastimil Babka04ec6262017-07-06 15:40:03 -07001884 return nd;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001885}
1886
1887/* Do dynamic interleaving for a process */
1888static unsigned interleave_nodes(struct mempolicy *policy)
1889{
Vlastimil Babka45816682017-07-06 15:39:59 -07001890 unsigned next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001891 struct task_struct *me = current;
1892
Ben Widawsky269fbe72021-06-30 18:51:10 -07001893 next = next_node_in(me->il_prev, policy->nodes);
David Rientjesf5b087b2008-04-28 02:12:27 -07001894 if (next < MAX_NUMNODES)
Vlastimil Babka45816682017-07-06 15:39:59 -07001895 me->il_prev = next;
1896 return next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001897}
1898
Christoph Lameterdc85da12006-01-18 17:42:36 -08001899/*
1900 * Depending on the memory policy provide a node from which to allocate the
1901 * next slab entry.
1902 */
David Rientjes2a389612014-04-07 15:37:29 -07001903unsigned int mempolicy_slab_node(void)
Christoph Lameterdc85da12006-01-18 17:42:36 -08001904{
Andi Kleene7b691b2012-06-09 02:40:03 -07001905 struct mempolicy *policy;
David Rientjes2a389612014-04-07 15:37:29 -07001906 int node = numa_mem_id();
Andi Kleene7b691b2012-06-09 02:40:03 -07001907
Vasily Averin38b031d2021-09-02 15:00:23 -07001908 if (!in_task())
David Rientjes2a389612014-04-07 15:37:29 -07001909 return node;
Andi Kleene7b691b2012-06-09 02:40:03 -07001910
1911 policy = current->mempolicy;
Feng Tang7858d7b2021-06-30 18:51:00 -07001912 if (!policy)
David Rientjes2a389612014-04-07 15:37:29 -07001913 return node;
Christoph Lameter765c4502006-09-27 01:50:08 -07001914
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001915 switch (policy->mode) {
1916 case MPOL_PREFERRED:
Ben Widawsky269fbe72021-06-30 18:51:10 -07001917 return first_node(policy->nodes);
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001918
Christoph Lameterdc85da12006-01-18 17:42:36 -08001919 case MPOL_INTERLEAVE:
1920 return interleave_nodes(policy);
1921
Dave Hansenb27abac2021-09-02 15:00:06 -07001922 case MPOL_BIND:
1923 case MPOL_PREFERRED_MANY:
1924 {
Mel Gormanc33d6c02016-05-19 17:14:10 -07001925 struct zoneref *z;
1926
Christoph Lameterdc85da12006-01-18 17:42:36 -08001927 /*
1928 * Follow bind policy behavior and start allocation at the
1929 * first node.
1930 */
Mel Gorman19770b32008-04-28 02:12:18 -07001931 struct zonelist *zonelist;
Mel Gorman19770b32008-04-28 02:12:18 -07001932 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
Aneesh Kumar K.Vc9634cf2016-10-07 16:59:12 -07001933 zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
Mel Gormanc33d6c02016-05-19 17:14:10 -07001934 z = first_zones_zonelist(zonelist, highest_zoneidx,
Ben Widawsky269fbe72021-06-30 18:51:10 -07001935 &policy->nodes);
Pavel Tatashinc1093b72018-08-21 21:53:32 -07001936 return z->zone ? zone_to_nid(z->zone) : node;
Mel Gormandd1a2392008-04-28 02:12:17 -07001937 }
Feng Tang7858d7b2021-06-30 18:51:00 -07001938 case MPOL_LOCAL:
1939 return node;
Christoph Lameterdc85da12006-01-18 17:42:36 -08001940
Christoph Lameterdc85da12006-01-18 17:42:36 -08001941 default:
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001942 BUG();
Christoph Lameterdc85da12006-01-18 17:42:36 -08001943 }
1944}
1945
Andrew Mortonfee83b32016-05-19 17:11:43 -07001946/*
1947 * Do static interleaving for a VMA with known offset @n. Returns the n'th
Ben Widawsky269fbe72021-06-30 18:51:10 -07001948 * node in pol->nodes (starting from n=0), wrapping around if n exceeds the
Andrew Mortonfee83b32016-05-19 17:11:43 -07001949 * number of present nodes.
1950 */
Laurent Dufour98c70ba2017-09-08 16:12:39 -07001951static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952{
yanghui276aeee2021-09-08 18:10:20 -07001953 nodemask_t nodemask = pol->nodes;
1954 unsigned int target, nnodes;
Andrew Mortonfee83b32016-05-19 17:11:43 -07001955 int i;
1956 int nid;
yanghui276aeee2021-09-08 18:10:20 -07001957 /*
1958 * The barrier will stabilize the nodemask in a register or on
1959 * the stack so that it will stop changing under the code.
1960 *
1961 * Between first_node() and next_node(), pol->nodes could be changed
1962 * by other threads. So we put pol->nodes in a local stack.
1963 */
1964 barrier();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965
yanghui276aeee2021-09-08 18:10:20 -07001966 nnodes = nodes_weight(nodemask);
David Rientjesf5b087b2008-04-28 02:12:27 -07001967 if (!nnodes)
1968 return numa_node_id();
Andrew Mortonfee83b32016-05-19 17:11:43 -07001969 target = (unsigned int)n % nnodes;
yanghui276aeee2021-09-08 18:10:20 -07001970 nid = first_node(nodemask);
Andrew Mortonfee83b32016-05-19 17:11:43 -07001971 for (i = 0; i < target; i++)
yanghui276aeee2021-09-08 18:10:20 -07001972 nid = next_node(nid, nodemask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973 return nid;
1974}
1975
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001976/* Determine a node number for interleave */
1977static inline unsigned interleave_nid(struct mempolicy *pol,
1978 struct vm_area_struct *vma, unsigned long addr, int shift)
1979{
1980 if (vma) {
1981 unsigned long off;
1982
Nishanth Aravamudan3b98b082006-08-31 21:27:53 -07001983 /*
1984 * for small pages, there is no difference between
1985 * shift and PAGE_SHIFT, so the bit-shift is safe.
1986 * for huge pages, since vm_pgoff is in units of small
1987 * pages, we need to shift off the always 0 bits to get
1988 * a useful offset.
1989 */
1990 BUG_ON(shift < PAGE_SHIFT);
1991 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001992 off += (addr - vma->vm_start) >> shift;
Laurent Dufour98c70ba2017-09-08 16:12:39 -07001993 return offset_il_node(pol, off);
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001994 } else
1995 return interleave_nodes(pol);
1996}
1997
Chen, Kenneth W00ac59a2006-02-03 21:51:14 +01001998#ifdef CONFIG_HUGETLBFS
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001999/*
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002000 * huge_node(@vma, @addr, @gfp_flags, @mpol)
Fabian Frederickb46e14a2014-06-04 16:08:18 -07002001 * @vma: virtual memory area whose policy is sought
2002 * @addr: address in @vma for shared policy lookup and interleave policy
2003 * @gfp_flags: for requested zone
2004 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
Dave Hansenb27abac2021-09-02 15:00:06 -07002005 * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07002006 *
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002007 * Returns a nid suitable for a huge page allocation and a pointer
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002008 * to the struct mempolicy for conditional unref after allocation.
Dave Hansenb27abac2021-09-02 15:00:06 -07002009 * If the effective policy is 'bind' or 'prefer-many', returns a pointer
2010 * to the mempolicy's @nodemask for filtering the zonelist.
Miao Xiec0ff7452010-05-24 14:32:08 -07002011 *
Mel Gormand26914d2014-04-03 14:47:24 -07002012 * Must be protected by read_mems_allowed_begin()
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07002013 */
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002014int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
2015 struct mempolicy **mpol, nodemask_t **nodemask)
Christoph Lameter5da7ca82006-01-06 00:10:46 -08002016{
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002017 int nid;
Dave Hansenb27abac2021-09-02 15:00:06 -07002018 int mode;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08002019
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07002020 *mpol = get_vma_policy(vma, addr);
Dave Hansenb27abac2021-09-02 15:00:06 -07002021 *nodemask = NULL;
2022 mode = (*mpol)->mode;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08002023
Dave Hansenb27abac2021-09-02 15:00:06 -07002024 if (unlikely(mode == MPOL_INTERLEAVE)) {
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002025 nid = interleave_nid(*mpol, vma, addr,
2026 huge_page_shift(hstate_vma(vma)));
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002027 } else {
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002028 nid = policy_node(gfp_flags, *mpol, numa_node_id());
Dave Hansenb27abac2021-09-02 15:00:06 -07002029 if (mode == MPOL_BIND || mode == MPOL_PREFERRED_MANY)
Ben Widawsky269fbe72021-06-30 18:51:10 -07002030 *nodemask = &(*mpol)->nodes;
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07002031 }
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002032 return nid;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08002033}
Lee Schermerhorn06808b02009-12-14 17:58:21 -08002034
2035/*
2036 * init_nodemask_of_mempolicy
2037 *
2038 * If the current task's mempolicy is "default" [NULL], return 'false'
2039 * to indicate default policy. Otherwise, extract the policy nodemask
2040 * for 'bind' or 'interleave' policy into the argument nodemask, or
2041 * initialize the argument nodemask to contain the single node for
2042 * 'preferred' or 'local' policy and return 'true' to indicate presence
2043 * of non-default mempolicy.
2044 *
2045 * We don't bother with reference counting the mempolicy [mpol_get/put]
2046 * because the current task is examining it's own mempolicy and a task's
2047 * mempolicy is only ever changed by the task itself.
2048 *
2049 * N.B., it is the caller's responsibility to free a returned nodemask.
2050 */
2051bool init_nodemask_of_mempolicy(nodemask_t *mask)
2052{
2053 struct mempolicy *mempolicy;
Lee Schermerhorn06808b02009-12-14 17:58:21 -08002054
2055 if (!(mask && current->mempolicy))
2056 return false;
2057
Miao Xiec0ff7452010-05-24 14:32:08 -07002058 task_lock(current);
Lee Schermerhorn06808b02009-12-14 17:58:21 -08002059 mempolicy = current->mempolicy;
2060 switch (mempolicy->mode) {
2061 case MPOL_PREFERRED:
Dave Hansenb27abac2021-09-02 15:00:06 -07002062 case MPOL_PREFERRED_MANY:
Lee Schermerhorn06808b02009-12-14 17:58:21 -08002063 case MPOL_BIND:
Lee Schermerhorn06808b02009-12-14 17:58:21 -08002064 case MPOL_INTERLEAVE:
Ben Widawsky269fbe72021-06-30 18:51:10 -07002065 *mask = mempolicy->nodes;
Feng Tang7858d7b2021-06-30 18:51:00 -07002066 break;
2067
2068 case MPOL_LOCAL:
Ben Widawsky269fbe72021-06-30 18:51:10 -07002069 init_nodemask_of_node(mask, numa_node_id());
Lee Schermerhorn06808b02009-12-14 17:58:21 -08002070 break;
2071
2072 default:
2073 BUG();
2074 }
Miao Xiec0ff7452010-05-24 14:32:08 -07002075 task_unlock(current);
Lee Schermerhorn06808b02009-12-14 17:58:21 -08002076
2077 return true;
2078}
Chen, Kenneth W00ac59a2006-02-03 21:51:14 +01002079#endif
Christoph Lameter5da7ca82006-01-06 00:10:46 -08002080
David Rientjes6f48d0eb2010-08-09 17:18:52 -07002081/*
Feng Tangb26e5172021-06-30 18:50:56 -07002082 * mempolicy_in_oom_domain
David Rientjes6f48d0eb2010-08-09 17:18:52 -07002083 *
Feng Tangb26e5172021-06-30 18:50:56 -07002084 * If tsk's mempolicy is "bind", check for intersection between mask and
2085 * the policy nodemask. Otherwise, return true for all other policies
2086 * including "interleave", as a tsk with "interleave" policy may have
2087 * memory allocated from all nodes in system.
David Rientjes6f48d0eb2010-08-09 17:18:52 -07002088 *
2089 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
2090 */
Feng Tangb26e5172021-06-30 18:50:56 -07002091bool mempolicy_in_oom_domain(struct task_struct *tsk,
David Rientjes6f48d0eb2010-08-09 17:18:52 -07002092 const nodemask_t *mask)
2093{
2094 struct mempolicy *mempolicy;
2095 bool ret = true;
2096
2097 if (!mask)
2098 return ret;
Feng Tangb26e5172021-06-30 18:50:56 -07002099
David Rientjes6f48d0eb2010-08-09 17:18:52 -07002100 task_lock(tsk);
2101 mempolicy = tsk->mempolicy;
Feng Tangb26e5172021-06-30 18:50:56 -07002102 if (mempolicy && mempolicy->mode == MPOL_BIND)
Ben Widawsky269fbe72021-06-30 18:51:10 -07002103 ret = nodes_intersects(mempolicy->nodes, *mask);
David Rientjes6f48d0eb2010-08-09 17:18:52 -07002104 task_unlock(tsk);
Feng Tangb26e5172021-06-30 18:50:56 -07002105
David Rientjes6f48d0eb2010-08-09 17:18:52 -07002106 return ret;
2107}
2108
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109/* Allocate a page in interleaved policy.
2110 Own path because it needs to do special accounting. */
Andi Kleen662f3a02005-10-29 18:15:49 -07002111static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2112 unsigned nid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002114 struct page *page;
2115
Matthew Wilcox (Oracle)84172f42021-04-29 23:01:15 -07002116 page = __alloc_pages(gfp, order, nid, NULL);
Kemi Wang45180852017-11-15 17:38:22 -08002117 /* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
2118 if (!static_branch_likely(&vm_numa_stat_key))
2119 return page;
Andrey Ryabininde55c8b2017-10-13 15:57:43 -07002120 if (page && page_to_nid(page) == nid) {
2121 preempt_disable();
Mel Gormanf19298b2021-06-28 19:41:44 -07002122 __count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT);
Andrey Ryabininde55c8b2017-10-13 15:57:43 -07002123 preempt_enable();
2124 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002125 return page;
2126}
2127
Feng Tang4c54d942021-09-02 15:00:10 -07002128static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
2129 int nid, struct mempolicy *pol)
2130{
2131 struct page *page;
2132 gfp_t preferred_gfp;
2133
2134 /*
2135 * This is a two pass approach. The first pass will only try the
2136 * preferred nodes but skip the direct reclaim and allow the
2137 * allocation to fail, while the second pass will try all the
2138 * nodes in system.
2139 */
2140 preferred_gfp = gfp | __GFP_NOWARN;
2141 preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2142 page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes);
2143 if (!page)
Aneesh Kumar K.Vc0455112022-01-14 14:08:14 -08002144 page = __alloc_pages(gfp, order, nid, NULL);
Feng Tang4c54d942021-09-02 15:00:10 -07002145
2146 return page;
2147}
2148
Linus Torvalds1da177e2005-04-16 15:20:36 -07002149/**
Matthew Wilcox (Oracle)eb350732021-04-29 23:01:24 -07002150 * alloc_pages_vma - Allocate a page for a VMA.
2151 * @gfp: GFP flags.
2152 * @order: Order of the GFP allocation.
2153 * @vma: Pointer to VMA or NULL if not available.
2154 * @addr: Virtual address of the allocation. Must be inside @vma.
Matthew Wilcox (Oracle)eb350732021-04-29 23:01:24 -07002155 * @hugepage: For hugepages try only the preferred node if possible.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002156 *
Matthew Wilcox (Oracle)eb350732021-04-29 23:01:24 -07002157 * Allocate a page for a specific address in @vma, using the appropriate
2158 * NUMA policy. When @vma is not NULL the caller must hold the mmap_lock
2159 * of the mm_struct of the VMA to prevent it from going away. Should be
2160 * used for all allocations for pages that will be mapped into user space.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161 *
Matthew Wilcox (Oracle)eb350732021-04-29 23:01:24 -07002162 * Return: The page on success or NULL if allocation fails.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002163 */
Matthew Wilcox (Oracle)eb350732021-04-29 23:01:24 -07002164struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
Michal Hockobe1a13e2022-01-14 14:07:27 -08002165 unsigned long addr, bool hugepage)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002166{
Mel Gormancc9a6c82012-03-21 16:34:11 -07002167 struct mempolicy *pol;
Michal Hockobe1a13e2022-01-14 14:07:27 -08002168 int node = numa_node_id();
Miao Xiec0ff7452010-05-24 14:32:08 -07002169 struct page *page;
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002170 int preferred_nid;
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002171 nodemask_t *nmask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002172
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07002173 pol = get_vma_policy(vma, addr);
Mel Gormancc9a6c82012-03-21 16:34:11 -07002174
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002175 if (pol->mode == MPOL_INTERLEAVE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176 unsigned nid;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08002177
Andi Kleen8eac5632011-02-25 14:44:28 -08002178 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002179 mpol_cond_put(pol);
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002180 page = alloc_page_interleave(gfp, order, nid);
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002181 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182 }
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002183
Feng Tang4c54d942021-09-02 15:00:10 -07002184 if (pol->mode == MPOL_PREFERRED_MANY) {
Aneesh Kumar K.Vc0455112022-01-14 14:08:14 -08002185 node = policy_node(gfp, pol, node);
Feng Tang4c54d942021-09-02 15:00:10 -07002186 page = alloc_pages_preferred_many(gfp, order, node, pol);
2187 mpol_cond_put(pol);
2188 goto out;
2189 }
2190
David Rientjes19deb762019-09-04 12:54:20 -07002191 if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2192 int hpage_node = node;
2193
2194 /*
2195 * For hugepage allocation and non-interleave policy which
2196 * allows the current node (or other explicitly preferred
2197 * node) we only try to allocate from the current/preferred
2198 * node and don't fall back to other nodes, as the cost of
2199 * remote accesses would likely offset THP benefits.
2200 *
Dave Hansenb27abac2021-09-02 15:00:06 -07002201 * If the policy is interleave or does not allow the current
David Rientjes19deb762019-09-04 12:54:20 -07002202 * node in its nodemask, we allocate the standard way.
2203 */
Feng Tang7858d7b2021-06-30 18:51:00 -07002204 if (pol->mode == MPOL_PREFERRED)
Ben Widawsky269fbe72021-06-30 18:51:10 -07002205 hpage_node = first_node(pol->nodes);
David Rientjes19deb762019-09-04 12:54:20 -07002206
2207 nmask = policy_nodemask(gfp, pol);
2208 if (!nmask || node_isset(hpage_node, *nmask)) {
2209 mpol_cond_put(pol);
Vlastimil Babkacc638f32020-01-13 16:29:04 -08002210 /*
2211 * First, try to allocate THP only on local node, but
2212 * don't reclaim unnecessarily, just compact.
2213 */
David Rientjes19deb762019-09-04 12:54:20 -07002214 page = __alloc_pages_node(hpage_node,
Vlastimil Babkacc638f32020-01-13 16:29:04 -08002215 gfp | __GFP_THISNODE | __GFP_NORETRY, order);
David Rientjes76e654c2019-09-04 12:54:25 -07002216
2217 /*
2218 * If hugepage allocations are configured to always
2219 * synchronous compact or the vma has been madvised
2220 * to prefer hugepage backing, retry allowing remote
Vlastimil Babkacc638f32020-01-13 16:29:04 -08002221 * memory with both reclaim and compact as well.
David Rientjes76e654c2019-09-04 12:54:25 -07002222 */
2223 if (!page && (gfp & __GFP_DIRECT_RECLAIM))
Andrey Ryabinin33863532021-12-24 21:12:35 -08002224 page = __alloc_pages(gfp, order, hpage_node, nmask);
David Rientjes76e654c2019-09-04 12:54:25 -07002225
David Rientjes19deb762019-09-04 12:54:20 -07002226 goto out;
2227 }
2228 }
2229
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002230 nmask = policy_nodemask(gfp, pol);
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002231 preferred_nid = policy_node(gfp, pol, node);
Matthew Wilcox (Oracle)84172f42021-04-29 23:01:15 -07002232 page = __alloc_pages(gfp, order, preferred_nid, nmask);
Vlastimil Babkad51e9892017-01-24 15:18:18 -08002233 mpol_cond_put(pol);
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002234out:
Miao Xiec0ff7452010-05-24 14:32:08 -07002235 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236}
Christoph Hellwig69262212019-06-26 14:27:05 +02002237EXPORT_SYMBOL(alloc_pages_vma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002238
2239/**
Matthew Wilcox (Oracle)6421ec72021-04-29 23:01:21 -07002240 * alloc_pages - Allocate pages.
2241 * @gfp: GFP flags.
2242 * @order: Power of two of number of pages to allocate.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002243 *
Matthew Wilcox (Oracle)6421ec72021-04-29 23:01:21 -07002244 * Allocate 1 << @order contiguous pages. The physical address of the
2245 * first page is naturally aligned (eg an order-3 allocation will be aligned
2246 * to a multiple of 8 * PAGE_SIZE bytes). The NUMA policy of the current
2247 * process is honoured when in process context.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002248 *
Matthew Wilcox (Oracle)6421ec72021-04-29 23:01:21 -07002249 * Context: Can be called from any context, providing the appropriate GFP
2250 * flags are used.
2251 * Return: The page on success or NULL if allocation fails.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002252 */
Matthew Wilcox (Oracle)d7f946d2021-04-29 23:01:18 -07002253struct page *alloc_pages(gfp_t gfp, unsigned order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002254{
Oleg Nesterov8d902742014-10-09 15:27:45 -07002255 struct mempolicy *pol = &default_policy;
Miao Xiec0ff7452010-05-24 14:32:08 -07002256 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002257
Oleg Nesterov8d902742014-10-09 15:27:45 -07002258 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2259 pol = get_task_policy(current);
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002260
2261 /*
2262 * No reference counting needed for current->mempolicy
2263 * nor system default_policy
2264 */
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002265 if (pol->mode == MPOL_INTERLEAVE)
Miao Xiec0ff7452010-05-24 14:32:08 -07002266 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
Feng Tang4c54d942021-09-02 15:00:10 -07002267 else if (pol->mode == MPOL_PREFERRED_MANY)
2268 page = alloc_pages_preferred_many(gfp, order,
Aneesh Kumar K.Vc0455112022-01-14 14:08:14 -08002269 policy_node(gfp, pol, numa_node_id()), pol);
Miao Xiec0ff7452010-05-24 14:32:08 -07002270 else
Matthew Wilcox (Oracle)84172f42021-04-29 23:01:15 -07002271 page = __alloc_pages(gfp, order,
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002272 policy_node(gfp, pol, numa_node_id()),
Andi Kleen5c4b4be2011-03-04 17:36:32 -08002273 policy_nodemask(gfp, pol));
Mel Gormancc9a6c82012-03-21 16:34:11 -07002274
Miao Xiec0ff7452010-05-24 14:32:08 -07002275 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276}
Matthew Wilcox (Oracle)d7f946d2021-04-29 23:01:18 -07002277EXPORT_SYMBOL(alloc_pages);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002278
Matthew Wilcox (Oracle)cc09cb12020-12-15 22:55:54 -05002279struct folio *folio_alloc(gfp_t gfp, unsigned order)
2280{
2281 struct page *page = alloc_pages(gfp | __GFP_COMP, order);
2282
2283 if (page && order > 1)
2284 prep_transhuge_page(page);
2285 return (struct folio *)page;
2286}
2287EXPORT_SYMBOL(folio_alloc);
2288
Chen Wandunc00b6b92021-11-05 13:39:53 -07002289static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
2290 struct mempolicy *pol, unsigned long nr_pages,
2291 struct page **page_array)
2292{
2293 int nodes;
2294 unsigned long nr_pages_per_node;
2295 int delta;
2296 int i;
2297 unsigned long nr_allocated;
2298 unsigned long total_allocated = 0;
2299
2300 nodes = nodes_weight(pol->nodes);
2301 nr_pages_per_node = nr_pages / nodes;
2302 delta = nr_pages - nodes * nr_pages_per_node;
2303
2304 for (i = 0; i < nodes; i++) {
2305 if (delta) {
2306 nr_allocated = __alloc_pages_bulk(gfp,
2307 interleave_nodes(pol), NULL,
2308 nr_pages_per_node + 1, NULL,
2309 page_array);
2310 delta--;
2311 } else {
2312 nr_allocated = __alloc_pages_bulk(gfp,
2313 interleave_nodes(pol), NULL,
2314 nr_pages_per_node, NULL, page_array);
2315 }
2316
2317 page_array += nr_allocated;
2318 total_allocated += nr_allocated;
2319 }
2320
2321 return total_allocated;
2322}
2323
2324static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
2325 struct mempolicy *pol, unsigned long nr_pages,
2326 struct page **page_array)
2327{
2328 gfp_t preferred_gfp;
2329 unsigned long nr_allocated = 0;
2330
2331 preferred_gfp = gfp | __GFP_NOWARN;
2332 preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2333
2334 nr_allocated = __alloc_pages_bulk(preferred_gfp, nid, &pol->nodes,
2335 nr_pages, NULL, page_array);
2336
2337 if (nr_allocated < nr_pages)
2338 nr_allocated += __alloc_pages_bulk(gfp, numa_node_id(), NULL,
2339 nr_pages - nr_allocated, NULL,
2340 page_array + nr_allocated);
2341 return nr_allocated;
2342}
2343
2344/* alloc pages bulk and mempolicy should be considered at the
2345 * same time in some situation such as vmalloc.
2346 *
2347 * It can accelerate memory allocation especially interleaving
2348 * allocate memory.
2349 */
2350unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
2351 unsigned long nr_pages, struct page **page_array)
2352{
2353 struct mempolicy *pol = &default_policy;
2354
2355 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2356 pol = get_task_policy(current);
2357
2358 if (pol->mode == MPOL_INTERLEAVE)
2359 return alloc_pages_bulk_array_interleave(gfp, pol,
2360 nr_pages, page_array);
2361
2362 if (pol->mode == MPOL_PREFERRED_MANY)
2363 return alloc_pages_bulk_array_preferred_many(gfp,
2364 numa_node_id(), pol, nr_pages, page_array);
2365
2366 return __alloc_pages_bulk(gfp, policy_node(gfp, pol, numa_node_id()),
2367 policy_nodemask(gfp, pol), nr_pages, NULL,
2368 page_array);
2369}
2370
Oleg Nesterovef0855d2013-09-11 14:20:14 -07002371int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2372{
2373 struct mempolicy *pol = mpol_dup(vma_policy(src));
2374
2375 if (IS_ERR(pol))
2376 return PTR_ERR(pol);
2377 dst->vm_policy = pol;
2378 return 0;
2379}
2380
Paul Jackson42253992006-01-08 01:01:59 -08002381/*
Lee Schermerhorn846a16b2008-04-28 02:13:09 -07002382 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
Paul Jackson42253992006-01-08 01:01:59 -08002383 * rebinds the mempolicy its copying by calling mpol_rebind_policy()
2384 * with the mems_allowed returned by cpuset_mems_allowed(). This
2385 * keeps mempolicies cpuset relative after its cpuset moves. See
2386 * further kernel/cpuset.c update_nodemask().
Miao Xie708c1bb2010-05-24 14:32:07 -07002387 *
2388 * current's mempolicy may be rebinded by the other task(the task that changes
2389 * cpuset's mems), so we needn't do rebind work for current task.
Paul Jackson42253992006-01-08 01:01:59 -08002390 */
Paul Jackson42253992006-01-08 01:01:59 -08002391
Lee Schermerhorn846a16b2008-04-28 02:13:09 -07002392/* Slow path of a mempolicy duplicate */
2393struct mempolicy *__mpol_dup(struct mempolicy *old)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002394{
2395 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2396
2397 if (!new)
2398 return ERR_PTR(-ENOMEM);
Miao Xie708c1bb2010-05-24 14:32:07 -07002399
2400 /* task's mempolicy is protected by alloc_lock */
2401 if (old == current->mempolicy) {
2402 task_lock(current);
2403 *new = *old;
2404 task_unlock(current);
2405 } else
2406 *new = *old;
2407
Paul Jackson42253992006-01-08 01:01:59 -08002408 if (current_cpuset_is_being_rebound()) {
2409 nodemask_t mems = cpuset_mems_allowed(current);
Vlastimil Babka213980c2017-07-06 15:40:06 -07002410 mpol_rebind_policy(new, &mems);
Paul Jackson42253992006-01-08 01:01:59 -08002411 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002412 atomic_set(&new->refcnt, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002413 return new;
2414}
2415
2416/* Slow path of a mempolicy comparison */
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002417bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002418{
2419 if (!a || !b)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002420 return false;
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002421 if (a->mode != b->mode)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002422 return false;
Bob Liu19800502010-05-24 14:32:01 -07002423 if (a->flags != b->flags)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002424 return false;
Aneesh Kumar K.Vc6018b42022-01-14 14:08:17 -08002425 if (a->home_node != b->home_node)
2426 return false;
Bob Liu19800502010-05-24 14:32:01 -07002427 if (mpol_store_user_nodemask(a))
2428 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002429 return false;
Bob Liu19800502010-05-24 14:32:01 -07002430
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002431 switch (a->mode) {
Mel Gorman19770b32008-04-28 02:12:18 -07002432 case MPOL_BIND:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002433 case MPOL_INTERLEAVE:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002434 case MPOL_PREFERRED:
Dave Hansenb27abac2021-09-02 15:00:06 -07002435 case MPOL_PREFERRED_MANY:
Ben Widawsky269fbe72021-06-30 18:51:10 -07002436 return !!nodes_equal(a->nodes, b->nodes);
Feng Tang7858d7b2021-06-30 18:51:00 -07002437 case MPOL_LOCAL:
2438 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002439 default:
2440 BUG();
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002441 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002442 }
2443}
2444
Linus Torvalds1da177e2005-04-16 15:20:36 -07002445/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002446 * Shared memory backing store policy support.
2447 *
2448 * Remember policies even when nobody has shared memory mapped.
2449 * The policies are kept in Red-Black tree linked from the inode.
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002450 * They are protected by the sp->lock rwlock, which should be held
Linus Torvalds1da177e2005-04-16 15:20:36 -07002451 * for any accesses to the tree.
2452 */
2453
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002454/*
2455 * lookup first element intersecting start-end. Caller holds sp->lock for
2456 * reading or for writing
2457 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002458static struct sp_node *
2459sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2460{
2461 struct rb_node *n = sp->root.rb_node;
2462
2463 while (n) {
2464 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2465
2466 if (start >= p->end)
2467 n = n->rb_right;
2468 else if (end <= p->start)
2469 n = n->rb_left;
2470 else
2471 break;
2472 }
2473 if (!n)
2474 return NULL;
2475 for (;;) {
2476 struct sp_node *w = NULL;
2477 struct rb_node *prev = rb_prev(n);
2478 if (!prev)
2479 break;
2480 w = rb_entry(prev, struct sp_node, nd);
2481 if (w->end <= start)
2482 break;
2483 n = prev;
2484 }
2485 return rb_entry(n, struct sp_node, nd);
2486}
2487
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002488/*
2489 * Insert a new shared policy into the list. Caller holds sp->lock for
2490 * writing.
2491 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002492static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2493{
2494 struct rb_node **p = &sp->root.rb_node;
2495 struct rb_node *parent = NULL;
2496 struct sp_node *nd;
2497
2498 while (*p) {
2499 parent = *p;
2500 nd = rb_entry(parent, struct sp_node, nd);
2501 if (new->start < nd->start)
2502 p = &(*p)->rb_left;
2503 else if (new->end > nd->end)
2504 p = &(*p)->rb_right;
2505 else
2506 BUG();
2507 }
2508 rb_link_node(&new->nd, parent, p);
2509 rb_insert_color(&new->nd, &sp->root);
Paul Mundt140d5a42007-07-15 23:38:16 -07002510 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002511 new->policy ? new->policy->mode : 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002512}
2513
2514/* Find shared policy intersecting idx */
2515struct mempolicy *
2516mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2517{
2518 struct mempolicy *pol = NULL;
2519 struct sp_node *sn;
2520
2521 if (!sp->root.rb_node)
2522 return NULL;
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002523 read_lock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002524 sn = sp_lookup(sp, idx, idx+1);
2525 if (sn) {
2526 mpol_get(sn->policy);
2527 pol = sn->policy;
2528 }
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002529 read_unlock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002530 return pol;
2531}
2532
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002533static void sp_free(struct sp_node *n)
2534{
2535 mpol_put(n->policy);
2536 kmem_cache_free(sn_cache, n);
2537}
2538
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002539/**
2540 * mpol_misplaced - check whether current page node is valid in policy
2541 *
Fabian Frederickb46e14a2014-06-04 16:08:18 -07002542 * @page: page to be checked
2543 * @vma: vm area where page mapped
2544 * @addr: virtual address where page mapped
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002545 *
2546 * Lookup current policy node id for vma,addr and "compare to" page's
Matthew Wilcox (Oracle)5f076942021-04-29 23:01:27 -07002547 * node id. Policy determination "mimics" alloc_page_vma().
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002548 * Called from fault path where we know the vma and faulting address.
Matthew Wilcox (Oracle)5f076942021-04-29 23:01:27 -07002549 *
Baolin Wang062db292021-09-02 15:00:03 -07002550 * Return: NUMA_NO_NODE if the page is in a node that is valid for this
2551 * policy, or a suitable node ID to allocate a replacement page from.
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002552 */
2553int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2554{
2555 struct mempolicy *pol;
Mel Gormanc33d6c02016-05-19 17:14:10 -07002556 struct zoneref *z;
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002557 int curnid = page_to_nid(page);
2558 unsigned long pgoff;
Peter Zijlstra90572892013-10-07 11:29:20 +01002559 int thiscpu = raw_smp_processor_id();
2560 int thisnid = cpu_to_node(thiscpu);
Anshuman Khandual98fa15f2019-03-05 15:42:58 -08002561 int polnid = NUMA_NO_NODE;
Baolin Wang062db292021-09-02 15:00:03 -07002562 int ret = NUMA_NO_NODE;
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002563
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07002564 pol = get_vma_policy(vma, addr);
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002565 if (!(pol->flags & MPOL_F_MOF))
2566 goto out;
2567
2568 switch (pol->mode) {
2569 case MPOL_INTERLEAVE:
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002570 pgoff = vma->vm_pgoff;
2571 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
Laurent Dufour98c70ba2017-09-08 16:12:39 -07002572 polnid = offset_il_node(pol, pgoff);
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002573 break;
2574
2575 case MPOL_PREFERRED:
Dave Hansenb27abac2021-09-02 15:00:06 -07002576 if (node_isset(curnid, pol->nodes))
2577 goto out;
Ben Widawsky269fbe72021-06-30 18:51:10 -07002578 polnid = first_node(pol->nodes);
Feng Tang7858d7b2021-06-30 18:51:00 -07002579 break;
2580
2581 case MPOL_LOCAL:
2582 polnid = numa_node_id();
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002583 break;
2584
2585 case MPOL_BIND:
Huang Yingbda420b2021-02-24 12:09:43 -08002586 /* Optimize placement among multiple nodes via NUMA balancing */
2587 if (pol->flags & MPOL_F_MORON) {
Ben Widawsky269fbe72021-06-30 18:51:10 -07002588 if (node_isset(thisnid, pol->nodes))
Huang Yingbda420b2021-02-24 12:09:43 -08002589 break;
2590 goto out;
2591 }
Dave Hansenb27abac2021-09-02 15:00:06 -07002592 fallthrough;
Mel Gormanc33d6c02016-05-19 17:14:10 -07002593
Dave Hansenb27abac2021-09-02 15:00:06 -07002594 case MPOL_PREFERRED_MANY:
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002595 /*
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002596 * use current page if in policy nodemask,
2597 * else select nearest allowed node, if any.
2598 * If no allowed nodes, use current [!misplaced].
2599 */
Ben Widawsky269fbe72021-06-30 18:51:10 -07002600 if (node_isset(curnid, pol->nodes))
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002601 goto out;
Mel Gormanc33d6c02016-05-19 17:14:10 -07002602 z = first_zones_zonelist(
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002603 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2604 gfp_zone(GFP_HIGHUSER),
Ben Widawsky269fbe72021-06-30 18:51:10 -07002605 &pol->nodes);
Pavel Tatashinc1093b72018-08-21 21:53:32 -07002606 polnid = zone_to_nid(z->zone);
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002607 break;
2608
2609 default:
2610 BUG();
2611 }
Mel Gorman5606e382012-11-02 18:19:13 +00002612
2613 /* Migrate the page towards the node whose CPU is referencing it */
Mel Gormane42c8ff2012-11-12 09:17:07 +00002614 if (pol->flags & MPOL_F_MORON) {
Peter Zijlstra90572892013-10-07 11:29:20 +01002615 polnid = thisnid;
Mel Gorman5606e382012-11-02 18:19:13 +00002616
Rik van Riel10f39042014-01-27 17:03:44 -05002617 if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
Rik van Rielde1c9ce2013-10-07 11:29:39 +01002618 goto out;
Mel Gormane42c8ff2012-11-12 09:17:07 +00002619 }
2620
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002621 if (curnid != polnid)
2622 ret = polnid;
2623out:
2624 mpol_cond_put(pol);
2625
2626 return ret;
2627}
2628
David Rientjesc11600e2016-09-01 16:15:07 -07002629/*
2630 * Drop the (possibly final) reference to task->mempolicy. It needs to be
2631 * dropped after task->mempolicy is set to NULL so that any allocation done as
2632 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2633 * policy.
2634 */
2635void mpol_put_task_policy(struct task_struct *task)
2636{
2637 struct mempolicy *pol;
2638
2639 task_lock(task);
2640 pol = task->mempolicy;
2641 task->mempolicy = NULL;
2642 task_unlock(task);
2643 mpol_put(pol);
2644}
2645
Linus Torvalds1da177e2005-04-16 15:20:36 -07002646static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2647{
Paul Mundt140d5a42007-07-15 23:38:16 -07002648 pr_debug("deleting %lx-l%lx\n", n->start, n->end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002649 rb_erase(&n->nd, &sp->root);
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002650 sp_free(n);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002651}
2652
Mel Gorman42288fe2012-12-21 23:10:25 +00002653static void sp_node_init(struct sp_node *node, unsigned long start,
2654 unsigned long end, struct mempolicy *pol)
2655{
2656 node->start = start;
2657 node->end = end;
2658 node->policy = pol;
2659}
2660
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07002661static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2662 struct mempolicy *pol)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002663{
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002664 struct sp_node *n;
2665 struct mempolicy *newpol;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002666
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002667 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002668 if (!n)
2669 return NULL;
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002670
2671 newpol = mpol_dup(pol);
2672 if (IS_ERR(newpol)) {
2673 kmem_cache_free(sn_cache, n);
2674 return NULL;
2675 }
2676 newpol->flags |= MPOL_F_SHARED;
Mel Gorman42288fe2012-12-21 23:10:25 +00002677 sp_node_init(n, start, end, newpol);
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002678
Linus Torvalds1da177e2005-04-16 15:20:36 -07002679 return n;
2680}
2681
2682/* Replace a policy range. */
2683static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2684 unsigned long end, struct sp_node *new)
2685{
Mel Gormanb22d1272012-10-08 16:29:17 -07002686 struct sp_node *n;
Mel Gorman42288fe2012-12-21 23:10:25 +00002687 struct sp_node *n_new = NULL;
2688 struct mempolicy *mpol_new = NULL;
Mel Gormanb22d1272012-10-08 16:29:17 -07002689 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002690
Mel Gorman42288fe2012-12-21 23:10:25 +00002691restart:
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002692 write_lock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002693 n = sp_lookup(sp, start, end);
2694 /* Take care of old policies in the same range. */
2695 while (n && n->start < end) {
2696 struct rb_node *next = rb_next(&n->nd);
2697 if (n->start >= start) {
2698 if (n->end <= end)
2699 sp_delete(sp, n);
2700 else
2701 n->start = end;
2702 } else {
2703 /* Old policy spanning whole new range. */
2704 if (n->end > end) {
Mel Gorman42288fe2012-12-21 23:10:25 +00002705 if (!n_new)
2706 goto alloc_new;
2707
2708 *mpol_new = *n->policy;
2709 atomic_set(&mpol_new->refcnt, 1);
KOSAKI Motohiro78806392013-03-08 12:43:29 -08002710 sp_node_init(n_new, end, n->end, mpol_new);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002711 n->end = start;
Hillf Danton5ca39572013-03-08 12:43:28 -08002712 sp_insert(sp, n_new);
Mel Gorman42288fe2012-12-21 23:10:25 +00002713 n_new = NULL;
2714 mpol_new = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002715 break;
2716 } else
2717 n->end = start;
2718 }
2719 if (!next)
2720 break;
2721 n = rb_entry(next, struct sp_node, nd);
2722 }
2723 if (new)
2724 sp_insert(sp, new);
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002725 write_unlock(&sp->lock);
Mel Gorman42288fe2012-12-21 23:10:25 +00002726 ret = 0;
2727
2728err_out:
2729 if (mpol_new)
2730 mpol_put(mpol_new);
2731 if (n_new)
2732 kmem_cache_free(sn_cache, n_new);
2733
Mel Gormanb22d1272012-10-08 16:29:17 -07002734 return ret;
Mel Gorman42288fe2012-12-21 23:10:25 +00002735
2736alloc_new:
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002737 write_unlock(&sp->lock);
Mel Gorman42288fe2012-12-21 23:10:25 +00002738 ret = -ENOMEM;
2739 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2740 if (!n_new)
2741 goto err_out;
2742 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2743 if (!mpol_new)
2744 goto err_out;
2745 goto restart;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002746}
2747
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002748/**
2749 * mpol_shared_policy_init - initialize shared policy for inode
2750 * @sp: pointer to inode shared policy
2751 * @mpol: struct mempolicy to install
2752 *
2753 * Install non-NULL @mpol in inode's shared policy rb-tree.
2754 * On entry, the current task has a reference on a non-NULL @mpol.
2755 * This must be released on exit.
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002756 * This is called at get_inode() calls and we can use GFP_KERNEL.
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002757 */
2758void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
Robin Holt7339ff82006-01-14 13:20:48 -08002759{
Miao Xie58568d22009-06-16 15:31:49 -07002760 int ret;
2761
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002762 sp->root = RB_ROOT; /* empty tree == default mempolicy */
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002763 rwlock_init(&sp->lock);
Robin Holt7339ff82006-01-14 13:20:48 -08002764
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002765 if (mpol) {
2766 struct vm_area_struct pvma;
2767 struct mempolicy *new;
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002768 NODEMASK_SCRATCH(scratch);
Robin Holt7339ff82006-01-14 13:20:48 -08002769
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002770 if (!scratch)
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002771 goto put_mpol;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002772 /* contextualize the tmpfs mount point mempolicy */
2773 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
Lee Schermerhorn15d77832010-05-24 14:32:04 -07002774 if (IS_ERR(new))
Dan Carpenter0cae3452010-05-25 23:42:58 -07002775 goto free_scratch; /* no valid nodemask intersection */
Miao Xie58568d22009-06-16 15:31:49 -07002776
2777 task_lock(current);
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002778 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
Miao Xie58568d22009-06-16 15:31:49 -07002779 task_unlock(current);
Lee Schermerhorn15d77832010-05-24 14:32:04 -07002780 if (ret)
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002781 goto put_new;
Robin Holt7339ff82006-01-14 13:20:48 -08002782
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002783 /* Create pseudo-vma that contains just the policy */
Kirill A. Shutemov2c4541e2018-07-26 16:37:30 -07002784 vma_init(&pvma, NULL);
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002785 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2786 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
Lee Schermerhorn15d77832010-05-24 14:32:04 -07002787
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002788put_new:
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002789 mpol_put(new); /* drop initial ref */
Dan Carpenter0cae3452010-05-25 23:42:58 -07002790free_scratch:
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002791 NODEMASK_SCRATCH_FREE(scratch);
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002792put_mpol:
2793 mpol_put(mpol); /* drop our incoming ref on sb mpol */
Robin Holt7339ff82006-01-14 13:20:48 -08002794 }
2795}
2796
Linus Torvalds1da177e2005-04-16 15:20:36 -07002797int mpol_set_shared_policy(struct shared_policy *info,
2798 struct vm_area_struct *vma, struct mempolicy *npol)
2799{
2800 int err;
2801 struct sp_node *new = NULL;
2802 unsigned long sz = vma_pages(vma);
2803
David Rientjes028fec42008-04-28 02:12:25 -07002804 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805 vma->vm_pgoff,
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002806 sz, npol ? npol->mode : -1,
David Rientjes028fec42008-04-28 02:12:25 -07002807 npol ? npol->flags : -1,
Ben Widawsky269fbe72021-06-30 18:51:10 -07002808 npol ? nodes_addr(npol->nodes)[0] : NUMA_NO_NODE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002809
2810 if (npol) {
2811 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2812 if (!new)
2813 return -ENOMEM;
2814 }
2815 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2816 if (err && new)
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002817 sp_free(new);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002818 return err;
2819}
2820
2821/* Free a backing policy store on inode delete. */
2822void mpol_free_shared_policy(struct shared_policy *p)
2823{
2824 struct sp_node *n;
2825 struct rb_node *next;
2826
2827 if (!p->root.rb_node)
2828 return;
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002829 write_lock(&p->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002830 next = rb_first(&p->root);
2831 while (next) {
2832 n = rb_entry(next, struct sp_node, nd);
2833 next = rb_next(&n->nd);
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002834 sp_delete(p, n);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002835 }
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002836 write_unlock(&p->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002837}
2838
Mel Gorman1a687c22012-11-22 11:16:36 +00002839#ifdef CONFIG_NUMA_BALANCING
Mel Gormanc2976632014-01-29 14:05:42 -08002840static int __initdata numabalancing_override;
Mel Gorman1a687c22012-11-22 11:16:36 +00002841
2842static void __init check_numabalancing_enable(void)
2843{
2844 bool numabalancing_default = false;
2845
2846 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2847 numabalancing_default = true;
2848
Mel Gormanc2976632014-01-29 14:05:42 -08002849 /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2850 if (numabalancing_override)
2851 set_numabalancing_state(numabalancing_override == 1);
2852
Mel Gormanb0dc2b92015-05-14 15:17:09 -07002853 if (num_online_nodes() > 1 && !numabalancing_override) {
Joe Perches756a0252016-03-17 14:19:47 -07002854 pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
Mel Gormanc2976632014-01-29 14:05:42 -08002855 numabalancing_default ? "Enabling" : "Disabling");
Mel Gorman1a687c22012-11-22 11:16:36 +00002856 set_numabalancing_state(numabalancing_default);
2857 }
2858}
2859
2860static int __init setup_numabalancing(char *str)
2861{
2862 int ret = 0;
2863 if (!str)
2864 goto out;
Mel Gorman1a687c22012-11-22 11:16:36 +00002865
2866 if (!strcmp(str, "enable")) {
Mel Gormanc2976632014-01-29 14:05:42 -08002867 numabalancing_override = 1;
Mel Gorman1a687c22012-11-22 11:16:36 +00002868 ret = 1;
2869 } else if (!strcmp(str, "disable")) {
Mel Gormanc2976632014-01-29 14:05:42 -08002870 numabalancing_override = -1;
Mel Gorman1a687c22012-11-22 11:16:36 +00002871 ret = 1;
2872 }
2873out:
2874 if (!ret)
Andrew Morton4a404be2014-01-29 14:05:43 -08002875 pr_warn("Unable to parse numa_balancing=\n");
Mel Gorman1a687c22012-11-22 11:16:36 +00002876
2877 return ret;
2878}
2879__setup("numa_balancing=", setup_numabalancing);
2880#else
2881static inline void __init check_numabalancing_enable(void)
2882{
2883}
2884#endif /* CONFIG_NUMA_BALANCING */
2885
Linus Torvalds1da177e2005-04-16 15:20:36 -07002886/* assumes fs == KERNEL_DS */
2887void __init numa_policy_init(void)
2888{
Paul Mundtb71636e2007-07-15 23:38:15 -07002889 nodemask_t interleave_nodes;
2890 unsigned long largest = 0;
2891 int nid, prefer = 0;
2892
Linus Torvalds1da177e2005-04-16 15:20:36 -07002893 policy_cache = kmem_cache_create("numa_policy",
2894 sizeof(struct mempolicy),
Paul Mundt20c2df82007-07-20 10:11:58 +09002895 0, SLAB_PANIC, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002896
2897 sn_cache = kmem_cache_create("shared_policy_node",
2898 sizeof(struct sp_node),
Paul Mundt20c2df82007-07-20 10:11:58 +09002899 0, SLAB_PANIC, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002900
Mel Gorman5606e382012-11-02 18:19:13 +00002901 for_each_node(nid) {
2902 preferred_node_policy[nid] = (struct mempolicy) {
2903 .refcnt = ATOMIC_INIT(1),
2904 .mode = MPOL_PREFERRED,
2905 .flags = MPOL_F_MOF | MPOL_F_MORON,
Ben Widawsky269fbe72021-06-30 18:51:10 -07002906 .nodes = nodemask_of_node(nid),
Mel Gorman5606e382012-11-02 18:19:13 +00002907 };
2908 }
2909
Paul Mundtb71636e2007-07-15 23:38:15 -07002910 /*
2911 * Set interleaving policy for system init. Interleaving is only
2912 * enabled across suitably sized nodes (default is >= 16MB), or
2913 * fall back to the largest node if they're all smaller.
2914 */
2915 nodes_clear(interleave_nodes);
Lai Jiangshan01f13bd2012-12-12 13:51:33 -08002916 for_each_node_state(nid, N_MEMORY) {
Paul Mundtb71636e2007-07-15 23:38:15 -07002917 unsigned long total_pages = node_present_pages(nid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002918
Paul Mundtb71636e2007-07-15 23:38:15 -07002919 /* Preserve the largest node */
2920 if (largest < total_pages) {
2921 largest = total_pages;
2922 prefer = nid;
2923 }
2924
2925 /* Interleave this node? */
2926 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2927 node_set(nid, interleave_nodes);
2928 }
2929
2930 /* All too small, use the largest */
2931 if (unlikely(nodes_empty(interleave_nodes)))
2932 node_set(prefer, interleave_nodes);
2933
David Rientjes028fec42008-04-28 02:12:25 -07002934 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
Mitchel Humpherysb1de0d12014-06-06 14:38:30 -07002935 pr_err("%s: interleaving failed\n", __func__);
Mel Gorman1a687c22012-11-22 11:16:36 +00002936
2937 check_numabalancing_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002938}
2939
Christoph Lameter8bccd852005-10-29 18:16:59 -07002940/* Reset policy of current process to default */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002941void numa_default_policy(void)
2942{
David Rientjes028fec42008-04-28 02:12:25 -07002943 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002944}
Paul Jackson68860ec2005-10-30 15:02:36 -08002945
Paul Jackson42253992006-01-08 01:01:59 -08002946/*
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002947 * Parse and format mempolicy from/to strings
2948 */
2949
Lee Schermerhorn345ace92010-05-24 14:32:04 -07002950static const char * const policy_modes[] =
2951{
2952 [MPOL_DEFAULT] = "default",
2953 [MPOL_PREFERRED] = "prefer",
2954 [MPOL_BIND] = "bind",
2955 [MPOL_INTERLEAVE] = "interleave",
Lee Schermerhornd3a71032012-10-25 14:16:29 +02002956 [MPOL_LOCAL] = "local",
Dave Hansenb27abac2021-09-02 15:00:06 -07002957 [MPOL_PREFERRED_MANY] = "prefer (many)",
Lee Schermerhorn345ace92010-05-24 14:32:04 -07002958};
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002959
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002960
2961#ifdef CONFIG_TMPFS
2962/**
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08002963 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002964 * @str: string containing mempolicy to parse
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002965 * @mpol: pointer to struct mempolicy pointer, returned on success.
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002966 *
2967 * Format of input:
2968 * <mode>[=<flags>][:<nodelist>]
2969 *
Randy Dunlapdad5b022022-01-14 14:08:24 -08002970 * Return: %0 on success, else %1
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002971 */
Hugh Dickinsa7a88b22013-01-02 02:04:23 -08002972int mpol_parse_str(char *str, struct mempolicy **mpol)
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002973{
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002974 struct mempolicy *new = NULL;
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08002975 unsigned short mode_flags;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002976 nodemask_t nodes;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002977 char *nodelist = strchr(str, ':');
2978 char *flags = strchr(str, '=');
zhong jiangdedf2c72018-10-26 15:06:57 -07002979 int err = 1, mode;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002980
Dan Carpenterc7a91bc2020-01-30 22:11:07 -08002981 if (flags)
2982 *flags++ = '\0'; /* terminate mode string */
2983
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002984 if (nodelist) {
2985 /* NUL-terminate mode or flags string */
2986 *nodelist++ = '\0';
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002987 if (nodelist_parse(nodelist, nodes))
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002988 goto out;
Lai Jiangshan01f13bd2012-12-12 13:51:33 -08002989 if (!nodes_subset(nodes, node_states[N_MEMORY]))
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002990 goto out;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002991 } else
2992 nodes_clear(nodes);
2993
zhong jiangdedf2c72018-10-26 15:06:57 -07002994 mode = match_string(policy_modes, MPOL_MAX, str);
2995 if (mode < 0)
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002996 goto out;
2997
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002998 switch (mode) {
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002999 case MPOL_PREFERRED:
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07003000 /*
Randy Dunlapaa9f7d52020-04-01 21:10:58 -07003001 * Insist on a nodelist of one node only, although later
3002 * we use first_node(nodes) to grab a single node, so here
3003 * nodelist (or nodes) cannot be empty.
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07003004 */
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07003005 if (nodelist) {
3006 char *rest = nodelist;
3007 while (isdigit(*rest))
3008 rest++;
KOSAKI Motohiro926f2ae2010-03-23 13:35:32 -07003009 if (*rest)
3010 goto out;
Randy Dunlapaa9f7d52020-04-01 21:10:58 -07003011 if (nodes_empty(nodes))
3012 goto out;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07003013 }
3014 break;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07003015 case MPOL_INTERLEAVE:
3016 /*
3017 * Default to online nodes with memory if no nodelist
3018 */
3019 if (!nodelist)
Lai Jiangshan01f13bd2012-12-12 13:51:33 -08003020 nodes = node_states[N_MEMORY];
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07003021 break;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07003022 case MPOL_LOCAL:
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07003023 /*
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07003024 * Don't allow a nodelist; mpol_new() checks flags
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07003025 */
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07003026 if (nodelist)
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07003027 goto out;
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07003028 break;
Ravikiran G Thirumalai413b43d2010-03-23 13:35:28 -07003029 case MPOL_DEFAULT:
3030 /*
3031 * Insist on a empty nodelist
3032 */
3033 if (!nodelist)
3034 err = 0;
3035 goto out;
Dave Hansenb27abac2021-09-02 15:00:06 -07003036 case MPOL_PREFERRED_MANY:
KOSAKI Motohirod69b2e632010-03-23 13:35:30 -07003037 case MPOL_BIND:
3038 /*
3039 * Insist on a nodelist
3040 */
3041 if (!nodelist)
3042 goto out;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07003043 }
3044
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07003045 mode_flags = 0;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07003046 if (flags) {
3047 /*
3048 * Currently, we only support two mutually exclusive
3049 * mode flags.
3050 */
3051 if (!strcmp(flags, "static"))
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07003052 mode_flags |= MPOL_F_STATIC_NODES;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07003053 else if (!strcmp(flags, "relative"))
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07003054 mode_flags |= MPOL_F_RELATIVE_NODES;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07003055 else
KOSAKI Motohiro926f2ae2010-03-23 13:35:32 -07003056 goto out;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07003057 }
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07003058
3059 new = mpol_new(mode, mode_flags, &nodes);
3060 if (IS_ERR(new))
KOSAKI Motohiro926f2ae2010-03-23 13:35:32 -07003061 goto out;
3062
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08003063 /*
3064 * Save nodes for mpol_to_str() to show the tmpfs mount options
3065 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
3066 */
Ben Widawsky269fbe72021-06-30 18:51:10 -07003067 if (mode != MPOL_PREFERRED) {
3068 new->nodes = nodes;
3069 } else if (nodelist) {
3070 nodes_clear(new->nodes);
3071 node_set(first_node(nodes), new->nodes);
3072 } else {
Feng Tang7858d7b2021-06-30 18:51:00 -07003073 new->mode = MPOL_LOCAL;
Ben Widawsky269fbe72021-06-30 18:51:10 -07003074 }
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08003075
3076 /*
3077 * Save nodes for contextualization: this will be used to "clone"
3078 * the mempolicy in a specific context [cpuset] at a later time.
3079 */
3080 new->w.user_nodemask = nodes;
3081
KOSAKI Motohiro926f2ae2010-03-23 13:35:32 -07003082 err = 0;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07003083
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07003084out:
3085 /* Restore string for error message */
3086 if (nodelist)
3087 *--nodelist = ':';
3088 if (flags)
3089 *--flags = '=';
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07003090 if (!err)
3091 *mpol = new;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07003092 return err;
3093}
3094#endif /* CONFIG_TMPFS */
3095
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07003096/**
3097 * mpol_to_str - format a mempolicy structure for printing
3098 * @buffer: to contain formatted mempolicy string
3099 * @maxlen: length of @buffer
3100 * @pol: pointer to mempolicy to be formatted
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07003101 *
David Rientjes948927e2013-11-12 15:07:28 -08003102 * Convert @pol into a string. If @buffer is too short, truncate the string.
3103 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
3104 * longest flag, "relative", and to display at least a few node ids.
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003105 */
David Rientjes948927e2013-11-12 15:07:28 -08003106void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003107{
3108 char *p = buffer;
David Rientjes948927e2013-11-12 15:07:28 -08003109 nodemask_t nodes = NODE_MASK_NONE;
3110 unsigned short mode = MPOL_DEFAULT;
3111 unsigned short flags = 0;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003112
David Rientjes8790c71a2014-01-30 15:46:08 -08003113 if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
Lee Schermerhornbea904d2008-04-28 02:13:18 -07003114 mode = pol->mode;
David Rientjes948927e2013-11-12 15:07:28 -08003115 flags = pol->flags;
3116 }
Lee Schermerhornbea904d2008-04-28 02:13:18 -07003117
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003118 switch (mode) {
3119 case MPOL_DEFAULT:
Feng Tang7858d7b2021-06-30 18:51:00 -07003120 case MPOL_LOCAL:
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003121 break;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003122 case MPOL_PREFERRED:
Dave Hansenb27abac2021-09-02 15:00:06 -07003123 case MPOL_PREFERRED_MANY:
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003124 case MPOL_BIND:
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003125 case MPOL_INTERLEAVE:
Ben Widawsky269fbe72021-06-30 18:51:10 -07003126 nodes = pol->nodes;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003127 break;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003128 default:
David Rientjes948927e2013-11-12 15:07:28 -08003129 WARN_ON_ONCE(1);
3130 snprintf(p, maxlen, "unknown");
3131 return;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003132 }
3133
David Rientjesb7a9f422013-11-21 14:32:06 -08003134 p += snprintf(p, maxlen, "%s", policy_modes[mode]);
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003135
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07003136 if (flags & MPOL_MODE_FLAGS) {
David Rientjes948927e2013-11-12 15:07:28 -08003137 p += snprintf(p, buffer + maxlen - p, "=");
David Rientjesf5b087b2008-04-28 02:12:27 -07003138
Lee Schermerhorn22919902008-04-28 02:13:22 -07003139 /*
3140 * Currently, the only defined flags are mutually exclusive
3141 */
David Rientjesf5b087b2008-04-28 02:12:27 -07003142 if (flags & MPOL_F_STATIC_NODES)
Lee Schermerhorn22919902008-04-28 02:13:22 -07003143 p += snprintf(p, buffer + maxlen - p, "static");
3144 else if (flags & MPOL_F_RELATIVE_NODES)
3145 p += snprintf(p, buffer + maxlen - p, "relative");
David Rientjesf5b087b2008-04-28 02:12:27 -07003146 }
3147
Tejun Heo9e763e02015-02-13 14:38:02 -08003148 if (!nodes_empty(nodes))
3149 p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
3150 nodemask_pr_args(&nodes));
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003151}