// SPDX-License-Identifier: GPL-2.0-only
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 *
 * NUMA policy allows the user to give hints about which node(s) memory
 * should be allocated on.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave	Allocate memory interleaved over a set of nodes,
 *		with normal fallback if it fails.
 *		For VMA based allocations this interleaves based on the
 *		offset into the backing object or offset into the mapping
 *		for anonymous memory. For process policy a process counter
 *		is used.
 *
 * bind		Only allocate memory on a specific set of nodes,
 *		no fallback.
 *		FIXME: memory is allocated starting with the first node
 *		to the last. It would be better if bind would truly restrict
 *		the allocation to memory nodes instead
 *
 * preferred	Try a specific node first before normal fallback.
 *		As a special case NUMA_NO_NODE here means do the allocation
 *		on the local CPU. This is normally identical to default,
 *		but useful to set in a VMA when you have a non default
 *		process policy.
 *
 * default	Allocate on the local node first, or when on a VMA
 *		use the process policy. This is what Linux always did
 *		in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/
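/*
 * Illustrative userspace sketch (not part of this kernel file): the policies
 * described above are requested via the set_mempolicy() and mbind() system
 * calls (declared in <numaif.h> when libnuma is installed).  Error handling
 * is omitted, the two-node mask is an assumed example topology, and addr and
 * length stand for an existing mapping.
 *
 *	#include <numaif.h>
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);	// nodes 0 and 1
 *
 *	// Process policy: interleave new allocations across nodes 0 and 1.
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, sizeof(mask) * 8);
 *
 *	// VMA policy: bind an already mapped range to node 0 only.
 *	unsigned long node0 = 1UL << 0;
 *	mbind(addr, length, MPOL_BIND, &node0, sizeof(node0) * 8, 0);
 *
 * For faults in [addr, addr + length) the VMA policy installed by mbind()
 * then takes precedence over the process policy, as noted above.
 */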

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>
#include <linux/swapops.h>

#include <asm/tlbflush.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

/**
 * numa_map_to_online_node - Find closest online node
 * @node: Node id to start the search
 *
 * Lookup the next closest node by distance if @node is not online.
 */
int numa_map_to_online_node(int node)
{
	int min_dist = INT_MAX, dist, n, min_node;

	if (node == NUMA_NO_NODE || node_online(node))
		return node;

	min_node = node;
	for_each_online_node(n) {
		dist = node_distance(node, n);
		if (dist < min_dist) {
			min_dist = dist;
			min_node = n;
		}
	}

	return min_node;
}
EXPORT_SYMBOL_GPL(numa_map_to_online_node);
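/*
 * Worked example (illustrative; the topology is an assumption): on a system
 * where nodes 0 and 1 are online and node 2 is not,
 *
 *	numa_map_to_online_node(NUMA_NO_NODE)	returns NUMA_NO_NODE
 *	numa_map_to_online_node(1)		returns 1 (already online)
 *	numa_map_to_online_node(2)		returns whichever of 0 or 1 has
 *						the smaller node_distance(2, n)
 */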

struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
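/*
 * Illustrative example (the cpuset below is an assumption): with
 * MPOL_F_RELATIVE_NODES the user nodemask is interpreted relative to the
 * currently allowed nodes.  If the cpuset allows nodes {4,5,6,7} (weight 4)
 * and the user asked for relative nodes {0,1}, nodes_fold() wraps the
 * request into 4 bits and nodes_onto() maps it onto the allowed set,
 * yielding {4,5}.
 */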

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_semaphore for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}

/*
 * This function just creates a new policy, does some checks and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes) ||
		    (flags & MPOL_F_STATIC_NODES) ||
		    (flags & MPOL_F_RELATIVE_NODES))
			return ERR_PTR(-EINVAL);
		mode = MPOL_PREFERRED;
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}
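/*
 * Typical construction sequence (a sketch of what do_set_mempolicy() and
 * do_mbind() below actually do, with error handling trimmed):
 *
 *	new = mpol_new(mode, flags, nodes);
 *	task_lock(current);
 *	ret = mpol_set_nodemask(new, nodes, scratch);
 *	task_unlock(current);
 *
 * mpol_new() only validates and allocates; the nodemask is not applied
 * until mpol_set_nodemask() runs under the task's alloc_lock.
 */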

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}

static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
								*nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	pol->v.nodes = tmp;
}

static void mpol_rebind_preferred(struct mempolicy *pol,
						const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * Per-vma policies are protected by mmap_sem. Allocations using per-task
 * policies are protected by task->mems_allowed_seq to prevent a premature
 * OOM/allocation failure due to parallel nodemask modification.
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) && !(pol->flags & MPOL_F_LOCAL) &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	mpol_ops[pol->mode].rebind(pol, newmask);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */
void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */
void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new);
	up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

struct queue_pages {
	struct list_head *pagelist;
	unsigned long flags;
	nodemask_t *nmask;
	unsigned long start;
	unsigned long end;
	struct vm_area_struct *first;
};

/*
 * Check if the page's nid is in qp->nmask.
 *
 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
 * in the invert of qp->nmask.
 */
static inline bool queue_pages_required(struct page *page,
					struct queue_pages *qp)
{
	int nid = page_to_nid(page);
	unsigned long flags = qp->flags;

	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
}
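/*
 * For example (illustrative): with qp->nmask = {0,1} and MPOL_MF_INVERT
 * clear, a page on node 0 passes the check and a page on node 3 does not;
 * with MPOL_MF_INVERT set the result is reversed, so only pages *outside*
 * the nodemask are considered for queueing.
 */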

/*
 * queue_pages_pmd() has four possible return values:
 * 0 - pages are placed on the right node or queued successfully.
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * 2 - THP was split.
 * -EIO - the entry is a migration entry, or only MPOL_MF_STRICT was
 *        specified and an existing page was already on a node that does not
 *        follow the policy.
 */
static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	int ret = 0;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags;

	if (unlikely(is_pmd_migration_entry(*pmd))) {
		ret = -EIO;
		goto unlock;
	}
	page = pmd_page(*pmd);
	if (is_huge_zero_page(page)) {
		spin_unlock(ptl);
		__split_huge_pmd(walk->vma, pmd, addr, false, NULL);
		ret = 2;
		goto out;
	}
	if (!queue_pages_required(page, qp))
		goto unlock;

	flags = qp->flags;
	/* go to thp migration */
	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
		if (!vma_migratable(walk->vma) ||
		    migrate_page_add(page, qp->pagelist, flags)) {
			ret = 1;
			goto unlock;
		}
	} else
		ret = -EIO;
unlock:
	spin_unlock(ptl);
out:
	return ret;
}

/*
 * Scan through pages checking if pages follow certain conditions,
 * and move them to the pagelist if they do.
 *
 * queue_pages_pte_range() has three possible return values:
 * 0 - pages are placed on the right node or queued successfully.
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
 *        on a node that does not follow the policy.
 */
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	int ret;
	bool has_unmovable = false;
	pte_t *pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
		if (ret != 2)
			return ret;
	}
	/* THP was split, fall through to pte walk */

	if (pmd_trans_unstable(pmd))
		return 0;

	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		if (!queue_pages_required(page, qp))
			continue;
		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
			/* MPOL_MF_STRICT must be specified if we get here */
			if (!vma_migratable(vma)) {
				has_unmovable = true;
				break;
			}

			/*
			 * Do not abort immediately since there may be
			 * temporary off LRU pages in the range.  Still
			 * need to migrate other LRU pages.
			 */
			if (migrate_page_add(page, qp->pagelist, flags))
				has_unmovable = true;
		} else
			break;
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	if (has_unmovable)
		return 1;

	return addr != end ? -EIO : 0;
}

static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
			       unsigned long addr, unsigned long end,
			       struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto unlock;
	page = pte_page(entry);
	if (!queue_pages_required(page, qp))
		goto unlock;
	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
		isolate_huge_page(page, qp->pagelist);
unlock:
	spin_unlock(ptl);
#else
	BUG();
#endif
	return 0;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	int nr_updated;

	nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

static int queue_pages_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct queue_pages *qp = walk->private;
	unsigned long endvma = vma->vm_end;
	unsigned long flags = qp->flags;

	/* range check first */
	VM_BUG_ON((vma->vm_start > start) || (vma->vm_end < end));

	if (!qp->first) {
		qp->first = vma;
		if (!(flags & MPOL_MF_DISCONTIG_OK) &&
			(qp->start < vma->vm_start))
			/* hole at head side of range */
			return -EFAULT;
	}
	if (!(flags & MPOL_MF_DISCONTIG_OK) &&
		((vma->vm_end < qp->end) &&
		(!vma->vm_next || vma->vm_end < vma->vm_next->vm_start)))
		/* hole at middle or tail of range */
		return -EFAULT;

	/*
	 * Need to check MPOL_MF_STRICT to return -EIO if possible
	 * regardless of vma_migratable
	 */
	if (!vma_migratable(vma) &&
	    !(flags & MPOL_MF_STRICT))
		return 1;

	if (endvma > end)
		endvma = end;

	if (flags & MPOL_MF_LAZY) {
		/* Similar to task_numa_work, skip inaccessible VMAs */
		if (!is_vm_hugetlb_page(vma) &&
			(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) &&
			!(vma->vm_flags & VM_MIXEDMAP))
			change_prot_numa(vma, start, endvma);
		return 1;
	}

	/* queue pages from current vma */
	if (flags & MPOL_MF_VALID)
		return 0;
	return 1;
}

static const struct mm_walk_ops queue_pages_walk_ops = {
	.hugetlb_entry		= queue_pages_hugetlb,
	.pmd_entry		= queue_pages_pte_range,
	.test_walk		= queue_pages_test_walk,
};

/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued to the pagelist passed
 * via @pagelist.
 *
 * queue_pages_range() has three possible return values:
 * 1 - there is an unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * 0 - pages were queued successfully or no misplaced page was found.
 * errno - e.g. misplaced pages with MPOL_MF_STRICT specified (-EIO), or the
 *         memory range specified by nodemask and maxnode points outside
 *         your accessible address space (-EFAULT)
 */
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		nodemask_t *nodes, unsigned long flags,
		struct list_head *pagelist)
{
	int err;
	struct queue_pages qp = {
		.pagelist = pagelist,
		.flags = flags,
		.nmask = nodes,
		.start = start,
		.end = end,
		.first = NULL,
	};

	err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);

	if (!qp.first)
		/* whole range in hole */
		err = -EFAULT;

	return err;
}
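/*
 * Caller sketch (simplified from do_mbind() further below; error handling
 * trimmed):
 *
 *	LIST_HEAD(pagelist);
 *
 *	ret = queue_pages_range(mm, start, end, nmask,
 *				flags | MPOL_MF_INVERT, &pagelist);
 *	if (!list_empty(&pagelist))
 *		nr_failed = migrate_pages(&pagelist, new_page, NULL, start,
 *					  MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
 *
 * Note MPOL_MF_INVERT: pages that do *not* match the requested nodemask are
 * the ones collected on @pagelist and later handed to migrate_pages().
 */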

/*
 * Apply policy to a single VMA
 * This must be called with the mmap_sem held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
						struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new; /* protected by mmap_sem */
	mpol_put(old);

	return 0;
 err_out:
	mpol_put(new);
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	VM_BUG_ON(!vma);

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend   = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				 vma->anon_vma, vma->vm_file, pgoff,
				 new_pol, vma->vm_userfaultfd_ctx);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			if (mpol_equal(vma_policy(vma), new_pol))
				continue;
			/* vma_merge() joined vma && vma->next, case 8 */
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
 replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}

	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		mpol_put(new);
		goto out;
	}
	old = current->mempolicy;
	current->mempolicy = new;
	if (new && new->mode == MPOL_INTERLEAVE)
		current->il_prev = MAX_NUMNODES-1;
	task_unlock(current);
	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	int locked = 1;
	err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	if (locked)
		up_read(&mm->mmap_sem);
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask  = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			/*
			 * Take a refcount on the mpol, lookup_node()
			 * will drop the mmap_sem, so after calling
			 * lookup_node() only "pol" remains valid, "vma"
			 * is stale.
			 */
			pol_refcount = pol;
			vma = NULL;
			mpol_get(pol);
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = next_node_in(current->il_prev, pol->v.nodes);
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&mm->mmap_sem);
	if (pol_refcount)
		mpol_put(pol_refcount);
	return err;
}
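/*
 * Illustrative userspace counterpart (assumed to use <numaif.h> from
 * libnuma; error handling omitted): querying which node currently backs a
 * mapped address uses the MPOL_F_NODE | MPOL_F_ADDR combination handled
 * above.
 *
 *	int node;
 *	get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR);
 *	// on success, 'node' holds the NUMA node of the page backing 'addr'
 */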

#ifdef CONFIG_MIGRATION
/*
 * page migration, thp tail pages can be passed.
 */
static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	struct page *head = compound_head(page);
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
		if (!isolate_lru_page(head)) {
			list_add_tail(&head->lru, pagelist);
			mod_node_page_state(page_pgdat(head),
				NR_ISOLATED_ANON + page_is_file_cache(head),
				hpage_nr_pages(head));
		} else if (flags & MPOL_MF_STRICT) {
			/*
			 * Non-movable page may reach here.  And, there may be
			 * temporary off LRU pages or non-LRU movable pages.
			 * Treat them as unmovable pages since they can't be
			 * isolated, so they can't be moved at the moment.  It
			 * should return -EIO for this case too.
			 */
			return -EIO;
		}
	}

	return 0;
}

/* page allocation callback for NUMA node migration */
struct page *alloc_new_node_page(struct page *page, unsigned long node)
{
	if (PageHuge(page))
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					node);
	else if (PageTransHuge(page)) {
		struct page *thp;

		thp = alloc_pages_node(node,
			(GFP_TRANSHUGE | __GFP_THISNODE),
			HPAGE_PMD_ORDER);
		if (!thp)
			return NULL;
		prep_transhuge_page(thp);
		return thp;
	} else
		return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
						    __GFP_THISNODE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration.  Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
	 */
	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, alloc_new_node_page, NULL, dest,
					MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	int busy = 0;
	int err;
	nodemask_t tmp;

	err = migrate_prep();
	if (err)
		return err;

	down_read(&mm->mmap_sem);

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fallback to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory off of that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning tmp, we at least have the
	 * most recent <s, d> pair that moved.  If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */

	tmp = *from;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = NUMA_NO_NODE;
		int dest = 0;

		for_each_node_mask(s, tmp) {

			/*
			 * do_migrate_pages() tries to maintain the relative
			 * node relationship of the pages established between
			 * threads and memory areas.
			 *
			 * However if the number of source nodes is not equal to
			 * the number of destination nodes we can not preserve
			 * this node relative relationship.  In that case, skip
			 * copying memory from a node that is in the destination
			 * mask.
			 *
			 * Example: [2,3,4] -> [3,4,5] moves everything.
			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
			 */

			if ((nodes_weight(*from) != nodes_weight(*to)) &&
						(node_isset(s, *to)))
				continue;

			d = node_remap(s, *from, *to);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == NUMA_NO_NODE)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;
}
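/*
 * Worked example of the pair selection above (node numbers are assumptions
 * for illustration): migrating from = {0,1} to = {2,3}, the first scan of
 * tmp = {0,1} finds <0,2>, a destination not in tmp, so node 0 is drained
 * to node 2 and cleared from tmp; the next pass moves 1 -> 3.  With
 * from = {0,1} and to = {1,2}, the scan prefers <1,2> (an empty slot) over
 * <0,1>, so node 1 is emptied into node 2 before node 0's pages land on
 * node 1, which avoids piling both onto node 1 at once.
 */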

/*
 * Allocate a new page for page migration based on vma policy.
 * Start by assuming the page is mapped by the same vma as contains @start.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_page(struct page *page, unsigned long start)
{
	struct vm_area_struct *vma;
	unsigned long uninitialized_var(address);

	vma = find_vma(current->mm, start);
	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	if (PageHuge(page)) {
		return alloc_huge_page_vma(page_hstate(compound_head(page)),
				vma, address);
	} else if (PageTransHuge(page)) {
		struct page *thp;

		thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
					 HPAGE_PMD_ORDER);
		if (!thp)
			return NULL;
		prep_transhuge_page(thp);
		return thp;
	}
	/*
	 * if !vma, alloc_page_vma() will use task or system default policy
	 */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
			vma, address);
}
Christoph Lameterb20a3502006-03-22 00:09:12 -08001238#else
1239
Yang Shia53190a2019-08-13 15:37:18 -07001240static int migrate_page_add(struct page *page, struct list_head *pagelist,
Christoph Lameterb20a3502006-03-22 00:09:12 -08001241 unsigned long flags)
1242{
Yang Shia53190a2019-08-13 15:37:18 -07001243 return -EIO;
Christoph Lameterb20a3502006-03-22 00:09:12 -08001244}
1245
Andrew Morton0ce72d42012-05-29 15:06:24 -07001246int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1247 const nodemask_t *to, int flags)
Christoph Lameterb20a3502006-03-22 00:09:12 -08001248{
1249 return -ENOSYS;
1250}
Christoph Lameter95a402c2006-06-23 02:03:53 -07001251
Michal Hocko666feb22018-04-10 16:30:03 -07001252static struct page *new_page(struct page *page, unsigned long start)
Christoph Lameter95a402c2006-06-23 02:03:53 -07001253{
1254 return NULL;
1255}
Christoph Lameterb20a3502006-03-22 00:09:12 -08001256#endif
1257
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07001258static long do_mbind(unsigned long start, unsigned long len,
David Rientjes028fec42008-04-28 02:12:25 -07001259 unsigned short mode, unsigned short mode_flags,
1260 nodemask_t *nmask, unsigned long flags)
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001261{
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001262 struct mm_struct *mm = current->mm;
1263 struct mempolicy *new;
1264 unsigned long end;
1265 int err;
Yang Shid8835442019-08-13 15:37:15 -07001266 int ret;
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001267 LIST_HEAD(pagelist);
1268
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001269 if (flags & ~(unsigned long)MPOL_MF_VALID)
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001270 return -EINVAL;
Christoph Lameter74c00242006-03-14 19:50:21 -08001271 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001272 return -EPERM;
1273
1274 if (start & ~PAGE_MASK)
1275 return -EINVAL;
1276
1277 if (mode == MPOL_DEFAULT)
1278 flags &= ~MPOL_MF_STRICT;
1279
1280 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1281 end = start + len;
1282
1283 if (end < start)
1284 return -EINVAL;
1285 if (end == start)
1286 return 0;
1287
David Rientjes028fec42008-04-28 02:12:25 -07001288 new = mpol_new(mode, mode_flags, nmask);
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001289 if (IS_ERR(new))
1290 return PTR_ERR(new);
1291
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001292 if (flags & MPOL_MF_LAZY)
1293 new->flags |= MPOL_F_MOF;
1294
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001295 /*
1296 * If we are using the default policy then operation
1297 * on discontinuous address spaces is okay after all
1298 */
1299 if (!new)
1300 flags |= MPOL_MF_DISCONTIG_OK;
1301
David Rientjes028fec42008-04-28 02:12:25 -07001302 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1303 start, start + len, mode, mode_flags,
David Rientjes00ef2d22013-02-22 16:35:36 -08001304 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001305
Christoph Lameter0aedadf2008-11-06 12:53:30 -08001306 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1307
1308 err = migrate_prep();
1309 if (err)
KOSAKI Motohirob05ca732009-10-26 16:49:59 -07001310 goto mpol_out;
Christoph Lameter0aedadf2008-11-06 12:53:30 -08001311 }
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07001312 {
1313 NODEMASK_SCRATCH(scratch);
1314 if (scratch) {
1315 down_write(&mm->mmap_sem);
1316 task_lock(current);
1317 err = mpol_set_nodemask(new, nmask, scratch);
1318 task_unlock(current);
1319 if (err)
1320 up_write(&mm->mmap_sem);
1321 } else
1322 err = -ENOMEM;
1323 NODEMASK_SCRATCH_FREE(scratch);
1324 }
KOSAKI Motohirob05ca732009-10-26 16:49:59 -07001325 if (err)
1326 goto mpol_out;
1327
Yang Shid8835442019-08-13 15:37:15 -07001328 ret = queue_pages_range(mm, start, end, nmask,
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001329 flags | MPOL_MF_INVERT, &pagelist);
Yang Shid8835442019-08-13 15:37:15 -07001330
1331 if (ret < 0) {
Yang Shia85dfc32019-11-15 17:34:33 -08001332 err = ret;
Yang Shid8835442019-08-13 15:37:15 -07001333 goto up_out;
1334 }
1335
1336 err = mbind_range(mm, start, end, new);
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001337
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001338 if (!err) {
1339 int nr_failed = 0;
1340
Minchan Kimcf608ac2010-10-26 14:21:29 -07001341 if (!list_empty(&pagelist)) {
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001342 WARN_ON_ONCE(flags & MPOL_MF_LAZY);
Hugh Dickinsd05f0cdcb2014-06-23 13:22:07 -07001343 nr_failed = migrate_pages(&pagelist, new_page, NULL,
1344 start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
Minchan Kimcf608ac2010-10-26 14:21:29 -07001345 if (nr_failed)
Naoya Horiguchi74060e42013-09-11 14:22:06 -07001346 putback_movable_pages(&pagelist);
Minchan Kimcf608ac2010-10-26 14:21:29 -07001347 }
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001348
Yang Shid8835442019-08-13 15:37:15 -07001349 if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001350 err = -EIO;
Yang Shia85dfc32019-11-15 17:34:33 -08001351 } else {
Yang Shid8835442019-08-13 15:37:15 -07001352up_out:
Yang Shia85dfc32019-11-15 17:34:33 -08001353 if (!list_empty(&pagelist))
1354 putback_movable_pages(&pagelist);
1355 }
1356
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001357 up_write(&mm->mmap_sem);
Yang Shid8835442019-08-13 15:37:15 -07001358mpol_out:
Lee Schermerhornf0be3d32008-04-28 02:13:08 -07001359 mpol_put(new);
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001360 return err;
1361}
1362
Christoph Lameter39743882006-01-08 01:00:51 -08001363/*
Christoph Lameter8bccd852005-10-29 18:16:59 -07001364 * User space interface with variable sized bitmaps for nodelists.
1365 */
1366
1367/* Copy a node mask from user space. */
Christoph Lameter39743882006-01-08 01:00:51 -08001368static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
Christoph Lameter8bccd852005-10-29 18:16:59 -07001369 unsigned long maxnode)
1370{
1371 unsigned long k;
Yisheng Xie56521e72018-01-31 16:16:11 -08001372 unsigned long t;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001373 unsigned long nlongs;
1374 unsigned long endmask;
1375
1376 --maxnode;
1377 nodes_clear(*nodes);
1378 if (maxnode == 0 || !nmask)
1379 return 0;
Andi Kleena9c930b2006-02-20 18:27:59 -08001380 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
Chris Wright636f13c2006-02-17 13:59:36 -08001381 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001382
1383 nlongs = BITS_TO_LONGS(maxnode);
1384 if ((maxnode % BITS_PER_LONG) == 0)
1385 endmask = ~0UL;
1386 else
1387 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1388
Yisheng Xie56521e72018-01-31 16:16:11 -08001389 /*
1390 * When the user specified more nodes than supported just check
1391 * if the non supported part is all zero.
1392 *
1393 * If maxnode has more longs than MAX_NUMNODES, check
1394 * the bits in that area first, and then go on to check
1395 * the remaining bits, which are equal to or bigger than MAX_NUMNODES.
1396 * Otherwise, just check bits [MAX_NUMNODES, maxnode).
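 *
 * For example (illustrative), with MAX_NUMNODES == 1024 a caller may
 * pass maxnode == 2048; the request is accepted only if all bits at
 * or above bit 1024 in the user mask are clear, otherwise -EINVAL is
 * returned.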
1397 */
Christoph Lameter8bccd852005-10-29 18:16:59 -07001398 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
Christoph Lameter8bccd852005-10-29 18:16:59 -07001399 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
Christoph Lameter8bccd852005-10-29 18:16:59 -07001400 if (get_user(t, nmask + k))
1401 return -EFAULT;
1402 if (k == nlongs - 1) {
1403 if (t & endmask)
1404 return -EINVAL;
1405 } else if (t)
1406 return -EINVAL;
1407 }
1408 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1409 endmask = ~0UL;
1410 }
1411
Yisheng Xie56521e72018-01-31 16:16:11 -08001412 if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) {
1413 unsigned long valid_mask = endmask;
1414
1415 valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
1416 if (get_user(t, nmask + nlongs - 1))
1417 return -EFAULT;
1418 if (t & valid_mask)
1419 return -EINVAL;
1420 }
1421
Christoph Lameter8bccd852005-10-29 18:16:59 -07001422 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1423 return -EFAULT;
1424 nodes_addr(*nodes)[nlongs-1] &= endmask;
1425 return 0;
1426}
1427
1428/* Copy a kernel node mask to user space */
1429static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1430 nodemask_t *nodes)
1431{
1432 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
Ralph Campbell050c17f2019-02-20 22:18:58 -08001433 unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001434
1435 if (copy > nbytes) {
1436 if (copy > PAGE_SIZE)
1437 return -EINVAL;
1438 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1439 return -EFAULT;
1440 copy = nbytes;
1441 }
1442 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1443}
1444
Dominik Brodowskie7dc9ad62018-03-17 16:12:22 +01001445static long kernel_mbind(unsigned long start, unsigned long len,
1446 unsigned long mode, const unsigned long __user *nmask,
1447 unsigned long maxnode, unsigned int flags)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001448{
1449 nodemask_t nodes;
1450 int err;
David Rientjes028fec42008-04-28 02:12:25 -07001451 unsigned short mode_flags;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001452
Andrey Konovalov057d33892019-09-25 16:48:30 -07001453 start = untagged_addr(start);
David Rientjes028fec42008-04-28 02:12:25 -07001454 mode_flags = mode & MPOL_MODE_FLAGS;
1455 mode &= ~MPOL_MODE_FLAGS;
David Rientjesa3b51e02008-04-28 02:12:23 -07001456 if (mode >= MPOL_MAX)
1457 return -EINVAL;
David Rientjes4c50bc02008-04-28 02:12:30 -07001458 if ((mode_flags & MPOL_F_STATIC_NODES) &&
1459 (mode_flags & MPOL_F_RELATIVE_NODES))
1460 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001461 err = get_nodes(&nodes, nmask, maxnode);
1462 if (err)
1463 return err;
David Rientjes028fec42008-04-28 02:12:25 -07001464 return do_mbind(start, len, mode, mode_flags, &nodes, flags);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001465}
1466
Dominik Brodowskie7dc9ad62018-03-17 16:12:22 +01001467SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1468 unsigned long, mode, const unsigned long __user *, nmask,
1469 unsigned long, maxnode, unsigned int, flags)
1470{
1471 return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1472}
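/*
 * A minimal userspace sketch (illustrative, error handling omitted),
 * using the mbind() wrapper declared in libnuma's <numaif.h>:
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	mbind(p, len, MPOL_BIND, &mask, sizeof(mask) * 8, MPOL_MF_MOVE);
 *
 * This restricts [p, p + len) to nodes 0 and 1 and migrates any pages
 * already faulted in, subject to the checks in do_mbind() above.
 */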
1473
Christoph Lameter8bccd852005-10-29 18:16:59 -07001474/* Set the process memory policy */
Dominik Brodowskiaf03c4a2018-03-17 16:20:01 +01001475static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1476 unsigned long maxnode)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001477{
1478 int err;
1479 nodemask_t nodes;
David Rientjes028fec42008-04-28 02:12:25 -07001480 unsigned short flags;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001481
David Rientjes028fec42008-04-28 02:12:25 -07001482 flags = mode & MPOL_MODE_FLAGS;
1483 mode &= ~MPOL_MODE_FLAGS;
1484 if ((unsigned int)mode >= MPOL_MAX)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001485 return -EINVAL;
David Rientjes4c50bc02008-04-28 02:12:30 -07001486 if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1487 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001488 err = get_nodes(&nodes, nmask, maxnode);
1489 if (err)
1490 return err;
David Rientjes028fec42008-04-28 02:12:25 -07001491 return do_set_mempolicy(mode, flags, &nodes);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001492}
1493
Dominik Brodowskiaf03c4a2018-03-17 16:20:01 +01001494SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1495 unsigned long, maxnode)
1496{
1497 return kernel_set_mempolicy(mode, nmask, maxnode);
1498}
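/*
 * Illustrative userspace counterpart (via libnuma's <numaif.h>):
 *
 *	unsigned long mask = 1UL << 2;
 *	set_mempolicy(MPOL_PREFERRED, &mask, sizeof(mask) * 8);
 *
 * makes node 2 the preferred node for subsequent allocations of the
 * calling task, with normal fallback to other nodes when it is full.
 */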
1499
Dominik Brodowskib6e9b0b2018-03-17 16:00:25 +01001500static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1501 const unsigned long __user *old_nodes,
1502 const unsigned long __user *new_nodes)
Christoph Lameter39743882006-01-08 01:00:51 -08001503{
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001504 struct mm_struct *mm = NULL;
Christoph Lameter39743882006-01-08 01:00:51 -08001505 struct task_struct *task;
Christoph Lameter39743882006-01-08 01:00:51 -08001506 nodemask_t task_nodes;
1507 int err;
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001508 nodemask_t *old;
1509 nodemask_t *new;
1510 NODEMASK_SCRATCH(scratch);
Christoph Lameter39743882006-01-08 01:00:51 -08001511
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001512 if (!scratch)
1513 return -ENOMEM;
Christoph Lameter39743882006-01-08 01:00:51 -08001514
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001515 old = &scratch->mask1;
1516 new = &scratch->mask2;
1517
1518 err = get_nodes(old, old_nodes, maxnode);
Christoph Lameter39743882006-01-08 01:00:51 -08001519 if (err)
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001520 goto out;
1521
1522 err = get_nodes(new, new_nodes, maxnode);
1523 if (err)
1524 goto out;
Christoph Lameter39743882006-01-08 01:00:51 -08001525
1526 /* Find the mm_struct */
Zeng Zhaoming55cfaa32010-12-02 14:31:13 -08001527 rcu_read_lock();
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -07001528 task = pid ? find_task_by_vpid(pid) : current;
Christoph Lameter39743882006-01-08 01:00:51 -08001529 if (!task) {
Zeng Zhaoming55cfaa32010-12-02 14:31:13 -08001530 rcu_read_unlock();
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001531 err = -ESRCH;
1532 goto out;
Christoph Lameter39743882006-01-08 01:00:51 -08001533 }
Christoph Lameter3268c632012-03-21 16:34:06 -07001534 get_task_struct(task);
Christoph Lameter39743882006-01-08 01:00:51 -08001535
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001536 err = -EINVAL;
Christoph Lameter39743882006-01-08 01:00:51 -08001537
1538 /*
Otto Ebeling31367462017-11-15 17:38:14 -08001539 * Check if this process has the right to modify the specified process.
1540 * Use the regular "ptrace_may_access()" checks.
Christoph Lameter39743882006-01-08 01:00:51 -08001541 */
Otto Ebeling31367462017-11-15 17:38:14 -08001542 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
David Howellsc69e8d92008-11-14 10:39:19 +11001543 rcu_read_unlock();
Christoph Lameter39743882006-01-08 01:00:51 -08001544 err = -EPERM;
Christoph Lameter3268c632012-03-21 16:34:06 -07001545 goto out_put;
Christoph Lameter39743882006-01-08 01:00:51 -08001546 }
David Howellsc69e8d92008-11-14 10:39:19 +11001547 rcu_read_unlock();
Christoph Lameter39743882006-01-08 01:00:51 -08001548
1549 task_nodes = cpuset_mems_allowed(task);
1550 /* Is the user allowed to access the target nodes? */
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001551 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
Christoph Lameter39743882006-01-08 01:00:51 -08001552 err = -EPERM;
Christoph Lameter3268c632012-03-21 16:34:06 -07001553 goto out_put;
Christoph Lameter39743882006-01-08 01:00:51 -08001554 }
1555
Yisheng Xie0486a382018-01-31 16:16:15 -08001556 task_nodes = cpuset_mems_allowed(current);
1557 nodes_and(*new, *new, task_nodes);
1558 if (nodes_empty(*new))
Christoph Lameter3268c632012-03-21 16:34:06 -07001559 goto out_put;
Yisheng Xie0486a382018-01-31 16:16:15 -08001560
David Quigley86c3a762006-06-23 02:04:02 -07001561 err = security_task_movememory(task);
1562 if (err)
Christoph Lameter3268c632012-03-21 16:34:06 -07001563 goto out_put;
David Quigley86c3a762006-06-23 02:04:02 -07001564
Christoph Lameter3268c632012-03-21 16:34:06 -07001565 mm = get_task_mm(task);
1566 put_task_struct(task);
Sasha Levinf2a9ef82012-04-25 16:01:52 -07001567
1568 if (!mm) {
Christoph Lameter3268c632012-03-21 16:34:06 -07001569 err = -EINVAL;
Sasha Levinf2a9ef82012-04-25 16:01:52 -07001570 goto out;
1571 }
1572
1573 err = do_migrate_pages(mm, old, new,
1574 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
Christoph Lameter3268c632012-03-21 16:34:06 -07001575
1576 mmput(mm);
1577out:
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001578 NODEMASK_SCRATCH_FREE(scratch);
1579
Christoph Lameter39743882006-01-08 01:00:51 -08001580 return err;
Christoph Lameter3268c632012-03-21 16:34:06 -07001581
1582out_put:
1583 put_task_struct(task);
1584 goto out;
1585
Christoph Lameter39743882006-01-08 01:00:51 -08001586}
1587
Dominik Brodowskib6e9b0b2018-03-17 16:00:25 +01001588SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1589 const unsigned long __user *, old_nodes,
1590 const unsigned long __user *, new_nodes)
1591{
1592 return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1593}
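/*
 * Illustrative userspace usage (via libnuma's <numaif.h>):
 *
 *	unsigned long old = 1UL << 0, new = 1UL << 1;
 *	migrate_pages(pid, sizeof(old) * 8, &old, &new);
 *
 * asks the kernel to move the pages of @pid that currently live on
 * node 0 over to node 1, subject to the ptrace and cpuset permission
 * checks in kernel_migrate_pages() above.
 */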
1594
Christoph Lameter39743882006-01-08 01:00:51 -08001595
Christoph Lameter8bccd852005-10-29 18:16:59 -07001596/* Retrieve NUMA policy */
Dominik Brodowskiaf03c4a2018-03-17 16:20:01 +01001597static int kernel_get_mempolicy(int __user *policy,
1598 unsigned long __user *nmask,
1599 unsigned long maxnode,
1600 unsigned long addr,
1601 unsigned long flags)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001602{
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07001603 int err;
1604 int uninitialized_var(pval);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001605 nodemask_t nodes;
1606
Andrey Konovalov057d33892019-09-25 16:48:30 -07001607 addr = untagged_addr(addr);
1608
Ralph Campbell050c17f2019-02-20 22:18:58 -08001609 if (nmask != NULL && maxnode < nr_node_ids)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001610 return -EINVAL;
1611
1612 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1613
1614 if (err)
1615 return err;
1616
1617 if (policy && put_user(pval, policy))
1618 return -EFAULT;
1619
1620 if (nmask)
1621 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1622
1623 return err;
1624}
1625
Dominik Brodowskiaf03c4a2018-03-17 16:20:01 +01001626SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1627 unsigned long __user *, nmask, unsigned long, maxnode,
1628 unsigned long, addr, unsigned long, flags)
1629{
1630 return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1631}
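/*
 * Illustrative userspace usage (via libnuma's <numaif.h>):
 *
 *	int mode;
 *	unsigned long mask[16];		/* 1024 node bits, assumed enough */
 *	get_mempolicy(&mode, mask, sizeof(mask) * 8, NULL, 0);
 *
 * retrieves the calling task's policy mode and nodemask; passing
 * MPOL_F_ADDR in flags together with an address queries the VMA
 * policy covering that address instead.
 */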
1632
Linus Torvalds1da177e2005-04-16 15:20:36 -07001633#ifdef CONFIG_COMPAT
1634
Heiko Carstensc93e0f62014-03-03 16:32:26 +01001635COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1636 compat_ulong_t __user *, nmask,
1637 compat_ulong_t, maxnode,
1638 compat_ulong_t, addr, compat_ulong_t, flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001639{
1640 long err;
1641 unsigned long __user *nm = NULL;
1642 unsigned long nr_bits, alloc_size;
1643 DECLARE_BITMAP(bm, MAX_NUMNODES);
1644
Ralph Campbell050c17f2019-02-20 22:18:58 -08001645 nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001646 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1647
1648 if (nmask)
1649 nm = compat_alloc_user_space(alloc_size);
1650
Dominik Brodowskiaf03c4a2018-03-17 16:20:01 +01001651 err = kernel_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652
1653 if (!err && nmask) {
KAMEZAWA Hiroyuki2bbff6c2011-09-14 16:21:02 -07001654 unsigned long copy_size;
1655 copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1656 err = copy_from_user(bm, nm, copy_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001657 /* ensure entire bitmap is zeroed */
1658 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1659 err |= compat_put_bitmap(nmask, bm, nr_bits);
1660 }
1661
1662 return err;
1663}
1664
Heiko Carstensc93e0f62014-03-03 16:32:26 +01001665COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1666 compat_ulong_t, maxnode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001667{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001668 unsigned long __user *nm = NULL;
1669 unsigned long nr_bits, alloc_size;
1670 DECLARE_BITMAP(bm, MAX_NUMNODES);
1671
1672 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1673 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1674
1675 if (nmask) {
Chris Sallscf01fb92017-04-07 23:48:11 -07001676 if (compat_get_bitmap(bm, nmask, nr_bits))
1677 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678 nm = compat_alloc_user_space(alloc_size);
Chris Sallscf01fb92017-04-07 23:48:11 -07001679 if (copy_to_user(nm, bm, alloc_size))
1680 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001681 }
1682
Dominik Brodowskiaf03c4a2018-03-17 16:20:01 +01001683 return kernel_set_mempolicy(mode, nm, nr_bits+1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001684}
1685
Heiko Carstensc93e0f62014-03-03 16:32:26 +01001686COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1687 compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1688 compat_ulong_t, maxnode, compat_ulong_t, flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690 unsigned long __user *nm = NULL;
1691 unsigned long nr_bits, alloc_size;
Andi Kleendfcd3c02005-10-29 18:15:48 -07001692 nodemask_t bm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693
1694 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1695 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1696
1697 if (nmask) {
Chris Sallscf01fb92017-04-07 23:48:11 -07001698 if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
1699 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001700 nm = compat_alloc_user_space(alloc_size);
Chris Sallscf01fb92017-04-07 23:48:11 -07001701 if (copy_to_user(nm, nodes_addr(bm), alloc_size))
1702 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703 }
1704
Dominik Brodowskie7dc9ad62018-03-17 16:12:22 +01001705 return kernel_mbind(start, len, mode, nm, nr_bits+1, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706}
1707
Dominik Brodowskib6e9b0b2018-03-17 16:00:25 +01001708COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid,
1709 compat_ulong_t, maxnode,
1710 const compat_ulong_t __user *, old_nodes,
1711 const compat_ulong_t __user *, new_nodes)
1712{
1713 unsigned long __user *old = NULL;
1714 unsigned long __user *new = NULL;
1715 nodemask_t tmp_mask;
1716 unsigned long nr_bits;
1717 unsigned long size;
1718
1719 nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
1720 size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1721 if (old_nodes) {
1722 if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
1723 return -EFAULT;
1724 old = compat_alloc_user_space(new_nodes ? size * 2 : size);
1725 if (new_nodes)
1726 new = old + size / sizeof(unsigned long);
1727 if (copy_to_user(old, nodes_addr(tmp_mask), size))
1728 return -EFAULT;
1729 }
1730 if (new_nodes) {
1731 if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
1732 return -EFAULT;
1733 if (new == NULL)
1734 new = compat_alloc_user_space(size);
1735 if (copy_to_user(new, nodes_addr(tmp_mask), size))
1736 return -EFAULT;
1737 }
1738 return kernel_migrate_pages(pid, nr_bits + 1, old, new);
1739}
1740
1741#endif /* CONFIG_COMPAT */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001743struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1744 unsigned long addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001745{
Oleg Nesterov8d902742014-10-09 15:27:45 -07001746 struct mempolicy *pol = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747
1748 if (vma) {
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001749 if (vma->vm_ops && vma->vm_ops->get_policy) {
Oleg Nesterov8d902742014-10-09 15:27:45 -07001750 pol = vma->vm_ops->get_policy(vma, addr);
Mel Gorman00442ad2012-10-08 16:29:20 -07001751 } else if (vma->vm_policy) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001752 pol = vma->vm_policy;
Mel Gorman00442ad2012-10-08 16:29:20 -07001753
1754 /*
1755 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1756 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1757 * count on these policies which will be dropped by
1758 * mpol_cond_put() later
1759 */
1760 if (mpol_needs_cond_ref(pol))
1761 mpol_get(pol);
1762 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001763 }
Oleg Nesterovf15ca782014-10-09 15:27:43 -07001764
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001765 return pol;
1766}
1767
1768/*
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001769 * get_vma_policy(@vma, @addr)
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001770 * @vma: virtual memory area whose policy is sought
1771 * @addr: address in @vma for shared policy lookup
1772 *
1773 * Returns effective policy for a VMA at specified address.
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001774 * Falls back to current->mempolicy or system default policy, as necessary.
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001775 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1776 * count--added by the get_policy() vm_op, as appropriate--to protect against
1777 * freeing by another task. It is the caller's responsibility to free the
1778 * extra reference for shared policies.
1779 */
David Rientjesac79f782019-09-04 12:54:18 -07001780static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001781 unsigned long addr)
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001782{
1783 struct mempolicy *pol = __get_vma_policy(vma, addr);
1784
Oleg Nesterov8d902742014-10-09 15:27:45 -07001785 if (!pol)
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001786 pol = get_task_policy(current);
Oleg Nesterov8d902742014-10-09 15:27:45 -07001787
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788 return pol;
1789}
1790
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001791bool vma_policy_mof(struct vm_area_struct *vma)
Mel Gormanfc3147242013-10-07 11:29:09 +01001792{
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001793 struct mempolicy *pol;
Oleg Nesterovf15ca782014-10-09 15:27:43 -07001794
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001795 if (vma->vm_ops && vma->vm_ops->get_policy) {
1796 bool ret = false;
Mel Gormanfc3147242013-10-07 11:29:09 +01001797
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001798 pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1799 if (pol && (pol->flags & MPOL_F_MOF))
1800 ret = true;
1801 mpol_cond_put(pol);
Mel Gormanfc3147242013-10-07 11:29:09 +01001802
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001803 return ret;
Mel Gormanfc3147242013-10-07 11:29:09 +01001804 }
1805
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001806 pol = vma->vm_policy;
Oleg Nesterov8d902742014-10-09 15:27:45 -07001807 if (!pol)
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001808 pol = get_task_policy(current);
Oleg Nesterov8d902742014-10-09 15:27:45 -07001809
Mel Gormanfc3147242013-10-07 11:29:09 +01001810 return pol->flags & MPOL_F_MOF;
1811}
1812
Lai Jiangshand3eb1572013-02-22 16:33:22 -08001813static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1814{
1815 enum zone_type dynamic_policy_zone = policy_zone;
1816
1817 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1818
1819 /*
1820 * if policy->v.nodes has movable memory only,
1821 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1822 *
1823 * policy->v.nodes is intersected with node_states[N_MEMORY],
1824 * so if the following test fails, it implies
1825 * policy->v.nodes has movable memory only.
1826 */
1827 if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1828 dynamic_policy_zone = ZONE_MOVABLE;
1829
1830 return zone >= dynamic_policy_zone;
1831}
1832
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001833/*
1834 * Return a nodemask representing a mempolicy for filtering nodes for
1835 * page allocation
1836 */
1837static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
Mel Gorman19770b32008-04-28 02:12:18 -07001838{
1839 /* Lower zones don't get a nodemask applied for MPOL_BIND */
Lee Schermerhorn45c47452008-04-28 02:13:12 -07001840 if (unlikely(policy->mode == MPOL_BIND) &&
Lai Jiangshand3eb1572013-02-22 16:33:22 -08001841 apply_policy_zone(policy, gfp_zone(gfp)) &&
Mel Gorman19770b32008-04-28 02:12:18 -07001842 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1843 return &policy->v.nodes;
1844
1845 return NULL;
1846}
1847
Vlastimil Babka04ec6262017-07-06 15:40:03 -07001848/* Return the node id preferred by the given mempolicy, or the given id */
1849static int policy_node(gfp_t gfp, struct mempolicy *policy,
1850 int nd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001851{
Michal Hocko6d840952016-12-12 16:42:23 -08001852 if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
1853 nd = policy->v.preferred_node;
1854 else {
Mel Gorman19770b32008-04-28 02:12:18 -07001855 /*
Michal Hocko6d840952016-12-12 16:42:23 -08001856 * __GFP_THISNODE shouldn't even be used with the bind policy
1857 * because we might easily break the expectation to stay on the
1858 * requested node and not break the policy.
Mel Gorman19770b32008-04-28 02:12:18 -07001859 */
Michal Hocko6d840952016-12-12 16:42:23 -08001860 WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001861 }
Michal Hocko6d840952016-12-12 16:42:23 -08001862
Vlastimil Babka04ec6262017-07-06 15:40:03 -07001863 return nd;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001864}
1865
1866/* Do dynamic interleaving for a process */
1867static unsigned interleave_nodes(struct mempolicy *policy)
1868{
Vlastimil Babka45816682017-07-06 15:39:59 -07001869 unsigned next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001870 struct task_struct *me = current;
1871
Vlastimil Babka45816682017-07-06 15:39:59 -07001872 next = next_node_in(me->il_prev, policy->v.nodes);
David Rientjesf5b087b2008-04-28 02:12:27 -07001873 if (next < MAX_NUMNODES)
Vlastimil Babka45816682017-07-06 15:39:59 -07001874 me->il_prev = next;
1875 return next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001876}
1877
Christoph Lameterdc85da12006-01-18 17:42:36 -08001878/*
1879 * Depending on the memory policy provide a node from which to allocate the
1880 * next slab entry.
1881 */
David Rientjes2a389612014-04-07 15:37:29 -07001882unsigned int mempolicy_slab_node(void)
Christoph Lameterdc85da12006-01-18 17:42:36 -08001883{
Andi Kleene7b691b2012-06-09 02:40:03 -07001884 struct mempolicy *policy;
David Rientjes2a389612014-04-07 15:37:29 -07001885 int node = numa_mem_id();
Andi Kleene7b691b2012-06-09 02:40:03 -07001886
1887 if (in_interrupt())
David Rientjes2a389612014-04-07 15:37:29 -07001888 return node;
Andi Kleene7b691b2012-06-09 02:40:03 -07001889
1890 policy = current->mempolicy;
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07001891 if (!policy || policy->flags & MPOL_F_LOCAL)
David Rientjes2a389612014-04-07 15:37:29 -07001892 return node;
Christoph Lameter765c4502006-09-27 01:50:08 -07001893
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001894 switch (policy->mode) {
1895 case MPOL_PREFERRED:
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07001896 /*
1897 * handled MPOL_F_LOCAL above
1898 */
1899 return policy->v.preferred_node;
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001900
Christoph Lameterdc85da12006-01-18 17:42:36 -08001901 case MPOL_INTERLEAVE:
1902 return interleave_nodes(policy);
1903
Mel Gormandd1a2392008-04-28 02:12:17 -07001904 case MPOL_BIND: {
Mel Gormanc33d6c02016-05-19 17:14:10 -07001905 struct zoneref *z;
1906
Christoph Lameterdc85da12006-01-18 17:42:36 -08001907 /*
1908 * Follow bind policy behavior and start allocation at the
1909 * first node.
1910 */
Mel Gorman19770b32008-04-28 02:12:18 -07001911 struct zonelist *zonelist;
Mel Gorman19770b32008-04-28 02:12:18 -07001912 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
Aneesh Kumar K.Vc9634cf2016-10-07 16:59:12 -07001913 zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
Mel Gormanc33d6c02016-05-19 17:14:10 -07001914 z = first_zones_zonelist(zonelist, highest_zoneidx,
1915 &policy->v.nodes);
Pavel Tatashinc1093b72018-08-21 21:53:32 -07001916 return z->zone ? zone_to_nid(z->zone) : node;
Mel Gormandd1a2392008-04-28 02:12:17 -07001917 }
Christoph Lameterdc85da12006-01-18 17:42:36 -08001918
Christoph Lameterdc85da12006-01-18 17:42:36 -08001919 default:
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001920 BUG();
Christoph Lameterdc85da12006-01-18 17:42:36 -08001921 }
1922}
1923
Andrew Mortonfee83b32016-05-19 17:11:43 -07001924/*
1925 * Do static interleaving for a VMA with known offset @n. Returns the n'th
1926 * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
1927 * number of present nodes.
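 *
 * For example, with pol->v.nodes == {0,2,5} and @n == 4 the result is
 * node 2: 4 % 3 == 1, i.e. the second node of the set.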
1928 */
Laurent Dufour98c70ba2017-09-08 16:12:39 -07001929static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001930{
Andi Kleendfcd3c02005-10-29 18:15:48 -07001931 unsigned nnodes = nodes_weight(pol->v.nodes);
David Rientjesf5b087b2008-04-28 02:12:27 -07001932 unsigned target;
Andrew Mortonfee83b32016-05-19 17:11:43 -07001933 int i;
1934 int nid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935
David Rientjesf5b087b2008-04-28 02:12:27 -07001936 if (!nnodes)
1937 return numa_node_id();
Andrew Mortonfee83b32016-05-19 17:11:43 -07001938 target = (unsigned int)n % nnodes;
1939 nid = first_node(pol->v.nodes);
1940 for (i = 0; i < target; i++)
Andi Kleendfcd3c02005-10-29 18:15:48 -07001941 nid = next_node(nid, pol->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942 return nid;
1943}
1944
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001945/* Determine a node number for interleave */
1946static inline unsigned interleave_nid(struct mempolicy *pol,
1947 struct vm_area_struct *vma, unsigned long addr, int shift)
1948{
1949 if (vma) {
1950 unsigned long off;
1951
Nishanth Aravamudan3b98b082006-08-31 21:27:53 -07001952 /*
1953 * for small pages, there is no difference between
1954 * shift and PAGE_SHIFT, so the bit-shift is safe.
1955 * for huge pages, since vm_pgoff is in units of small
1956 * pages, we need to shift off the always 0 bits to get
1957 * a useful offset.
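 *
 * For example, with 2MB huge pages (shift == 21) and 4kB base
 * pages (PAGE_SHIFT == 12), vm_pgoff is shifted right by 9 so
 * that consecutive huge pages in the mapping get consecutive
 * interleave offsets.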
1958 */
1959 BUG_ON(shift < PAGE_SHIFT);
1960 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001961 off += (addr - vma->vm_start) >> shift;
Laurent Dufour98c70ba2017-09-08 16:12:39 -07001962 return offset_il_node(pol, off);
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001963 } else
1964 return interleave_nodes(pol);
1965}
1966
Chen, Kenneth W00ac59a2006-02-03 21:51:14 +01001967#ifdef CONFIG_HUGETLBFS
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001968/*
Vlastimil Babka04ec6262017-07-06 15:40:03 -07001969 * huge_node(@vma, @addr, @gfp_flags, @mpol)
Fabian Frederickb46e14a2014-06-04 16:08:18 -07001970 * @vma: virtual memory area whose policy is sought
1971 * @addr: address in @vma for shared policy lookup and interleave policy
1972 * @gfp_flags: for requested zone
1973 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1974 * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001975 *
Vlastimil Babka04ec6262017-07-06 15:40:03 -07001976 * Returns a nid suitable for a huge page allocation and a pointer
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001977 * to the struct mempolicy for conditional unref after allocation.
1978 * If the effective policy is 'BIND', returns a pointer to the mempolicy's
1979 * @nodemask for filtering the zonelist.
Miao Xiec0ff7452010-05-24 14:32:08 -07001980 *
Mel Gormand26914d2014-04-03 14:47:24 -07001981 * Must be protected by read_mems_allowed_begin()
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001982 */
Vlastimil Babka04ec6262017-07-06 15:40:03 -07001983int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
1984 struct mempolicy **mpol, nodemask_t **nodemask)
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001985{
Vlastimil Babka04ec6262017-07-06 15:40:03 -07001986 int nid;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001987
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001988 *mpol = get_vma_policy(vma, addr);
Mel Gorman19770b32008-04-28 02:12:18 -07001989 *nodemask = NULL; /* assume !MPOL_BIND */
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001990
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001991 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
Vlastimil Babka04ec6262017-07-06 15:40:03 -07001992 nid = interleave_nid(*mpol, vma, addr,
1993 huge_page_shift(hstate_vma(vma)));
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001994 } else {
Vlastimil Babka04ec6262017-07-06 15:40:03 -07001995 nid = policy_node(gfp_flags, *mpol, numa_node_id());
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001996 if ((*mpol)->mode == MPOL_BIND)
1997 *nodemask = &(*mpol)->v.nodes;
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001998 }
Vlastimil Babka04ec6262017-07-06 15:40:03 -07001999 return nid;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08002000}
Lee Schermerhorn06808b02009-12-14 17:58:21 -08002001
2002/*
2003 * init_nodemask_of_mempolicy
2004 *
2005 * If the current task's mempolicy is "default" [NULL], return 'false'
2006 * to indicate default policy. Otherwise, extract the policy nodemask
2007 * for 'bind' or 'interleave' policy into the argument nodemask, or
2008 * initialize the argument nodemask to contain the single node for
2009 * 'preferred' or 'local' policy and return 'true' to indicate presence
2010 * of non-default mempolicy.
2011 *
2012 * We don't bother with reference counting the mempolicy [mpol_get/put]
2013 * because the current task is examining its own mempolicy and a task's
2014 * mempolicy is only ever changed by the task itself.
2015 *
2016 * N.B., it is the caller's responsibility to free a returned nodemask.
2017 */
2018bool init_nodemask_of_mempolicy(nodemask_t *mask)
2019{
2020 struct mempolicy *mempolicy;
2021 int nid;
2022
2023 if (!(mask && current->mempolicy))
2024 return false;
2025
Miao Xiec0ff7452010-05-24 14:32:08 -07002026 task_lock(current);
Lee Schermerhorn06808b02009-12-14 17:58:21 -08002027 mempolicy = current->mempolicy;
2028 switch (mempolicy->mode) {
2029 case MPOL_PREFERRED:
2030 if (mempolicy->flags & MPOL_F_LOCAL)
2031 nid = numa_node_id();
2032 else
2033 nid = mempolicy->v.preferred_node;
2034 init_nodemask_of_node(mask, nid);
2035 break;
2036
2037 case MPOL_BIND:
2038 /* Fall through */
2039 case MPOL_INTERLEAVE:
2040 *mask = mempolicy->v.nodes;
2041 break;
2042
2043 default:
2044 BUG();
2045 }
Miao Xiec0ff7452010-05-24 14:32:08 -07002046 task_unlock(current);
Lee Schermerhorn06808b02009-12-14 17:58:21 -08002047
2048 return true;
2049}
Chen, Kenneth W00ac59a2006-02-03 21:51:14 +01002050#endif
Christoph Lameter5da7ca82006-01-06 00:10:46 -08002051
David Rientjes6f48d0eb2010-08-09 17:18:52 -07002052/*
2053 * mempolicy_nodemask_intersects
2054 *
2055 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
2056 * policy. Otherwise, check for intersection between mask and the policy
2057 * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
2058 * policy, always return true since it may allocate elsewhere on fallback.
2059 *
2060 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
2061 */
2062bool mempolicy_nodemask_intersects(struct task_struct *tsk,
2063 const nodemask_t *mask)
2064{
2065 struct mempolicy *mempolicy;
2066 bool ret = true;
2067
2068 if (!mask)
2069 return ret;
2070 task_lock(tsk);
2071 mempolicy = tsk->mempolicy;
2072 if (!mempolicy)
2073 goto out;
2074
2075 switch (mempolicy->mode) {
2076 case MPOL_PREFERRED:
2077 /*
2078 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
2079 * allocate from, they may fallback to other nodes when oom.
2080 * Thus, it's possible for tsk to have allocated memory from
2081 * nodes in mask.
2082 */
2083 break;
2084 case MPOL_BIND:
2085 case MPOL_INTERLEAVE:
2086 ret = nodes_intersects(mempolicy->v.nodes, *mask);
2087 break;
2088 default:
2089 BUG();
2090 }
2091out:
2092 task_unlock(tsk);
2093 return ret;
2094}
2095
Linus Torvalds1da177e2005-04-16 15:20:36 -07002096/* Allocate a page in interleaved policy.
2097 Own path because it needs to do special accounting. */
Andi Kleen662f3a02005-10-29 18:15:49 -07002098static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2099 unsigned nid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002100{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101 struct page *page;
2102
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002103 page = __alloc_pages(gfp, order, nid);
Kemi Wang45180852017-11-15 17:38:22 -08002104 /* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
2105 if (!static_branch_likely(&vm_numa_stat_key))
2106 return page;
Andrey Ryabininde55c8b2017-10-13 15:57:43 -07002107 if (page && page_to_nid(page) == nid) {
2108 preempt_disable();
2109 __inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
2110 preempt_enable();
2111 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112 return page;
2113}
2114
2115/**
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002116 * alloc_pages_vma - Allocate a page for a VMA.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117 *
2118 * @gfp:
2119 * %GFP_USER user allocation.
2120 * %GFP_KERNEL kernel allocations,
2121 * %GFP_HIGHMEM highmem/user allocations,
2122 * %GFP_FS allocation should not call back into a file system.
2123 * %GFP_ATOMIC don't sleep.
2124 *
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002125 * @order: Order of the GFP allocation.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002126 * @vma: Pointer to VMA or NULL if not available.
2127 * @addr: Virtual Address of the allocation. Must be inside the VMA.
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002128 * @node: Which node to prefer for allocation (modulo policy).
David Rientjes19deb762019-09-04 12:54:20 -07002129 * @hugepage: for hugepages try only the preferred node if possible
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130 *
2131 * This function allocates a page from the kernel page pool and applies
2132 * a NUMA policy associated with the VMA or the current process.
2133 * When VMA is not NULL caller must hold down_read on the mmap_sem of the
2134 * mm_struct of the VMA to prevent it from going away. Should be used for
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002135 * all allocations for pages that will be mapped into user space. Returns
2136 * NULL when no page can be allocated.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002137 */
2138struct page *
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002139alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
David Rientjes19deb762019-09-04 12:54:20 -07002140 unsigned long addr, int node, bool hugepage)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002141{
Mel Gormancc9a6c82012-03-21 16:34:11 -07002142 struct mempolicy *pol;
Miao Xiec0ff7452010-05-24 14:32:08 -07002143 struct page *page;
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002144 int preferred_nid;
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002145 nodemask_t *nmask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002146
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07002147 pol = get_vma_policy(vma, addr);
Mel Gormancc9a6c82012-03-21 16:34:11 -07002148
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002149 if (pol->mode == MPOL_INTERLEAVE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002150 unsigned nid;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08002151
Andi Kleen8eac5632011-02-25 14:44:28 -08002152 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002153 mpol_cond_put(pol);
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002154 page = alloc_page_interleave(gfp, order, nid);
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002155 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002156 }
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002157
David Rientjes19deb762019-09-04 12:54:20 -07002158 if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2159 int hpage_node = node;
2160
2161 /*
2162 * For hugepage allocation and non-interleave policy which
2163 * allows the current node (or other explicitly preferred
2164 * node) we only try to allocate from the current/preferred
2165 * node and don't fall back to other nodes, as the cost of
2166 * remote accesses would likely offset THP benefits.
2167 *
2168 * If the policy is interleave, or does not allow the current
2169 * node in its nodemask, we allocate the standard way.
2170 */
2171 if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL))
2172 hpage_node = pol->v.preferred_node;
2173
2174 nmask = policy_nodemask(gfp, pol);
2175 if (!nmask || node_isset(hpage_node, *nmask)) {
2176 mpol_cond_put(pol);
Vlastimil Babkacc638f32020-01-13 16:29:04 -08002177 /*
2178 * First, try to allocate THP only on local node, but
2179 * don't reclaim unnecessarily, just compact.
2180 */
David Rientjes19deb762019-09-04 12:54:20 -07002181 page = __alloc_pages_node(hpage_node,
Vlastimil Babkacc638f32020-01-13 16:29:04 -08002182 gfp | __GFP_THISNODE | __GFP_NORETRY, order);
David Rientjes76e654c2019-09-04 12:54:25 -07002183
2184 /*
2185 * If hugepage allocations are configured to always
2186 * synchronous compact or the vma has been madvised
2187 * to prefer hugepage backing, retry allowing remote
Vlastimil Babkacc638f32020-01-13 16:29:04 -08002188 * memory with both reclaim and compact as well.
David Rientjes76e654c2019-09-04 12:54:25 -07002189 */
2190 if (!page && (gfp & __GFP_DIRECT_RECLAIM))
2191 page = __alloc_pages_node(hpage_node,
Vlastimil Babkacc638f32020-01-13 16:29:04 -08002192 gfp, order);
David Rientjes76e654c2019-09-04 12:54:25 -07002193
David Rientjes19deb762019-09-04 12:54:20 -07002194 goto out;
2195 }
2196 }
2197
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002198 nmask = policy_nodemask(gfp, pol);
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002199 preferred_nid = policy_node(gfp, pol, node);
2200 page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
Vlastimil Babkad51e9892017-01-24 15:18:18 -08002201 mpol_cond_put(pol);
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002202out:
Miao Xiec0ff7452010-05-24 14:32:08 -07002203 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002204}
Christoph Hellwig69262212019-06-26 14:27:05 +02002205EXPORT_SYMBOL(alloc_pages_vma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002206
2207/**
2208 * alloc_pages_current - Allocate pages.
2209 *
2210 * @gfp:
2211 * %GFP_USER user allocation,
2212 * %GFP_KERNEL kernel allocation,
2213 * %GFP_HIGHMEM highmem allocation,
2214 * %GFP_FS don't call back into a file system.
2215 * %GFP_ATOMIC don't sleep.
2216 * @order: Power of two of allocation size in pages. 0 is a single page.
2217 *
2218 * Allocate a page from the kernel page pool, applying the current
2219 * process' NUMA policy when not in interrupt context.
2220 * Returns NULL when no page can be allocated.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221 */
Al Virodd0fc662005-10-07 07:46:04 +01002222struct page *alloc_pages_current(gfp_t gfp, unsigned order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002223{
Oleg Nesterov8d902742014-10-09 15:27:45 -07002224 struct mempolicy *pol = &default_policy;
Miao Xiec0ff7452010-05-24 14:32:08 -07002225 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002226
Oleg Nesterov8d902742014-10-09 15:27:45 -07002227 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2228 pol = get_task_policy(current);
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002229
2230 /*
2231 * No reference counting needed for current->mempolicy
2232 * nor system default_policy
2233 */
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002234 if (pol->mode == MPOL_INTERLEAVE)
Miao Xiec0ff7452010-05-24 14:32:08 -07002235 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2236 else
2237 page = __alloc_pages_nodemask(gfp, order,
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002238 policy_node(gfp, pol, numa_node_id()),
Andi Kleen5c4b4be2011-03-04 17:36:32 -08002239 policy_nodemask(gfp, pol));
Mel Gormancc9a6c82012-03-21 16:34:11 -07002240
Miao Xiec0ff7452010-05-24 14:32:08 -07002241 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002242}
2243EXPORT_SYMBOL(alloc_pages_current);
2244
Oleg Nesterovef0855d2013-09-11 14:20:14 -07002245int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2246{
2247 struct mempolicy *pol = mpol_dup(vma_policy(src));
2248
2249 if (IS_ERR(pol))
2250 return PTR_ERR(pol);
2251 dst->vm_policy = pol;
2252 return 0;
2253}
2254
Paul Jackson42253992006-01-08 01:01:59 -08002255/*
Lee Schermerhorn846a16b2008-04-28 02:13:09 -07002256 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
Paul Jackson42253992006-01-08 01:01:59 -08002257 * rebinds the mempolicy its copying by calling mpol_rebind_policy()
2258 * with the mems_allowed returned by cpuset_mems_allowed(). This
2259 * keeps mempolicies cpuset relative after its cpuset moves. See
2260 * further kernel/cpuset.c update_nodemask().
Miao Xie708c1bb2010-05-24 14:32:07 -07002261 *
2262 * current's mempolicy may be rebinded by the other task(the task that changes
2263 * cpuset's mems), so we needn't do rebind work for current task.
Paul Jackson42253992006-01-08 01:01:59 -08002264 */
Paul Jackson42253992006-01-08 01:01:59 -08002265
Lee Schermerhorn846a16b2008-04-28 02:13:09 -07002266/* Slow path of a mempolicy duplicate */
2267struct mempolicy *__mpol_dup(struct mempolicy *old)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002268{
2269 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2270
2271 if (!new)
2272 return ERR_PTR(-ENOMEM);
Miao Xie708c1bb2010-05-24 14:32:07 -07002273
2274 /* task's mempolicy is protected by alloc_lock */
2275 if (old == current->mempolicy) {
2276 task_lock(current);
2277 *new = *old;
2278 task_unlock(current);
2279 } else
2280 *new = *old;
2281
Paul Jackson42253992006-01-08 01:01:59 -08002282 if (current_cpuset_is_being_rebound()) {
2283 nodemask_t mems = cpuset_mems_allowed(current);
Vlastimil Babka213980c2017-07-06 15:40:06 -07002284 mpol_rebind_policy(new, &mems);
Paul Jackson42253992006-01-08 01:01:59 -08002285 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002286 atomic_set(&new->refcnt, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002287 return new;
2288}
2289
2290/* Slow path of a mempolicy comparison */
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002291bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002292{
2293 if (!a || !b)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002294 return false;
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002295 if (a->mode != b->mode)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002296 return false;
Bob Liu19800502010-05-24 14:32:01 -07002297 if (a->flags != b->flags)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002298 return false;
Bob Liu19800502010-05-24 14:32:01 -07002299 if (mpol_store_user_nodemask(a))
2300 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002301 return false;
Bob Liu19800502010-05-24 14:32:01 -07002302
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002303 switch (a->mode) {
Mel Gorman19770b32008-04-28 02:12:18 -07002304 case MPOL_BIND:
2305 /* Fall through */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002306 case MPOL_INTERLEAVE:
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002307 return !!nodes_equal(a->v.nodes, b->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002308 case MPOL_PREFERRED:
Yisheng Xie8970a632018-03-22 16:17:02 -07002309 /* a's ->flags is the same as b's */
2310 if (a->flags & MPOL_F_LOCAL)
2311 return true;
Namhyung Kim75719662011-03-22 16:33:02 -07002312 return a->v.preferred_node == b->v.preferred_node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002313 default:
2314 BUG();
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002315 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002316 }
2317}
2318
Linus Torvalds1da177e2005-04-16 15:20:36 -07002319/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002320 * Shared memory backing store policy support.
2321 *
2322 * Remember policies even when nobody has shared memory mapped.
2323 * The policies are kept in Red-Black tree linked from the inode.
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002324 * They are protected by the sp->lock rwlock, which should be held
Linus Torvalds1da177e2005-04-16 15:20:36 -07002325 * for any accesses to the tree.
2326 */
2327
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002328/*
2329 * lookup first element intersecting start-end. Caller holds sp->lock for
2330 * reading or for writing
2331 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002332static struct sp_node *
2333sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2334{
2335 struct rb_node *n = sp->root.rb_node;
2336
2337 while (n) {
2338 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2339
2340 if (start >= p->end)
2341 n = n->rb_right;
2342 else if (end <= p->start)
2343 n = n->rb_left;
2344 else
2345 break;
2346 }
2347 if (!n)
2348 return NULL;
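	/*
	 * The binary search above may have landed on any node overlapping
	 * [start, end); callers expect the first such node, so walk back
	 * towards lower addresses while the previous node still ends
	 * after 'start'.
	 */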
2349 for (;;) {
2350 struct sp_node *w = NULL;
2351 struct rb_node *prev = rb_prev(n);
2352 if (!prev)
2353 break;
2354 w = rb_entry(prev, struct sp_node, nd);
2355 if (w->end <= start)
2356 break;
2357 n = prev;
2358 }
2359 return rb_entry(n, struct sp_node, nd);
2360}
2361
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002362/*
2363 * Insert a new shared policy into the list. Caller holds sp->lock for
2364 * writing.
2365 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002366static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2367{
2368 struct rb_node **p = &sp->root.rb_node;
2369 struct rb_node *parent = NULL;
2370 struct sp_node *nd;
2371
2372 while (*p) {
2373 parent = *p;
2374 nd = rb_entry(parent, struct sp_node, nd);
2375 if (new->start < nd->start)
2376 p = &(*p)->rb_left;
2377 else if (new->end > nd->end)
2378 p = &(*p)->rb_right;
2379 else
2380 BUG();
2381 }
2382 rb_link_node(&new->nd, parent, p);
2383 rb_insert_color(&new->nd, &sp->root);
Paul Mundt140d5a42007-07-15 23:38:16 -07002384 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002385 new->policy ? new->policy->mode : 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002386}
2387
2388/* Find shared policy intersecting idx */
2389struct mempolicy *
2390mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2391{
2392 struct mempolicy *pol = NULL;
2393 struct sp_node *sn;
2394
2395 if (!sp->root.rb_node)
2396 return NULL;
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002397 read_lock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002398 sn = sp_lookup(sp, idx, idx+1);
2399 if (sn) {
2400 mpol_get(sn->policy);
2401 pol = sn->policy;
2402 }
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002403 read_unlock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002404 return pol;
2405}
2406
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002407static void sp_free(struct sp_node *n)
2408{
2409 mpol_put(n->policy);
2410 kmem_cache_free(sn_cache, n);
2411}
2412
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002413/**
2414 * mpol_misplaced - check whether current page node is valid in policy
2415 *
Fabian Frederickb46e14a2014-06-04 16:08:18 -07002416 * @page: page to be checked
2417 * @vma: vm area where page mapped
2418 * @addr: virtual address where page mapped
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002419 *
2420 * Look up the current policy node id for vma, addr and "compare to" the page's
2421 * node id.
2422 *
2423 * Returns:
2424 * -1 - not misplaced, page is in the right node
2425 * node - node id where the page should be
2426 *
2427 * Policy determination "mimics" alloc_page_vma().
2428 * Called from fault path where we know the vma and faulting address.
2429 */
2430int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2431{
2432 struct mempolicy *pol;
Mel Gormanc33d6c02016-05-19 17:14:10 -07002433 struct zoneref *z;
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002434 int curnid = page_to_nid(page);
2435 unsigned long pgoff;
Peter Zijlstra90572892013-10-07 11:29:20 +01002436 int thiscpu = raw_smp_processor_id();
2437 int thisnid = cpu_to_node(thiscpu);
Anshuman Khandual98fa15f2019-03-05 15:42:58 -08002438 int polnid = NUMA_NO_NODE;
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002439 int ret = -1;
2440
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07002441 pol = get_vma_policy(vma, addr);
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002442 if (!(pol->flags & MPOL_F_MOF))
2443 goto out;
2444
2445 switch (pol->mode) {
2446 case MPOL_INTERLEAVE:
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002447 pgoff = vma->vm_pgoff;
2448 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
Laurent Dufour98c70ba2017-09-08 16:12:39 -07002449 polnid = offset_il_node(pol, pgoff);
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002450 break;
2451
2452 case MPOL_PREFERRED:
2453 if (pol->flags & MPOL_F_LOCAL)
2454 polnid = numa_node_id();
2455 else
2456 polnid = pol->v.preferred_node;
2457 break;
2458
2459 case MPOL_BIND:
Mel Gormanc33d6c02016-05-19 17:14:10 -07002460
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002461 /*
2462 * MPOL_BIND allows binding to multiple nodes.
2463 * Use the current page's node if it is in the policy nodemask,
2464 * else select the nearest allowed node, if any.
2465 * If there are no allowed nodes, use the current node [!misplaced].
2466 */
2467 if (node_isset(curnid, pol->v.nodes))
2468 goto out;
Mel Gormanc33d6c02016-05-19 17:14:10 -07002469 z = first_zones_zonelist(
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002470 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2471 gfp_zone(GFP_HIGHUSER),
Mel Gormanc33d6c02016-05-19 17:14:10 -07002472 &pol->v.nodes);
Pavel Tatashinc1093b72018-08-21 21:53:32 -07002473 polnid = zone_to_nid(z->zone);
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002474 break;
2475
2476 default:
2477 BUG();
2478 }
Mel Gorman5606e382012-11-02 18:19:13 +00002479
2480 /* Migrate the page towards the node whose CPU is referencing it */
Mel Gormane42c8ff2012-11-12 09:17:07 +00002481 if (pol->flags & MPOL_F_MORON) {
Peter Zijlstra90572892013-10-07 11:29:20 +01002482 polnid = thisnid;
Mel Gorman5606e382012-11-02 18:19:13 +00002483
Rik van Riel10f39042014-01-27 17:03:44 -05002484 if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
Rik van Rielde1c9ce2013-10-07 11:29:39 +01002485 goto out;
Mel Gormane42c8ff2012-11-12 09:17:07 +00002486 }
2487
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002488 if (curnid != polnid)
2489 ret = polnid;
2490out:
2491 mpol_cond_put(pol);
2492
2493 return ret;
2494}
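/*
 * Worked example for the MPOL_INTERLEAVE branch above (hypothetical numbers,
 * 4KiB pages): for a VMA with vm_pgoff = 0x100 and vm_start = 0x7f0000000000,
 * a fault at addr = 0x7f0000003000 yields
 * pgoff = 0x100 + (0x3000 >> PAGE_SHIFT) = 0x103, and offset_il_node()
 * selects node number (0x103 mod the weight of the policy nodemask) within
 * that mask as polnid.
 */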
2495
David Rientjesc11600e2016-09-01 16:15:07 -07002496/*
2497 * Drop the (possibly final) reference to task->mempolicy. It needs to be
2498 * dropped after task->mempolicy is set to NULL so that any allocation done as
2499 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2500 * policy.
2501 */
2502void mpol_put_task_policy(struct task_struct *task)
2503{
2504 struct mempolicy *pol;
2505
2506 task_lock(task);
2507 pol = task->mempolicy;
2508 task->mempolicy = NULL;
2509 task_unlock(task);
2510 mpol_put(pol);
2511}
2512
Linus Torvalds1da177e2005-04-16 15:20:36 -07002513static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2514{
Paul Mundt140d5a42007-07-15 23:38:16 -07002515 pr_debug("deleting %lx-%lx\n", n->start, n->end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002516 rb_erase(&n->nd, &sp->root);
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002517 sp_free(n);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002518}
2519
Mel Gorman42288fe2012-12-21 23:10:25 +00002520static void sp_node_init(struct sp_node *node, unsigned long start,
2521 unsigned long end, struct mempolicy *pol)
2522{
2523 node->start = start;
2524 node->end = end;
2525 node->policy = pol;
2526}
2527
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07002528static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2529 struct mempolicy *pol)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002530{
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002531 struct sp_node *n;
2532 struct mempolicy *newpol;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002533
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002534 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002535 if (!n)
2536 return NULL;
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002537
2538 newpol = mpol_dup(pol);
2539 if (IS_ERR(newpol)) {
2540 kmem_cache_free(sn_cache, n);
2541 return NULL;
2542 }
2543 newpol->flags |= MPOL_F_SHARED;
Mel Gorman42288fe2012-12-21 23:10:25 +00002544 sp_node_init(n, start, end, newpol);
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002545
Linus Torvalds1da177e2005-04-16 15:20:36 -07002546 return n;
2547}
2548
2549/* Replace a policy range. */
2550static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2551 unsigned long end, struct sp_node *new)
2552{
Mel Gormanb22d1272012-10-08 16:29:17 -07002553 struct sp_node *n;
Mel Gorman42288fe2012-12-21 23:10:25 +00002554 struct sp_node *n_new = NULL;
2555 struct mempolicy *mpol_new = NULL;
Mel Gormanb22d1272012-10-08 16:29:17 -07002556 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002557
Mel Gorman42288fe2012-12-21 23:10:25 +00002558restart:
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002559 write_lock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002560 n = sp_lookup(sp, start, end);
2561 /* Take care of old policies in the same range. */
2562 while (n && n->start < end) {
2563 struct rb_node *next = rb_next(&n->nd);
2564 if (n->start >= start) {
2565 if (n->end <= end)
2566 sp_delete(sp, n);
2567 else
2568 n->start = end;
2569 } else {
2570 /* Old policy spanning whole new range. */
2571 if (n->end > end) {
Mel Gorman42288fe2012-12-21 23:10:25 +00002572 if (!n_new)
2573 goto alloc_new;
2574
2575 *mpol_new = *n->policy;
2576 atomic_set(&mpol_new->refcnt, 1);
KOSAKI Motohiro78806392013-03-08 12:43:29 -08002577 sp_node_init(n_new, end, n->end, mpol_new);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002578 n->end = start;
Hillf Danton5ca39572013-03-08 12:43:28 -08002579 sp_insert(sp, n_new);
Mel Gorman42288fe2012-12-21 23:10:25 +00002580 n_new = NULL;
2581 mpol_new = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002582 break;
2583 } else
2584 n->end = start;
2585 }
2586 if (!next)
2587 break;
2588 n = rb_entry(next, struct sp_node, nd);
2589 }
2590 if (new)
2591 sp_insert(sp, new);
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002592 write_unlock(&sp->lock);
Mel Gorman42288fe2012-12-21 23:10:25 +00002593 ret = 0;
2594
2595err_out:
2596 if (mpol_new)
2597 mpol_put(mpol_new);
2598 if (n_new)
2599 kmem_cache_free(sn_cache, n_new);
2600
Mel Gormanb22d1272012-10-08 16:29:17 -07002601 return ret;
Mel Gorman42288fe2012-12-21 23:10:25 +00002602
2603alloc_new:
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002604 write_unlock(&sp->lock);
Mel Gorman42288fe2012-12-21 23:10:25 +00002605 ret = -ENOMEM;
2606 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2607 if (!n_new)
2608 goto err_out;
2609 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2610 if (!mpol_new)
2611 goto err_out;
2612 goto restart;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002613}
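/*
 * Illustrative case for the "old policy spanning whole new range" branch
 * above (hypothetical ranges): with a single node covering [0,100) in the
 * tree, installing a new policy over [40,60) truncates the old node to
 * [0,40), inserts a copy of its policy as [60,100) via n_new, and finally
 * inserts the new [40,60) node, leaving three disjoint ranges.
 */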
2614
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002615/**
2616 * mpol_shared_policy_init - initialize shared policy for inode
2617 * @sp: pointer to inode shared policy
2618 * @mpol: struct mempolicy to install
2619 *
2620 * Install non-NULL @mpol in inode's shared policy rb-tree.
2621 * On entry, the current task has a reference on a non-NULL @mpol.
2622 * This must be released on exit.
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002623 * This is called at get_inode() time, so we can use GFP_KERNEL.
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002624 */
2625void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
Robin Holt7339ff82006-01-14 13:20:48 -08002626{
Miao Xie58568d22009-06-16 15:31:49 -07002627 int ret;
2628
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002629 sp->root = RB_ROOT; /* empty tree == default mempolicy */
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002630 rwlock_init(&sp->lock);
Robin Holt7339ff82006-01-14 13:20:48 -08002631
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002632 if (mpol) {
2633 struct vm_area_struct pvma;
2634 struct mempolicy *new;
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002635 NODEMASK_SCRATCH(scratch);
Robin Holt7339ff82006-01-14 13:20:48 -08002636
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002637 if (!scratch)
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002638 goto put_mpol;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002639 /* contextualize the tmpfs mount point mempolicy */
2640 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
Lee Schermerhorn15d77832010-05-24 14:32:04 -07002641 if (IS_ERR(new))
Dan Carpenter0cae3452010-05-25 23:42:58 -07002642 goto free_scratch; /* no valid nodemask intersection */
Miao Xie58568d22009-06-16 15:31:49 -07002643
2644 task_lock(current);
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002645 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
Miao Xie58568d22009-06-16 15:31:49 -07002646 task_unlock(current);
Lee Schermerhorn15d77832010-05-24 14:32:04 -07002647 if (ret)
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002648 goto put_new;
Robin Holt7339ff82006-01-14 13:20:48 -08002649
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002650 /* Create pseudo-vma that contains just the policy */
Kirill A. Shutemov2c4541e2018-07-26 16:37:30 -07002651 vma_init(&pvma, NULL);
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002652 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2653 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
Lee Schermerhorn15d77832010-05-24 14:32:04 -07002654
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002655put_new:
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002656 mpol_put(new); /* drop initial ref */
Dan Carpenter0cae3452010-05-25 23:42:58 -07002657free_scratch:
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002658 NODEMASK_SCRATCH_FREE(scratch);
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002659put_mpol:
2660 mpol_put(mpol); /* drop our incoming ref on sb mpol */
Robin Holt7339ff82006-01-14 13:20:48 -08002661 }
2662}
2663
Linus Torvalds1da177e2005-04-16 15:20:36 -07002664int mpol_set_shared_policy(struct shared_policy *info,
2665 struct vm_area_struct *vma, struct mempolicy *npol)
2666{
2667 int err;
2668 struct sp_node *new = NULL;
2669 unsigned long sz = vma_pages(vma);
2670
David Rientjes028fec42008-04-28 02:12:25 -07002671 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002672 vma->vm_pgoff,
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002673 sz, npol ? npol->mode : -1,
David Rientjes028fec42008-04-28 02:12:25 -07002674 npol ? npol->flags : -1,
David Rientjes00ef2d22013-02-22 16:35:36 -08002675 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002676
2677 if (npol) {
2678 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2679 if (!new)
2680 return -ENOMEM;
2681 }
2682 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2683 if (err && new)
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002684 sp_free(new);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002685 return err;
2686}
2687
2688/* Free a backing policy store on inode delete. */
2689void mpol_free_shared_policy(struct shared_policy *p)
2690{
2691 struct sp_node *n;
2692 struct rb_node *next;
2693
2694 if (!p->root.rb_node)
2695 return;
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002696 write_lock(&p->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002697 next = rb_first(&p->root);
2698 while (next) {
2699 n = rb_entry(next, struct sp_node, nd);
2700 next = rb_next(&n->nd);
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002701 sp_delete(p, n);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002702 }
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002703 write_unlock(&p->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002704}
2705
Mel Gorman1a687c22012-11-22 11:16:36 +00002706#ifdef CONFIG_NUMA_BALANCING
Mel Gormanc2976632014-01-29 14:05:42 -08002707static int __initdata numabalancing_override;
Mel Gorman1a687c22012-11-22 11:16:36 +00002708
2709static void __init check_numabalancing_enable(void)
2710{
2711 bool numabalancing_default = false;
2712
2713 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2714 numabalancing_default = true;
2715
Mel Gormanc2976632014-01-29 14:05:42 -08002716 /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2717 if (numabalancing_override)
2718 set_numabalancing_state(numabalancing_override == 1);
2719
Mel Gormanb0dc2b92015-05-14 15:17:09 -07002720 if (num_online_nodes() > 1 && !numabalancing_override) {
Joe Perches756a0252016-03-17 14:19:47 -07002721 pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
Mel Gormanc2976632014-01-29 14:05:42 -08002722 numabalancing_default ? "Enabling" : "Disabling");
Mel Gorman1a687c22012-11-22 11:16:36 +00002723 set_numabalancing_state(numabalancing_default);
2724 }
2725}
2726
2727static int __init setup_numabalancing(char *str)
2728{
2729 int ret = 0;
2730 if (!str)
2731 goto out;
Mel Gorman1a687c22012-11-22 11:16:36 +00002732
2733 if (!strcmp(str, "enable")) {
Mel Gormanc2976632014-01-29 14:05:42 -08002734 numabalancing_override = 1;
Mel Gorman1a687c22012-11-22 11:16:36 +00002735 ret = 1;
2736 } else if (!strcmp(str, "disable")) {
Mel Gormanc2976632014-01-29 14:05:42 -08002737 numabalancing_override = -1;
Mel Gorman1a687c22012-11-22 11:16:36 +00002738 ret = 1;
2739 }
2740out:
2741 if (!ret)
Andrew Morton4a404be2014-01-29 14:05:43 -08002742 pr_warn("Unable to parse numa_balancing=\n");
Mel Gorman1a687c22012-11-22 11:16:36 +00002743
2744 return ret;
2745}
2746__setup("numa_balancing=", setup_numabalancing);
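/*
 * With the parser above, booting with "numa_balancing=enable" or
 * "numa_balancing=disable" on the kernel command line overrides the
 * CONFIG_NUMA_BALANCING_DEFAULT_ENABLED default; any other value is
 * rejected and reported by the pr_warn() in setup_numabalancing().
 */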
2747#else
2748static inline void __init check_numabalancing_enable(void)
2749{
2750}
2751#endif /* CONFIG_NUMA_BALANCING */
2752
Linus Torvalds1da177e2005-04-16 15:20:36 -07002753/* assumes fs == KERNEL_DS */
2754void __init numa_policy_init(void)
2755{
Paul Mundtb71636e2007-07-15 23:38:15 -07002756 nodemask_t interleave_nodes;
2757 unsigned long largest = 0;
2758 int nid, prefer = 0;
2759
Linus Torvalds1da177e2005-04-16 15:20:36 -07002760 policy_cache = kmem_cache_create("numa_policy",
2761 sizeof(struct mempolicy),
Paul Mundt20c2df82007-07-20 10:11:58 +09002762 0, SLAB_PANIC, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002763
2764 sn_cache = kmem_cache_create("shared_policy_node",
2765 sizeof(struct sp_node),
Paul Mundt20c2df82007-07-20 10:11:58 +09002766 0, SLAB_PANIC, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002767
Mel Gorman5606e382012-11-02 18:19:13 +00002768 for_each_node(nid) {
2769 preferred_node_policy[nid] = (struct mempolicy) {
2770 .refcnt = ATOMIC_INIT(1),
2771 .mode = MPOL_PREFERRED,
2772 .flags = MPOL_F_MOF | MPOL_F_MORON,
2773 .v = { .preferred_node = nid, },
2774 };
2775 }
2776
Paul Mundtb71636e2007-07-15 23:38:15 -07002777 /*
2778 * Set the interleaving policy for system init. Interleaving is only
2779 * enabled across suitably sized nodes (default is >= 16MB); if they
2780 * are all smaller, fall back to the largest node.
2781 */
2782 nodes_clear(interleave_nodes);
Lai Jiangshan01f13bd2012-12-12 13:51:33 -08002783 for_each_node_state(nid, N_MEMORY) {
Paul Mundtb71636e2007-07-15 23:38:15 -07002784 unsigned long total_pages = node_present_pages(nid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002785
Paul Mundtb71636e2007-07-15 23:38:15 -07002786 /* Preserve the largest node */
2787 if (largest < total_pages) {
2788 largest = total_pages;
2789 prefer = nid;
2790 }
2791
2792 /* Interleave this node? */
2793 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2794 node_set(nid, interleave_nodes);
2795 }
2796
2797 /* All too small, use the largest */
2798 if (unlikely(nodes_empty(interleave_nodes)))
2799 node_set(prefer, interleave_nodes);
2800
David Rientjes028fec42008-04-28 02:12:25 -07002801 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
Mitchel Humpherysb1de0d12014-06-06 14:38:30 -07002802 pr_err("%s: interleaving failed\n", __func__);
Mel Gorman1a687c22012-11-22 11:16:36 +00002803
2804 check_numabalancing_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805}
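/*
 * Worked example for the interleave threshold above (hypothetical node,
 * 4KiB pages): (total_pages << PAGE_SHIFT) >= (16 << 20) requires at least
 * 16MiB / 4KiB = 4096 present pages, so an 8MiB node would not be added to
 * interleave_nodes and could only end up used as the "prefer" fallback if
 * it happens to be the largest node.
 */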
2806
Christoph Lameter8bccd852005-10-29 18:16:59 -07002807/* Reset policy of current process to default */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002808void numa_default_policy(void)
2809{
David Rientjes028fec42008-04-28 02:12:25 -07002810 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002811}
Paul Jackson68860ec2005-10-30 15:02:36 -08002812
Paul Jackson42253992006-01-08 01:01:59 -08002813/*
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002814 * Parse and format mempolicy from/to strings
2815 */
2816
2817/*
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08002818 * "local" is implemented internally by MPOL_PREFERRED with the MPOL_F_LOCAL flag.
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002819 */
Lee Schermerhorn345ace92010-05-24 14:32:04 -07002820static const char * const policy_modes[] =
2821{
2822 [MPOL_DEFAULT] = "default",
2823 [MPOL_PREFERRED] = "prefer",
2824 [MPOL_BIND] = "bind",
2825 [MPOL_INTERLEAVE] = "interleave",
Lee Schermerhornd3a71032012-10-25 14:16:29 +02002826 [MPOL_LOCAL] = "local",
Lee Schermerhorn345ace92010-05-24 14:32:04 -07002827};
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002828
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002829
2830#ifdef CONFIG_TMPFS
2831/**
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08002832 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002833 * @str: string containing mempolicy to parse
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002834 * @mpol: pointer to struct mempolicy pointer, returned on success.
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002835 *
2836 * Format of input:
2837 * <mode>[=<flags>][:<nodelist>]
2838 *
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002839 * On success, returns 0, else 1
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002840 */
Hugh Dickinsa7a88b22013-01-02 02:04:23 -08002841int mpol_parse_str(char *str, struct mempolicy **mpol)
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002842{
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002843 struct mempolicy *new = NULL;
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08002844 unsigned short mode_flags;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002845 nodemask_t nodes;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002846 char *nodelist = strchr(str, ':');
2847 char *flags = strchr(str, '=');
zhong jiangdedf2c72018-10-26 15:06:57 -07002848 int err = 1, mode;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002849
Dan Carpenterc7a91bc2020-01-30 22:11:07 -08002850 if (flags)
2851 *flags++ = '\0'; /* terminate mode string */
2852
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002853 if (nodelist) {
2854 /* NUL-terminate mode or flags string */
2855 *nodelist++ = '\0';
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002856 if (nodelist_parse(nodelist, nodes))
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002857 goto out;
Lai Jiangshan01f13bd2012-12-12 13:51:33 -08002858 if (!nodes_subset(nodes, node_states[N_MEMORY]))
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002859 goto out;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002860 } else
2861 nodes_clear(nodes);
2862
zhong jiangdedf2c72018-10-26 15:06:57 -07002863 mode = match_string(policy_modes, MPOL_MAX, str);
2864 if (mode < 0)
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002865 goto out;
2866
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002867 switch (mode) {
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002868 case MPOL_PREFERRED:
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002869 /*
2870 * Insist on a nodelist of one node only
2871 */
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002872 if (nodelist) {
2873 char *rest = nodelist;
2874 while (isdigit(*rest))
2875 rest++;
KOSAKI Motohiro926f2ae2010-03-23 13:35:32 -07002876 if (*rest)
2877 goto out;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002878 }
2879 break;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002880 case MPOL_INTERLEAVE:
2881 /*
2882 * Default to online nodes with memory if no nodelist
2883 */
2884 if (!nodelist)
Lai Jiangshan01f13bd2012-12-12 13:51:33 -08002885 nodes = node_states[N_MEMORY];
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002886 break;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002887 case MPOL_LOCAL:
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002888 /*
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002889 * Don't allow a nodelist; mpol_new() checks flags
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002890 */
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002891 if (nodelist)
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002892 goto out;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002893 mode = MPOL_PREFERRED;
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002894 break;
Ravikiran G Thirumalai413b43d2010-03-23 13:35:28 -07002895 case MPOL_DEFAULT:
2896 /*
2897 * Insist on an empty nodelist
2898 */
2899 if (!nodelist)
2900 err = 0;
2901 goto out;
KOSAKI Motohirod69b2e632010-03-23 13:35:30 -07002902 case MPOL_BIND:
2903 /*
2904 * Insist on a nodelist
2905 */
2906 if (!nodelist)
2907 goto out;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002908 }
2909
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002910 mode_flags = 0;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002911 if (flags) {
2912 /*
2913 * Currently, we only support two mutually exclusive
2914 * mode flags.
2915 */
2916 if (!strcmp(flags, "static"))
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002917 mode_flags |= MPOL_F_STATIC_NODES;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002918 else if (!strcmp(flags, "relative"))
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002919 mode_flags |= MPOL_F_RELATIVE_NODES;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002920 else
KOSAKI Motohiro926f2ae2010-03-23 13:35:32 -07002921 goto out;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002922 }
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002923
2924 new = mpol_new(mode, mode_flags, &nodes);
2925 if (IS_ERR(new))
KOSAKI Motohiro926f2ae2010-03-23 13:35:32 -07002926 goto out;
2927
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08002928 /*
2929 * Save nodes for mpol_to_str() to show the tmpfs mount options
2930 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2931 */
2932 if (mode != MPOL_PREFERRED)
2933 new->v.nodes = nodes;
2934 else if (nodelist)
2935 new->v.preferred_node = first_node(nodes);
2936 else
2937 new->flags |= MPOL_F_LOCAL;
2938
2939 /*
2940 * Save nodes for contextualization: this will be used to "clone"
2941 * the mempolicy in a specific context [cpuset] at a later time.
2942 */
2943 new->w.user_nodemask = nodes;
2944
KOSAKI Motohiro926f2ae2010-03-23 13:35:32 -07002945 err = 0;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002946
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002947out:
2948 /* Restore string for error message */
2949 if (nodelist)
2950 *--nodelist = ':';
2951 if (flags)
2952 *--flags = '=';
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002953 if (!err)
2954 *mpol = new;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002955 return err;
2956}
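/*
 * A minimal usage sketch (hypothetical buffer): the string must be writable,
 * since the mode, flags and nodelist are temporarily NUL-separated in place
 * and then restored.
 *
 *	char str[] = "interleave=static:0-3";
 *	struct mempolicy *pol;
 *
 *	if (!mpol_parse_str(str, &pol)) {
 *		... pol is MPOL_INTERLEAVE over nodes 0-3 with
 *		    MPOL_F_STATIC_NODES; drop it with mpol_put() ...
 *	}
 */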
2957#endif /* CONFIG_TMPFS */
2958
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002959/**
2960 * mpol_to_str - format a mempolicy structure for printing
2961 * @buffer: to contain formatted mempolicy string
2962 * @maxlen: length of @buffer
2963 * @pol: pointer to mempolicy to be formatted
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002964 *
David Rientjes948927e2013-11-12 15:07:28 -08002965 * Convert @pol into a string. If @buffer is too short, truncate the string.
2966 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2967 * longest flag, "relative", and to display at least a few node ids.
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002968 */
David Rientjes948927e2013-11-12 15:07:28 -08002969void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002970{
2971 char *p = buffer;
David Rientjes948927e2013-11-12 15:07:28 -08002972 nodemask_t nodes = NODE_MASK_NONE;
2973 unsigned short mode = MPOL_DEFAULT;
2974 unsigned short flags = 0;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002975
David Rientjes8790c71a2014-01-30 15:46:08 -08002976 if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
Lee Schermerhornbea904d2008-04-28 02:13:18 -07002977 mode = pol->mode;
David Rientjes948927e2013-11-12 15:07:28 -08002978 flags = pol->flags;
2979 }
Lee Schermerhornbea904d2008-04-28 02:13:18 -07002980
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002981 switch (mode) {
2982 case MPOL_DEFAULT:
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002983 break;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002984 case MPOL_PREFERRED:
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07002985 if (flags & MPOL_F_LOCAL)
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08002986 mode = MPOL_LOCAL;
Lee Schermerhorn53f25562008-04-28 02:13:20 -07002987 else
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07002988 node_set(pol->v.preferred_node, nodes);
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002989 break;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002990 case MPOL_BIND:
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002991 case MPOL_INTERLEAVE:
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08002992 nodes = pol->v.nodes;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002993 break;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002994 default:
David Rientjes948927e2013-11-12 15:07:28 -08002995 WARN_ON_ONCE(1);
2996 snprintf(p, maxlen, "unknown");
2997 return;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002998 }
2999
David Rientjesb7a9f422013-11-21 14:32:06 -08003000 p += snprintf(p, maxlen, "%s", policy_modes[mode]);
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003001
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07003002 if (flags & MPOL_MODE_FLAGS) {
David Rientjes948927e2013-11-12 15:07:28 -08003003 p += snprintf(p, buffer + maxlen - p, "=");
David Rientjesf5b087b2008-04-28 02:12:27 -07003004
Lee Schermerhorn22919902008-04-28 02:13:22 -07003005 /*
3006 * Currently, the only defined flags are mutually exclusive
3007 */
David Rientjesf5b087b2008-04-28 02:12:27 -07003008 if (flags & MPOL_F_STATIC_NODES)
Lee Schermerhorn22919902008-04-28 02:13:22 -07003009 p += snprintf(p, buffer + maxlen - p, "static");
3010 else if (flags & MPOL_F_RELATIVE_NODES)
3011 p += snprintf(p, buffer + maxlen - p, "relative");
David Rientjesf5b087b2008-04-28 02:12:27 -07003012 }
3013
Tejun Heo9e763e02015-02-13 14:38:02 -08003014 if (!nodes_empty(nodes))
3015 p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
3016 nodemask_pr_args(&nodes));
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003017}
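/*
 * Illustrative outputs (hypothetical policies): an interleave policy over
 * nodes 0-3 formats as "interleave:0-3", a preferred policy with
 * MPOL_F_LOCAL formats as "local", and a bind policy over nodes 1 and 3
 * with MPOL_F_RELATIVE_NODES formats as "bind=relative:1,3".
 */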