// SPDX-License-Identifier: GPL-2.0-only
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave	Allocate memory interleaved over a set of nodes,
 *		with normal fallback if it fails.
 *		For VMA based allocations this interleaves based on the
 *		offset into the backing object or offset into the mapping
 *		for anonymous memory. For process policy a process counter
 *		is used.
 *
 * bind		Only allocate memory on a specific set of nodes,
 *		no fallback.
 *		FIXME: memory is allocated starting with the first node
 *		to the last. It would be better if bind would truly restrict
 *		the allocation to memory nodes instead
 *
 * preferred	Try a specific node first before normal fallback.
 *		As a special case NUMA_NO_NODE here means do the allocation
 *		on the local CPU. This is normally identical to default,
 *		but useful to set in a VMA when you have a non-default
 *		process policy.
 *
 * default	Allocate on the local node first, or when on a VMA
 *		use the process policy. This is what Linux always did
 *		in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
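
/*
 * Illustrative userspace sketch (not kernel code, and not taken from this
 * file): roughly how the policies described above are requested via
 * set_mempolicy(2) and mbind(2) from <numaif.h>.  "addr" and "length" stand
 * for a previously mmap()ed range, and error handling is omitted.
 *
 *	unsigned long nodes01 = (1UL << 0) | (1UL << 1);
 *	unsigned long node0   = 1UL << 0;
 *
 *	// process policy: interleave future allocations over nodes 0 and 1
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes01, sizeof(nodes01) * 8);
 *
 *	// VMA policy: bind one mapping to node 0 only, no fallback
 *	mbind(addr, length, MPOL_BIND, &node0, sizeof(node0) * 8, 0);
 */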

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>
#include <linux/swapops.h>

#include <asm/tlbflush.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

/**
 * numa_map_to_online_node - Find closest online node
 * @nid: Node id to start the search
 *
 * Lookup the next closest node by distance if @nid is not online.
 */
int numa_map_to_online_node(int node)
{
	int min_dist = INT_MAX, dist, n, min_node;

	if (node == NUMA_NO_NODE || node_online(node))
		return node;

	min_node = node;
	for_each_online_node(n) {
		dist = node_distance(node, n);
		if (dist < min_dist) {
			min_dist = dist;
			min_node = n;
		}
	}

	return min_node;
}
EXPORT_SYMBOL_GPL(numa_map_to_online_node);
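
/*
 * Illustrative sketch (an assumption, not taken from this file): a caller
 * holding a possibly-offline node hint, e.g. from dev_to_node(), could map
 * it to the nearest online node before allocating:
 *
 *	int nid = numa_map_to_online_node(dev_to_node(dev));
 *	struct page *page = __alloc_pages_node(nid, GFP_KERNEL, 0);
 */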

struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_lock for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}

/*
 * This function just creates a new policy, does some basic checks and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes) ||
		    (flags & MPOL_F_STATIC_NODES) ||
		    (flags & MPOL_F_RELATIVE_NODES))
			return ERR_PTR(-EINVAL);
		mode = MPOL_PREFERRED;
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}

static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
								*nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	pol->v.nodes = tmp;
}

static void mpol_rebind_preferred(struct mempolicy *pol,
						const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * Per-vma policies are protected by mmap_lock. Allocations using per-task
 * policies are protected by task->mems_allowed_seq to prevent a premature
 * OOM/allocation failure due to parallel nodemask modification.
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) && !(pol->flags & MPOL_F_LOCAL) &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	mpol_ops[pol->mode].rebind(pol, newmask);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_lock during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	mmap_write_lock(mm);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new);
	mmap_write_unlock(mm);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

struct queue_pages {
	struct list_head *pagelist;
	unsigned long flags;
	nodemask_t *nmask;
	unsigned long start;
	unsigned long end;
	struct vm_area_struct *first;
};

/*
 * Check if the page's nid is in qp->nmask.
 *
 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
 * in the invert of qp->nmask.
 */
static inline bool queue_pages_required(struct page *page,
					struct queue_pages *qp)
{
	int nid = page_to_nid(page);
	unsigned long flags = qp->flags;

	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
}

/*
 * queue_pages_pmd() has four possible return values:
 * 0 - pages are placed on the right node or queued successfully.
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * 2 - THP was split.
 * -EIO - is a migration entry, or only MPOL_MF_STRICT was specified and an
 *        existing page was already on a node that does not follow the
 *        policy.
 */
static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
	__releases(ptl)
{
	int ret = 0;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags;

	if (unlikely(is_pmd_migration_entry(*pmd))) {
		ret = -EIO;
		goto unlock;
	}
	page = pmd_page(*pmd);
	if (is_huge_zero_page(page)) {
		spin_unlock(ptl);
		__split_huge_pmd(walk->vma, pmd, addr, false, NULL);
		ret = 2;
		goto out;
	}
	if (!queue_pages_required(page, qp))
		goto unlock;

	flags = qp->flags;
	/* go to thp migration */
	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
		if (!vma_migratable(walk->vma) ||
		    migrate_page_add(page, qp->pagelist, flags)) {
			ret = 1;
			goto unlock;
		}
	} else
		ret = -EIO;
unlock:
	spin_unlock(ptl);
out:
	return ret;
}

/*
 * Scan through pages checking if pages follow certain conditions,
 * and move them to the pagelist if they do.
 *
 * queue_pages_pte_range() has three possible return values:
 * 0 - pages are placed on the right node or queued successfully.
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
 *        on a node that does not follow the policy.
 */
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	int ret;
	bool has_unmovable = false;
	pte_t *pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
		if (ret != 2)
			return ret;
	}
	/* THP was split, fall through to pte walk */

	if (pmd_trans_unstable(pmd))
		return 0;

	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		if (!queue_pages_required(page, qp))
			continue;
		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
			/* MPOL_MF_STRICT must be specified if we get here */
			if (!vma_migratable(vma)) {
				has_unmovable = true;
				break;
			}

			/*
			 * Do not abort immediately since there may be
			 * temporary off LRU pages in the range.  Still
			 * need migrate other LRU pages.
			 */
			if (migrate_page_add(page, qp->pagelist, flags))
				has_unmovable = true;
		} else
			break;
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	if (has_unmovable)
		return 1;

	return addr != end ? -EIO : 0;
}

static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
			       unsigned long addr, unsigned long end,
			       struct mm_walk *walk)
{
	int ret = 0;
#ifdef CONFIG_HUGETLB_PAGE
	struct queue_pages *qp = walk->private;
	unsigned long flags = (qp->flags & MPOL_MF_VALID);
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto unlock;
	page = pte_page(entry);
	if (!queue_pages_required(page, qp))
		goto unlock;

	if (flags == MPOL_MF_STRICT) {
		/*
		 * STRICT alone means only detecting misplaced page and no
		 * need to further check other vma.
		 */
		ret = -EIO;
		goto unlock;
	}

	if (!vma_migratable(walk->vma)) {
		/*
		 * Must be STRICT with MOVE*, otherwise .test_walk() would
		 * have stopped walking the current vma.
		 * Detect the misplaced page but allow migrating pages which
		 * have been queued.
		 */
		ret = 1;
		goto unlock;
	}

	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) {
		if (!isolate_huge_page(page, qp->pagelist) &&
		    (flags & MPOL_MF_STRICT))
			/*
			 * Failed to isolate page but allow migrating pages
			 * which have been queued.
			 */
			ret = 1;
	}
unlock:
	spin_unlock(ptl);
#else
	BUG();
#endif
	return ret;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	int nr_updated;

	nr_updated = change_protection(vma, addr, end, PAGE_NONE, MM_CP_PROT_NUMA);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

static int queue_pages_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct queue_pages *qp = walk->private;
	unsigned long endvma = vma->vm_end;
	unsigned long flags = qp->flags;

	/* range check first */
	VM_BUG_ON_VMA((vma->vm_start > start) || (vma->vm_end < end), vma);

	if (!qp->first) {
		qp->first = vma;
		if (!(flags & MPOL_MF_DISCONTIG_OK) &&
			(qp->start < vma->vm_start))
			/* hole at head side of range */
			return -EFAULT;
	}
	if (!(flags & MPOL_MF_DISCONTIG_OK) &&
		((vma->vm_end < qp->end) &&
		(!vma->vm_next || vma->vm_end < vma->vm_next->vm_start)))
		/* hole at middle or tail of range */
		return -EFAULT;

	/*
	 * Need to check MPOL_MF_STRICT to return -EIO if possible
	 * regardless of vma_migratable
	 */
	if (!vma_migratable(vma) &&
	    !(flags & MPOL_MF_STRICT))
		return 1;

	if (endvma > end)
		endvma = end;

	if (flags & MPOL_MF_LAZY) {
		/* Similar to task_numa_work, skip inaccessible VMAs */
		if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
			!(vma->vm_flags & VM_MIXEDMAP))
			change_prot_numa(vma, start, endvma);
		return 1;
	}

	/* queue pages from current vma */
	if (flags & MPOL_MF_VALID)
		return 0;
	return 1;
}

static const struct mm_walk_ops queue_pages_walk_ops = {
	.hugetlb_entry		= queue_pages_hugetlb,
	.pmd_entry		= queue_pages_pte_range,
	.test_walk		= queue_pages_test_walk,
};

/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued to the pagelist which is
 * passed via @private.
 *
 * queue_pages_range() has three possible return values:
 * 1 - there is an unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * 0 - queue pages successfully or no misplaced page.
 * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
 *         memory range specified by nodemask and maxnode points outside
 *         your accessible address space (-EFAULT)
 */
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		nodemask_t *nodes, unsigned long flags,
		struct list_head *pagelist)
{
	int err;
	struct queue_pages qp = {
		.pagelist = pagelist,
		.flags = flags,
		.nmask = nodes,
		.start = start,
		.end = end,
		.first = NULL,
	};

	err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);

	if (!qp.first)
		/* whole range in hole */
		err = -EFAULT;

	return err;
}

/*
 * Apply policy to a single VMA
 * This must be called with the mmap_lock held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
						struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new; /* protected by mmap_lock */
	mpol_put(old);

	return 0;
 err_out:
	mpol_put(new);
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	VM_BUG_ON(!vma);

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				 vma->anon_vma, vma->vm_file, pgoff,
				 new_pol, vma->vm_userfaultfd_ctx);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			if (mpol_equal(vma_policy(vma), new_pol))
				continue;
			/* vma_merge() joined vma && vma->next, case 8 */
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
 replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}

	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		mpol_put(new);
		goto out;
	}
	old = current->mempolicy;
	current->mempolicy = new;
	if (new && new->mode == MPOL_INTERLEAVE)
		current->il_prev = MAX_NUMNODES-1;
	task_unlock(current);
	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p = NULL;
	int err;

	int locked = 1;
	err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
	if (err > 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	if (locked)
		mmap_read_unlock(mm);
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		mmap_read_lock(mm);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			mmap_read_unlock(mm);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			/*
			 * Take a refcount on the mpol, lookup_node()
			 * will drop the mmap_lock, so after calling
			 * lookup_node() only "pol" remains valid, "vma"
			 * is stale.
			 */
			pol_refcount = pol;
			vma = NULL;
			mpol_get(pol);
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = next_node_in(current->il_prev, pol->v.nodes);
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
			pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		mmap_read_unlock(mm);
	if (pol_refcount)
		mpol_put(pol_refcount);
	return err;
}

#ifdef CONFIG_MIGRATION
/*
 * page migration, thp tail pages can be passed.
 */
static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	struct page *head = compound_head(page);
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
		if (!isolate_lru_page(head)) {
			list_add_tail(&head->lru, pagelist);
			mod_node_page_state(page_pgdat(head),
				NR_ISOLATED_ANON + page_is_file_lru(head),
				hpage_nr_pages(head));
		} else if (flags & MPOL_MF_STRICT) {
			/*
			 * Non-movable page may reach here.  And, there may be
			 * temporary off LRU pages or non-LRU movable pages.
			 * Treat them as unmovable pages since they can't be
			 * isolated, so they can't be moved at the moment.  It
			 * should return -EIO for this case too.
			 */
			return -EIO;
		}
	}

	return 0;
}

/* page allocation callback for NUMA node migration */
struct page *alloc_new_node_page(struct page *page, unsigned long node)
{
	if (PageHuge(page))
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					node);
	else if (PageTransHuge(page)) {
		struct page *thp;

		thp = alloc_pages_node(node,
			(GFP_TRANSHUGE | __GFP_THISNODE),
			HPAGE_PMD_ORDER);
		if (!thp)
			return NULL;
		prep_transhuge_page(thp);
		return thp;
	} else
		return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
						    __GFP_THISNODE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration.  Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
	 */
	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, alloc_new_node_page, NULL, dest,
					MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
Andrew Morton0ce72d42012-05-29 15:06:24 -07001128int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1129 const nodemask_t *to, int flags)
Christoph Lameter39743882006-01-08 01:00:51 -08001130{
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001131 int busy = 0;
Christoph Lameter0aedadf2008-11-06 12:53:30 -08001132 int err;
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001133 nodemask_t tmp;
Christoph Lameter39743882006-01-08 01:00:51 -08001134
Christoph Lameter0aedadf2008-11-06 12:53:30 -08001135 err = migrate_prep();
1136 if (err)
1137 return err;
1138
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001139 mmap_read_lock(mm);
Christoph Lameter39743882006-01-08 01:00:51 -08001140
KOSAKI Motohiroda0aa132010-03-05 13:41:59 -08001141 /*
1142 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
1143 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
1144 * bit in 'tmp', and return that <source, dest> pair for migration.
1145 * The pair of nodemasks 'to' and 'from' define the map.
1146 *
1147 * If no pair of bits is found that way, fallback to picking some
1148 * pair of 'source' and 'dest' bits that are not the same. If the
1149 * 'source' and 'dest' bits are the same, this represents a node
1150 * that will be migrating to itself, so no pages need move.
1151 *
1152 * If no bits are left in 'tmp', or if all remaining bits left
1153 * in 'tmp' correspond to the same bit in 'to', return false
1154 * (nothing left to migrate).
1155 *
1156 * This lets us pick a pair of nodes to migrate between, such that
1157 * if possible the dest node is not already occupied by some other
1158 * source node, minimizing the risk of overloading the memory on a
1159 * node that would happen if we migrated incoming memory to a node
1160 * before migrating outgoing memory source that same node.
1161 *
1162 * A single scan of tmp is sufficient. As we go, we remember the
1163 * most recent <s, d> pair that moved (s != d). If we find a pair
1164 * that not only moved, but what's better, moved to an empty slot
1165 * (d is not set in tmp), then we break out then, with that pair.
Justin P. Mattockae0e47f2011-03-01 15:06:02 +01001166 * Otherwise when we finish scanning from_tmp, we at least have the
KOSAKI Motohiroda0aa132010-03-05 13:41:59 -08001167 * most recent <s, d> pair that moved. If we get all the way through
1168 * the scan of tmp without finding any node that moved, much less
1169 * moved to an empty node, then there is nothing left worth migrating.
1170 */
Christoph Lameterd4984712006-01-08 01:00:55 -08001171
Andrew Morton0ce72d42012-05-29 15:06:24 -07001172 tmp = *from;
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001173 while (!nodes_empty(tmp)) {
1174 int s, d;
Jianguo Wub76ac7e2013-11-12 15:07:39 -08001175 int source = NUMA_NO_NODE;
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001176 int dest = 0;
1177
1178 for_each_node_mask(s, tmp) {
Larry Woodman4a5b18c2012-05-29 15:06:24 -07001179
1180 /*
1181 * do_migrate_pages() tries to maintain the relative
1182 * node relationship of the pages established between
1183 * threads and memory areas.
1184 *
1185 * However if the number of source nodes is not equal to
1186 * the number of destination nodes we can not preserve
1187 * this node relative relationship. In that case, skip
1188 * copying memory from a node that is in the destination
1189 * mask.
1190 *
1191 * Example: [2,3,4] -> [3,4,5] moves everything.
1192 * [0-7] -> [3,4,5] moves only 0,1,2,6,7.
1193 */
1194
Andrew Morton0ce72d42012-05-29 15:06:24 -07001195 if ((nodes_weight(*from) != nodes_weight(*to)) &&
1196 (node_isset(s, *to)))
Larry Woodman4a5b18c2012-05-29 15:06:24 -07001197 continue;
1198
Andrew Morton0ce72d42012-05-29 15:06:24 -07001199 d = node_remap(s, *from, *to);
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001200 if (s == d)
1201 continue;
1202
1203 source = s; /* Node moved. Memorize */
1204 dest = d;
1205
1206 /* dest not in remaining from nodes? */
1207 if (!node_isset(dest, tmp))
1208 break;
1209 }
Jianguo Wub76ac7e2013-11-12 15:07:39 -08001210 if (source == NUMA_NO_NODE)
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001211 break;
1212
1213 node_clear(source, tmp);
1214 err = migrate_to_node(mm, source, dest, flags);
1215 if (err > 0)
1216 busy += err;
1217 if (err < 0)
1218 break;
Christoph Lameter39743882006-01-08 01:00:51 -08001219 }
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001220 mmap_read_unlock(mm);
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001221 if (err < 0)
1222 return err;
1223 return busy;
Christoph Lameterb20a3502006-03-22 00:09:12 -08001224
Christoph Lameter39743882006-01-08 01:00:51 -08001225}
1226
Lee Schermerhorn3ad33b242007-11-14 16:59:10 -08001227/*
1228 * Allocate a new page for page migration based on vma policy.
Hugh Dickinsd05f0cdcb2014-06-23 13:22:07 -07001229 * Start by assuming the page is mapped by the same vma that contains @start.
Lee Schermerhorn3ad33b242007-11-14 16:59:10 -08001230 * Search forward from there, if not. N.B., this assumes that the
1231 * list of pages handed to migrate_pages()--which is how we get here--
1232 * is in virtual address order.
1233 */
Michal Hocko666feb22018-04-10 16:30:03 -07001234static struct page *new_page(struct page *page, unsigned long start)
Christoph Lameter95a402c2006-06-23 02:03:53 -07001235{
Hugh Dickinsd05f0cdcb2014-06-23 13:22:07 -07001236 struct vm_area_struct *vma;
Lee Schermerhorn3ad33b242007-11-14 16:59:10 -08001237 unsigned long uninitialized_var(address);
Christoph Lameter95a402c2006-06-23 02:03:53 -07001238
Hugh Dickinsd05f0cdcb2014-06-23 13:22:07 -07001239 vma = find_vma(current->mm, start);
Lee Schermerhorn3ad33b242007-11-14 16:59:10 -08001240 while (vma) {
1241 address = page_address_in_vma(page, vma);
1242 if (address != -EFAULT)
1243 break;
1244 vma = vma->vm_next;
1245 }
1246
Wanpeng Li11c731e2013-12-18 17:08:56 -08001247 if (PageHuge(page)) {
Michal Hocko389c8172018-01-31 16:21:03 -08001248 return alloc_huge_page_vma(page_hstate(compound_head(page)),
1249 vma, address);
Michal Hocko94723aa2018-04-10 16:30:07 -07001250 } else if (PageTransHuge(page)) {
Naoya Horiguchic8633792017-09-08 16:11:08 -07001251 struct page *thp;
1252
David Rientjes19deb762019-09-04 12:54:20 -07001253 thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
1254 HPAGE_PMD_ORDER);
Naoya Horiguchic8633792017-09-08 16:11:08 -07001255 if (!thp)
1256 return NULL;
1257 prep_transhuge_page(thp);
1258 return thp;
Wanpeng Li11c731e2013-12-18 17:08:56 -08001259 }
1260 /*
1261 * if !vma, alloc_page_vma() will use task or system default policy
1262 */
Michal Hocko0f556852017-07-12 14:36:58 -07001263 return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
1264 vma, address);
Christoph Lameter95a402c2006-06-23 02:03:53 -07001265}
Christoph Lameterb20a3502006-03-22 00:09:12 -08001266#else
1267
Yang Shia53190a2019-08-13 15:37:18 -07001268static int migrate_page_add(struct page *page, struct list_head *pagelist,
Christoph Lameterb20a3502006-03-22 00:09:12 -08001269 unsigned long flags)
1270{
Yang Shia53190a2019-08-13 15:37:18 -07001271 return -EIO;
Christoph Lameterb20a3502006-03-22 00:09:12 -08001272}
1273
Andrew Morton0ce72d42012-05-29 15:06:24 -07001274int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1275 const nodemask_t *to, int flags)
Christoph Lameterb20a3502006-03-22 00:09:12 -08001276{
1277 return -ENOSYS;
1278}
Christoph Lameter95a402c2006-06-23 02:03:53 -07001279
Michal Hocko666feb22018-04-10 16:30:03 -07001280static struct page *new_page(struct page *page, unsigned long start)
Christoph Lameter95a402c2006-06-23 02:03:53 -07001281{
1282 return NULL;
1283}
Christoph Lameterb20a3502006-03-22 00:09:12 -08001284#endif
1285
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07001286static long do_mbind(unsigned long start, unsigned long len,
David Rientjes028fec42008-04-28 02:12:25 -07001287 unsigned short mode, unsigned short mode_flags,
1288 nodemask_t *nmask, unsigned long flags)
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001289{
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001290 struct mm_struct *mm = current->mm;
1291 struct mempolicy *new;
1292 unsigned long end;
1293 int err;
Yang Shid8835442019-08-13 15:37:15 -07001294 int ret;
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001295 LIST_HEAD(pagelist);
1296
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001297 if (flags & ~(unsigned long)MPOL_MF_VALID)
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001298 return -EINVAL;
Christoph Lameter74c00242006-03-14 19:50:21 -08001299 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001300 return -EPERM;
1301
1302 if (start & ~PAGE_MASK)
1303 return -EINVAL;
1304
1305 if (mode == MPOL_DEFAULT)
1306 flags &= ~MPOL_MF_STRICT;
1307
1308 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1309 end = start + len;
1310
1311 if (end < start)
1312 return -EINVAL;
1313 if (end == start)
1314 return 0;
1315
David Rientjes028fec42008-04-28 02:12:25 -07001316 new = mpol_new(mode, mode_flags, nmask);
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001317 if (IS_ERR(new))
1318 return PTR_ERR(new);
1319
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001320 if (flags & MPOL_MF_LAZY)
1321 new->flags |= MPOL_F_MOF;
1322
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001323 /*
1324 * If we are using the default policy, then operations
1325 * on discontinuous address spaces are okay after all
1326 */
1327 if (!new)
1328 flags |= MPOL_MF_DISCONTIG_OK;
1329
David Rientjes028fec42008-04-28 02:12:25 -07001330 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1331 start, start + len, mode, mode_flags,
David Rientjes00ef2d22013-02-22 16:35:36 -08001332 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001333
Christoph Lameter0aedadf2008-11-06 12:53:30 -08001334 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1335
1336 err = migrate_prep();
1337 if (err)
KOSAKI Motohirob05ca732009-10-26 16:49:59 -07001338 goto mpol_out;
Christoph Lameter0aedadf2008-11-06 12:53:30 -08001339 }
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07001340 {
1341 NODEMASK_SCRATCH(scratch);
1342 if (scratch) {
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001343 mmap_write_lock(mm);
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07001344 task_lock(current);
1345 err = mpol_set_nodemask(new, nmask, scratch);
1346 task_unlock(current);
1347 if (err)
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001348 mmap_write_unlock(mm);
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07001349 } else
1350 err = -ENOMEM;
1351 NODEMASK_SCRATCH_FREE(scratch);
1352 }
KOSAKI Motohirob05ca732009-10-26 16:49:59 -07001353 if (err)
1354 goto mpol_out;
1355
Yang Shid8835442019-08-13 15:37:15 -07001356 ret = queue_pages_range(mm, start, end, nmask,
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001357 flags | MPOL_MF_INVERT, &pagelist);
Yang Shid8835442019-08-13 15:37:15 -07001358
1359 if (ret < 0) {
Yang Shia85dfc32019-11-15 17:34:33 -08001360 err = ret;
Yang Shid8835442019-08-13 15:37:15 -07001361 goto up_out;
1362 }
1363
1364 err = mbind_range(mm, start, end, new);
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001365
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001366 if (!err) {
1367 int nr_failed = 0;
1368
Minchan Kimcf608ac2010-10-26 14:21:29 -07001369 if (!list_empty(&pagelist)) {
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001370 WARN_ON_ONCE(flags & MPOL_MF_LAZY);
Hugh Dickinsd05f0cdcb2014-06-23 13:22:07 -07001371 nr_failed = migrate_pages(&pagelist, new_page, NULL,
1372 start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
Minchan Kimcf608ac2010-10-26 14:21:29 -07001373 if (nr_failed)
Naoya Horiguchi74060e42013-09-11 14:22:06 -07001374 putback_movable_pages(&pagelist);
Minchan Kimcf608ac2010-10-26 14:21:29 -07001375 }
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001376
Yang Shid8835442019-08-13 15:37:15 -07001377 if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001378 err = -EIO;
Yang Shia85dfc32019-11-15 17:34:33 -08001379 } else {
Yang Shid8835442019-08-13 15:37:15 -07001380up_out:
Yang Shia85dfc32019-11-15 17:34:33 -08001381 if (!list_empty(&pagelist))
1382 putback_movable_pages(&pagelist);
1383 }
1384
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001385 mmap_write_unlock(mm);
Yang Shid8835442019-08-13 15:37:15 -07001386mpol_out:
Lee Schermerhornf0be3d32008-04-28 02:13:08 -07001387 mpol_put(new);
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001388 return err;
1389}
1390
Christoph Lameter39743882006-01-08 01:00:51 -08001391/*
Christoph Lameter8bccd852005-10-29 18:16:59 -07001392 * User space interface with variable sized bitmaps for nodelists.
1393 */
1394
1395/* Copy a node mask from user space. */
Christoph Lameter39743882006-01-08 01:00:51 -08001396static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
Christoph Lameter8bccd852005-10-29 18:16:59 -07001397 unsigned long maxnode)
1398{
1399 unsigned long k;
Yisheng Xie56521e72018-01-31 16:16:11 -08001400 unsigned long t;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001401 unsigned long nlongs;
1402 unsigned long endmask;
1403
1404 --maxnode;
1405 nodes_clear(*nodes);
1406 if (maxnode == 0 || !nmask)
1407 return 0;
Andi Kleena9c930b2006-02-20 18:27:59 -08001408 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
Chris Wright636f13c2006-02-17 13:59:36 -08001409 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001410
1411 nlongs = BITS_TO_LONGS(maxnode);
1412 if ((maxnode % BITS_PER_LONG) == 0)
1413 endmask = ~0UL;
1414 else
1415 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1416
Yisheng Xie56521e72018-01-31 16:16:11 -08001417 /*
1418 * When the user specifies more nodes than supported, just check
1419 * that the unsupported part is all zero.
1420 *
1421 * If maxnode has more longs than MAX_NUMNODES, check
1422 * the bits in that area first, and then go on to check
1423 * the remaining bits, which lie at or above MAX_NUMNODES.
1424 * Otherwise, just check bits [MAX_NUMNODES, maxnode).
1425 */
Christoph Lameter8bccd852005-10-29 18:16:59 -07001426 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
Christoph Lameter8bccd852005-10-29 18:16:59 -07001427 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
Christoph Lameter8bccd852005-10-29 18:16:59 -07001428 if (get_user(t, nmask + k))
1429 return -EFAULT;
1430 if (k == nlongs - 1) {
1431 if (t & endmask)
1432 return -EINVAL;
1433 } else if (t)
1434 return -EINVAL;
1435 }
1436 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1437 endmask = ~0UL;
1438 }
1439
Yisheng Xie56521e72018-01-31 16:16:11 -08001440 if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) {
1441 unsigned long valid_mask = endmask;
1442
1443 valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
1444 if (get_user(t, nmask + nlongs - 1))
1445 return -EFAULT;
1446 if (t & valid_mask)
1447 return -EINVAL;
1448 }
1449
Christoph Lameter8bccd852005-10-29 18:16:59 -07001450 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1451 return -EFAULT;
1452 nodes_addr(*nodes)[nlongs-1] &= endmask;
1453 return 0;
1454}
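/*
 * Worked example (illustrative), assuming BITS_PER_LONG == 64 and
 * MAX_NUMNODES larger than 16: a call with maxnode == 17 ends up,
 * after the --maxnode above, with maxnode == 16, nlongs == 1 and
 * endmask == (1UL << 16) - 1, so a single user word is copied and
 * only its low 16 bits are kept.
 */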
1455
1456/* Copy a kernel node mask to user space */
1457static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1458 nodemask_t *nodes)
1459{
1460 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
Ralph Campbell050c17f2019-02-20 22:18:58 -08001461 unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001462
1463 if (copy > nbytes) {
1464 if (copy > PAGE_SIZE)
1465 return -EINVAL;
1466 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1467 return -EFAULT;
1468 copy = nbytes;
1469 }
1470 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1471}
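/*
 * Worked example (illustrative), assuming a 64-bit kernel: with
 * maxnode == 1024 the kernel considers ALIGN(1023, 64) / 8 == 128
 * bytes of the user buffer; if nr_node_ids == 64 then nbytes == 8,
 * so bytes 8..127 of the user buffer are cleared and only the first
 * 8 bytes are copied from the nodemask.
 */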
1472
Dominik Brodowskie7dc9ad62018-03-17 16:12:22 +01001473static long kernel_mbind(unsigned long start, unsigned long len,
1474 unsigned long mode, const unsigned long __user *nmask,
1475 unsigned long maxnode, unsigned int flags)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001476{
1477 nodemask_t nodes;
1478 int err;
David Rientjes028fec42008-04-28 02:12:25 -07001479 unsigned short mode_flags;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001480
Andrey Konovalov057d33892019-09-25 16:48:30 -07001481 start = untagged_addr(start);
David Rientjes028fec42008-04-28 02:12:25 -07001482 mode_flags = mode & MPOL_MODE_FLAGS;
1483 mode &= ~MPOL_MODE_FLAGS;
David Rientjesa3b51e02008-04-28 02:12:23 -07001484 if (mode >= MPOL_MAX)
1485 return -EINVAL;
David Rientjes4c50bc02008-04-28 02:12:30 -07001486 if ((mode_flags & MPOL_F_STATIC_NODES) &&
1487 (mode_flags & MPOL_F_RELATIVE_NODES))
1488 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001489 err = get_nodes(&nodes, nmask, maxnode);
1490 if (err)
1491 return err;
David Rientjes028fec42008-04-28 02:12:25 -07001492 return do_mbind(start, len, mode, mode_flags, &nodes, flags);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001493}
1494
Dominik Brodowskie7dc9ad62018-03-17 16:12:22 +01001495SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1496 unsigned long, mode, const unsigned long __user *, nmask,
1497 unsigned long, maxnode, unsigned int, flags)
1498{
1499 return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1500}
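/*
 * Userspace usage sketch (illustrative): bind a fresh anonymous mapping
 * to node 0 (nodemask bit 0) through the mbind() wrapper declared in
 * <numaif.h>; link with -lnuma.  Error handling is minimal.
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 4UL << 20;
 *		unsigned long nodemask = 1UL << 0;
 *		void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		if (buf == MAP_FAILED)
 *			return 1;
 *		if (mbind(buf, len, MPOL_BIND, &nodemask,
 *			  8 * sizeof(nodemask), MPOL_MF_MOVE | MPOL_MF_STRICT))
 *			perror("mbind");
 *		return 0;
 *	}
 */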
1501
Christoph Lameter8bccd852005-10-29 18:16:59 -07001502/* Set the process memory policy */
Dominik Brodowskiaf03c4a2018-03-17 16:20:01 +01001503static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1504 unsigned long maxnode)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001505{
1506 int err;
1507 nodemask_t nodes;
David Rientjes028fec42008-04-28 02:12:25 -07001508 unsigned short flags;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001509
David Rientjes028fec42008-04-28 02:12:25 -07001510 flags = mode & MPOL_MODE_FLAGS;
1511 mode &= ~MPOL_MODE_FLAGS;
1512 if ((unsigned int)mode >= MPOL_MAX)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001513 return -EINVAL;
David Rientjes4c50bc02008-04-28 02:12:30 -07001514 if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1515 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001516 err = get_nodes(&nodes, nmask, maxnode);
1517 if (err)
1518 return err;
David Rientjes028fec42008-04-28 02:12:25 -07001519 return do_set_mempolicy(mode, flags, &nodes);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001520}
1521
Dominik Brodowskiaf03c4a2018-03-17 16:20:01 +01001522SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1523 unsigned long, maxnode)
1524{
1525 return kernel_set_mempolicy(mode, nmask, maxnode);
1526}
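/*
 * Userspace usage sketch (illustrative): make the calling task's future
 * allocations interleave across nodes 0 and 1 via the set_mempolicy()
 * wrapper declared in <numaif.h>; link with -lnuma.
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned long nodemask = (1UL << 0) | (1UL << 1);
 *
 *		if (set_mempolicy(MPOL_INTERLEAVE, &nodemask,
 *				  8 * sizeof(nodemask)))
 *			perror("set_mempolicy");
 *		return 0;
 *	}
 */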
1527
Dominik Brodowskib6e9b0b2018-03-17 16:00:25 +01001528static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1529 const unsigned long __user *old_nodes,
1530 const unsigned long __user *new_nodes)
Christoph Lameter39743882006-01-08 01:00:51 -08001531{
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001532 struct mm_struct *mm = NULL;
Christoph Lameter39743882006-01-08 01:00:51 -08001533 struct task_struct *task;
Christoph Lameter39743882006-01-08 01:00:51 -08001534 nodemask_t task_nodes;
1535 int err;
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001536 nodemask_t *old;
1537 nodemask_t *new;
1538 NODEMASK_SCRATCH(scratch);
Christoph Lameter39743882006-01-08 01:00:51 -08001539
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001540 if (!scratch)
1541 return -ENOMEM;
Christoph Lameter39743882006-01-08 01:00:51 -08001542
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001543 old = &scratch->mask1;
1544 new = &scratch->mask2;
1545
1546 err = get_nodes(old, old_nodes, maxnode);
Christoph Lameter39743882006-01-08 01:00:51 -08001547 if (err)
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001548 goto out;
1549
1550 err = get_nodes(new, new_nodes, maxnode);
1551 if (err)
1552 goto out;
Christoph Lameter39743882006-01-08 01:00:51 -08001553
1554 /* Find the mm_struct */
Zeng Zhaoming55cfaa32010-12-02 14:31:13 -08001555 rcu_read_lock();
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -07001556 task = pid ? find_task_by_vpid(pid) : current;
Christoph Lameter39743882006-01-08 01:00:51 -08001557 if (!task) {
Zeng Zhaoming55cfaa32010-12-02 14:31:13 -08001558 rcu_read_unlock();
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001559 err = -ESRCH;
1560 goto out;
Christoph Lameter39743882006-01-08 01:00:51 -08001561 }
Christoph Lameter3268c632012-03-21 16:34:06 -07001562 get_task_struct(task);
Christoph Lameter39743882006-01-08 01:00:51 -08001563
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001564 err = -EINVAL;
Christoph Lameter39743882006-01-08 01:00:51 -08001565
1566 /*
Otto Ebeling31367462017-11-15 17:38:14 -08001567 * Check if this process has the right to modify the specified process.
1568 * Use the regular "ptrace_may_access()" checks.
Christoph Lameter39743882006-01-08 01:00:51 -08001569 */
Otto Ebeling31367462017-11-15 17:38:14 -08001570 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
David Howellsc69e8d92008-11-14 10:39:19 +11001571 rcu_read_unlock();
Christoph Lameter39743882006-01-08 01:00:51 -08001572 err = -EPERM;
Christoph Lameter3268c632012-03-21 16:34:06 -07001573 goto out_put;
Christoph Lameter39743882006-01-08 01:00:51 -08001574 }
David Howellsc69e8d92008-11-14 10:39:19 +11001575 rcu_read_unlock();
Christoph Lameter39743882006-01-08 01:00:51 -08001576
1577 task_nodes = cpuset_mems_allowed(task);
1578 /* Is the user allowed to access the target nodes? */
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001579 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
Christoph Lameter39743882006-01-08 01:00:51 -08001580 err = -EPERM;
Christoph Lameter3268c632012-03-21 16:34:06 -07001581 goto out_put;
Christoph Lameter39743882006-01-08 01:00:51 -08001582 }
1583
Yisheng Xie0486a382018-01-31 16:16:15 -08001584 task_nodes = cpuset_mems_allowed(current);
1585 nodes_and(*new, *new, task_nodes);
1586 if (nodes_empty(*new))
Christoph Lameter3268c632012-03-21 16:34:06 -07001587 goto out_put;
Yisheng Xie0486a382018-01-31 16:16:15 -08001588
David Quigley86c3a762006-06-23 02:04:02 -07001589 err = security_task_movememory(task);
1590 if (err)
Christoph Lameter3268c632012-03-21 16:34:06 -07001591 goto out_put;
David Quigley86c3a762006-06-23 02:04:02 -07001592
Christoph Lameter3268c632012-03-21 16:34:06 -07001593 mm = get_task_mm(task);
1594 put_task_struct(task);
Sasha Levinf2a9ef82012-04-25 16:01:52 -07001595
1596 if (!mm) {
Christoph Lameter3268c632012-03-21 16:34:06 -07001597 err = -EINVAL;
Sasha Levinf2a9ef82012-04-25 16:01:52 -07001598 goto out;
1599 }
1600
1601 err = do_migrate_pages(mm, old, new,
1602 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
Christoph Lameter3268c632012-03-21 16:34:06 -07001603
1604 mmput(mm);
1605out:
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001606 NODEMASK_SCRATCH_FREE(scratch);
1607
Christoph Lameter39743882006-01-08 01:00:51 -08001608 return err;
Christoph Lameter3268c632012-03-21 16:34:06 -07001609
1610out_put:
1611 put_task_struct(task);
1612 goto out;
1613
Christoph Lameter39743882006-01-08 01:00:51 -08001614}
1615
Dominik Brodowskib6e9b0b2018-03-17 16:00:25 +01001616SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1617 const unsigned long __user *, old_nodes,
1618 const unsigned long __user *, new_nodes)
1619{
1620 return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1621}
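/*
 * Userspace usage sketch (illustrative): move all pages of another
 * process from node 0 to node 1 via the migrate_pages() wrapper declared
 * in <numaif.h>; link with -lnuma.  Privilege checks are done above
 * (ptrace access to the target and, for nodes outside its cpuset,
 * CAP_SYS_NICE).  A non-negative return value is the number of pages
 * that could not be moved.
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *
 *	long move_all_to_node1(int pid)
 *	{
 *		unsigned long old_nodes = 1UL << 0;
 *		unsigned long new_nodes = 1UL << 1;
 *		long ret = migrate_pages(pid, 8 * sizeof(unsigned long),
 *					 &old_nodes, &new_nodes);
 *
 *		if (ret < 0)
 *			perror("migrate_pages");
 *		return ret;
 *	}
 */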
1622
Christoph Lameter39743882006-01-08 01:00:51 -08001623
Christoph Lameter8bccd852005-10-29 18:16:59 -07001624/* Retrieve NUMA policy */
Dominik Brodowskiaf03c4a2018-03-17 16:20:01 +01001625static int kernel_get_mempolicy(int __user *policy,
1626 unsigned long __user *nmask,
1627 unsigned long maxnode,
1628 unsigned long addr,
1629 unsigned long flags)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001630{
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07001631 int err;
1632 int uninitialized_var(pval);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001633 nodemask_t nodes;
1634
Andrey Konovalov057d33892019-09-25 16:48:30 -07001635 addr = untagged_addr(addr);
1636
Ralph Campbell050c17f2019-02-20 22:18:58 -08001637 if (nmask != NULL && maxnode < nr_node_ids)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001638 return -EINVAL;
1639
1640 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1641
1642 if (err)
1643 return err;
1644
1645 if (policy && put_user(pval, policy))
1646 return -EFAULT;
1647
1648 if (nmask)
1649 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1650
1651 return err;
1652}
1653
Dominik Brodowskiaf03c4a2018-03-17 16:20:01 +01001654SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1655 unsigned long __user *, nmask, unsigned long, maxnode,
1656 unsigned long, addr, unsigned long, flags)
1657{
1658 return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1659}
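/*
 * Userspace usage sketch (illustrative): query which node currently
 * backs a given address via the get_mempolicy() wrapper declared in
 * <numaif.h>; link with -lnuma.  With MPOL_F_NODE | MPOL_F_ADDR the
 * first argument returns a node id rather than a policy mode.
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *
 *	int node_of(void *addr)
 *	{
 *		int node = -1;
 *
 *		if (get_mempolicy(&node, NULL, 0, addr,
 *				  MPOL_F_NODE | MPOL_F_ADDR))
 *			perror("get_mempolicy");
 *		return node;
 *	}
 */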
1660
Linus Torvalds1da177e2005-04-16 15:20:36 -07001661#ifdef CONFIG_COMPAT
1662
Heiko Carstensc93e0f62014-03-03 16:32:26 +01001663COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1664 compat_ulong_t __user *, nmask,
1665 compat_ulong_t, maxnode,
1666 compat_ulong_t, addr, compat_ulong_t, flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001667{
1668 long err;
1669 unsigned long __user *nm = NULL;
1670 unsigned long nr_bits, alloc_size;
1671 DECLARE_BITMAP(bm, MAX_NUMNODES);
1672
Ralph Campbell050c17f2019-02-20 22:18:58 -08001673 nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001674 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1675
1676 if (nmask)
1677 nm = compat_alloc_user_space(alloc_size);
1678
Dominik Brodowskiaf03c4a2018-03-17 16:20:01 +01001679 err = kernel_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680
1681 if (!err && nmask) {
KAMEZAWA Hiroyuki2bbff6c2011-09-14 16:21:02 -07001682 unsigned long copy_size;
1683 copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1684 err = copy_from_user(bm, nm, copy_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001685 /* ensure entire bitmap is zeroed */
1686 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1687 err |= compat_put_bitmap(nmask, bm, nr_bits);
1688 }
1689
1690 return err;
1691}
1692
Heiko Carstensc93e0f62014-03-03 16:32:26 +01001693COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1694 compat_ulong_t, maxnode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001695{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696 unsigned long __user *nm = NULL;
1697 unsigned long nr_bits, alloc_size;
1698 DECLARE_BITMAP(bm, MAX_NUMNODES);
1699
1700 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1701 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1702
1703 if (nmask) {
Chris Sallscf01fb92017-04-07 23:48:11 -07001704 if (compat_get_bitmap(bm, nmask, nr_bits))
1705 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706 nm = compat_alloc_user_space(alloc_size);
Chris Sallscf01fb92017-04-07 23:48:11 -07001707 if (copy_to_user(nm, bm, alloc_size))
1708 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709 }
1710
Dominik Brodowskiaf03c4a2018-03-17 16:20:01 +01001711 return kernel_set_mempolicy(mode, nm, nr_bits+1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001712}
1713
Heiko Carstensc93e0f62014-03-03 16:32:26 +01001714COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1715 compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1716 compat_ulong_t, maxnode, compat_ulong_t, flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718 unsigned long __user *nm = NULL;
1719 unsigned long nr_bits, alloc_size;
Andi Kleendfcd3c02005-10-29 18:15:48 -07001720 nodemask_t bm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721
1722 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1723 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1724
1725 if (nmask) {
Chris Sallscf01fb92017-04-07 23:48:11 -07001726 if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
1727 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728 nm = compat_alloc_user_space(alloc_size);
Chris Sallscf01fb92017-04-07 23:48:11 -07001729 if (copy_to_user(nm, nodes_addr(bm), alloc_size))
1730 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001731 }
1732
Dominik Brodowskie7dc9ad62018-03-17 16:12:22 +01001733 return kernel_mbind(start, len, mode, nm, nr_bits+1, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001734}
1735
Dominik Brodowskib6e9b0b2018-03-17 16:00:25 +01001736COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid,
1737 compat_ulong_t, maxnode,
1738 const compat_ulong_t __user *, old_nodes,
1739 const compat_ulong_t __user *, new_nodes)
1740{
1741 unsigned long __user *old = NULL;
1742 unsigned long __user *new = NULL;
1743 nodemask_t tmp_mask;
1744 unsigned long nr_bits;
1745 unsigned long size;
1746
1747 nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
1748 size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1749 if (old_nodes) {
1750 if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
1751 return -EFAULT;
1752 old = compat_alloc_user_space(new_nodes ? size * 2 : size);
1753 if (new_nodes)
1754 new = old + size / sizeof(unsigned long);
1755 if (copy_to_user(old, nodes_addr(tmp_mask), size))
1756 return -EFAULT;
1757 }
1758 if (new_nodes) {
1759 if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
1760 return -EFAULT;
1761 if (new == NULL)
1762 new = compat_alloc_user_space(size);
1763 if (copy_to_user(new, nodes_addr(tmp_mask), size))
1764 return -EFAULT;
1765 }
1766 return kernel_migrate_pages(pid, nr_bits + 1, old, new);
1767}
1768
1769#endif /* CONFIG_COMPAT */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770
Li Xinhai20ca87f2020-04-01 21:10:52 -07001771bool vma_migratable(struct vm_area_struct *vma)
1772{
1773 if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1774 return false;
1775
1776 /*
1777 * DAX device mappings require predictable access latency, so avoid
1778 * incurring periodic faults.
1779 */
1780 if (vma_is_dax(vma))
1781 return false;
1782
1783 if (is_vm_hugetlb_page(vma) &&
1784 !hugepage_migration_supported(hstate_vma(vma)))
1785 return false;
1786
1787 /*
1788 * Migration allocates pages in the highest zone. If we cannot
1789 * do so then migration (at least from node to node) is not
1790 * possible.
1791 */
1792 if (vma->vm_file &&
1793 gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
1794 < policy_zone)
1795 return false;
1796 return true;
1797}
1798
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001799struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1800 unsigned long addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801{
Oleg Nesterov8d902742014-10-09 15:27:45 -07001802 struct mempolicy *pol = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803
1804 if (vma) {
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001805 if (vma->vm_ops && vma->vm_ops->get_policy) {
Oleg Nesterov8d902742014-10-09 15:27:45 -07001806 pol = vma->vm_ops->get_policy(vma, addr);
Mel Gorman00442ad2012-10-08 16:29:20 -07001807 } else if (vma->vm_policy) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808 pol = vma->vm_policy;
Mel Gorman00442ad2012-10-08 16:29:20 -07001809
1810 /*
1811 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1812 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1813 * count on these policies which will be dropped by
1814 * mpol_cond_put() later
1815 */
1816 if (mpol_needs_cond_ref(pol))
1817 mpol_get(pol);
1818 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001819 }
Oleg Nesterovf15ca782014-10-09 15:27:43 -07001820
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001821 return pol;
1822}
1823
1824/*
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001825 * get_vma_policy(@vma, @addr)
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001826 * @vma: virtual memory area whose policy is sought
1827 * @addr: address in @vma for shared policy lookup
1828 *
1829 * Returns effective policy for a VMA at specified address.
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001830 * Falls back to current->mempolicy or system default policy, as necessary.
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001831 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1832 * count--added by the get_policy() vm_op, as appropriate--to protect against
1833 * freeing by another task. It is the caller's responsibility to free the
1834 * extra reference for shared policies.
1835 */
David Rientjesac79f782019-09-04 12:54:18 -07001836static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001837 unsigned long addr)
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001838{
1839 struct mempolicy *pol = __get_vma_policy(vma, addr);
1840
Oleg Nesterov8d902742014-10-09 15:27:45 -07001841 if (!pol)
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001842 pol = get_task_policy(current);
Oleg Nesterov8d902742014-10-09 15:27:45 -07001843
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844 return pol;
1845}
1846
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001847bool vma_policy_mof(struct vm_area_struct *vma)
Mel Gormanfc3147242013-10-07 11:29:09 +01001848{
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001849 struct mempolicy *pol;
Oleg Nesterovf15ca782014-10-09 15:27:43 -07001850
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001851 if (vma->vm_ops && vma->vm_ops->get_policy) {
1852 bool ret = false;
Mel Gormanfc3147242013-10-07 11:29:09 +01001853
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001854 pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1855 if (pol && (pol->flags & MPOL_F_MOF))
1856 ret = true;
1857 mpol_cond_put(pol);
Mel Gormanfc3147242013-10-07 11:29:09 +01001858
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001859 return ret;
Mel Gormanfc3147242013-10-07 11:29:09 +01001860 }
1861
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001862 pol = vma->vm_policy;
Oleg Nesterov8d902742014-10-09 15:27:45 -07001863 if (!pol)
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001864 pol = get_task_policy(current);
Oleg Nesterov8d902742014-10-09 15:27:45 -07001865
Mel Gormanfc3147242013-10-07 11:29:09 +01001866 return pol->flags & MPOL_F_MOF;
1867}
1868
Lai Jiangshand3eb1572013-02-22 16:33:22 -08001869static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1870{
1871 enum zone_type dynamic_policy_zone = policy_zone;
1872
1873 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1874
1875 /*
1876 * If policy->v.nodes has movable memory only,
1877 * we apply the policy only when gfp_zone(gfp) is ZONE_MOVABLE.
1878 *
1879 * policy->v.nodes is intersected with node_states[N_MEMORY],
1880 * so if the following test fails, it implies
1881 * policy->v.nodes has movable memory only.
1882 */
1883 if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1884 dynamic_policy_zone = ZONE_MOVABLE;
1885
1886 return zone >= dynamic_policy_zone;
1887}
1888
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001889/*
1890 * Return a nodemask representing a mempolicy for filtering nodes for
1891 * page allocation
1892 */
1893static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
Mel Gorman19770b32008-04-28 02:12:18 -07001894{
1895 /* Lower zones don't get a nodemask applied for MPOL_BIND */
Lee Schermerhorn45c47452008-04-28 02:13:12 -07001896 if (unlikely(policy->mode == MPOL_BIND) &&
Lai Jiangshand3eb1572013-02-22 16:33:22 -08001897 apply_policy_zone(policy, gfp_zone(gfp)) &&
Mel Gorman19770b32008-04-28 02:12:18 -07001898 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1899 return &policy->v.nodes;
1900
1901 return NULL;
1902}
1903
Vlastimil Babka04ec6262017-07-06 15:40:03 -07001904/* Return the node id preferred by the given mempolicy, or the given id */
1905static int policy_node(gfp_t gfp, struct mempolicy *policy,
1906 int nd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907{
Michal Hocko6d840952016-12-12 16:42:23 -08001908 if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
1909 nd = policy->v.preferred_node;
1910 else {
Mel Gorman19770b32008-04-28 02:12:18 -07001911 /*
Michal Hocko6d840952016-12-12 16:42:23 -08001912 * __GFP_THISNODE shouldn't even be used with the bind policy
1913 * because we might easily break the expectation to stay on the
1914 * requested node and not break the policy.
Mel Gorman19770b32008-04-28 02:12:18 -07001915 */
Michal Hocko6d840952016-12-12 16:42:23 -08001916 WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917 }
Michal Hocko6d840952016-12-12 16:42:23 -08001918
Vlastimil Babka04ec6262017-07-06 15:40:03 -07001919 return nd;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001920}
1921
1922/* Do dynamic interleaving for a process */
1923static unsigned interleave_nodes(struct mempolicy *policy)
1924{
Vlastimil Babka45816682017-07-06 15:39:59 -07001925 unsigned next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926 struct task_struct *me = current;
1927
Vlastimil Babka45816682017-07-06 15:39:59 -07001928 next = next_node_in(me->il_prev, policy->v.nodes);
David Rientjesf5b087b2008-04-28 02:12:27 -07001929 if (next < MAX_NUMNODES)
Vlastimil Babka45816682017-07-06 15:39:59 -07001930 me->il_prev = next;
1931 return next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932}
1933
Christoph Lameterdc85da12006-01-18 17:42:36 -08001934/*
1935 * Depending on the memory policy provide a node from which to allocate the
1936 * next slab entry.
1937 */
David Rientjes2a389612014-04-07 15:37:29 -07001938unsigned int mempolicy_slab_node(void)
Christoph Lameterdc85da12006-01-18 17:42:36 -08001939{
Andi Kleene7b691b2012-06-09 02:40:03 -07001940 struct mempolicy *policy;
David Rientjes2a389612014-04-07 15:37:29 -07001941 int node = numa_mem_id();
Andi Kleene7b691b2012-06-09 02:40:03 -07001942
1943 if (in_interrupt())
David Rientjes2a389612014-04-07 15:37:29 -07001944 return node;
Andi Kleene7b691b2012-06-09 02:40:03 -07001945
1946 policy = current->mempolicy;
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07001947 if (!policy || policy->flags & MPOL_F_LOCAL)
David Rientjes2a389612014-04-07 15:37:29 -07001948 return node;
Christoph Lameter765c4502006-09-27 01:50:08 -07001949
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001950 switch (policy->mode) {
1951 case MPOL_PREFERRED:
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07001952 /*
1953 * handled MPOL_F_LOCAL above
1954 */
1955 return policy->v.preferred_node;
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001956
Christoph Lameterdc85da12006-01-18 17:42:36 -08001957 case MPOL_INTERLEAVE:
1958 return interleave_nodes(policy);
1959
Mel Gormandd1a2392008-04-28 02:12:17 -07001960 case MPOL_BIND: {
Mel Gormanc33d6c02016-05-19 17:14:10 -07001961 struct zoneref *z;
1962
Christoph Lameterdc85da12006-01-18 17:42:36 -08001963 /*
1964 * Follow bind policy behavior and start allocation at the
1965 * first node.
1966 */
Mel Gorman19770b32008-04-28 02:12:18 -07001967 struct zonelist *zonelist;
Mel Gorman19770b32008-04-28 02:12:18 -07001968 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
Aneesh Kumar K.Vc9634cf2016-10-07 16:59:12 -07001969 zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
Mel Gormanc33d6c02016-05-19 17:14:10 -07001970 z = first_zones_zonelist(zonelist, highest_zoneidx,
1971 &policy->v.nodes);
Pavel Tatashinc1093b72018-08-21 21:53:32 -07001972 return z->zone ? zone_to_nid(z->zone) : node;
Mel Gormandd1a2392008-04-28 02:12:17 -07001973 }
Christoph Lameterdc85da12006-01-18 17:42:36 -08001974
Christoph Lameterdc85da12006-01-18 17:42:36 -08001975 default:
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001976 BUG();
Christoph Lameterdc85da12006-01-18 17:42:36 -08001977 }
1978}
1979
Andrew Mortonfee83b32016-05-19 17:11:43 -07001980/*
1981 * Do static interleaving for a VMA with known offset @n. Returns the n'th
1982 * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
1983 * number of present nodes.
1984 */
Laurent Dufour98c70ba2017-09-08 16:12:39 -07001985static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986{
Andi Kleendfcd3c02005-10-29 18:15:48 -07001987 unsigned nnodes = nodes_weight(pol->v.nodes);
David Rientjesf5b087b2008-04-28 02:12:27 -07001988 unsigned target;
Andrew Mortonfee83b32016-05-19 17:11:43 -07001989 int i;
1990 int nid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001991
David Rientjesf5b087b2008-04-28 02:12:27 -07001992 if (!nnodes)
1993 return numa_node_id();
Andrew Mortonfee83b32016-05-19 17:11:43 -07001994 target = (unsigned int)n % nnodes;
1995 nid = first_node(pol->v.nodes);
1996 for (i = 0; i < target; i++)
Andi Kleendfcd3c02005-10-29 18:15:48 -07001997 nid = next_node(nid, pol->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998 return nid;
1999}
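/*
 * Worked example (illustrative): with pol->v.nodes = {0,2,5} and n = 7,
 * nnodes == 3 and target == 7 % 3 == 1, so the walk starts at node 0,
 * advances once and returns node 2.
 */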
2000
Christoph Lameter5da7ca82006-01-06 00:10:46 -08002001/* Determine a node number for interleave */
2002static inline unsigned interleave_nid(struct mempolicy *pol,
2003 struct vm_area_struct *vma, unsigned long addr, int shift)
2004{
2005 if (vma) {
2006 unsigned long off;
2007
Nishanth Aravamudan3b98b082006-08-31 21:27:53 -07002008 /*
2009 * for small pages, there is no difference between
2010 * shift and PAGE_SHIFT, so the bit-shift is safe.
2011 * for huge pages, since vm_pgoff is in units of small
2012 * pages, we need to shift off the always 0 bits to get
2013 * a useful offset.
2014 */
2015 BUG_ON(shift < PAGE_SHIFT);
2016 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
Christoph Lameter5da7ca82006-01-06 00:10:46 -08002017 off += (addr - vma->vm_start) >> shift;
Laurent Dufour98c70ba2017-09-08 16:12:39 -07002018 return offset_il_node(pol, off);
Christoph Lameter5da7ca82006-01-06 00:10:46 -08002019 } else
2020 return interleave_nodes(pol);
2021}
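/*
 * Worked example (illustrative): for a 2MB huge page on x86-64,
 * shift == 21 and PAGE_SHIFT == 12, so vm_pgoff (in 4KB units) is
 * shifted right by 9 and the offset of @addr within the VMA by 21;
 * their sum is the huge-page index handed to offset_il_node().
 */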
2022
Chen, Kenneth W00ac59a2006-02-03 21:51:14 +01002023#ifdef CONFIG_HUGETLBFS
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07002024/*
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002025 * huge_node(@vma, @addr, @gfp_flags, @mpol, @nodemask)
Fabian Frederickb46e14a2014-06-04 16:08:18 -07002026 * @vma: virtual memory area whose policy is sought
2027 * @addr: address in @vma for shared policy lookup and interleave policy
2028 * @gfp_flags: for requested zone
2029 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
2030 * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07002031 *
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002032 * Returns a nid suitable for a huge page allocation and a pointer
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002033 * to the struct mempolicy for conditional unref after allocation.
2034 * If the effective policy is 'BIND', returns a pointer to the mempolicy's
2035 * @nodemask for filtering the zonelist.
Miao Xiec0ff7452010-05-24 14:32:08 -07002036 *
Mel Gormand26914d2014-04-03 14:47:24 -07002037 * Must be protected by read_mems_allowed_begin()
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07002038 */
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002039int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
2040 struct mempolicy **mpol, nodemask_t **nodemask)
Christoph Lameter5da7ca82006-01-06 00:10:46 -08002041{
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002042 int nid;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08002043
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07002044 *mpol = get_vma_policy(vma, addr);
Mel Gorman19770b32008-04-28 02:12:18 -07002045 *nodemask = NULL; /* assume !MPOL_BIND */
Christoph Lameter5da7ca82006-01-06 00:10:46 -08002046
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002047 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002048 nid = interleave_nid(*mpol, vma, addr,
2049 huge_page_shift(hstate_vma(vma)));
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002050 } else {
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002051 nid = policy_node(gfp_flags, *mpol, numa_node_id());
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002052 if ((*mpol)->mode == MPOL_BIND)
2053 *nodemask = &(*mpol)->v.nodes;
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07002054 }
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002055 return nid;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08002056}
Lee Schermerhorn06808b02009-12-14 17:58:21 -08002057
2058/*
2059 * init_nodemask_of_mempolicy
2060 *
2061 * If the current task's mempolicy is "default" [NULL], return 'false'
2062 * to indicate default policy. Otherwise, extract the policy nodemask
2063 * for 'bind' or 'interleave' policy into the argument nodemask, or
2064 * initialize the argument nodemask to contain the single node for
2065 * 'preferred' or 'local' policy and return 'true' to indicate presence
2066 * of non-default mempolicy.
2067 *
2068 * We don't bother with reference counting the mempolicy [mpol_get/put]
2069 * because the current task is examining its own mempolicy and a task's
2070 * mempolicy is only ever changed by the task itself.
2071 *
2072 * N.B., it is the caller's responsibility to free a returned nodemask.
2073 */
2074bool init_nodemask_of_mempolicy(nodemask_t *mask)
2075{
2076 struct mempolicy *mempolicy;
2077 int nid;
2078
2079 if (!(mask && current->mempolicy))
2080 return false;
2081
Miao Xiec0ff7452010-05-24 14:32:08 -07002082 task_lock(current);
Lee Schermerhorn06808b02009-12-14 17:58:21 -08002083 mempolicy = current->mempolicy;
2084 switch (mempolicy->mode) {
2085 case MPOL_PREFERRED:
2086 if (mempolicy->flags & MPOL_F_LOCAL)
2087 nid = numa_node_id();
2088 else
2089 nid = mempolicy->v.preferred_node;
2090 init_nodemask_of_node(mask, nid);
2091 break;
2092
2093 case MPOL_BIND:
Lee Schermerhorn06808b02009-12-14 17:58:21 -08002094 case MPOL_INTERLEAVE:
2095 *mask = mempolicy->v.nodes;
2096 break;
2097
2098 default:
2099 BUG();
2100 }
Miao Xiec0ff7452010-05-24 14:32:08 -07002101 task_unlock(current);
Lee Schermerhorn06808b02009-12-14 17:58:21 -08002102
2103 return true;
2104}
Chen, Kenneth W00ac59a2006-02-03 21:51:14 +01002105#endif
Christoph Lameter5da7ca82006-01-06 00:10:46 -08002106
David Rientjes6f48d0eb2010-08-09 17:18:52 -07002107/*
2108 * mempolicy_nodemask_intersects
2109 *
2110 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
2111 * policy. Otherwise, check for intersection between mask and the policy
2112 * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
2113 * policy, always return true since it may allocate elsewhere on fallback.
2114 *
2115 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
2116 */
2117bool mempolicy_nodemask_intersects(struct task_struct *tsk,
2118 const nodemask_t *mask)
2119{
2120 struct mempolicy *mempolicy;
2121 bool ret = true;
2122
2123 if (!mask)
2124 return ret;
2125 task_lock(tsk);
2126 mempolicy = tsk->mempolicy;
2127 if (!mempolicy)
2128 goto out;
2129
2130 switch (mempolicy->mode) {
2131 case MPOL_PREFERRED:
2132 /*
2133 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
2134 * allocate from, they may fallback to other nodes when oom.
2135 * Thus, it's possible for tsk to have allocated memory from
2136 * nodes in mask.
2137 */
2138 break;
2139 case MPOL_BIND:
2140 case MPOL_INTERLEAVE:
2141 ret = nodes_intersects(mempolicy->v.nodes, *mask);
2142 break;
2143 default:
2144 BUG();
2145 }
2146out:
2147 task_unlock(tsk);
2148 return ret;
2149}
2150
Linus Torvalds1da177e2005-04-16 15:20:36 -07002151/* Allocate a page under interleave policy.
2152 Own path because it needs to do special accounting. */
Andi Kleen662f3a02005-10-29 18:15:49 -07002153static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2154 unsigned nid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002156 struct page *page;
2157
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002158 page = __alloc_pages(gfp, order, nid);
Kemi Wang45180852017-11-15 17:38:22 -08002159 /* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
2160 if (!static_branch_likely(&vm_numa_stat_key))
2161 return page;
Andrey Ryabininde55c8b2017-10-13 15:57:43 -07002162 if (page && page_to_nid(page) == nid) {
2163 preempt_disable();
2164 __inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
2165 preempt_enable();
2166 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167 return page;
2168}
2169
2170/**
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002171 * alloc_pages_vma - Allocate a page for a VMA.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002172 *
2173 * @gfp:
2174 * %GFP_USER user allocation.
2175 * %GFP_KERNEL kernel allocations,
2176 * %GFP_HIGHMEM highmem/user allocations,
2177 * %GFP_FS allocation should not call back into a file system.
2178 * %GFP_ATOMIC don't sleep.
2179 *
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002180 * @order: Order of the GFP allocation.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002181 * @vma: Pointer to VMA or NULL if not available.
2182 * @addr: Virtual Address of the allocation. Must be inside the VMA.
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002183 * @node: Which node to prefer for allocation (modulo policy).
David Rientjes19deb762019-09-04 12:54:20 -07002184 * @hugepage: for hugepages try only the preferred node if possible
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185 *
2186 * This function allocates a page from the kernel page pool and applies
2187 * a NUMA policy associated with the VMA or the current process.
Michel Lespinasse3e4e28c2020-06-08 21:33:51 -07002188 * When VMA is not NULL, the caller must read-lock the mmap_lock of the
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189 * mm_struct of the VMA to prevent it from going away. Should be used for
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002190 * all allocations for pages that will be mapped into user space. Returns
2191 * NULL when no page can be allocated.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002192 */
2193struct page *
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002194alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
David Rientjes19deb762019-09-04 12:54:20 -07002195 unsigned long addr, int node, bool hugepage)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002196{
Mel Gormancc9a6c82012-03-21 16:34:11 -07002197 struct mempolicy *pol;
Miao Xiec0ff7452010-05-24 14:32:08 -07002198 struct page *page;
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002199 int preferred_nid;
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002200 nodemask_t *nmask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07002202 pol = get_vma_policy(vma, addr);
Mel Gormancc9a6c82012-03-21 16:34:11 -07002203
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002204 if (pol->mode == MPOL_INTERLEAVE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002205 unsigned nid;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08002206
Andi Kleen8eac5632011-02-25 14:44:28 -08002207 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002208 mpol_cond_put(pol);
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002209 page = alloc_page_interleave(gfp, order, nid);
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002210 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002211 }
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002212
David Rientjes19deb762019-09-04 12:54:20 -07002213 if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2214 int hpage_node = node;
2215
2216 /*
2217 * For hugepage allocation and non-interleave policy which
2218 * allows the current node (or other explicitly preferred
2219 * node) we only try to allocate from the current/preferred
2220 * node and don't fall back to other nodes, as the cost of
2221 * remote accesses would likely offset THP benefits.
2222 *
2223 * If the policy is interleave, or does not allow the current
2224 * node in its nodemask, we allocate the standard way.
2225 */
2226 if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL))
2227 hpage_node = pol->v.preferred_node;
2228
2229 nmask = policy_nodemask(gfp, pol);
2230 if (!nmask || node_isset(hpage_node, *nmask)) {
2231 mpol_cond_put(pol);
Vlastimil Babkacc638f32020-01-13 16:29:04 -08002232 /*
2233 * First, try to allocate THP only on local node, but
2234 * don't reclaim unnecessarily, just compact.
2235 */
David Rientjes19deb762019-09-04 12:54:20 -07002236 page = __alloc_pages_node(hpage_node,
Vlastimil Babkacc638f32020-01-13 16:29:04 -08002237 gfp | __GFP_THISNODE | __GFP_NORETRY, order);
David Rientjes76e654c2019-09-04 12:54:25 -07002238
2239 /*
2240 * If hugepage allocations are configured to always
2241 * synchronous compact or the vma has been madvised
2242 * to prefer hugepage backing, retry allowing remote
Vlastimil Babkacc638f32020-01-13 16:29:04 -08002243 * memory with both reclaim and compact as well.
David Rientjes76e654c2019-09-04 12:54:25 -07002244 */
2245 if (!page && (gfp & __GFP_DIRECT_RECLAIM))
2246 page = __alloc_pages_node(hpage_node,
Vlastimil Babkacc638f32020-01-13 16:29:04 -08002247 gfp, order);
David Rientjes76e654c2019-09-04 12:54:25 -07002248
David Rientjes19deb762019-09-04 12:54:20 -07002249 goto out;
2250 }
2251 }
2252
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002253 nmask = policy_nodemask(gfp, pol);
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002254 preferred_nid = policy_node(gfp, pol, node);
2255 page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
Vlastimil Babkad51e9892017-01-24 15:18:18 -08002256 mpol_cond_put(pol);
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002257out:
Miao Xiec0ff7452010-05-24 14:32:08 -07002258 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002259}
Christoph Hellwig69262212019-06-26 14:27:05 +02002260EXPORT_SYMBOL(alloc_pages_vma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002261
2262/**
2263 * alloc_pages_current - Allocate pages.
2264 *
2265 * @gfp:
2266 * %GFP_USER user allocation,
2267 * %GFP_KERNEL kernel allocation,
2268 * %GFP_HIGHMEM highmem allocation,
2269 * %GFP_FS don't call back into a file system.
2270 * %GFP_ATOMIC don't sleep.
2271 * @order: Power of two of allocation size in pages. 0 is a single page.
2272 *
2273 * Allocate a page from the kernel page pool, applying the current
2274 * process' NUMA policy when not in interrupt context.
2275 * Returns NULL when no page can be allocated.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276 */
Al Virodd0fc662005-10-07 07:46:04 +01002277struct page *alloc_pages_current(gfp_t gfp, unsigned order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002278{
Oleg Nesterov8d902742014-10-09 15:27:45 -07002279 struct mempolicy *pol = &default_policy;
Miao Xiec0ff7452010-05-24 14:32:08 -07002280 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002281
Oleg Nesterov8d902742014-10-09 15:27:45 -07002282 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2283 pol = get_task_policy(current);
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002284
2285 /*
2286 * No reference counting needed for current->mempolicy
2287 * nor system default_policy
2288 */
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002289 if (pol->mode == MPOL_INTERLEAVE)
Miao Xiec0ff7452010-05-24 14:32:08 -07002290 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2291 else
2292 page = __alloc_pages_nodemask(gfp, order,
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002293 policy_node(gfp, pol, numa_node_id()),
Andi Kleen5c4b4be2011-03-04 17:36:32 -08002294 policy_nodemask(gfp, pol));
Mel Gormancc9a6c82012-03-21 16:34:11 -07002295
Miao Xiec0ff7452010-05-24 14:32:08 -07002296 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002297}
2298EXPORT_SYMBOL(alloc_pages_current);
2299
Oleg Nesterovef0855d2013-09-11 14:20:14 -07002300int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2301{
2302 struct mempolicy *pol = mpol_dup(vma_policy(src));
2303
2304 if (IS_ERR(pol))
2305 return PTR_ERR(pol);
2306 dst->vm_policy = pol;
2307 return 0;
2308}
2309
Paul Jackson42253992006-01-08 01:01:59 -08002310/*
Lee Schermerhorn846a16b2008-04-28 02:13:09 -07002311 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
Paul Jackson42253992006-01-08 01:01:59 -08002312 * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
2313 * with the mems_allowed returned by cpuset_mems_allowed(). This
2314 * keeps mempolicies cpuset relative after its cpuset moves. See
2315 * further kernel/cpuset.c update_nodemask().
Miao Xie708c1bb2010-05-24 14:32:07 -07002316 *
2317 * current's mempolicy may be rebound by the other task (the task that changes
2318 * cpuset's mems), so we needn't do rebind work for the current task.
Paul Jackson42253992006-01-08 01:01:59 -08002319 */
Paul Jackson42253992006-01-08 01:01:59 -08002320
Lee Schermerhorn846a16b2008-04-28 02:13:09 -07002321/* Slow path of a mempolicy duplicate */
2322struct mempolicy *__mpol_dup(struct mempolicy *old)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002323{
2324 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2325
2326 if (!new)
2327 return ERR_PTR(-ENOMEM);
Miao Xie708c1bb2010-05-24 14:32:07 -07002328
2329 /* task's mempolicy is protected by alloc_lock */
2330 if (old == current->mempolicy) {
2331 task_lock(current);
2332 *new = *old;
2333 task_unlock(current);
2334 } else
2335 *new = *old;
2336
Paul Jackson42253992006-01-08 01:01:59 -08002337 if (current_cpuset_is_being_rebound()) {
2338 nodemask_t mems = cpuset_mems_allowed(current);
Vlastimil Babka213980c2017-07-06 15:40:06 -07002339 mpol_rebind_policy(new, &mems);
Paul Jackson42253992006-01-08 01:01:59 -08002340 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002341 atomic_set(&new->refcnt, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002342 return new;
2343}
2344
2345/* Slow path of a mempolicy comparison */
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002346bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002347{
2348 if (!a || !b)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002349 return false;
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002350 if (a->mode != b->mode)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002351 return false;
Bob Liu19800502010-05-24 14:32:01 -07002352 if (a->flags != b->flags)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002353 return false;
Bob Liu19800502010-05-24 14:32:01 -07002354 if (mpol_store_user_nodemask(a))
2355 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002356 return false;
Bob Liu19800502010-05-24 14:32:01 -07002357
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002358 switch (a->mode) {
Mel Gorman19770b32008-04-28 02:12:18 -07002359 case MPOL_BIND:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002360 case MPOL_INTERLEAVE:
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002361 return !!nodes_equal(a->v.nodes, b->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002362 case MPOL_PREFERRED:
Yisheng Xie8970a632018-03-22 16:17:02 -07002363 /* a's ->flags is the same as b's */
2364 if (a->flags & MPOL_F_LOCAL)
2365 return true;
Namhyung Kim75719662011-03-22 16:33:02 -07002366 return a->v.preferred_node == b->v.preferred_node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002367 default:
2368 BUG();
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002369 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002370 }
2371}
2372
Linus Torvalds1da177e2005-04-16 15:20:36 -07002373/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002374 * Shared memory backing store policy support.
2375 *
2376 * Remember policies even when nobody has shared memory mapped.
2377 * The policies are kept in a red-black tree linked from the inode.
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002378 * They are protected by the sp->lock rwlock, which should be held
Linus Torvalds1da177e2005-04-16 15:20:36 -07002379 * for any accesses to the tree.
2380 */
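/*
 * For reference, a rough sketch of the two structures involved (see
 * include/linux/mempolicy.h for the authoritative declarations): sp_node
 * covers a pgoff range [start, end) and carries the policy; shared_policy
 * is the per-inode tree root plus its lock.
 *
 *	struct sp_node {
 *		struct rb_node nd;
 *		unsigned long start, end;
 *		struct mempolicy *policy;
 *	};
 *
 *	struct shared_policy {
 *		struct rb_root root;
 *		rwlock_t lock;
 *	};
 */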
2381
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002382/*
2383 * Look up the first element intersecting start-end. The caller must hold
2384 * sp->lock for reading or for writing.
2385 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002386static struct sp_node *
2387sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2388{
2389 struct rb_node *n = sp->root.rb_node;
2390
2391 while (n) {
2392 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2393
2394 if (start >= p->end)
2395 n = n->rb_right;
2396 else if (end <= p->start)
2397 n = n->rb_left;
2398 else
2399 break;
2400 }
2401 if (!n)
2402 return NULL;
2403 for (;;) {
2404 struct sp_node *w = NULL;
2405 struct rb_node *prev = rb_prev(n);
2406 if (!prev)
2407 break;
2408 w = rb_entry(prev, struct sp_node, nd);
2409 if (w->end <= start)
2410 break;
2411 n = prev;
2412 }
2413 return rb_entry(n, struct sp_node, nd);
2414}
2415
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002416/*
2417 * Insert a new shared policy into the tree. The caller must hold sp->lock
2418 * for writing.
2419 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002420static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2421{
2422 struct rb_node **p = &sp->root.rb_node;
2423 struct rb_node *parent = NULL;
2424 struct sp_node *nd;
2425
2426 while (*p) {
2427 parent = *p;
2428 nd = rb_entry(parent, struct sp_node, nd);
2429 if (new->start < nd->start)
2430 p = &(*p)->rb_left;
2431 else if (new->end > nd->end)
2432 p = &(*p)->rb_right;
2433 else
2434 BUG();
2435 }
2436 rb_link_node(&new->nd, parent, p);
2437 rb_insert_color(&new->nd, &sp->root);
Paul Mundt140d5a42007-07-15 23:38:16 -07002438 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002439 new->policy ? new->policy->mode : 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002440}
2441
2442/* Find shared policy intersecting idx */
2443struct mempolicy *
2444mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2445{
2446 struct mempolicy *pol = NULL;
2447 struct sp_node *sn;
2448
2449 if (!sp->root.rb_node)
2450 return NULL;
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002451 read_lock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002452 sn = sp_lookup(sp, idx, idx+1);
2453 if (sn) {
2454 mpol_get(sn->policy);
2455 pol = sn->policy;
2456 }
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002457 read_unlock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002458 return pol;
2459}
2460
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002461static void sp_free(struct sp_node *n)
2462{
2463 mpol_put(n->policy);
2464 kmem_cache_free(sn_cache, n);
2465}
2466
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002467/**
2468 * mpol_misplaced - check whether current page node is valid in policy
2469 *
Fabian Frederickb46e14a2014-06-04 16:08:18 -07002470 * @page: page to be checked
2471 * @vma: vm area where page mapped
2472 * @addr: virtual address where page mapped
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002473 *
2474 * Look up the current policy node id for vma,addr and compare it to the
2475 * page's node id.
2476 *
2477 * Returns:
2478 * -1 - not misplaced, page is in the right node
2479 * node - node id where the page should be
2480 *
2481 * Policy determination "mimics" alloc_page_vma().
2482 * Called from fault path where we know the vma and faulting address.
2483 */
2484int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2485{
2486 struct mempolicy *pol;
Mel Gormanc33d6c02016-05-19 17:14:10 -07002487 struct zoneref *z;
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002488 int curnid = page_to_nid(page);
2489 unsigned long pgoff;
Peter Zijlstra90572892013-10-07 11:29:20 +01002490 int thiscpu = raw_smp_processor_id();
2491 int thisnid = cpu_to_node(thiscpu);
Anshuman Khandual98fa15f2019-03-05 15:42:58 -08002492 int polnid = NUMA_NO_NODE;
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002493 int ret = -1;
2494
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07002495 pol = get_vma_policy(vma, addr);
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002496 if (!(pol->flags & MPOL_F_MOF))
2497 goto out;
2498
2499 switch (pol->mode) {
2500 case MPOL_INTERLEAVE:
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002501 pgoff = vma->vm_pgoff;
2502 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
Laurent Dufour98c70ba2017-09-08 16:12:39 -07002503 polnid = offset_il_node(pol, pgoff);
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002504 break;
2505
2506 case MPOL_PREFERRED:
2507 if (pol->flags & MPOL_F_LOCAL)
2508 polnid = numa_node_id();
2509 else
2510 polnid = pol->v.preferred_node;
2511 break;
2512
2513 case MPOL_BIND:
Mel Gormanc33d6c02016-05-19 17:14:10 -07002514
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002515 /*
2516 * MPOL_BIND allows binding to multiple nodes.
2517 * Use the current page's node if it is in the policy nodemask,
2518 * else select the nearest allowed node, if any.
2519 * If there are no allowed nodes, use the current node [!misplaced].
2520 */
2521 if (node_isset(curnid, pol->v.nodes))
2522 goto out;
Mel Gormanc33d6c02016-05-19 17:14:10 -07002523 z = first_zones_zonelist(
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002524 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2525 gfp_zone(GFP_HIGHUSER),
Mel Gormanc33d6c02016-05-19 17:14:10 -07002526 &pol->v.nodes);
Pavel Tatashinc1093b72018-08-21 21:53:32 -07002527 polnid = zone_to_nid(z->zone);
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002528 break;
2529
2530 default:
2531 BUG();
2532 }
Mel Gorman5606e382012-11-02 18:19:13 +00002533
2534 /* Migrate the page towards the node whose CPU is referencing it */
Mel Gormane42c8ff2012-11-12 09:17:07 +00002535 if (pol->flags & MPOL_F_MORON) {
Peter Zijlstra90572892013-10-07 11:29:20 +01002536 polnid = thisnid;
Mel Gorman5606e382012-11-02 18:19:13 +00002537
Rik van Riel10f39042014-01-27 17:03:44 -05002538 if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
Rik van Rielde1c9ce2013-10-07 11:29:39 +01002539 goto out;
Mel Gormane42c8ff2012-11-12 09:17:07 +00002540 }
2541
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002542 if (curnid != polnid)
2543 ret = polnid;
2544out:
2545 mpol_cond_put(pol);
2546
2547 return ret;
2548}
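/*
 * Illustrative sketch (not part of this file): the NUMA hinting fault path
 * consumes the return convention documented above roughly like this:
 *
 *	int target_nid = mpol_misplaced(page, vma, addr);
 *
 *	if (target_nid == -1)
 *		return;
 *	migrate_misplaced_page(page, vma, target_nid);
 */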
2549
David Rientjesc11600e2016-09-01 16:15:07 -07002550/*
2551 * Drop the (possibly final) reference to task->mempolicy. It needs to be
2552 * dropped after task->mempolicy is set to NULL so that any allocation done as
2553 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2554 * policy.
2555 */
2556void mpol_put_task_policy(struct task_struct *task)
2557{
2558 struct mempolicy *pol;
2559
2560 task_lock(task);
2561 pol = task->mempolicy;
2562 task->mempolicy = NULL;
2563 task_unlock(task);
2564 mpol_put(pol);
2565}
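/*
 * Illustrative usage (not part of this file): the exit path drops the
 * task's policy with, roughly:
 *
 *	mpol_put_task_policy(tsk);
 */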
2566
Linus Torvalds1da177e2005-04-16 15:20:36 -07002567static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2568{
Paul Mundt140d5a42007-07-15 23:38:16 -07002569 pr_debug("deleting %lx-%lx\n", n->start, n->end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002570 rb_erase(&n->nd, &sp->root);
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002571 sp_free(n);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002572}
2573
Mel Gorman42288fe2012-12-21 23:10:25 +00002574static void sp_node_init(struct sp_node *node, unsigned long start,
2575 unsigned long end, struct mempolicy *pol)
2576{
2577 node->start = start;
2578 node->end = end;
2579 node->policy = pol;
2580}
2581
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07002582static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2583 struct mempolicy *pol)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002584{
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002585 struct sp_node *n;
2586 struct mempolicy *newpol;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002587
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002588 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002589 if (!n)
2590 return NULL;
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002591
2592 newpol = mpol_dup(pol);
2593 if (IS_ERR(newpol)) {
2594 kmem_cache_free(sn_cache, n);
2595 return NULL;
2596 }
2597 newpol->flags |= MPOL_F_SHARED;
Mel Gorman42288fe2012-12-21 23:10:25 +00002598 sp_node_init(n, start, end, newpol);
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002599
Linus Torvalds1da177e2005-04-16 15:20:36 -07002600 return n;
2601}
2602
2603/* Replace a policy range. */
2604static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2605 unsigned long end, struct sp_node *new)
2606{
Mel Gormanb22d1272012-10-08 16:29:17 -07002607 struct sp_node *n;
Mel Gorman42288fe2012-12-21 23:10:25 +00002608 struct sp_node *n_new = NULL;
2609 struct mempolicy *mpol_new = NULL;
Mel Gormanb22d1272012-10-08 16:29:17 -07002610 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002611
Mel Gorman42288fe2012-12-21 23:10:25 +00002612restart:
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002613 write_lock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002614 n = sp_lookup(sp, start, end);
2615 /* Take care of old policies in the same range. */
2616 while (n && n->start < end) {
2617 struct rb_node *next = rb_next(&n->nd);
2618 if (n->start >= start) {
2619 if (n->end <= end)
2620 sp_delete(sp, n);
2621 else
2622 n->start = end;
2623 } else {
2624 /* Old policy spanning whole new range. */
2625 if (n->end > end) {
Mel Gorman42288fe2012-12-21 23:10:25 +00002626 if (!n_new)
2627 goto alloc_new;
2628
2629 *mpol_new = *n->policy;
2630 atomic_set(&mpol_new->refcnt, 1);
KOSAKI Motohiro78806392013-03-08 12:43:29 -08002631 sp_node_init(n_new, end, n->end, mpol_new);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002632 n->end = start;
Hillf Danton5ca39572013-03-08 12:43:28 -08002633 sp_insert(sp, n_new);
Mel Gorman42288fe2012-12-21 23:10:25 +00002634 n_new = NULL;
2635 mpol_new = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002636 break;
2637 } else
2638 n->end = start;
2639 }
2640 if (!next)
2641 break;
2642 n = rb_entry(next, struct sp_node, nd);
2643 }
2644 if (new)
2645 sp_insert(sp, new);
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002646 write_unlock(&sp->lock);
Mel Gorman42288fe2012-12-21 23:10:25 +00002647 ret = 0;
2648
2649err_out:
2650 if (mpol_new)
2651 mpol_put(mpol_new);
2652 if (n_new)
2653 kmem_cache_free(sn_cache, n_new);
2654
Mel Gormanb22d1272012-10-08 16:29:17 -07002655 return ret;
Mel Gorman42288fe2012-12-21 23:10:25 +00002656
2657alloc_new:
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002658 write_unlock(&sp->lock);
Mel Gorman42288fe2012-12-21 23:10:25 +00002659 ret = -ENOMEM;
2660 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2661 if (!n_new)
2662 goto err_out;
2663 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2664 if (!mpol_new)
2665 goto err_out;
2666 goto restart;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002667}
2668
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002669/**
2670 * mpol_shared_policy_init - initialize shared policy for inode
2671 * @sp: pointer to inode shared policy
2672 * @mpol: struct mempolicy to install
2673 *
2674 * Install non-NULL @mpol in inode's shared policy rb-tree.
2675 * On entry, the current task holds a reference on a non-NULL @mpol,
2676 * which must be released on exit.
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002677 * This is called at get_inode() time, so GFP_KERNEL allocations may be used.
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002678 */
2679void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
Robin Holt7339ff82006-01-14 13:20:48 -08002680{
Miao Xie58568d22009-06-16 15:31:49 -07002681 int ret;
2682
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002683 sp->root = RB_ROOT; /* empty tree == default mempolicy */
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002684 rwlock_init(&sp->lock);
Robin Holt7339ff82006-01-14 13:20:48 -08002685
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002686 if (mpol) {
2687 struct vm_area_struct pvma;
2688 struct mempolicy *new;
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002689 NODEMASK_SCRATCH(scratch);
Robin Holt7339ff82006-01-14 13:20:48 -08002690
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002691 if (!scratch)
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002692 goto put_mpol;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002693 /* contextualize the tmpfs mount point mempolicy */
2694 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
Lee Schermerhorn15d77832010-05-24 14:32:04 -07002695 if (IS_ERR(new))
Dan Carpenter0cae3452010-05-25 23:42:58 -07002696 goto free_scratch; /* no valid nodemask intersection */
Miao Xie58568d22009-06-16 15:31:49 -07002697
2698 task_lock(current);
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002699 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
Miao Xie58568d22009-06-16 15:31:49 -07002700 task_unlock(current);
Lee Schermerhorn15d77832010-05-24 14:32:04 -07002701 if (ret)
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002702 goto put_new;
Robin Holt7339ff82006-01-14 13:20:48 -08002703
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002704 /* Create pseudo-vma that contains just the policy */
Kirill A. Shutemov2c4541e2018-07-26 16:37:30 -07002705 vma_init(&pvma, NULL);
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002706 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2707 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
Lee Schermerhorn15d77832010-05-24 14:32:04 -07002708
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002709put_new:
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002710 mpol_put(new); /* drop initial ref */
Dan Carpenter0cae3452010-05-25 23:42:58 -07002711free_scratch:
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002712 NODEMASK_SCRATCH_FREE(scratch);
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002713put_mpol:
2714 mpol_put(mpol); /* drop our incoming ref on sb mpol */
Robin Holt7339ff82006-01-14 13:20:48 -08002715 }
2716}
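/*
 * Illustrative sketch (not part of this file): tmpfs seeds each new inode's
 * shared policy from the superblock's mount-time mempolicy, roughly as
 * shmem_get_inode() does:
 *
 *	mpol_shared_policy_init(&info->policy, shmem_get_sbmpol(sbinfo));
 */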
2717
Linus Torvalds1da177e2005-04-16 15:20:36 -07002718int mpol_set_shared_policy(struct shared_policy *info,
2719 struct vm_area_struct *vma, struct mempolicy *npol)
2720{
2721 int err;
2722 struct sp_node *new = NULL;
2723 unsigned long sz = vma_pages(vma);
2724
David Rientjes028fec42008-04-28 02:12:25 -07002725 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002726 vma->vm_pgoff,
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002727 sz, npol ? npol->mode : -1,
David Rientjes028fec42008-04-28 02:12:25 -07002728 npol ? npol->flags : -1,
David Rientjes00ef2d22013-02-22 16:35:36 -08002729 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002730
2731 if (npol) {
2732 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2733 if (!new)
2734 return -ENOMEM;
2735 }
2736 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2737 if (err && new)
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002738 sp_free(new);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002739 return err;
2740}
2741
2742/* Free a backing policy store on inode delete. */
2743void mpol_free_shared_policy(struct shared_policy *p)
2744{
2745 struct sp_node *n;
2746 struct rb_node *next;
2747
2748 if (!p->root.rb_node)
2749 return;
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002750 write_lock(&p->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002751 next = rb_first(&p->root);
2752 while (next) {
2753 n = rb_entry(next, struct sp_node, nd);
2754 next = rb_next(&n->nd);
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002755 sp_delete(p, n);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002756 }
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002757 write_unlock(&p->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002758}
2759
Mel Gorman1a687c22012-11-22 11:16:36 +00002760#ifdef CONFIG_NUMA_BALANCING
Mel Gormanc2976632014-01-29 14:05:42 -08002761static int __initdata numabalancing_override;
Mel Gorman1a687c22012-11-22 11:16:36 +00002762
2763static void __init check_numabalancing_enable(void)
2764{
2765 bool numabalancing_default = false;
2766
2767 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2768 numabalancing_default = true;
2769
Mel Gormanc2976632014-01-29 14:05:42 -08002770 /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2771 if (numabalancing_override)
2772 set_numabalancing_state(numabalancing_override == 1);
2773
Mel Gormanb0dc2b92015-05-14 15:17:09 -07002774 if (num_online_nodes() > 1 && !numabalancing_override) {
Joe Perches756a0252016-03-17 14:19:47 -07002775 pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
Mel Gormanc2976632014-01-29 14:05:42 -08002776 numabalancing_default ? "Enabling" : "Disabling");
Mel Gorman1a687c22012-11-22 11:16:36 +00002777 set_numabalancing_state(numabalancing_default);
2778 }
2779}
2780
2781static int __init setup_numabalancing(char *str)
2782{
2783 int ret = 0;
2784 if (!str)
2785 goto out;
Mel Gorman1a687c22012-11-22 11:16:36 +00002786
2787 if (!strcmp(str, "enable")) {
Mel Gormanc2976632014-01-29 14:05:42 -08002788 numabalancing_override = 1;
Mel Gorman1a687c22012-11-22 11:16:36 +00002789 ret = 1;
2790 } else if (!strcmp(str, "disable")) {
Mel Gormanc2976632014-01-29 14:05:42 -08002791 numabalancing_override = -1;
Mel Gorman1a687c22012-11-22 11:16:36 +00002792 ret = 1;
2793 }
2794out:
2795 if (!ret)
Andrew Morton4a404be2014-01-29 14:05:43 -08002796 pr_warn("Unable to parse numa_balancing=\n");
Mel Gorman1a687c22012-11-22 11:16:36 +00002797
2798 return ret;
2799}
2800__setup("numa_balancing=", setup_numabalancing);
2801#else
2802static inline void __init check_numabalancing_enable(void)
2803{
2804}
2805#endif /* CONFIG_NUMA_BALANCING */
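/*
 * Illustrative usage (not part of this file): automatic NUMA balancing can
 * be controlled with the boot parameter parsed above, e.g.
 * numa_balancing=disable, or at run time through the kernel.numa_balancing
 * sysctl (/proc/sys/kernel/numa_balancing).
 */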
2806
Linus Torvalds1da177e2005-04-16 15:20:36 -07002807/* assumes fs == KERNEL_DS */
2808void __init numa_policy_init(void)
2809{
Paul Mundtb71636e2007-07-15 23:38:15 -07002810 nodemask_t interleave_nodes;
2811 unsigned long largest = 0;
2812 int nid, prefer = 0;
2813
Linus Torvalds1da177e2005-04-16 15:20:36 -07002814 policy_cache = kmem_cache_create("numa_policy",
2815 sizeof(struct mempolicy),
Paul Mundt20c2df82007-07-20 10:11:58 +09002816 0, SLAB_PANIC, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002817
2818 sn_cache = kmem_cache_create("shared_policy_node",
2819 sizeof(struct sp_node),
Paul Mundt20c2df82007-07-20 10:11:58 +09002820 0, SLAB_PANIC, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002821
Mel Gorman5606e382012-11-02 18:19:13 +00002822 for_each_node(nid) {
2823 preferred_node_policy[nid] = (struct mempolicy) {
2824 .refcnt = ATOMIC_INIT(1),
2825 .mode = MPOL_PREFERRED,
2826 .flags = MPOL_F_MOF | MPOL_F_MORON,
2827 .v = { .preferred_node = nid, },
2828 };
2829 }
2830
Paul Mundtb71636e2007-07-15 23:38:15 -07002831 /*
2832 * Set the interleaving policy for system init. Interleaving is only
2833 * enabled across suitably sized nodes (default is >= 16MB), falling
2834 * back to the largest node if they are all smaller.
2835 */
2836 nodes_clear(interleave_nodes);
Lai Jiangshan01f13bd2012-12-12 13:51:33 -08002837 for_each_node_state(nid, N_MEMORY) {
Paul Mundtb71636e2007-07-15 23:38:15 -07002838 unsigned long total_pages = node_present_pages(nid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002839
Paul Mundtb71636e2007-07-15 23:38:15 -07002840 /* Preserve the largest node */
2841 if (largest < total_pages) {
2842 largest = total_pages;
2843 prefer = nid;
2844 }
2845
2846 /* Interleave this node? */
2847 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2848 node_set(nid, interleave_nodes);
2849 }
2850
2851 /* All too small, use the largest */
2852 if (unlikely(nodes_empty(interleave_nodes)))
2853 node_set(prefer, interleave_nodes);
2854
David Rientjes028fec42008-04-28 02:12:25 -07002855 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
Mitchel Humpherysb1de0d12014-06-06 14:38:30 -07002856 pr_err("%s: interleaving failed\n", __func__);
Mel Gorman1a687c22012-11-22 11:16:36 +00002857
2858 check_numabalancing_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002859}
2860
Christoph Lameter8bccd852005-10-29 18:16:59 -07002861/* Reset policy of current process to default */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002862void numa_default_policy(void)
2863{
David Rientjes028fec42008-04-28 02:12:25 -07002864 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002865}
Paul Jackson68860ec2005-10-30 15:02:36 -08002866
Paul Jackson42253992006-01-08 01:01:59 -08002867/*
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002868 * Parse and format mempolicy from/to strings
2869 */
2870
2871/*
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08002872 * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002873 */
Lee Schermerhorn345ace92010-05-24 14:32:04 -07002874static const char * const policy_modes[] =
2875{
2876 [MPOL_DEFAULT] = "default",
2877 [MPOL_PREFERRED] = "prefer",
2878 [MPOL_BIND] = "bind",
2879 [MPOL_INTERLEAVE] = "interleave",
Lee Schermerhornd3a71032012-10-25 14:16:29 +02002880 [MPOL_LOCAL] = "local",
Lee Schermerhorn345ace92010-05-24 14:32:04 -07002881};
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002882
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002883
2884#ifdef CONFIG_TMPFS
2885/**
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08002886 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002887 * @str: string containing mempolicy to parse
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002888 * @mpol: pointer to struct mempolicy pointer, returned on success.
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002889 *
2890 * Format of input:
2891 * <mode>[=<flags>][:<nodelist>]
2892 *
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002893 * On success, returns 0, else 1
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002894 */
Hugh Dickinsa7a88b22013-01-02 02:04:23 -08002895int mpol_parse_str(char *str, struct mempolicy **mpol)
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002896{
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002897 struct mempolicy *new = NULL;
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08002898 unsigned short mode_flags;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002899 nodemask_t nodes;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002900 char *nodelist = strchr(str, ':');
2901 char *flags = strchr(str, '=');
zhong jiangdedf2c72018-10-26 15:06:57 -07002902 int err = 1, mode;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002903
Dan Carpenterc7a91bc2020-01-30 22:11:07 -08002904 if (flags)
2905 *flags++ = '\0'; /* terminate mode string */
2906
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002907 if (nodelist) {
2908 /* NUL-terminate mode or flags string */
2909 *nodelist++ = '\0';
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002910 if (nodelist_parse(nodelist, nodes))
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002911 goto out;
Lai Jiangshan01f13bd2012-12-12 13:51:33 -08002912 if (!nodes_subset(nodes, node_states[N_MEMORY]))
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002913 goto out;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002914 } else
2915 nodes_clear(nodes);
2916
zhong jiangdedf2c72018-10-26 15:06:57 -07002917 mode = match_string(policy_modes, MPOL_MAX, str);
2918 if (mode < 0)
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002919 goto out;
2920
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002921 switch (mode) {
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002922 case MPOL_PREFERRED:
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002923 /*
Randy Dunlapaa9f7d52020-04-01 21:10:58 -07002924 * Insist on a nodelist of one node only; later we use
2925 * first_node(nodes) to grab the single node, so the
2926 * nodelist (or nodes) must not be empty here.
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002927 */
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002928 if (nodelist) {
2929 char *rest = nodelist;
2930 while (isdigit(*rest))
2931 rest++;
KOSAKI Motohiro926f2ae2010-03-23 13:35:32 -07002932 if (*rest)
2933 goto out;
Randy Dunlapaa9f7d52020-04-01 21:10:58 -07002934 if (nodes_empty(nodes))
2935 goto out;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002936 }
2937 break;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002938 case MPOL_INTERLEAVE:
2939 /*
2940 * Default to online nodes with memory if no nodelist
2941 */
2942 if (!nodelist)
Lai Jiangshan01f13bd2012-12-12 13:51:33 -08002943 nodes = node_states[N_MEMORY];
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002944 break;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002945 case MPOL_LOCAL:
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002946 /*
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002947 * Don't allow a nodelist; mpol_new() checks flags
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002948 */
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002949 if (nodelist)
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002950 goto out;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002951 mode = MPOL_PREFERRED;
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002952 break;
Ravikiran G Thirumalai413b43d2010-03-23 13:35:28 -07002953 case MPOL_DEFAULT:
2954 /*
2955 * Insist on an empty nodelist
2956 */
2957 if (!nodelist)
2958 err = 0;
2959 goto out;
KOSAKI Motohirod69b2e62010-03-23 13:35:30 -07002960 case MPOL_BIND:
2961 /*
2962 * Insist on a nodelist
2963 */
2964 if (!nodelist)
2965 goto out;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002966 }
2967
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002968 mode_flags = 0;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002969 if (flags) {
2970 /*
2971 * Currently, we only support two mutually exclusive
2972 * mode flags.
2973 */
2974 if (!strcmp(flags, "static"))
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002975 mode_flags |= MPOL_F_STATIC_NODES;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002976 else if (!strcmp(flags, "relative"))
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002977 mode_flags |= MPOL_F_RELATIVE_NODES;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002978 else
KOSAKI Motohiro926f2ae2010-03-23 13:35:32 -07002979 goto out;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002980 }
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002981
2982 new = mpol_new(mode, mode_flags, &nodes);
2983 if (IS_ERR(new))
KOSAKI Motohiro926f2ae2010-03-23 13:35:32 -07002984 goto out;
2985
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08002986 /*
2987 * Save nodes for mpol_to_str() to show the tmpfs mount options
2988 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2989 */
2990 if (mode != MPOL_PREFERRED)
2991 new->v.nodes = nodes;
2992 else if (nodelist)
2993 new->v.preferred_node = first_node(nodes);
2994 else
2995 new->flags |= MPOL_F_LOCAL;
2996
2997 /*
2998 * Save nodes for contextualization: this will be used to "clone"
2999 * the mempolicy in a specific context [cpuset] at a later time.
3000 */
3001 new->w.user_nodemask = nodes;
3002
KOSAKI Motohiro926f2ae2010-03-23 13:35:32 -07003003 err = 0;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07003004
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07003005out:
3006 /* Restore string for error message */
3007 if (nodelist)
3008 *--nodelist = ':';
3009 if (flags)
3010 *--flags = '=';
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07003011 if (!err)
3012 *mpol = new;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07003013 return err;
3014}
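/*
 * Illustrative sketch (not part of this file): a caller parsing a tmpfs
 * "mpol=" mount option might do roughly the following; note that the
 * string is modified in place, so it must be writable:
 *
 *	char str[] = "interleave=static:0-3";
 *	struct mempolicy *mpol;
 *
 *	if (mpol_parse_str(str, &mpol))
 *		return -EINVAL;
 *
 * On success, mpol carries a reference that the caller must eventually drop
 * with mpol_put().
 */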
3015#endif /* CONFIG_TMPFS */
3016
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07003017/**
3018 * mpol_to_str - format a mempolicy structure for printing
3019 * @buffer: to contain formatted mempolicy string
3020 * @maxlen: length of @buffer
3021 * @pol: pointer to mempolicy to be formatted
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07003022 *
David Rientjes948927e2013-11-12 15:07:28 -08003023 * Convert @pol into a string. If @buffer is too short, truncate the string.
3024 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
3025 * longest flag, "relative", and to display at least a few node ids.
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003026 */
David Rientjes948927e2013-11-12 15:07:28 -08003027void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003028{
3029 char *p = buffer;
David Rientjes948927e2013-11-12 15:07:28 -08003030 nodemask_t nodes = NODE_MASK_NONE;
3031 unsigned short mode = MPOL_DEFAULT;
3032 unsigned short flags = 0;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003033
David Rientjes8790c71a2014-01-30 15:46:08 -08003034 if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
Lee Schermerhornbea904d2008-04-28 02:13:18 -07003035 mode = pol->mode;
David Rientjes948927e2013-11-12 15:07:28 -08003036 flags = pol->flags;
3037 }
Lee Schermerhornbea904d2008-04-28 02:13:18 -07003038
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003039 switch (mode) {
3040 case MPOL_DEFAULT:
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003041 break;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003042 case MPOL_PREFERRED:
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07003043 if (flags & MPOL_F_LOCAL)
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08003044 mode = MPOL_LOCAL;
Lee Schermerhorn53f25562008-04-28 02:13:20 -07003045 else
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07003046 node_set(pol->v.preferred_node, nodes);
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003047 break;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003048 case MPOL_BIND:
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003049 case MPOL_INTERLEAVE:
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08003050 nodes = pol->v.nodes;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003051 break;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003052 default:
David Rientjes948927e2013-11-12 15:07:28 -08003053 WARN_ON_ONCE(1);
3054 snprintf(p, maxlen, "unknown");
3055 return;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003056 }
3057
David Rientjesb7a9f422013-11-21 14:32:06 -08003058 p += snprintf(p, maxlen, "%s", policy_modes[mode]);
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003059
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07003060 if (flags & MPOL_MODE_FLAGS) {
David Rientjes948927e2013-11-12 15:07:28 -08003061 p += snprintf(p, buffer + maxlen - p, "=");
David Rientjesf5b087b2008-04-28 02:12:27 -07003062
Lee Schermerhorn22919902008-04-28 02:13:22 -07003063 /*
3064 * Currently, the only defined flags are mutually exclusive
3065 */
David Rientjesf5b087b2008-04-28 02:12:27 -07003066 if (flags & MPOL_F_STATIC_NODES)
Lee Schermerhorn22919902008-04-28 02:13:22 -07003067 p += snprintf(p, buffer + maxlen - p, "static");
3068 else if (flags & MPOL_F_RELATIVE_NODES)
3069 p += snprintf(p, buffer + maxlen - p, "relative");
David Rientjesf5b087b2008-04-28 02:12:27 -07003070 }
3071
Tejun Heo9e763e02015-02-13 14:38:02 -08003072 if (!nodes_empty(nodes))
3073 p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
3074 nodemask_pr_args(&nodes));
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003075}
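/*
 * Illustrative examples (not part of this file) of strings produced above,
 * e.g. as the value of the "mpol=" option that tmpfs reports in
 * /proc/mounts:
 *
 *	default
 *	prefer:2
 *	bind=static:0,2
 *	interleave=relative:0-3
 *	local
 */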