// SPDX-License-Identifier: GPL-2.0-only
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support multiple policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process counter
 *                is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case NUMA_NO_NODE here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non-default
 *                process policy.
 *
 * preferred many Try a set of nodes first before normal fallback. This is
 *                similar to preferred without the special case.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
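
/*
 * Userspace selects these policies through the set_mempolicy(2) and
 * mbind(2) syscalls.  A rough, illustrative sketch only (assuming
 * <numaif.h> from libnuma, nodes 0 and 1 online, and an existing
 * mapping addr/len), not a verbatim excerpt:
 *
 *	unsigned long nodes = (1UL << 0) | (1UL << 1);
 *
 *	// Interleave future allocations of this task across nodes 0 and 1.
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes, sizeof(nodes) * 8);
 *
 *	// Restrict one mapping to node 0 only, overriding the task policy.
 *	unsigned long node0 = 1UL << 0;
 *	mbind(addr, len, MPOL_BIND, &node0, sizeof(node0) * 8, 0);
 */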

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>
#include <linux/swapops.h>

#include <asm/tlbflush.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

/**
 * numa_map_to_online_node - Find closest online node
 * @node: Node id to start the search
 *
 * Look up the next closest node by distance if @node is not online.
 */
int numa_map_to_online_node(int node)
{
	int min_dist = INT_MAX, dist, n, min_node;

	if (node == NUMA_NO_NODE || node_online(node))
		return node;

	min_node = node;
	for_each_online_node(n) {
		dist = node_distance(node, n);
		if (dist < min_dist) {
			min_dist = dist;
			min_node = n;
		}
	}

	return min_node;
}
EXPORT_SYMBOL_GPL(numa_map_to_online_node);

struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
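
/*
 * A rough worked example of the remap above (illustrative only): with
 * MPOL_F_RELATIVE_NODES, a user nodemask of {0,1} applied while the
 * allowed (cpuset) nodes are {4,5,6} ends up as {4,5}.  nodes_fold()
 * first wraps the user bits modulo the number of allowed nodes, and
 * nodes_onto() then maps bit i onto the i-th allowed node, so relative
 * bits beyond the allowed count simply wrap around.
 */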

static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;

	nodes_clear(pol->nodes);
	node_set(first_node(*nodes), pol->nodes);
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy. mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy. May also be called holding the mmap_lock for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/*
	 * Default (pol==NULL) and local memory policies are not subject
	 * to any remapping. They also do not need any special
	 * constructor.
	 */
	if (!pol || pol->mode == MPOL_LOCAL)
		return 0;

	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);

	if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
	else
		nodes_and(nsc->mask2, *nodes, nsc->mask1);

	if (mpol_store_user_nodemask(pol))
		pol->w.user_nodemask = *nodes;
	else
		pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;

	ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	return ret;
}

/*
 * This function just creates a new policy, does some checking and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);

			mode = MPOL_LOCAL;
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes) ||
		    (flags & MPOL_F_STATIC_NODES) ||
		    (flags & MPOL_F_RELATIVE_NODES))
			return ERR_PTR(-EINVAL);
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}

static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
			    *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	pol->nodes = tmp;
}

static void mpol_rebind_preferred(struct mempolicy *pol,
						const nodemask_t *nodes)
{
	pol->w.cpuset_mems_allowed = *nodes;
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * Per-vma policies are protected by mmap_lock. Allocations using per-task
 * policies are protected by task->mems_allowed_seq to prevent a premature
 * OOM/allocation failure due to parallel nodemask modification.
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	mpol_ops[pol->mode].rebind(pol, newmask);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_lock during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	mmap_write_lock(mm);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new);
	mmap_write_unlock(mm);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_nodemask,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_nodemask,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_LOCAL] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_PREFERRED_MANY] = {
		.create = mpol_new_nodemask,
		.rebind = mpol_rebind_preferred,
	},
};

static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

struct queue_pages {
	struct list_head *pagelist;
	unsigned long flags;
	nodemask_t *nmask;
	unsigned long start;
	unsigned long end;
	struct vm_area_struct *first;
};

/*
 * Check if the page's nid is in qp->nmask.
 *
 * If MPOL_MF_INVERT is set in qp->flags, check instead that the nid is
 * *not* in qp->nmask.
 */
static inline bool queue_pages_required(struct page *page,
					struct queue_pages *qp)
{
	int nid = page_to_nid(page);
	unsigned long flags = qp->flags;

	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
}

/*
 * queue_pages_pmd() has four possible return values:
 * 0 - pages are placed on the right node or queued successfully, or
 *     special page is met, i.e. huge zero page.
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * 2 - THP was split.
 * -EIO - a migration entry was found, or only MPOL_MF_STRICT was specified
 *        and an existing page was already on a node that does not follow
 *        the policy.
 */
static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
	__releases(ptl)
{
	int ret = 0;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags;

	if (unlikely(is_pmd_migration_entry(*pmd))) {
		ret = -EIO;
		goto unlock;
	}
	page = pmd_page(*pmd);
	if (is_huge_zero_page(page)) {
		spin_unlock(ptl);
		walk->action = ACTION_CONTINUE;
		goto out;
	}
	if (!queue_pages_required(page, qp))
		goto unlock;

	flags = qp->flags;
	/* go to thp migration */
	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
		if (!vma_migratable(walk->vma) ||
		    migrate_page_add(page, qp->pagelist, flags)) {
			ret = 1;
			goto unlock;
		}
	} else
		ret = -EIO;
unlock:
	spin_unlock(ptl);
out:
	return ret;
}

/*
 * Scan through pages checking if pages follow certain conditions,
 * and move them to the pagelist if they do.
 *
 * queue_pages_pte_range() has three possible return values:
 * 0 - pages are placed on the right node or queued successfully, or
 *     special page is met, i.e. zero page.
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
 *        on a node that does not follow the policy.
 */
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	int ret;
	bool has_unmovable = false;
	pte_t *pte, *mapped_pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
		if (ret != 2)
			return ret;
	}
	/* THP was split, fall through to pte walk */

	if (pmd_trans_unstable(pmd))
		return 0;

	mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		if (!queue_pages_required(page, qp))
			continue;
		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
			/* MPOL_MF_STRICT must be specified if we get here */
			if (!vma_migratable(vma)) {
				has_unmovable = true;
				break;
			}

			/*
			 * Do not abort immediately since there may be
			 * temporary off LRU pages in the range.  Still
			 * need to migrate other LRU pages.
			 */
			if (migrate_page_add(page, qp->pagelist, flags))
				has_unmovable = true;
		} else
			break;
	}
	pte_unmap_unlock(mapped_pte, ptl);
	cond_resched();

	if (has_unmovable)
		return 1;

	return addr != end ? -EIO : 0;
}

static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
			       unsigned long addr, unsigned long end,
			       struct mm_walk *walk)
{
	int ret = 0;
#ifdef CONFIG_HUGETLB_PAGE
	struct queue_pages *qp = walk->private;
	unsigned long flags = (qp->flags & MPOL_MF_VALID);
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto unlock;
	page = pte_page(entry);
	if (!queue_pages_required(page, qp))
		goto unlock;

	if (flags == MPOL_MF_STRICT) {
		/*
		 * STRICT alone means only detecting misplaced pages and no
		 * need to further check other vmas.
		 */
		ret = -EIO;
		goto unlock;
	}

	if (!vma_migratable(walk->vma)) {
		/*
		 * Must be STRICT with MOVE*, otherwise .test_walk() would
		 * have stopped walking the current vma.
		 * Detect the misplaced page but allow migrating pages which
		 * have been queued.
		 */
		ret = 1;
		goto unlock;
	}

	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) {
		if (!isolate_huge_page(page, qp->pagelist) &&
			(flags & MPOL_MF_STRICT))
			/*
			 * Failed to isolate page but allow migrating pages
			 * which have been queued.
			 */
			ret = 1;
	}
unlock:
	spin_unlock(ptl);
#else
	BUG();
#endif
	return ret;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses as inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	int nr_updated;

	nr_updated = change_protection(vma, addr, end, PAGE_NONE, MM_CP_PROT_NUMA);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

static int queue_pages_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct queue_pages *qp = walk->private;
	unsigned long endvma = vma->vm_end;
	unsigned long flags = qp->flags;

	/* range check first */
	VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);

	if (!qp->first) {
		qp->first = vma;
		if (!(flags & MPOL_MF_DISCONTIG_OK) &&
			(qp->start < vma->vm_start))
			/* hole at head side of range */
			return -EFAULT;
	}
	if (!(flags & MPOL_MF_DISCONTIG_OK) &&
		((vma->vm_end < qp->end) &&
		(!vma->vm_next || vma->vm_end < vma->vm_next->vm_start)))
		/* hole at middle or tail of range */
		return -EFAULT;

	/*
	 * Need check MPOL_MF_STRICT to return -EIO if possible
	 * regardless of vma_migratable
	 */
	if (!vma_migratable(vma) &&
	    !(flags & MPOL_MF_STRICT))
		return 1;

	if (endvma > end)
		endvma = end;

	if (flags & MPOL_MF_LAZY) {
		/* Similar to task_numa_work, skip inaccessible VMAs */
		if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
			!(vma->vm_flags & VM_MIXEDMAP))
			change_prot_numa(vma, start, endvma);
		return 1;
	}

	/* queue pages from current vma */
	if (flags & MPOL_MF_VALID)
		return 0;
	return 1;
}

static const struct mm_walk_ops queue_pages_walk_ops = {
	.hugetlb_entry		= queue_pages_hugetlb,
	.pmd_entry		= queue_pages_pte_range,
	.test_walk		= queue_pages_test_walk,
};

/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued to the pagelist, which
 * is passed via @private.
 *
 * queue_pages_range() has three possible return values:
 * 1 - there is an unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * 0 - queue pages successfully or no misplaced page.
 * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
 *         memory range specified by nodemask and maxnode points outside
 *         your accessible address space (-EFAULT)
 */
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		nodemask_t *nodes, unsigned long flags,
		struct list_head *pagelist)
{
	int err;
	struct queue_pages qp = {
		.pagelist = pagelist,
		.flags = flags,
		.nmask = nodes,
		.start = start,
		.end = end,
		.first = NULL,
	};

	err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);

	if (!qp.first)
		/* whole range in hole */
		err = -EFAULT;

	return err;
}

/*
 * Apply policy to a single VMA
 * This must be called with the mmap_lock held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
						struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new; /* protected by mmap_lock */
	mpol_put(old);

	return 0;
 err_out:
	mpol_put(new);
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	VM_BUG_ON(!vma);

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend   = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				 vma->anon_vma, vma->vm_file, pgoff,
				 new_pol, vma->vm_userfaultfd_ctx,
				 vma_anon_name(vma));
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			if (mpol_equal(vma_policy(vma), new_pol))
				continue;
			/* vma_merge() joined vma && vma->next, case 8 */
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
 replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}

	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		mpol_put(new);
		goto out;
	}
	task_lock(current);
	old = current->mempolicy;
	current->mempolicy = new;
	if (new && new->mode == MPOL_INTERLEAVE)
		current->il_prev = MAX_NUMNODES-1;
	task_unlock(current);
	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
	case MPOL_PREFERRED:
	case MPOL_PREFERRED_MANY:
		*nodes = p->nodes;
		break;
	case MPOL_LOCAL:
		/* return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p = NULL;
	int err;

	int locked = 1;
	err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
	if (err > 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	if (locked)
		mmap_read_unlock(mm);
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		mmap_read_lock(mm);
		vma = vma_lookup(mm, addr);
		if (!vma) {
			mmap_read_unlock(mm);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			/*
			 * Take a refcount on the mpol, lookup_node()
			 * will drop the mmap_lock, so after calling
			 * lookup_node() only "pol" remains valid, "vma"
			 * is stale.
			 */
			pol_refcount = pol;
			vma = NULL;
			mpol_get(pol);
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = next_node_in(current->il_prev, pol->nodes);
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		mmap_read_unlock(mm);
	if (pol_refcount)
		mpol_put(pol_refcount);
	return err;
}
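
/*
 * Rough userspace sketch (illustrative only, assuming <numaif.h> from
 * libnuma): the MPOL_F_NODE | MPOL_F_ADDR path above backs queries such as
 *
 *	int node;
 *
 *	// Which node currently backs the page at addr?
 *	get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR);
 */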

#ifdef CONFIG_MIGRATION
/*
 * page migration, thp tail pages can be passed.
 */
static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	struct page *head = compound_head(page);
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
		if (!isolate_lru_page(head)) {
			list_add_tail(&head->lru, pagelist);
			mod_node_page_state(page_pgdat(head),
				NR_ISOLATED_ANON + page_is_file_lru(head),
				thp_nr_pages(head));
		} else if (flags & MPOL_MF_STRICT) {
			/*
			 * Non-movable page may reach here.  And, there may be
			 * temporary off LRU pages or non-LRU movable pages.
			 * Treat them as unmovable pages since they can't be
			 * isolated, so they can't be moved at the moment.  It
			 * should return -EIO for this case too.
			 */
			return -EIO;
		}
	}

	return 0;
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;
	struct migration_target_control mtc = {
		.nid = dest,
		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
	};

	nodes_clear(nmask);
	node_set(source, nmask);

	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration.  Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
	 */
	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, alloc_migration_target, NULL,
				(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
Andrew Morton0ce72d42012-05-29 15:06:24 -07001092int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1093 const nodemask_t *to, int flags)
Christoph Lameter39743882006-01-08 01:00:51 -08001094{
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001095 int busy = 0;
Jan Stancekf555bef2021-01-12 15:49:21 -08001096 int err = 0;
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001097 nodemask_t tmp;
Christoph Lameter39743882006-01-08 01:00:51 -08001098
Minchan Kim361a2a22021-05-04 18:36:57 -07001099 lru_cache_disable();
Christoph Lameter0aedadf2008-11-06 12:53:30 -08001100
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001101 mmap_read_lock(mm);
Christoph Lameter39743882006-01-08 01:00:51 -08001102
KOSAKI Motohiroda0aa132010-03-05 13:41:59 -08001103 /*
1104 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
1105 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
1106 * bit in 'tmp', and return that <source, dest> pair for migration.
1107 * The pair of nodemasks 'to' and 'from' define the map.
1108 *
1109 * If no pair of bits is found that way, fallback to picking some
1110 * pair of 'source' and 'dest' bits that are not the same. If the
1111 * 'source' and 'dest' bits are the same, this represents a node
1112 * that will be migrating to itself, so no pages need move.
1113 *
1114 * If no bits are left in 'tmp', or if all remaining bits left
1115 * in 'tmp' correspond to the same bit in 'to', return false
1116 * (nothing left to migrate).
1117 *
1118 * This lets us pick a pair of nodes to migrate between, such that
1119 * if possible the dest node is not already occupied by some other
1120 * source node, minimizing the risk of overloading the memory on a
1121 * node that would happen if we migrated incoming memory to a node
1122 * before migrating outgoing memory source that same node.
1123 *
1124 * A single scan of tmp is sufficient. As we go, we remember the
1125 * most recent <s, d> pair that moved (s != d). If we find a pair
1126 * that not only moved, but what's better, moved to an empty slot
1127 * (d is not set in tmp), then we break out then, with that pair.
Justin P. Mattockae0e47f2011-03-01 15:06:02 +01001128 * Otherwise when we finish scanning from_tmp, we at least have the
KOSAKI Motohiroda0aa132010-03-05 13:41:59 -08001129 * most recent <s, d> pair that moved. If we get all the way through
1130 * the scan of tmp without finding any node that moved, much less
1131 * moved to an empty node, then there is nothing left worth migrating.
1132 */
Christoph Lameterd4984712006-01-08 01:00:55 -08001133
Andrew Morton0ce72d42012-05-29 15:06:24 -07001134 tmp = *from;
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001135 while (!nodes_empty(tmp)) {
Zhiyuan Dai68d68ff2021-05-04 18:40:12 -07001136 int s, d;
Jianguo Wub76ac7e2013-11-12 15:07:39 -08001137 int source = NUMA_NO_NODE;
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001138 int dest = 0;
1139
1140 for_each_node_mask(s, tmp) {
Larry Woodman4a5b18c2012-05-29 15:06:24 -07001141
1142 /*
1143 * do_migrate_pages() tries to maintain the relative
1144 * node relationship of the pages established between
1145 * threads and memory areas.
1146 *
1147 * However if the number of source nodes is not equal to
1148 * the number of destination nodes we can not preserve
1149 * this node relative relationship. In that case, skip
1150 * copying memory from a node that is in the destination
1151 * mask.
1152 *
1153 * Example: [2,3,4] -> [3,4,5] moves everything.
1154 * [0-7] -> [3,4,5] moves only 0,1,2,6,7.
1155 */
1156
Andrew Morton0ce72d42012-05-29 15:06:24 -07001157 if ((nodes_weight(*from) != nodes_weight(*to)) &&
1158 (node_isset(s, *to)))
Larry Woodman4a5b18c2012-05-29 15:06:24 -07001159 continue;
1160
Andrew Morton0ce72d42012-05-29 15:06:24 -07001161 d = node_remap(s, *from, *to);
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001162 if (s == d)
1163 continue;
1164
1165 source = s; /* Node moved. Memorize */
1166 dest = d;
1167
1168 /* dest not in remaining from nodes? */
1169 if (!node_isset(dest, tmp))
1170 break;
1171 }
Jianguo Wub76ac7e2013-11-12 15:07:39 -08001172 if (source == NUMA_NO_NODE)
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001173 break;
1174
1175 node_clear(source, tmp);
1176 err = migrate_to_node(mm, source, dest, flags);
1177 if (err > 0)
1178 busy += err;
1179 if (err < 0)
1180 break;
Christoph Lameter39743882006-01-08 01:00:51 -08001181 }
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001182 mmap_read_unlock(mm);
Minchan Kimd479960e2021-05-04 18:36:54 -07001183
Minchan Kim361a2a22021-05-04 18:36:57 -07001184 lru_cache_enable();
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001185 if (err < 0)
1186 return err;
1187 return busy;
Christoph Lameterb20a3502006-03-22 00:09:12 -08001188
Christoph Lameter39743882006-01-08 01:00:51 -08001189}
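/*
 * Illustrative walk-through (not part of the original source) of the pair
 * selection above, using the example from the comments in the function:
 * for from = [0-7] and to = [3,4,5] the weights differ, so source nodes
 * 3, 4 and 5 are skipped because they are also destinations; nodes 0, 1,
 * 2, 6 and 7 are each remapped by node_remap() onto {3,4,5} and handed to
 * migrate_to_node() one <source, dest> pair at a time, preferring pairs
 * whose destination is no longer a pending source.
 */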
1190
Lee Schermerhorn3ad33b242007-11-14 16:59:10 -08001191/*
1192 * Allocate a new page for page migration based on vma policy.
Hugh Dickinsd05f0cdcb2014-06-23 13:22:07 -07001193 * Start by assuming the page is mapped by the same vma as contains @start.
Lee Schermerhorn3ad33b242007-11-14 16:59:10 -08001194 * Search forward from there, if not. N.B., this assumes that the
1195 * list of pages handed to migrate_pages()--which is how we get here--
1196 * is in virtual address order.
1197 */
Michal Hocko666feb22018-04-10 16:30:03 -07001198static struct page *new_page(struct page *page, unsigned long start)
Christoph Lameter95a402c2006-06-23 02:03:53 -07001199{
Hugh Dickinsd05f0cdcb2014-06-23 13:22:07 -07001200 struct vm_area_struct *vma;
Kees Cook3f649ab2020-06-03 13:09:38 -07001201 unsigned long address;
Christoph Lameter95a402c2006-06-23 02:03:53 -07001202
Hugh Dickinsd05f0cdcb2014-06-23 13:22:07 -07001203 vma = find_vma(current->mm, start);
Lee Schermerhorn3ad33b242007-11-14 16:59:10 -08001204 while (vma) {
1205 address = page_address_in_vma(page, vma);
1206 if (address != -EFAULT)
1207 break;
1208 vma = vma->vm_next;
1209 }
1210
Wanpeng Li11c731e2013-12-18 17:08:56 -08001211 if (PageHuge(page)) {
Michal Hocko389c8172018-01-31 16:21:03 -08001212 return alloc_huge_page_vma(page_hstate(compound_head(page)),
1213 vma, address);
Michal Hocko94723aa2018-04-10 16:30:07 -07001214 } else if (PageTransHuge(page)) {
Naoya Horiguchic8633792017-09-08 16:11:08 -07001215 struct page *thp;
1216
David Rientjes19deb762019-09-04 12:54:20 -07001217 thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
1218 HPAGE_PMD_ORDER);
Naoya Horiguchic8633792017-09-08 16:11:08 -07001219 if (!thp)
1220 return NULL;
1221 prep_transhuge_page(thp);
1222 return thp;
Wanpeng Li11c731e2013-12-18 17:08:56 -08001223 }
1224 /*
1225 * if !vma, alloc_page_vma() will use task or system default policy
1226 */
Michal Hocko0f556852017-07-12 14:36:58 -07001227 return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
1228 vma, address);
Christoph Lameter95a402c2006-06-23 02:03:53 -07001229}
Christoph Lameterb20a3502006-03-22 00:09:12 -08001230#else
1231
Yang Shia53190a2019-08-13 15:37:18 -07001232static int migrate_page_add(struct page *page, struct list_head *pagelist,
Christoph Lameterb20a3502006-03-22 00:09:12 -08001233 unsigned long flags)
1234{
Yang Shia53190a2019-08-13 15:37:18 -07001235 return -EIO;
Christoph Lameterb20a3502006-03-22 00:09:12 -08001236}
1237
Andrew Morton0ce72d42012-05-29 15:06:24 -07001238int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1239 const nodemask_t *to, int flags)
Christoph Lameterb20a3502006-03-22 00:09:12 -08001240{
1241 return -ENOSYS;
1242}
Christoph Lameter95a402c2006-06-23 02:03:53 -07001243
Michal Hocko666feb22018-04-10 16:30:03 -07001244static struct page *new_page(struct page *page, unsigned long start)
Christoph Lameter95a402c2006-06-23 02:03:53 -07001245{
1246 return NULL;
1247}
Christoph Lameterb20a3502006-03-22 00:09:12 -08001248#endif
1249
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07001250static long do_mbind(unsigned long start, unsigned long len,
David Rientjes028fec42008-04-28 02:12:25 -07001251 unsigned short mode, unsigned short mode_flags,
1252 nodemask_t *nmask, unsigned long flags)
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001253{
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001254 struct mm_struct *mm = current->mm;
1255 struct mempolicy *new;
1256 unsigned long end;
1257 int err;
Yang Shid8835442019-08-13 15:37:15 -07001258 int ret;
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001259 LIST_HEAD(pagelist);
1260
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001261 if (flags & ~(unsigned long)MPOL_MF_VALID)
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001262 return -EINVAL;
Christoph Lameter74c00242006-03-14 19:50:21 -08001263 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001264 return -EPERM;
1265
1266 if (start & ~PAGE_MASK)
1267 return -EINVAL;
1268
1269 if (mode == MPOL_DEFAULT)
1270 flags &= ~MPOL_MF_STRICT;
1271
1272 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1273 end = start + len;
1274
1275 if (end < start)
1276 return -EINVAL;
1277 if (end == start)
1278 return 0;
1279
David Rientjes028fec42008-04-28 02:12:25 -07001280 new = mpol_new(mode, mode_flags, nmask);
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001281 if (IS_ERR(new))
1282 return PTR_ERR(new);
1283
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001284 if (flags & MPOL_MF_LAZY)
1285 new->flags |= MPOL_F_MOF;
1286
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001287 /*
1288 * If we are using the default policy then operation
1289 * on discontinuous address spaces is okay after all
1290 */
1291 if (!new)
1292 flags |= MPOL_MF_DISCONTIG_OK;
1293
David Rientjes028fec42008-04-28 02:12:25 -07001294 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1295 start, start + len, mode, mode_flags,
David Rientjes00ef2d22013-02-22 16:35:36 -08001296 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001297
Christoph Lameter0aedadf2008-11-06 12:53:30 -08001298 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1299
Minchan Kim361a2a22021-05-04 18:36:57 -07001300 lru_cache_disable();
Christoph Lameter0aedadf2008-11-06 12:53:30 -08001301 }
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07001302 {
1303 NODEMASK_SCRATCH(scratch);
1304 if (scratch) {
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001305 mmap_write_lock(mm);
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07001306 err = mpol_set_nodemask(new, nmask, scratch);
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07001307 if (err)
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001308 mmap_write_unlock(mm);
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07001309 } else
1310 err = -ENOMEM;
1311 NODEMASK_SCRATCH_FREE(scratch);
1312 }
KOSAKI Motohirob05ca732009-10-26 16:49:59 -07001313 if (err)
1314 goto mpol_out;
1315
Yang Shid8835442019-08-13 15:37:15 -07001316 ret = queue_pages_range(mm, start, end, nmask,
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001317 flags | MPOL_MF_INVERT, &pagelist);
Yang Shid8835442019-08-13 15:37:15 -07001318
1319 if (ret < 0) {
Yang Shia85dfc32019-11-15 17:34:33 -08001320 err = ret;
Yang Shid8835442019-08-13 15:37:15 -07001321 goto up_out;
1322 }
1323
1324 err = mbind_range(mm, start, end, new);
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001325
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001326 if (!err) {
1327 int nr_failed = 0;
1328
Minchan Kimcf608ac2010-10-26 14:21:29 -07001329 if (!list_empty(&pagelist)) {
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001330 WARN_ON_ONCE(flags & MPOL_MF_LAZY);
Hugh Dickinsd05f0cdcb2014-06-23 13:22:07 -07001331 nr_failed = migrate_pages(&pagelist, new_page, NULL,
Yang Shi5ac95882021-09-02 14:59:13 -07001332 start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL);
Minchan Kimcf608ac2010-10-26 14:21:29 -07001333 if (nr_failed)
Naoya Horiguchi74060e42013-09-11 14:22:06 -07001334 putback_movable_pages(&pagelist);
Minchan Kimcf608ac2010-10-26 14:21:29 -07001335 }
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001336
Yang Shid8835442019-08-13 15:37:15 -07001337 if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001338 err = -EIO;
Yang Shia85dfc32019-11-15 17:34:33 -08001339 } else {
Yang Shid8835442019-08-13 15:37:15 -07001340up_out:
Yang Shia85dfc32019-11-15 17:34:33 -08001341 if (!list_empty(&pagelist))
1342 putback_movable_pages(&pagelist);
1343 }
1344
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001345 mmap_write_unlock(mm);
Yang Shid8835442019-08-13 15:37:15 -07001346mpol_out:
Lee Schermerhornf0be3d32008-04-28 02:13:08 -07001347 mpol_put(new);
Minchan Kimd479960e2021-05-04 18:36:54 -07001348 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
Minchan Kim361a2a22021-05-04 18:36:57 -07001349 lru_cache_enable();
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001350 return err;
1351}
1352
Christoph Lameter39743882006-01-08 01:00:51 -08001353/*
Christoph Lameter8bccd852005-10-29 18:16:59 -07001354 * User space interface with variable sized bitmaps for nodelists.
1355 */
Arnd Bergmanne1302422021-09-08 15:18:21 -07001356static int get_bitmap(unsigned long *mask, const unsigned long __user *nmask,
1357 unsigned long maxnode)
1358{
1359 unsigned long nlongs = BITS_TO_LONGS(maxnode);
1360 int ret;
1361
1362 if (in_compat_syscall())
1363 ret = compat_get_bitmap(mask,
1364 (const compat_ulong_t __user *)nmask,
1365 maxnode);
1366 else
1367 ret = copy_from_user(mask, nmask,
1368 nlongs * sizeof(unsigned long));
1369
1370 if (ret)
1371 return -EFAULT;
1372
1373 if (maxnode % BITS_PER_LONG)
1374 mask[nlongs - 1] &= (1UL << (maxnode % BITS_PER_LONG)) - 1;
1375
1376 return 0;
1377}
Christoph Lameter8bccd852005-10-29 18:16:59 -07001378
1379/* Copy a node mask from user space. */
Christoph Lameter39743882006-01-08 01:00:51 -08001380static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
Christoph Lameter8bccd852005-10-29 18:16:59 -07001381 unsigned long maxnode)
1382{
Christoph Lameter8bccd852005-10-29 18:16:59 -07001383 --maxnode;
1384 nodes_clear(*nodes);
1385 if (maxnode == 0 || !nmask)
1386 return 0;
Andi Kleena9c930b2006-02-20 18:27:59 -08001387 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
Chris Wright636f13c2006-02-17 13:59:36 -08001388 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001389
Yisheng Xie56521e72018-01-31 16:16:11 -08001390 /*
1391 * When the user specifies more nodes than supported, just check
Arnd Bergmanne1302422021-09-08 15:18:21 -07001392 * that the unsupported part is all zero, one word at a time,
1393 * starting at the end.
Yisheng Xie56521e72018-01-31 16:16:11 -08001394 */
Arnd Bergmanne1302422021-09-08 15:18:21 -07001395 while (maxnode > MAX_NUMNODES) {
1396 unsigned long bits = min_t(unsigned long, maxnode, BITS_PER_LONG);
1397 unsigned long t;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001398
Arnd Bergmanne1302422021-09-08 15:18:21 -07001399 if (get_bitmap(&t, &nmask[(maxnode - 1) / BITS_PER_LONG], bits))
Yisheng Xie56521e72018-01-31 16:16:11 -08001400 return -EFAULT;
Arnd Bergmanne1302422021-09-08 15:18:21 -07001401
1402 if (maxnode - bits >= MAX_NUMNODES) {
1403 maxnode -= bits;
1404 } else {
1405 maxnode = MAX_NUMNODES;
1406 t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
1407 }
1408 if (t)
Yisheng Xie56521e72018-01-31 16:16:11 -08001409 return -EINVAL;
1410 }
1411
Arnd Bergmanne1302422021-09-08 15:18:21 -07001412 return get_bitmap(nodes_addr(*nodes), nmask, maxnode);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001413}
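/*
 * Worked example (illustrative; assumes MAX_NUMNODES == 1024 and
 * BITS_PER_LONG == 64): a caller passing maxnode == 4097 describes bits
 * 0..4095 once the leading --maxnode is applied.  The loop above then
 * inspects the words covering bits 1024..4095 one at a time from the top
 * down and returns -EINVAL if any of them has a bit set; the final
 * get_bitmap() call copies only bits 0..1023 into *nodes.
 */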
1414
1415/* Copy a kernel node mask to user space */
1416static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1417 nodemask_t *nodes)
1418{
1419 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
Ralph Campbell050c17f2019-02-20 22:18:58 -08001420 unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
Arnd Bergmanne1302422021-09-08 15:18:21 -07001421 bool compat = in_compat_syscall();
1422
1423 if (compat)
1424 nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001425
1426 if (copy > nbytes) {
1427 if (copy > PAGE_SIZE)
1428 return -EINVAL;
1429 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1430 return -EFAULT;
1431 copy = nbytes;
Arnd Bergmanne1302422021-09-08 15:18:21 -07001432 maxnode = nr_node_ids;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001433 }
Arnd Bergmanne1302422021-09-08 15:18:21 -07001434
1435 if (compat)
1436 return compat_put_bitmap((compat_ulong_t __user *)mask,
1437 nodes_addr(*nodes), maxnode);
1438
Christoph Lameter8bccd852005-10-29 18:16:59 -07001439 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1440}
1441
Feng Tang95837922021-06-30 18:51:03 -07001442/* Basic parameter sanity check used by both mbind() and set_mempolicy() */
1443static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
1444{
1445 *flags = *mode & MPOL_MODE_FLAGS;
1446 *mode &= ~MPOL_MODE_FLAGS;
Dave Hansenb27abac2021-09-02 15:00:06 -07001447
Ben Widawskya38a59f2021-09-02 15:00:16 -07001448 if ((unsigned int)(*mode) >= MPOL_MAX)
Feng Tang95837922021-06-30 18:51:03 -07001449 return -EINVAL;
1450 if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
1451 return -EINVAL;
Eric Dumazet6d2aec92021-10-18 15:15:49 -07001452 if (*flags & MPOL_F_NUMA_BALANCING) {
1453 if (*mode != MPOL_BIND)
1454 return -EINVAL;
1455 *flags |= (MPOL_F_MOF | MPOL_F_MORON);
1456 }
Feng Tang95837922021-06-30 18:51:03 -07001457 return 0;
1458}
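/*
 * Illustrative example (not part of the original source): a caller
 * passing mode = MPOL_INTERLEAVE | MPOL_F_STATIC_NODES leaves here with
 * *mode == MPOL_INTERLEAVE and *flags == MPOL_F_STATIC_NODES.  Combining
 * MPOL_F_STATIC_NODES with MPOL_F_RELATIVE_NODES, or using
 * MPOL_F_NUMA_BALANCING with any mode other than MPOL_BIND, is rejected
 * with -EINVAL.
 */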
1459
Dominik Brodowskie7dc9ad62018-03-17 16:12:22 +01001460static long kernel_mbind(unsigned long start, unsigned long len,
1461 unsigned long mode, const unsigned long __user *nmask,
1462 unsigned long maxnode, unsigned int flags)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001463{
David Rientjes028fec42008-04-28 02:12:25 -07001464 unsigned short mode_flags;
Feng Tang95837922021-06-30 18:51:03 -07001465 nodemask_t nodes;
1466 int lmode = mode;
1467 int err;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001468
Andrey Konovalov057d33892019-09-25 16:48:30 -07001469 start = untagged_addr(start);
Feng Tang95837922021-06-30 18:51:03 -07001470 err = sanitize_mpol_flags(&lmode, &mode_flags);
1471 if (err)
1472 return err;
1473
Christoph Lameter8bccd852005-10-29 18:16:59 -07001474 err = get_nodes(&nodes, nmask, maxnode);
1475 if (err)
1476 return err;
Feng Tang95837922021-06-30 18:51:03 -07001477
1478 return do_mbind(start, len, lmode, mode_flags, &nodes, flags);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001479}
1480
Dominik Brodowskie7dc9ad62018-03-17 16:12:22 +01001481SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1482 unsigned long, mode, const unsigned long __user *, nmask,
1483 unsigned long, maxnode, unsigned int, flags)
1484{
1485 return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1486}
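/*
 * Illustrative userspace counterpart (a minimal sketch, not part of this
 * file; assumes the <numaif.h> wrapper from libnuma and a 64-bit long):
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *
 *	// Restrict a fresh anonymous mapping to nodes 0 and 1 and ask the
 *	// kernel to migrate any non-conforming pages that already exist.
 *	unsigned long nodemask = (1UL << 0) | (1UL << 1);
 *	void *buf = mmap(NULL, 1UL << 20, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	mbind(buf, 1UL << 20, MPOL_BIND, &nodemask,
 *	      8 * sizeof(nodemask), MPOL_MF_MOVE);
 */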
1487
Christoph Lameter8bccd852005-10-29 18:16:59 -07001488/* Set the process memory policy */
Dominik Brodowskiaf03c4a2018-03-17 16:20:01 +01001489static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1490 unsigned long maxnode)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001491{
Feng Tang95837922021-06-30 18:51:03 -07001492 unsigned short mode_flags;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001493 nodemask_t nodes;
Feng Tang95837922021-06-30 18:51:03 -07001494 int lmode = mode;
1495 int err;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001496
Feng Tang95837922021-06-30 18:51:03 -07001497 err = sanitize_mpol_flags(&lmode, &mode_flags);
1498 if (err)
1499 return err;
1500
Christoph Lameter8bccd852005-10-29 18:16:59 -07001501 err = get_nodes(&nodes, nmask, maxnode);
1502 if (err)
1503 return err;
Feng Tang95837922021-06-30 18:51:03 -07001504
1505 return do_set_mempolicy(lmode, mode_flags, &nodes);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001506}
1507
Dominik Brodowskiaf03c4a2018-03-17 16:20:01 +01001508SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1509 unsigned long, maxnode)
1510{
1511 return kernel_set_mempolicy(mode, nmask, maxnode);
1512}
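/*
 * Illustrative userspace counterpart (a minimal sketch, not part of this
 * file; assumes the <numaif.h> wrapper from libnuma):
 *
 *	#include <numaif.h>
 *
 *	// Interleave all future allocations of the calling task across
 *	// nodes 0-3.
 *	unsigned long nodemask = 0xfUL;
 *	set_mempolicy(MPOL_INTERLEAVE, &nodemask, 8 * sizeof(nodemask));
 */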
1513
Dominik Brodowskib6e9b0b2018-03-17 16:00:25 +01001514static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1515 const unsigned long __user *old_nodes,
1516 const unsigned long __user *new_nodes)
Christoph Lameter39743882006-01-08 01:00:51 -08001517{
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001518 struct mm_struct *mm = NULL;
Christoph Lameter39743882006-01-08 01:00:51 -08001519 struct task_struct *task;
Christoph Lameter39743882006-01-08 01:00:51 -08001520 nodemask_t task_nodes;
1521 int err;
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001522 nodemask_t *old;
1523 nodemask_t *new;
1524 NODEMASK_SCRATCH(scratch);
Christoph Lameter39743882006-01-08 01:00:51 -08001525
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001526 if (!scratch)
1527 return -ENOMEM;
Christoph Lameter39743882006-01-08 01:00:51 -08001528
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001529 old = &scratch->mask1;
1530 new = &scratch->mask2;
1531
1532 err = get_nodes(old, old_nodes, maxnode);
Christoph Lameter39743882006-01-08 01:00:51 -08001533 if (err)
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001534 goto out;
1535
1536 err = get_nodes(new, new_nodes, maxnode);
1537 if (err)
1538 goto out;
Christoph Lameter39743882006-01-08 01:00:51 -08001539
1540 /* Find the mm_struct */
Zeng Zhaoming55cfaa32010-12-02 14:31:13 -08001541 rcu_read_lock();
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -07001542 task = pid ? find_task_by_vpid(pid) : current;
Christoph Lameter39743882006-01-08 01:00:51 -08001543 if (!task) {
Zeng Zhaoming55cfaa32010-12-02 14:31:13 -08001544 rcu_read_unlock();
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001545 err = -ESRCH;
1546 goto out;
Christoph Lameter39743882006-01-08 01:00:51 -08001547 }
Christoph Lameter3268c632012-03-21 16:34:06 -07001548 get_task_struct(task);
Christoph Lameter39743882006-01-08 01:00:51 -08001549
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001550 err = -EINVAL;
Christoph Lameter39743882006-01-08 01:00:51 -08001551
1552 /*
Otto Ebeling31367462017-11-15 17:38:14 -08001553 * Check if this process has the right to modify the specified process.
1554 * Use the regular "ptrace_may_access()" checks.
Christoph Lameter39743882006-01-08 01:00:51 -08001555 */
Otto Ebeling31367462017-11-15 17:38:14 -08001556 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
David Howellsc69e8d92008-11-14 10:39:19 +11001557 rcu_read_unlock();
Christoph Lameter39743882006-01-08 01:00:51 -08001558 err = -EPERM;
Christoph Lameter3268c632012-03-21 16:34:06 -07001559 goto out_put;
Christoph Lameter39743882006-01-08 01:00:51 -08001560 }
David Howellsc69e8d92008-11-14 10:39:19 +11001561 rcu_read_unlock();
Christoph Lameter39743882006-01-08 01:00:51 -08001562
1563 task_nodes = cpuset_mems_allowed(task);
1564 /* Is the user allowed to access the target nodes? */
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001565 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
Christoph Lameter39743882006-01-08 01:00:51 -08001566 err = -EPERM;
Christoph Lameter3268c632012-03-21 16:34:06 -07001567 goto out_put;
Christoph Lameter39743882006-01-08 01:00:51 -08001568 }
1569
Yisheng Xie0486a382018-01-31 16:16:15 -08001570 task_nodes = cpuset_mems_allowed(current);
1571 nodes_and(*new, *new, task_nodes);
1572 if (nodes_empty(*new))
Christoph Lameter3268c632012-03-21 16:34:06 -07001573 goto out_put;
Yisheng Xie0486a382018-01-31 16:16:15 -08001574
David Quigley86c3a762006-06-23 02:04:02 -07001575 err = security_task_movememory(task);
1576 if (err)
Christoph Lameter3268c632012-03-21 16:34:06 -07001577 goto out_put;
David Quigley86c3a762006-06-23 02:04:02 -07001578
Christoph Lameter3268c632012-03-21 16:34:06 -07001579 mm = get_task_mm(task);
1580 put_task_struct(task);
Sasha Levinf2a9ef82012-04-25 16:01:52 -07001581
1582 if (!mm) {
Christoph Lameter3268c632012-03-21 16:34:06 -07001583 err = -EINVAL;
Sasha Levinf2a9ef82012-04-25 16:01:52 -07001584 goto out;
1585 }
1586
1587 err = do_migrate_pages(mm, old, new,
1588 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
Christoph Lameter3268c632012-03-21 16:34:06 -07001589
1590 mmput(mm);
1591out:
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001592 NODEMASK_SCRATCH_FREE(scratch);
1593
Christoph Lameter39743882006-01-08 01:00:51 -08001594 return err;
Christoph Lameter3268c632012-03-21 16:34:06 -07001595
1596out_put:
1597 put_task_struct(task);
1598 goto out;
1599
Christoph Lameter39743882006-01-08 01:00:51 -08001600}
1601
Dominik Brodowskib6e9b0b2018-03-17 16:00:25 +01001602SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1603 const unsigned long __user *, old_nodes,
1604 const unsigned long __user *, new_nodes)
1605{
1606 return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1607}
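/*
 * Illustrative userspace counterpart (a minimal sketch, not part of this
 * file; assumes the <numaif.h> wrapper from libnuma, and 'pid' names
 * some target task):
 *
 *	#include <numaif.h>
 *
 *	// Move all of the target task's pages that currently sit on
 *	// node 0 over to node 2.  The return value is the number of
 *	// pages that could not be moved.
 *	unsigned long old_nodes = 1UL << 0;
 *	unsigned long new_nodes = 1UL << 2;
 *	long not_moved = migrate_pages(pid, 8 * sizeof(unsigned long),
 *				       &old_nodes, &new_nodes);
 */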
1608
Christoph Lameter39743882006-01-08 01:00:51 -08001609
Christoph Lameter8bccd852005-10-29 18:16:59 -07001610/* Retrieve NUMA policy */
Dominik Brodowskiaf03c4a2018-03-17 16:20:01 +01001611static int kernel_get_mempolicy(int __user *policy,
1612 unsigned long __user *nmask,
1613 unsigned long maxnode,
1614 unsigned long addr,
1615 unsigned long flags)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001616{
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07001617 int err;
Kees Cook3f649ab2020-06-03 13:09:38 -07001618 int pval;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001619 nodemask_t nodes;
1620
Ralph Campbell050c17f2019-02-20 22:18:58 -08001621 if (nmask != NULL && maxnode < nr_node_ids)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001622 return -EINVAL;
1623
Wenchao Hao4605f052020-08-11 18:31:16 -07001624 addr = untagged_addr(addr);
1625
Christoph Lameter8bccd852005-10-29 18:16:59 -07001626 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1627
1628 if (err)
1629 return err;
1630
1631 if (policy && put_user(pval, policy))
1632 return -EFAULT;
1633
1634 if (nmask)
1635 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1636
1637 return err;
1638}
1639
Dominik Brodowskiaf03c4a2018-03-17 16:20:01 +01001640SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1641 unsigned long __user *, nmask, unsigned long, maxnode,
1642 unsigned long, addr, unsigned long, flags)
1643{
1644 return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1645}
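/*
 * Illustrative userspace counterpart (a minimal sketch, not part of this
 * file; assumes the <numaif.h> wrapper from libnuma, and 'addr' points
 * into a mapping of the calling task):
 *
 *	#include <numaif.h>
 *
 *	// Ask which node currently backs the page at 'addr'.
 *	int node = -1;
 *	get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR);
 */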
1646
Li Xinhai20ca87f2020-04-01 21:10:52 -07001647bool vma_migratable(struct vm_area_struct *vma)
1648{
1649 if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1650 return false;
1651
1652 /*
1653 * DAX device mappings require predictable access latency, so avoid
1654 * incurring periodic faults.
1655 */
1656 if (vma_is_dax(vma))
1657 return false;
1658
1659 if (is_vm_hugetlb_page(vma) &&
1660 !hugepage_migration_supported(hstate_vma(vma)))
1661 return false;
1662
1663 /*
1664 * Migration allocates pages in the highest zone. If we cannot
1665 * do so then migration (at least from node to node) is not
1666 * possible.
1667 */
1668 if (vma->vm_file &&
1669 gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
1670 < policy_zone)
1671 return false;
1672 return true;
1673}
1674
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001675struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1676 unsigned long addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001677{
Oleg Nesterov8d902742014-10-09 15:27:45 -07001678 struct mempolicy *pol = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001679
1680 if (vma) {
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001681 if (vma->vm_ops && vma->vm_ops->get_policy) {
Oleg Nesterov8d902742014-10-09 15:27:45 -07001682 pol = vma->vm_ops->get_policy(vma, addr);
Mel Gorman00442ad2012-10-08 16:29:20 -07001683 } else if (vma->vm_policy) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001684 pol = vma->vm_policy;
Mel Gorman00442ad2012-10-08 16:29:20 -07001685
1686 /*
1687 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1688 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1689 * count on these policies which will be dropped by
1690 * mpol_cond_put() later
1691 */
1692 if (mpol_needs_cond_ref(pol))
1693 mpol_get(pol);
1694 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001695 }
Oleg Nesterovf15ca782014-10-09 15:27:43 -07001696
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001697 return pol;
1698}
1699
1700/*
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001701 * get_vma_policy(@vma, @addr)
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001702 * @vma: virtual memory area whose policy is sought
1703 * @addr: address in @vma for shared policy lookup
1704 *
1705 * Returns effective policy for a VMA at specified address.
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001706 * Falls back to current->mempolicy or system default policy, as necessary.
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001707 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1708 * count--added by the get_policy() vm_op, as appropriate--to protect against
1709 * freeing by another task. It is the caller's responsibility to free the
1710 * extra reference for shared policies.
1711 */
David Rientjesac79f782019-09-04 12:54:18 -07001712static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001713 unsigned long addr)
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001714{
1715 struct mempolicy *pol = __get_vma_policy(vma, addr);
1716
Oleg Nesterov8d902742014-10-09 15:27:45 -07001717 if (!pol)
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001718 pol = get_task_policy(current);
Oleg Nesterov8d902742014-10-09 15:27:45 -07001719
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720 return pol;
1721}
1722
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001723bool vma_policy_mof(struct vm_area_struct *vma)
Mel Gormanfc3147242013-10-07 11:29:09 +01001724{
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001725 struct mempolicy *pol;
Oleg Nesterovf15ca782014-10-09 15:27:43 -07001726
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001727 if (vma->vm_ops && vma->vm_ops->get_policy) {
1728 bool ret = false;
Mel Gormanfc3147242013-10-07 11:29:09 +01001729
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001730 pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1731 if (pol && (pol->flags & MPOL_F_MOF))
1732 ret = true;
1733 mpol_cond_put(pol);
Mel Gormanfc3147242013-10-07 11:29:09 +01001734
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001735 return ret;
Mel Gormanfc3147242013-10-07 11:29:09 +01001736 }
1737
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001738 pol = vma->vm_policy;
Oleg Nesterov8d902742014-10-09 15:27:45 -07001739 if (!pol)
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001740 pol = get_task_policy(current);
Oleg Nesterov8d902742014-10-09 15:27:45 -07001741
Mel Gormanfc3147242013-10-07 11:29:09 +01001742 return pol->flags & MPOL_F_MOF;
1743}
1744
Lai Jiangshand3eb1572013-02-22 16:33:22 -08001745static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1746{
1747 enum zone_type dynamic_policy_zone = policy_zone;
1748
1749 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1750
1751 /*
Ben Widawsky269fbe72021-06-30 18:51:10 -07001752 * if policy->nodes has movable memory only,
Lai Jiangshand3eb1572013-02-22 16:33:22 -08001753 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1754 *
Ben Widawsky269fbe72021-06-30 18:51:10 -07001755 * policy->nodes is intersected with node_states[N_MEMORY],
Ingo Molnarf0953a12021-05-06 18:06:47 -07001756 * so if the following test fails, it implies
Ben Widawsky269fbe72021-06-30 18:51:10 -07001757 * policy->nodes has movable memory only.
Lai Jiangshand3eb1572013-02-22 16:33:22 -08001758 */
Ben Widawsky269fbe72021-06-30 18:51:10 -07001759 if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY]))
Lai Jiangshand3eb1572013-02-22 16:33:22 -08001760 dynamic_policy_zone = ZONE_MOVABLE;
1761
1762 return zone >= dynamic_policy_zone;
1763}
1764
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001765/*
1766 * Return a nodemask representing a mempolicy for filtering nodes for
1767 * page allocation
1768 */
Muchun Song8ca39e62020-08-11 18:30:32 -07001769nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
Mel Gorman19770b32008-04-28 02:12:18 -07001770{
Dave Hansenb27abac2021-09-02 15:00:06 -07001771 int mode = policy->mode;
1772
Mel Gorman19770b32008-04-28 02:12:18 -07001773 /* Lower zones don't get a nodemask applied for MPOL_BIND */
Dave Hansenb27abac2021-09-02 15:00:06 -07001774 if (unlikely(mode == MPOL_BIND) &&
1775 apply_policy_zone(policy, gfp_zone(gfp)) &&
1776 cpuset_nodemask_valid_mems_allowed(&policy->nodes))
1777 return &policy->nodes;
1778
1779 if (mode == MPOL_PREFERRED_MANY)
Ben Widawsky269fbe72021-06-30 18:51:10 -07001780 return &policy->nodes;
Mel Gorman19770b32008-04-28 02:12:18 -07001781
1782 return NULL;
1783}
1784
Dave Hansenb27abac2021-09-02 15:00:06 -07001785/*
1786 * Return the preferred node id for 'prefer' mempolicy, and return
1787 * the given id for all other policies.
1788 *
1789 * policy_node() is always coupled with policy_nodemask(), which
1790 * secures the nodemask limit for 'bind' and 'prefer-many' policy.
1791 */
Wei Yangf8fd5252020-10-13 16:57:11 -07001792static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793{
Feng Tang7858d7b2021-06-30 18:51:00 -07001794 if (policy->mode == MPOL_PREFERRED) {
Ben Widawsky269fbe72021-06-30 18:51:10 -07001795 nd = first_node(policy->nodes);
Feng Tang7858d7b2021-06-30 18:51:00 -07001796 } else {
Mel Gorman19770b32008-04-28 02:12:18 -07001797 /*
Michal Hocko6d840952016-12-12 16:42:23 -08001798 * __GFP_THISNODE shouldn't even be used with the bind policy
1799 * because we might easily break the expectation to stay on the
1800 * requested node and not break the policy.
Mel Gorman19770b32008-04-28 02:12:18 -07001801 */
Michal Hocko6d840952016-12-12 16:42:23 -08001802 WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803 }
Michal Hocko6d840952016-12-12 16:42:23 -08001804
Vlastimil Babka04ec6262017-07-06 15:40:03 -07001805 return nd;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806}
1807
1808/* Do dynamic interleaving for a process */
1809static unsigned interleave_nodes(struct mempolicy *policy)
1810{
Vlastimil Babka45816682017-07-06 15:39:59 -07001811 unsigned next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812 struct task_struct *me = current;
1813
Ben Widawsky269fbe72021-06-30 18:51:10 -07001814 next = next_node_in(me->il_prev, policy->nodes);
David Rientjesf5b087b2008-04-28 02:12:27 -07001815 if (next < MAX_NUMNODES)
Vlastimil Babka45816682017-07-06 15:39:59 -07001816 me->il_prev = next;
1817 return next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818}
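/*
 * Worked example (illustrative): with policy->nodes == {0,2,5} and
 * il_prev currently 5, successive calls return 0, 2, 5, 0, ... because
 * next_node_in() wraps around the nodemask, giving the round-robin
 * behaviour interleaving relies on.
 */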
1819
Christoph Lameterdc85da12006-01-18 17:42:36 -08001820/*
1821 * Depending on the memory policy provide a node from which to allocate the
1822 * next slab entry.
1823 */
David Rientjes2a389612014-04-07 15:37:29 -07001824unsigned int mempolicy_slab_node(void)
Christoph Lameterdc85da12006-01-18 17:42:36 -08001825{
Andi Kleene7b691b2012-06-09 02:40:03 -07001826 struct mempolicy *policy;
David Rientjes2a389612014-04-07 15:37:29 -07001827 int node = numa_mem_id();
Andi Kleene7b691b2012-06-09 02:40:03 -07001828
Vasily Averin38b031d2021-09-02 15:00:23 -07001829 if (!in_task())
David Rientjes2a389612014-04-07 15:37:29 -07001830 return node;
Andi Kleene7b691b2012-06-09 02:40:03 -07001831
1832 policy = current->mempolicy;
Feng Tang7858d7b2021-06-30 18:51:00 -07001833 if (!policy)
David Rientjes2a389612014-04-07 15:37:29 -07001834 return node;
Christoph Lameter765c4502006-09-27 01:50:08 -07001835
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001836 switch (policy->mode) {
1837 case MPOL_PREFERRED:
Ben Widawsky269fbe72021-06-30 18:51:10 -07001838 return first_node(policy->nodes);
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001839
Christoph Lameterdc85da12006-01-18 17:42:36 -08001840 case MPOL_INTERLEAVE:
1841 return interleave_nodes(policy);
1842
Dave Hansenb27abac2021-09-02 15:00:06 -07001843 case MPOL_BIND:
1844 case MPOL_PREFERRED_MANY:
1845 {
Mel Gormanc33d6c02016-05-19 17:14:10 -07001846 struct zoneref *z;
1847
Christoph Lameterdc85da12006-01-18 17:42:36 -08001848 /*
1849 * Follow bind policy behavior and start allocation at the
1850 * first node.
1851 */
Mel Gorman19770b32008-04-28 02:12:18 -07001852 struct zonelist *zonelist;
Mel Gorman19770b32008-04-28 02:12:18 -07001853 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
Aneesh Kumar K.Vc9634cf2016-10-07 16:59:12 -07001854 zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
Mel Gormanc33d6c02016-05-19 17:14:10 -07001855 z = first_zones_zonelist(zonelist, highest_zoneidx,
Ben Widawsky269fbe72021-06-30 18:51:10 -07001856 &policy->nodes);
Pavel Tatashinc1093b72018-08-21 21:53:32 -07001857 return z->zone ? zone_to_nid(z->zone) : node;
Mel Gormandd1a2392008-04-28 02:12:17 -07001858 }
Feng Tang7858d7b2021-06-30 18:51:00 -07001859 case MPOL_LOCAL:
1860 return node;
Christoph Lameterdc85da12006-01-18 17:42:36 -08001861
Christoph Lameterdc85da12006-01-18 17:42:36 -08001862 default:
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001863 BUG();
Christoph Lameterdc85da12006-01-18 17:42:36 -08001864 }
1865}
1866
Andrew Mortonfee83b32016-05-19 17:11:43 -07001867/*
1868 * Do static interleaving for a VMA with known offset @n. Returns the n'th
Ben Widawsky269fbe72021-06-30 18:51:10 -07001869 * node in pol->nodes (starting from n=0), wrapping around if n exceeds the
Andrew Mortonfee83b32016-05-19 17:11:43 -07001870 * number of present nodes.
1871 */
Laurent Dufour98c70ba2017-09-08 16:12:39 -07001872static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001873{
yanghui276aeee2021-09-08 18:10:20 -07001874 nodemask_t nodemask = pol->nodes;
1875 unsigned int target, nnodes;
Andrew Mortonfee83b32016-05-19 17:11:43 -07001876 int i;
1877 int nid;
yanghui276aeee2021-09-08 18:10:20 -07001878 /*
1879 * The barrier will stabilize the nodemask in a register or on
1880 * the stack so that it will stop changing under the code.
1881 *
1882 * Between first_node() and next_node(), pol->nodes could be changed
1883 * by other threads. So we put pol->nodes in a local stack.
1884 */
1885 barrier();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001886
yanghui276aeee2021-09-08 18:10:20 -07001887 nnodes = nodes_weight(nodemask);
David Rientjesf5b087b2008-04-28 02:12:27 -07001888 if (!nnodes)
1889 return numa_node_id();
Andrew Mortonfee83b32016-05-19 17:11:43 -07001890 target = (unsigned int)n % nnodes;
yanghui276aeee2021-09-08 18:10:20 -07001891 nid = first_node(nodemask);
Andrew Mortonfee83b32016-05-19 17:11:43 -07001892 for (i = 0; i < target; i++)
yanghui276aeee2021-09-08 18:10:20 -07001893 nid = next_node(nid, nodemask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894 return nid;
1895}
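/*
 * Worked example (illustrative): with pol->nodes == {1,3,5} and n == 7,
 * nnodes == 3 and target == 7 % 3 == 1, so the walk starts at node 1 and
 * advances once, returning node 3.
 */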
1896
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001897/* Determine a node number for interleave */
1898static inline unsigned interleave_nid(struct mempolicy *pol,
1899 struct vm_area_struct *vma, unsigned long addr, int shift)
1900{
1901 if (vma) {
1902 unsigned long off;
1903
Nishanth Aravamudan3b98b082006-08-31 21:27:53 -07001904 /*
1905 * for small pages, there is no difference between
1906 * shift and PAGE_SHIFT, so the bit-shift is safe.
1907 * for huge pages, since vm_pgoff is in units of small
1908 * pages, we need to shift off the always 0 bits to get
1909 * a useful offset.
1910 */
1911 BUG_ON(shift < PAGE_SHIFT);
1912 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001913 off += (addr - vma->vm_start) >> shift;
Laurent Dufour98c70ba2017-09-08 16:12:39 -07001914 return offset_il_node(pol, off);
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001915 } else
1916 return interleave_nodes(pol);
1917}
1918
Chen, Kenneth W00ac59a2006-02-03 21:51:14 +01001919#ifdef CONFIG_HUGETLBFS
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001920/*
Vlastimil Babka04ec6262017-07-06 15:40:03 -07001921 * huge_node(@vma, @addr, @gfp_flags, @mpol)
Fabian Frederickb46e14a2014-06-04 16:08:18 -07001922 * @vma: virtual memory area whose policy is sought
1923 * @addr: address in @vma for shared policy lookup and interleave policy
1924 * @gfp_flags: for requested zone
1925 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
Dave Hansenb27abac2021-09-02 15:00:06 -07001926 * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001927 *
Vlastimil Babka04ec6262017-07-06 15:40:03 -07001928 * Returns a nid suitable for a huge page allocation and a pointer
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001929 * to the struct mempolicy for conditional unref after allocation.
Dave Hansenb27abac2021-09-02 15:00:06 -07001930 * If the effective policy is 'bind' or 'prefer-many', returns a pointer
1931 * to the mempolicy's @nodemask for filtering the zonelist.
Miao Xiec0ff7452010-05-24 14:32:08 -07001932 *
Mel Gormand26914d2014-04-03 14:47:24 -07001933 * Must be protected by read_mems_allowed_begin()
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001934 */
Vlastimil Babka04ec6262017-07-06 15:40:03 -07001935int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
1936 struct mempolicy **mpol, nodemask_t **nodemask)
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001937{
Vlastimil Babka04ec6262017-07-06 15:40:03 -07001938 int nid;
Dave Hansenb27abac2021-09-02 15:00:06 -07001939 int mode;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001940
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001941 *mpol = get_vma_policy(vma, addr);
Dave Hansenb27abac2021-09-02 15:00:06 -07001942 *nodemask = NULL;
1943 mode = (*mpol)->mode;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001944
Dave Hansenb27abac2021-09-02 15:00:06 -07001945 if (unlikely(mode == MPOL_INTERLEAVE)) {
Vlastimil Babka04ec6262017-07-06 15:40:03 -07001946 nid = interleave_nid(*mpol, vma, addr,
1947 huge_page_shift(hstate_vma(vma)));
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001948 } else {
Vlastimil Babka04ec6262017-07-06 15:40:03 -07001949 nid = policy_node(gfp_flags, *mpol, numa_node_id());
Dave Hansenb27abac2021-09-02 15:00:06 -07001950 if (mode == MPOL_BIND || mode == MPOL_PREFERRED_MANY)
Ben Widawsky269fbe72021-06-30 18:51:10 -07001951 *nodemask = &(*mpol)->nodes;
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001952 }
Vlastimil Babka04ec6262017-07-06 15:40:03 -07001953 return nid;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001954}
Lee Schermerhorn06808b02009-12-14 17:58:21 -08001955
1956/*
1957 * init_nodemask_of_mempolicy
1958 *
1959 * If the current task's mempolicy is "default" [NULL], return 'false'
1960 * to indicate default policy. Otherwise, extract the policy nodemask
1961 * for 'bind' or 'interleave' policy into the argument nodemask, or
1962 * initialize the argument nodemask to contain the single node for
1963 * 'preferred' or 'local' policy and return 'true' to indicate presence
1964 * of non-default mempolicy.
1965 *
1966 * We don't bother with reference counting the mempolicy [mpol_get/put]
1967 * because the current task is examining its own mempolicy and a task's
1968 * mempolicy is only ever changed by the task itself.
1969 *
1970 * N.B., it is the caller's responsibility to free a returned nodemask.
1971 */
1972bool init_nodemask_of_mempolicy(nodemask_t *mask)
1973{
1974 struct mempolicy *mempolicy;
Lee Schermerhorn06808b02009-12-14 17:58:21 -08001975
1976 if (!(mask && current->mempolicy))
1977 return false;
1978
Miao Xiec0ff7452010-05-24 14:32:08 -07001979 task_lock(current);
Lee Schermerhorn06808b02009-12-14 17:58:21 -08001980 mempolicy = current->mempolicy;
1981 switch (mempolicy->mode) {
1982 case MPOL_PREFERRED:
Dave Hansenb27abac2021-09-02 15:00:06 -07001983 case MPOL_PREFERRED_MANY:
Lee Schermerhorn06808b02009-12-14 17:58:21 -08001984 case MPOL_BIND:
Lee Schermerhorn06808b02009-12-14 17:58:21 -08001985 case MPOL_INTERLEAVE:
Ben Widawsky269fbe72021-06-30 18:51:10 -07001986 *mask = mempolicy->nodes;
Feng Tang7858d7b2021-06-30 18:51:00 -07001987 break;
1988
1989 case MPOL_LOCAL:
Ben Widawsky269fbe72021-06-30 18:51:10 -07001990 init_nodemask_of_node(mask, numa_node_id());
Lee Schermerhorn06808b02009-12-14 17:58:21 -08001991 break;
1992
1993 default:
1994 BUG();
1995 }
Miao Xiec0ff7452010-05-24 14:32:08 -07001996 task_unlock(current);
Lee Schermerhorn06808b02009-12-14 17:58:21 -08001997
1998 return true;
1999}
Chen, Kenneth W00ac59a2006-02-03 21:51:14 +01002000#endif
Christoph Lameter5da7ca82006-01-06 00:10:46 -08002001
David Rientjes6f48d0eb2010-08-09 17:18:52 -07002002/*
Feng Tangb26e5172021-06-30 18:50:56 -07002003 * mempolicy_in_oom_domain
David Rientjes6f48d0eb2010-08-09 17:18:52 -07002004 *
Feng Tangb26e5172021-06-30 18:50:56 -07002005 * If tsk's mempolicy is "bind", check for intersection between mask and
2006 * the policy nodemask. Otherwise, return true for all other policies
2007 * including "interleave", as a tsk with "interleave" policy may have
2008 * memory allocated from all nodes in system.
David Rientjes6f48d0eb2010-08-09 17:18:52 -07002009 *
2010 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
2011 */
Feng Tangb26e5172021-06-30 18:50:56 -07002012bool mempolicy_in_oom_domain(struct task_struct *tsk,
David Rientjes6f48d0eb2010-08-09 17:18:52 -07002013 const nodemask_t *mask)
2014{
2015 struct mempolicy *mempolicy;
2016 bool ret = true;
2017
2018 if (!mask)
2019 return ret;
Feng Tangb26e5172021-06-30 18:50:56 -07002020
David Rientjes6f48d0eb2010-08-09 17:18:52 -07002021 task_lock(tsk);
2022 mempolicy = tsk->mempolicy;
Feng Tangb26e5172021-06-30 18:50:56 -07002023 if (mempolicy && mempolicy->mode == MPOL_BIND)
Ben Widawsky269fbe72021-06-30 18:51:10 -07002024 ret = nodes_intersects(mempolicy->nodes, *mask);
David Rientjes6f48d0eb2010-08-09 17:18:52 -07002025 task_unlock(tsk);
Feng Tangb26e5172021-06-30 18:50:56 -07002026
David Rientjes6f48d0eb2010-08-09 17:18:52 -07002027 return ret;
2028}
2029
Linus Torvalds1da177e2005-04-16 15:20:36 -07002030/* Allocate a page in interleaved policy.
2031 Own path because it needs to do special accounting. */
Andi Kleen662f3a02005-10-29 18:15:49 -07002032static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2033 unsigned nid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002034{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035 struct page *page;
2036
Matthew Wilcox (Oracle)84172f42021-04-29 23:01:15 -07002037 page = __alloc_pages(gfp, order, nid, NULL);
Kemi Wang45180852017-11-15 17:38:22 -08002038 /* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
2039 if (!static_branch_likely(&vm_numa_stat_key))
2040 return page;
Andrey Ryabininde55c8b2017-10-13 15:57:43 -07002041 if (page && page_to_nid(page) == nid) {
2042 preempt_disable();
Mel Gormanf19298b2021-06-28 19:41:44 -07002043 __count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT);
Andrey Ryabininde55c8b2017-10-13 15:57:43 -07002044 preempt_enable();
2045 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002046 return page;
2047}
2048
Feng Tang4c54d942021-09-02 15:00:10 -07002049static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
2050 int nid, struct mempolicy *pol)
2051{
2052 struct page *page;
2053 gfp_t preferred_gfp;
2054
2055 /*
2056 * This is a two pass approach. The first pass will only try the
2057 * preferred nodes but skip the direct reclaim and allow the
2058 * allocation to fail, while the second pass will try all the
2059 * nodes in the system.
2060 */
2061 preferred_gfp = gfp | __GFP_NOWARN;
2062 preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2063 page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes);
2064 if (!page)
Aneesh Kumar K.Vc0455112022-01-14 14:08:14 -08002065 page = __alloc_pages(gfp, order, nid, NULL);
Feng Tang4c54d942021-09-02 15:00:10 -07002066
2067 return page;
2068}
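/*
 * Illustrative example (not part of the original source): for a typical
 * gfp such as GFP_HIGHUSER_MOVABLE, the first pass strips
 * __GFP_DIRECT_RECLAIM (and __GFP_NOFAIL) and adds __GFP_NOWARN, so it
 * fails quickly if the preferred nodes are short on free memory; only
 * then does the second pass retry with the caller's original gfp and no
 * nodemask, allowing any node and full reclaim.
 */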
2069
Linus Torvalds1da177e2005-04-16 15:20:36 -07002070/**
Matthew Wilcox (Oracle)eb350732021-04-29 23:01:24 -07002071 * alloc_pages_vma - Allocate a page for a VMA.
2072 * @gfp: GFP flags.
2073 * @order: Order of the GFP allocation.
2074 * @vma: Pointer to VMA or NULL if not available.
2075 * @addr: Virtual address of the allocation. Must be inside @vma.
2076 * @node: Which node to prefer for allocation (modulo policy).
2077 * @hugepage: For hugepages try only the preferred node if possible.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002078 *
Matthew Wilcox (Oracle)eb350732021-04-29 23:01:24 -07002079 * Allocate a page for a specific address in @vma, using the appropriate
2080 * NUMA policy. When @vma is not NULL the caller must hold the mmap_lock
2081 * of the mm_struct of the VMA to prevent it from going away. Should be
2082 * used for all allocations for pages that will be mapped into user space.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083 *
Matthew Wilcox (Oracle)eb350732021-04-29 23:01:24 -07002084 * Return: The page on success or NULL if allocation fails.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085 */
Matthew Wilcox (Oracle)eb350732021-04-29 23:01:24 -07002086struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
Michal Hockobe1a13e2022-01-14 14:07:27 -08002087 unsigned long addr, bool hugepage)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002088{
Mel Gormancc9a6c82012-03-21 16:34:11 -07002089 struct mempolicy *pol;
Michal Hockobe1a13e2022-01-14 14:07:27 -08002090 int node = numa_node_id();
Miao Xiec0ff7452010-05-24 14:32:08 -07002091 struct page *page;
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002092 int preferred_nid;
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002093 nodemask_t *nmask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07002095 pol = get_vma_policy(vma, addr);
Mel Gormancc9a6c82012-03-21 16:34:11 -07002096
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002097 if (pol->mode == MPOL_INTERLEAVE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098 unsigned nid;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08002099
Andi Kleen8eac5632011-02-25 14:44:28 -08002100 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002101 mpol_cond_put(pol);
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002102 page = alloc_page_interleave(gfp, order, nid);
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002103 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104 }
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002105
Feng Tang4c54d942021-09-02 15:00:10 -07002106 if (pol->mode == MPOL_PREFERRED_MANY) {
Aneesh Kumar K.Vc0455112022-01-14 14:08:14 -08002107 node = policy_node(gfp, pol, node);
Feng Tang4c54d942021-09-02 15:00:10 -07002108 page = alloc_pages_preferred_many(gfp, order, node, pol);
2109 mpol_cond_put(pol);
2110 goto out;
2111 }
2112
David Rientjes19deb762019-09-04 12:54:20 -07002113 if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2114 int hpage_node = node;
2115
2116 /*
2117 * For hugepage allocation and non-interleave policy which
2118 * allows the current node (or other explicitly preferred
2119 * node) we only try to allocate from the current/preferred
2120 * node and don't fall back to other nodes, as the cost of
2121 * remote accesses would likely offset THP benefits.
2122 *
Dave Hansenb27abac2021-09-02 15:00:06 -07002123 * If the policy is interleave or does not allow the current
David Rientjes19deb762019-09-04 12:54:20 -07002124 * node in its nodemask, we allocate the standard way.
2125 */
Feng Tang7858d7b2021-06-30 18:51:00 -07002126 if (pol->mode == MPOL_PREFERRED)
Ben Widawsky269fbe72021-06-30 18:51:10 -07002127 hpage_node = first_node(pol->nodes);
David Rientjes19deb762019-09-04 12:54:20 -07002128
2129 nmask = policy_nodemask(gfp, pol);
2130 if (!nmask || node_isset(hpage_node, *nmask)) {
2131 mpol_cond_put(pol);
Vlastimil Babkacc638f32020-01-13 16:29:04 -08002132 /*
2133 * First, try to allocate THP only on local node, but
2134 * don't reclaim unnecessarily, just compact.
2135 */
David Rientjes19deb762019-09-04 12:54:20 -07002136 page = __alloc_pages_node(hpage_node,
Vlastimil Babkacc638f32020-01-13 16:29:04 -08002137 gfp | __GFP_THISNODE | __GFP_NORETRY, order);
David Rientjes76e654c2019-09-04 12:54:25 -07002138
2139 /*
2140 * If hugepage allocations are configured to always
2141 * synchronous compact or the vma has been madvised
2142 * to prefer hugepage backing, retry allowing remote
Vlastimil Babkacc638f32020-01-13 16:29:04 -08002143 * memory with both reclaim and compact as well.
David Rientjes76e654c2019-09-04 12:54:25 -07002144 */
2145 if (!page && (gfp & __GFP_DIRECT_RECLAIM))
Andrey Ryabinin33863532021-12-24 21:12:35 -08002146 page = __alloc_pages(gfp, order, hpage_node, nmask);
David Rientjes76e654c2019-09-04 12:54:25 -07002147
David Rientjes19deb762019-09-04 12:54:20 -07002148 goto out;
2149 }
2150 }
2151
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002152 nmask = policy_nodemask(gfp, pol);
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002153 preferred_nid = policy_node(gfp, pol, node);
Matthew Wilcox (Oracle)84172f42021-04-29 23:01:15 -07002154 page = __alloc_pages(gfp, order, preferred_nid, nmask);
Vlastimil Babkad51e9892017-01-24 15:18:18 -08002155 mpol_cond_put(pol);
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002156out:
Miao Xiec0ff7452010-05-24 14:32:08 -07002157 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002158}
Christoph Hellwig69262212019-06-26 14:27:05 +02002159EXPORT_SYMBOL(alloc_pages_vma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160
2161/**
Matthew Wilcox (Oracle)6421ec72021-04-29 23:01:21 -07002162 * alloc_pages - Allocate pages.
2163 * @gfp: GFP flags.
2164 * @order: Power of two of number of pages to allocate.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002165 *
Matthew Wilcox (Oracle)6421ec72021-04-29 23:01:21 -07002166 * Allocate 1 << @order contiguous pages. The physical address of the
2167 * first page is naturally aligned (eg an order-3 allocation will be aligned
2168 * to a multiple of 8 * PAGE_SIZE bytes). The NUMA policy of the current
2169 * process is honoured when in process context.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170 *
Matthew Wilcox (Oracle)6421ec72021-04-29 23:01:21 -07002171 * Context: Can be called from any context, providing the appropriate GFP
2172 * flags are used.
2173 * Return: The page on success or NULL if allocation fails.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174 */
Matthew Wilcox (Oracle)d7f946d2021-04-29 23:01:18 -07002175struct page *alloc_pages(gfp_t gfp, unsigned order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176{
Oleg Nesterov8d902742014-10-09 15:27:45 -07002177 struct mempolicy *pol = &default_policy;
Miao Xiec0ff7452010-05-24 14:32:08 -07002178 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179
Oleg Nesterov8d902742014-10-09 15:27:45 -07002180 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2181 pol = get_task_policy(current);
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002182
2183 /*
2184 * No reference counting needed for current->mempolicy
2185 * nor system default_policy
2186 */
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002187 if (pol->mode == MPOL_INTERLEAVE)
Miao Xiec0ff7452010-05-24 14:32:08 -07002188 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
Feng Tang4c54d942021-09-02 15:00:10 -07002189 else if (pol->mode == MPOL_PREFERRED_MANY)
2190 page = alloc_pages_preferred_many(gfp, order,
Aneesh Kumar K.Vc0455112022-01-14 14:08:14 -08002191 policy_node(gfp, pol, numa_node_id()), pol);
Miao Xiec0ff7452010-05-24 14:32:08 -07002192 else
Matthew Wilcox (Oracle)84172f42021-04-29 23:01:15 -07002193 page = __alloc_pages(gfp, order,
Vlastimil Babka04ec6262017-07-06 15:40:03 -07002194 policy_node(gfp, pol, numa_node_id()),
Andi Kleen5c4b4be2011-03-04 17:36:32 -08002195 policy_nodemask(gfp, pol));
Mel Gormancc9a6c82012-03-21 16:34:11 -07002196
Miao Xiec0ff7452010-05-24 14:32:08 -07002197 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198}
Matthew Wilcox (Oracle)d7f946d2021-04-29 23:01:18 -07002199EXPORT_SYMBOL(alloc_pages);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002200
Matthew Wilcox (Oracle)cc09cb12020-12-15 22:55:54 -05002201struct folio *folio_alloc(gfp_t gfp, unsigned order)
2202{
2203 struct page *page = alloc_pages(gfp | __GFP_COMP, order);
2204
2205 if (page && order > 1)
2206 prep_transhuge_page(page);
2207 return (struct folio *)page;
2208}
2209EXPORT_SYMBOL(folio_alloc);
2210
Chen Wandunc00b6b92021-11-05 13:39:53 -07002211static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
2212 struct mempolicy *pol, unsigned long nr_pages,
2213 struct page **page_array)
2214{
2215 int nodes;
2216 unsigned long nr_pages_per_node;
2217 int delta;
2218 int i;
2219 unsigned long nr_allocated;
2220 unsigned long total_allocated = 0;
2221
2222 nodes = nodes_weight(pol->nodes);
2223 nr_pages_per_node = nr_pages / nodes;
2224 delta = nr_pages - nodes * nr_pages_per_node;
2225
2226 for (i = 0; i < nodes; i++) {
2227 if (delta) {
2228 nr_allocated = __alloc_pages_bulk(gfp,
2229 interleave_nodes(pol), NULL,
2230 nr_pages_per_node + 1, NULL,
2231 page_array);
2232 delta--;
2233 } else {
2234 nr_allocated = __alloc_pages_bulk(gfp,
2235 interleave_nodes(pol), NULL,
2236 nr_pages_per_node, NULL, page_array);
2237 }
2238
2239 page_array += nr_allocated;
2240 total_allocated += nr_allocated;
2241 }
2242
2243 return total_allocated;
2244}
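/*
 * Worked example (illustrative): for nr_pages == 10 over a three-node
 * interleave mask, nr_pages_per_node == 3 and delta == 1, so the first
 * node visited is asked for 4 pages and the other two nodes for 3 pages
 * each, covering the requested 10 in total.
 */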
2245
2246static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
2247 struct mempolicy *pol, unsigned long nr_pages,
2248 struct page **page_array)
2249{
2250 gfp_t preferred_gfp;
2251 unsigned long nr_allocated = 0;
2252
2253 preferred_gfp = gfp | __GFP_NOWARN;
2254 preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2255
2256 nr_allocated = __alloc_pages_bulk(preferred_gfp, nid, &pol->nodes,
2257 nr_pages, NULL, page_array);
2258
2259 if (nr_allocated < nr_pages)
2260 nr_allocated += __alloc_pages_bulk(gfp, numa_node_id(), NULL,
2261 nr_pages - nr_allocated, NULL,
2262 page_array + nr_allocated);
2263 return nr_allocated;
2264}
2265
2266/* Allocating pages in bulk and applying the mempolicy have to be
2267 * considered together in some situations, such as vmalloc.
2268 *
2269 * Doing so can speed up memory allocation noticeably, especially for
2270 * interleaved allocations.
2271 */
2272unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
2273 unsigned long nr_pages, struct page **page_array)
2274{
2275 struct mempolicy *pol = &default_policy;
2276
2277 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2278 pol = get_task_policy(current);
2279
2280 if (pol->mode == MPOL_INTERLEAVE)
2281 return alloc_pages_bulk_array_interleave(gfp, pol,
2282 nr_pages, page_array);
2283
2284 if (pol->mode == MPOL_PREFERRED_MANY)
2285 return alloc_pages_bulk_array_preferred_many(gfp,
2286 numa_node_id(), pol, nr_pages, page_array);
2287
2288 return __alloc_pages_bulk(gfp, policy_node(gfp, pol, numa_node_id()),
2289 policy_nodemask(gfp, pol), nr_pages, NULL,
2290 page_array);
2291}
2292
Oleg Nesterovef0855d2013-09-11 14:20:14 -07002293int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2294{
2295 struct mempolicy *pol = mpol_dup(vma_policy(src));
2296
2297 if (IS_ERR(pol))
2298 return PTR_ERR(pol);
2299 dst->vm_policy = pol;
2300 return 0;
2301}
2302
Paul Jackson42253992006-01-08 01:01:59 -08002303/*
Lee Schermerhorn846a16b2008-04-28 02:13:09 -07002304 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
Paul Jackson42253992006-01-08 01:01:59 -08002305 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
2306 * with the mems_allowed returned by cpuset_mems_allowed(). This
2307 * keeps mempolicies cpuset relative after its cpuset moves. See
2308 * further kernel/cpuset.c update_nodemask().
Miao Xie708c1bb2010-05-24 14:32:07 -07002309 *
2310 * current's mempolicy may be rebound by another task (the task that changes
2311 * cpuset's mems), so we needn't do the rebind work for the current task.
Paul Jackson42253992006-01-08 01:01:59 -08002312 */
Paul Jackson42253992006-01-08 01:01:59 -08002313
Lee Schermerhorn846a16b2008-04-28 02:13:09 -07002314/* Slow path of a mempolicy duplicate */
2315struct mempolicy *__mpol_dup(struct mempolicy *old)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002316{
2317 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2318
2319 if (!new)
2320 return ERR_PTR(-ENOMEM);
Miao Xie708c1bb2010-05-24 14:32:07 -07002321
2322 /* task's mempolicy is protected by alloc_lock */
2323 if (old == current->mempolicy) {
2324 task_lock(current);
2325 *new = *old;
2326 task_unlock(current);
2327 } else
2328 *new = *old;
2329
Paul Jackson42253992006-01-08 01:01:59 -08002330 if (current_cpuset_is_being_rebound()) {
2331 nodemask_t mems = cpuset_mems_allowed(current);
Vlastimil Babka213980c2017-07-06 15:40:06 -07002332 mpol_rebind_policy(new, &mems);
Paul Jackson42253992006-01-08 01:01:59 -08002333 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002334 atomic_set(&new->refcnt, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002335 return new;
2336}
2337
2338/* Slow path of a mempolicy comparison */
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002339bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002340{
2341 if (!a || !b)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002342 return false;
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002343 if (a->mode != b->mode)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002344 return false;
Bob Liu19800502010-05-24 14:32:01 -07002345 if (a->flags != b->flags)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002346 return false;
Bob Liu19800502010-05-24 14:32:01 -07002347 if (mpol_store_user_nodemask(a))
2348 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002349 return false;
Bob Liu19800502010-05-24 14:32:01 -07002350
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002351 switch (a->mode) {
Mel Gorman19770b32008-04-28 02:12:18 -07002352 case MPOL_BIND:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002353 case MPOL_INTERLEAVE:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002354 case MPOL_PREFERRED:
Dave Hansenb27abac2021-09-02 15:00:06 -07002355 case MPOL_PREFERRED_MANY:
Ben Widawsky269fbe72021-06-30 18:51:10 -07002356 return !!nodes_equal(a->nodes, b->nodes);
Feng Tang7858d7b2021-06-30 18:51:00 -07002357 case MPOL_LOCAL:
2358 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002359 default:
2360 BUG();
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002361 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002362 }
2363}
2364
Linus Torvalds1da177e2005-04-16 15:20:36 -07002365/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002366 * Shared memory backing store policy support.
2367 *
2368 * Remember policies even when nobody has shared memory mapped.
2369 * The policies are kept in a red-black tree linked from the inode.
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002370 * They are protected by the sp->lock rwlock, which should be held
Linus Torvalds1da177e2005-04-16 15:20:36 -07002371 * for any accesses to the tree.
2372 */
2373
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002374/*
2375 * Look up the first element intersecting start-end. Caller holds sp->lock
2376 * for reading or for writing.
2377 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002378static struct sp_node *
2379sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2380{
2381 struct rb_node *n = sp->root.rb_node;
2382
2383 while (n) {
2384 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2385
2386 if (start >= p->end)
2387 n = n->rb_right;
2388 else if (end <= p->start)
2389 n = n->rb_left;
2390 else
2391 break;
2392 }
2393 if (!n)
2394 return NULL;
2395 for (;;) {
2396 struct sp_node *w = NULL;
2397 struct rb_node *prev = rb_prev(n);
2398 if (!prev)
2399 break;
2400 w = rb_entry(prev, struct sp_node, nd);
2401 if (w->end <= start)
2402 break;
2403 n = prev;
2404 }
2405 return rb_entry(n, struct sp_node, nd);
2406}
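/*
 * Illustrative sketch (not part of the original file): with ranges [2,4)
 * and [6,9) in the tree, a lookup for [3,7) intersects both and the
 * walk-back loop above returns the lowest one, [2,4); a lookup for [4,6)
 * overlaps nothing and returns NULL. The helper below is hypothetical and
 * only shows the locking the caller is expected to provide.
 */
static struct sp_node *example_sp_lookup(struct shared_policy *sp,
					 unsigned long start, unsigned long end)
{
	struct sp_node *sn;

	read_lock(&sp->lock);		/* sp_lookup() requires sp->lock */
	sn = sp_lookup(sp, start, end);
	read_unlock(&sp->lock);

	return sn;	/* result may go stale once the lock is dropped */
}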
2407
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002408/*
2409 * Insert a new shared policy into the tree. Caller holds sp->lock for
2410 * writing.
2411 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002412static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2413{
2414 struct rb_node **p = &sp->root.rb_node;
2415 struct rb_node *parent = NULL;
2416 struct sp_node *nd;
2417
2418 while (*p) {
2419 parent = *p;
2420 nd = rb_entry(parent, struct sp_node, nd);
2421 if (new->start < nd->start)
2422 p = &(*p)->rb_left;
2423 else if (new->end > nd->end)
2424 p = &(*p)->rb_right;
2425 else
2426 BUG();
2427 }
2428 rb_link_node(&new->nd, parent, p);
2429 rb_insert_color(&new->nd, &sp->root);
Paul Mundt140d5a42007-07-15 23:38:16 -07002430 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002431 new->policy ? new->policy->mode : 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002432}
2433
2434/* Find shared policy intersecting idx */
2435struct mempolicy *
2436mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2437{
2438 struct mempolicy *pol = NULL;
2439 struct sp_node *sn;
2440
2441 if (!sp->root.rb_node)
2442 return NULL;
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002443 read_lock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002444 sn = sp_lookup(sp, idx, idx+1);
2445 if (sn) {
2446 mpol_get(sn->policy);
2447 pol = sn->policy;
2448 }
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002449 read_unlock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002450 return pol;
2451}
2452
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002453static void sp_free(struct sp_node *n)
2454{
2455 mpol_put(n->policy);
2456 kmem_cache_free(sn_cache, n);
2457}
2458
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002459/**
2460 * mpol_misplaced - check whether the current page's node is valid under the policy
2461 *
Fabian Frederickb46e14a2014-06-04 16:08:18 -07002462 * @page: page to be checked
2463 * @vma: vm area where page mapped
2464 * @addr: virtual address where page mapped
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002465 *
2466 * Look up the current policy node id for vma,addr and compare it to the page's
Matthew Wilcox (Oracle)5f076942021-04-29 23:01:27 -07002467 * node id. Policy determination "mimics" alloc_page_vma().
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002468 * Called from fault path where we know the vma and faulting address.
Matthew Wilcox (Oracle)5f076942021-04-29 23:01:27 -07002469 *
Baolin Wang062db292021-09-02 15:00:03 -07002470 * Return: NUMA_NO_NODE if the page is in a node that is valid for this
2471 * policy, or a suitable node ID to allocate a replacement page from.
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002472 */
2473int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2474{
2475 struct mempolicy *pol;
Mel Gormanc33d6c02016-05-19 17:14:10 -07002476 struct zoneref *z;
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002477 int curnid = page_to_nid(page);
2478 unsigned long pgoff;
Peter Zijlstra90572892013-10-07 11:29:20 +01002479 int thiscpu = raw_smp_processor_id();
2480 int thisnid = cpu_to_node(thiscpu);
Anshuman Khandual98fa15f2019-03-05 15:42:58 -08002481 int polnid = NUMA_NO_NODE;
Baolin Wang062db292021-09-02 15:00:03 -07002482 int ret = NUMA_NO_NODE;
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002483
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07002484 pol = get_vma_policy(vma, addr);
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002485 if (!(pol->flags & MPOL_F_MOF))
2486 goto out;
2487
2488 switch (pol->mode) {
2489 case MPOL_INTERLEAVE:
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002490 pgoff = vma->vm_pgoff;
2491 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
Laurent Dufour98c70ba2017-09-08 16:12:39 -07002492 polnid = offset_il_node(pol, pgoff);
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002493 break;
2494
2495 case MPOL_PREFERRED:
Dave Hansenb27abac2021-09-02 15:00:06 -07002496 if (node_isset(curnid, pol->nodes))
2497 goto out;
Ben Widawsky269fbe72021-06-30 18:51:10 -07002498 polnid = first_node(pol->nodes);
Feng Tang7858d7b2021-06-30 18:51:00 -07002499 break;
2500
2501 case MPOL_LOCAL:
2502 polnid = numa_node_id();
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002503 break;
2504
2505 case MPOL_BIND:
Huang Yingbda420b2021-02-24 12:09:43 -08002506 /* Optimize placement among multiple nodes via NUMA balancing */
2507 if (pol->flags & MPOL_F_MORON) {
Ben Widawsky269fbe72021-06-30 18:51:10 -07002508 if (node_isset(thisnid, pol->nodes))
Huang Yingbda420b2021-02-24 12:09:43 -08002509 break;
2510 goto out;
2511 }
Dave Hansenb27abac2021-09-02 15:00:06 -07002512 fallthrough;
Mel Gormanc33d6c02016-05-19 17:14:10 -07002513
Dave Hansenb27abac2021-09-02 15:00:06 -07002514 case MPOL_PREFERRED_MANY:
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002515 /*
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002516 * use current page if in policy nodemask,
2517 * else select nearest allowed node, if any.
2518 * If no allowed nodes, use current [!misplaced].
2519 */
Ben Widawsky269fbe72021-06-30 18:51:10 -07002520 if (node_isset(curnid, pol->nodes))
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002521 goto out;
Mel Gormanc33d6c02016-05-19 17:14:10 -07002522 z = first_zones_zonelist(
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002523 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2524 gfp_zone(GFP_HIGHUSER),
Ben Widawsky269fbe72021-06-30 18:51:10 -07002525 &pol->nodes);
Pavel Tatashinc1093b72018-08-21 21:53:32 -07002526 polnid = zone_to_nid(z->zone);
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002527 break;
2528
2529 default:
2530 BUG();
2531 }
Mel Gorman5606e382012-11-02 18:19:13 +00002532
2533 /* Migrate the page towards the node whose CPU is referencing it */
Mel Gormane42c8ff2012-11-12 09:17:07 +00002534 if (pol->flags & MPOL_F_MORON) {
Peter Zijlstra90572892013-10-07 11:29:20 +01002535 polnid = thisnid;
Mel Gorman5606e382012-11-02 18:19:13 +00002536
Rik van Riel10f39042014-01-27 17:03:44 -05002537 if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
Rik van Rielde1c9ce2013-10-07 11:29:39 +01002538 goto out;
Mel Gormane42c8ff2012-11-12 09:17:07 +00002539 }
2540
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002541 if (curnid != polnid)
2542 ret = polnid;
2543out:
2544 mpol_cond_put(pol);
2545
2546 return ret;
2547}
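/*
 * Illustrative sketch (not part of the original file): how a NUMA hinting
 * fault handler consumes mpol_misplaced(). NUMA_NO_NODE means the page is
 * already on an acceptable node; any other value is a target node, which
 * the real fault paths hand on to migrate_misplaced_page(). The helper
 * below is hypothetical.
 */
static bool example_should_migrate(struct page *page,
				   struct vm_area_struct *vma,
				   unsigned long addr, int *target)
{
	*target = mpol_misplaced(page, vma, addr);

	return *target != NUMA_NO_NODE;
}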
2548
David Rientjesc11600e2016-09-01 16:15:07 -07002549/*
2550 * Drop the (possibly final) reference to task->mempolicy. It needs to be
2551 * dropped after task->mempolicy is set to NULL so that any allocation done as
2552 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2553 * policy.
2554 */
2555void mpol_put_task_policy(struct task_struct *task)
2556{
2557 struct mempolicy *pol;
2558
2559 task_lock(task);
2560 pol = task->mempolicy;
2561 task->mempolicy = NULL;
2562 task_unlock(task);
2563 mpol_put(pol);
2564}
2565
Linus Torvalds1da177e2005-04-16 15:20:36 -07002566static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2567{
Paul Mundt140d5a42007-07-15 23:38:16 -07002568	pr_debug("deleting %lx-%lx\n", n->start, n->end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002569 rb_erase(&n->nd, &sp->root);
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002570 sp_free(n);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002571}
2572
Mel Gorman42288fe2012-12-21 23:10:25 +00002573static void sp_node_init(struct sp_node *node, unsigned long start,
2574 unsigned long end, struct mempolicy *pol)
2575{
2576 node->start = start;
2577 node->end = end;
2578 node->policy = pol;
2579}
2580
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07002581static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2582 struct mempolicy *pol)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002583{
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002584 struct sp_node *n;
2585 struct mempolicy *newpol;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002586
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002587 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002588 if (!n)
2589 return NULL;
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002590
2591 newpol = mpol_dup(pol);
2592 if (IS_ERR(newpol)) {
2593 kmem_cache_free(sn_cache, n);
2594 return NULL;
2595 }
2596 newpol->flags |= MPOL_F_SHARED;
Mel Gorman42288fe2012-12-21 23:10:25 +00002597 sp_node_init(n, start, end, newpol);
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002598
Linus Torvalds1da177e2005-04-16 15:20:36 -07002599 return n;
2600}
2601
2602/* Replace a policy range. */
2603static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2604 unsigned long end, struct sp_node *new)
2605{
Mel Gormanb22d1272012-10-08 16:29:17 -07002606 struct sp_node *n;
Mel Gorman42288fe2012-12-21 23:10:25 +00002607 struct sp_node *n_new = NULL;
2608 struct mempolicy *mpol_new = NULL;
Mel Gormanb22d1272012-10-08 16:29:17 -07002609 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002610
Mel Gorman42288fe2012-12-21 23:10:25 +00002611restart:
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002612 write_lock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002613 n = sp_lookup(sp, start, end);
2614 /* Take care of old policies in the same range. */
2615 while (n && n->start < end) {
2616 struct rb_node *next = rb_next(&n->nd);
2617 if (n->start >= start) {
2618 if (n->end <= end)
2619 sp_delete(sp, n);
2620 else
2621 n->start = end;
2622 } else {
2623 /* Old policy spanning whole new range. */
2624 if (n->end > end) {
Mel Gorman42288fe2012-12-21 23:10:25 +00002625 if (!n_new)
2626 goto alloc_new;
2627
2628 *mpol_new = *n->policy;
2629 atomic_set(&mpol_new->refcnt, 1);
KOSAKI Motohiro78806392013-03-08 12:43:29 -08002630 sp_node_init(n_new, end, n->end, mpol_new);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002631 n->end = start;
Hillf Danton5ca39572013-03-08 12:43:28 -08002632 sp_insert(sp, n_new);
Mel Gorman42288fe2012-12-21 23:10:25 +00002633 n_new = NULL;
2634 mpol_new = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002635 break;
2636 } else
2637 n->end = start;
2638 }
2639 if (!next)
2640 break;
2641 n = rb_entry(next, struct sp_node, nd);
2642 }
2643 if (new)
2644 sp_insert(sp, new);
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002645 write_unlock(&sp->lock);
Mel Gorman42288fe2012-12-21 23:10:25 +00002646 ret = 0;
2647
2648err_out:
2649 if (mpol_new)
2650 mpol_put(mpol_new);
2651 if (n_new)
2652 kmem_cache_free(sn_cache, n_new);
2653
Mel Gormanb22d1272012-10-08 16:29:17 -07002654 return ret;
Mel Gorman42288fe2012-12-21 23:10:25 +00002655
2656alloc_new:
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002657 write_unlock(&sp->lock);
Mel Gorman42288fe2012-12-21 23:10:25 +00002658 ret = -ENOMEM;
2659 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2660 if (!n_new)
2661 goto err_out;
2662 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2663 if (!mpol_new)
2664 goto err_out;
2665 goto restart;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002666}
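/*
 * Illustrative worked example (not part of the original file): starting
 * from a single shared range [0,10) -> policy A, installing a new node
 * [3,6) -> policy B via the function above leaves the tree holding
 * [0,3) -> A, [3,6) -> B and [6,10) -> copy of A; the tail segment is
 * built from the n_new/mpol_new pair allocated on the restart path.
 */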
2667
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002668/**
2669 * mpol_shared_policy_init - initialize shared policy for inode
2670 * @sp: pointer to inode shared policy
2671 * @mpol: struct mempolicy to install
2672 *
2673 * Install non-NULL @mpol in inode's shared policy rb-tree.
2674 * On entry, the current task has a reference on a non-NULL @mpol.
2675 * This must be released on exit.
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002676 * This is called from get_inode() callers, so we can use GFP_KERNEL.
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002677 */
2678void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
Robin Holt7339ff82006-01-14 13:20:48 -08002679{
Miao Xie58568d22009-06-16 15:31:49 -07002680 int ret;
2681
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002682 sp->root = RB_ROOT; /* empty tree == default mempolicy */
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002683 rwlock_init(&sp->lock);
Robin Holt7339ff82006-01-14 13:20:48 -08002684
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002685 if (mpol) {
2686 struct vm_area_struct pvma;
2687 struct mempolicy *new;
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002688 NODEMASK_SCRATCH(scratch);
Robin Holt7339ff82006-01-14 13:20:48 -08002689
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002690 if (!scratch)
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002691 goto put_mpol;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002692 /* contextualize the tmpfs mount point mempolicy */
2693 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
Lee Schermerhorn15d77832010-05-24 14:32:04 -07002694 if (IS_ERR(new))
Dan Carpenter0cae3452010-05-25 23:42:58 -07002695 goto free_scratch; /* no valid nodemask intersection */
Miao Xie58568d22009-06-16 15:31:49 -07002696
2697 task_lock(current);
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002698 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
Miao Xie58568d22009-06-16 15:31:49 -07002699 task_unlock(current);
Lee Schermerhorn15d77832010-05-24 14:32:04 -07002700 if (ret)
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002701 goto put_new;
Robin Holt7339ff82006-01-14 13:20:48 -08002702
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002703 /* Create pseudo-vma that contains just the policy */
Kirill A. Shutemov2c4541e2018-07-26 16:37:30 -07002704 vma_init(&pvma, NULL);
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002705 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2706 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
Lee Schermerhorn15d77832010-05-24 14:32:04 -07002707
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002708put_new:
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002709 mpol_put(new); /* drop initial ref */
Dan Carpenter0cae3452010-05-25 23:42:58 -07002710free_scratch:
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002711 NODEMASK_SCRATCH_FREE(scratch);
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002712put_mpol:
2713 mpol_put(mpol); /* drop our incoming ref on sb mpol */
Robin Holt7339ff82006-01-14 13:20:48 -08002714 }
2715}
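/*
 * Illustrative sketch (not part of the original file): how a tmpfs-style
 * filesystem wires up a per-inode shared policy; shmem does the equivalent
 * when it creates an inode. The structure and helper below are
 * hypothetical. Note that the reference on sb_mpol is consumed even when
 * it is NULL.
 */
struct example_inode_info {
	struct shared_policy policy;
};

static void example_init_inode_policy(struct example_inode_info *info,
				      struct mempolicy *sb_mpol)
{
	mpol_shared_policy_init(&info->policy, sb_mpol);
}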
2716
Linus Torvalds1da177e2005-04-16 15:20:36 -07002717int mpol_set_shared_policy(struct shared_policy *info,
2718 struct vm_area_struct *vma, struct mempolicy *npol)
2719{
2720 int err;
2721 struct sp_node *new = NULL;
2722 unsigned long sz = vma_pages(vma);
2723
David Rientjes028fec42008-04-28 02:12:25 -07002724 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002725 vma->vm_pgoff,
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002726 sz, npol ? npol->mode : -1,
David Rientjes028fec42008-04-28 02:12:25 -07002727 npol ? npol->flags : -1,
Ben Widawsky269fbe72021-06-30 18:51:10 -07002728 npol ? nodes_addr(npol->nodes)[0] : NUMA_NO_NODE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002729
2730 if (npol) {
2731 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2732 if (!new)
2733 return -ENOMEM;
2734 }
2735 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2736 if (err && new)
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002737 sp_free(new);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002738 return err;
2739}
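/*
 * Illustrative sketch (not part of the original file): a filesystem's
 * vm_operations_struct::set_policy hook is the typical caller, looking up
 * the shared_policy from the mapped file's inode. The helper below is
 * hypothetical and takes that shared_policy directly.
 */
static int example_set_policy(struct vm_area_struct *vma,
			      struct mempolicy *mpol,
			      struct shared_policy *sp)
{
	return mpol_set_shared_policy(sp, vma, mpol);
}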
2740
2741/* Free a backing policy store on inode delete. */
2742void mpol_free_shared_policy(struct shared_policy *p)
2743{
2744 struct sp_node *n;
2745 struct rb_node *next;
2746
2747 if (!p->root.rb_node)
2748 return;
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002749 write_lock(&p->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002750 next = rb_first(&p->root);
2751 while (next) {
2752 n = rb_entry(next, struct sp_node, nd);
2753 next = rb_next(&n->nd);
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002754 sp_delete(p, n);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002755 }
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002756 write_unlock(&p->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002757}
2758
Mel Gorman1a687c22012-11-22 11:16:36 +00002759#ifdef CONFIG_NUMA_BALANCING
Mel Gormanc2976632014-01-29 14:05:42 -08002760static int __initdata numabalancing_override;
Mel Gorman1a687c22012-11-22 11:16:36 +00002761
2762static void __init check_numabalancing_enable(void)
2763{
2764 bool numabalancing_default = false;
2765
2766 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2767 numabalancing_default = true;
2768
Mel Gormanc2976632014-01-29 14:05:42 -08002769 /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2770 if (numabalancing_override)
2771 set_numabalancing_state(numabalancing_override == 1);
2772
Mel Gormanb0dc2b92015-05-14 15:17:09 -07002773 if (num_online_nodes() > 1 && !numabalancing_override) {
Joe Perches756a0252016-03-17 14:19:47 -07002774 pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
Mel Gormanc2976632014-01-29 14:05:42 -08002775 numabalancing_default ? "Enabling" : "Disabling");
Mel Gorman1a687c22012-11-22 11:16:36 +00002776 set_numabalancing_state(numabalancing_default);
2777 }
2778}
2779
2780static int __init setup_numabalancing(char *str)
2781{
2782 int ret = 0;
2783 if (!str)
2784 goto out;
Mel Gorman1a687c22012-11-22 11:16:36 +00002785
2786 if (!strcmp(str, "enable")) {
Mel Gormanc2976632014-01-29 14:05:42 -08002787 numabalancing_override = 1;
Mel Gorman1a687c22012-11-22 11:16:36 +00002788 ret = 1;
2789 } else if (!strcmp(str, "disable")) {
Mel Gormanc2976632014-01-29 14:05:42 -08002790 numabalancing_override = -1;
Mel Gorman1a687c22012-11-22 11:16:36 +00002791 ret = 1;
2792 }
2793out:
2794 if (!ret)
Andrew Morton4a404be2014-01-29 14:05:43 -08002795 pr_warn("Unable to parse numa_balancing=\n");
Mel Gorman1a687c22012-11-22 11:16:36 +00002796
2797 return ret;
2798}
2799__setup("numa_balancing=", setup_numabalancing);
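/*
 * Illustrative note (not part of the original file): the early parameter
 * above is driven from the kernel command line, e.g. "numa_balancing=enable"
 * or "numa_balancing=disable"; at runtime the same state is reachable via
 * the kernel.numa_balancing sysctl mentioned in check_numabalancing_enable().
 */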
2800#else
2801static inline void __init check_numabalancing_enable(void)
2802{
2803}
2804#endif /* CONFIG_NUMA_BALANCING */
2805
Linus Torvalds1da177e2005-04-16 15:20:36 -07002806/* assumes fs == KERNEL_DS */
2807void __init numa_policy_init(void)
2808{
Paul Mundtb71636e2007-07-15 23:38:15 -07002809 nodemask_t interleave_nodes;
2810 unsigned long largest = 0;
2811 int nid, prefer = 0;
2812
Linus Torvalds1da177e2005-04-16 15:20:36 -07002813 policy_cache = kmem_cache_create("numa_policy",
2814 sizeof(struct mempolicy),
Paul Mundt20c2df82007-07-20 10:11:58 +09002815 0, SLAB_PANIC, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002816
2817 sn_cache = kmem_cache_create("shared_policy_node",
2818 sizeof(struct sp_node),
Paul Mundt20c2df82007-07-20 10:11:58 +09002819 0, SLAB_PANIC, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002820
Mel Gorman5606e382012-11-02 18:19:13 +00002821 for_each_node(nid) {
2822 preferred_node_policy[nid] = (struct mempolicy) {
2823 .refcnt = ATOMIC_INIT(1),
2824 .mode = MPOL_PREFERRED,
2825 .flags = MPOL_F_MOF | MPOL_F_MORON,
Ben Widawsky269fbe72021-06-30 18:51:10 -07002826 .nodes = nodemask_of_node(nid),
Mel Gorman5606e382012-11-02 18:19:13 +00002827 };
2828 }
2829
Paul Mundtb71636e2007-07-15 23:38:15 -07002830 /*
2831 * Set interleaving policy for system init. Interleaving is only
2832 * enabled across suitably sized nodes (default is >= 16MB), falling
2833 * back to the largest node if they're all smaller.
2834 */
2835 nodes_clear(interleave_nodes);
Lai Jiangshan01f13bd2012-12-12 13:51:33 -08002836 for_each_node_state(nid, N_MEMORY) {
Paul Mundtb71636e2007-07-15 23:38:15 -07002837 unsigned long total_pages = node_present_pages(nid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002838
Paul Mundtb71636e2007-07-15 23:38:15 -07002839 /* Preserve the largest node */
2840 if (largest < total_pages) {
2841 largest = total_pages;
2842 prefer = nid;
2843 }
2844
2845 /* Interleave this node? */
2846 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2847 node_set(nid, interleave_nodes);
2848 }
2849
2850 /* All too small, use the largest */
2851 if (unlikely(nodes_empty(interleave_nodes)))
2852 node_set(prefer, interleave_nodes);
2853
David Rientjes028fec42008-04-28 02:12:25 -07002854 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
Mitchel Humpherysb1de0d12014-06-06 14:38:30 -07002855 pr_err("%s: interleaving failed\n", __func__);
Mel Gorman1a687c22012-11-22 11:16:36 +00002856
2857 check_numabalancing_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002858}
2859
Christoph Lameter8bccd852005-10-29 18:16:59 -07002860/* Reset policy of current process to default */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002861void numa_default_policy(void)
2862{
David Rientjes028fec42008-04-28 02:12:25 -07002863 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002864}
Paul Jackson68860ec2005-10-30 15:02:36 -08002865
Paul Jackson42253992006-01-08 01:01:59 -08002866/*
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002867 * Parse and format mempolicy from/to strings
2868 */
2869
Lee Schermerhorn345ace92010-05-24 14:32:04 -07002870static const char * const policy_modes[] =
2871{
2872 [MPOL_DEFAULT] = "default",
2873 [MPOL_PREFERRED] = "prefer",
2874 [MPOL_BIND] = "bind",
2875 [MPOL_INTERLEAVE] = "interleave",
Lee Schermerhornd3a71032012-10-25 14:16:29 +02002876 [MPOL_LOCAL] = "local",
Dave Hansenb27abac2021-09-02 15:00:06 -07002877 [MPOL_PREFERRED_MANY] = "prefer (many)",
Lee Schermerhorn345ace92010-05-24 14:32:04 -07002878};
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002879
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002880
2881#ifdef CONFIG_TMPFS
2882/**
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08002883 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002884 * @str: string containing mempolicy to parse
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002885 * @mpol: pointer to struct mempolicy pointer, returned on success.
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002886 *
2887 * Format of input:
2888 * <mode>[=<flags>][:<nodelist>]
2889 *
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002890 * On success, returns 0, else 1
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002891 */
Hugh Dickinsa7a88b22013-01-02 02:04:23 -08002892int mpol_parse_str(char *str, struct mempolicy **mpol)
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002893{
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002894 struct mempolicy *new = NULL;
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08002895 unsigned short mode_flags;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002896 nodemask_t nodes;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002897 char *nodelist = strchr(str, ':');
2898 char *flags = strchr(str, '=');
zhong jiangdedf2c72018-10-26 15:06:57 -07002899 int err = 1, mode;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002900
Dan Carpenterc7a91bc2020-01-30 22:11:07 -08002901 if (flags)
2902 *flags++ = '\0'; /* terminate mode string */
2903
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002904 if (nodelist) {
2905 /* NUL-terminate mode or flags string */
2906 *nodelist++ = '\0';
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002907 if (nodelist_parse(nodelist, nodes))
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002908 goto out;
Lai Jiangshan01f13bd2012-12-12 13:51:33 -08002909 if (!nodes_subset(nodes, node_states[N_MEMORY]))
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002910 goto out;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002911 } else
2912 nodes_clear(nodes);
2913
zhong jiangdedf2c72018-10-26 15:06:57 -07002914 mode = match_string(policy_modes, MPOL_MAX, str);
2915 if (mode < 0)
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002916 goto out;
2917
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002918 switch (mode) {
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002919 case MPOL_PREFERRED:
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002920 /*
Randy Dunlapaa9f7d52020-04-01 21:10:58 -07002921		 * Insist on a nodelist of one node only; later we use
2922		 * first_node(nodes) to grab that single node, so the
2923		 * nodelist (or nodes) cannot be empty here.
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002924 */
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002925 if (nodelist) {
2926 char *rest = nodelist;
2927 while (isdigit(*rest))
2928 rest++;
KOSAKI Motohiro926f2ae2010-03-23 13:35:32 -07002929 if (*rest)
2930 goto out;
Randy Dunlapaa9f7d52020-04-01 21:10:58 -07002931 if (nodes_empty(nodes))
2932 goto out;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002933 }
2934 break;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002935 case MPOL_INTERLEAVE:
2936 /*
2937 * Default to online nodes with memory if no nodelist
2938 */
2939 if (!nodelist)
Lai Jiangshan01f13bd2012-12-12 13:51:33 -08002940 nodes = node_states[N_MEMORY];
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002941 break;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002942 case MPOL_LOCAL:
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002943 /*
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002944 * Don't allow a nodelist; mpol_new() checks flags
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002945 */
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002946 if (nodelist)
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002947 goto out;
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002948 break;
Ravikiran G Thirumalai413b43d2010-03-23 13:35:28 -07002949 case MPOL_DEFAULT:
2950 /*
2951		 * Insist on an empty nodelist
2952 */
2953 if (!nodelist)
2954 err = 0;
2955 goto out;
Dave Hansenb27abac2021-09-02 15:00:06 -07002956 case MPOL_PREFERRED_MANY:
KOSAKI Motohirod69b2e632010-03-23 13:35:30 -07002957 case MPOL_BIND:
2958 /*
2959 * Insist on a nodelist
2960 */
2961 if (!nodelist)
2962 goto out;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002963 }
2964
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002965 mode_flags = 0;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002966 if (flags) {
2967 /*
2968 * Currently, we only support two mutually exclusive
2969 * mode flags.
2970 */
2971 if (!strcmp(flags, "static"))
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002972 mode_flags |= MPOL_F_STATIC_NODES;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002973 else if (!strcmp(flags, "relative"))
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002974 mode_flags |= MPOL_F_RELATIVE_NODES;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002975 else
KOSAKI Motohiro926f2ae2010-03-23 13:35:32 -07002976 goto out;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002977 }
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002978
2979 new = mpol_new(mode, mode_flags, &nodes);
2980 if (IS_ERR(new))
KOSAKI Motohiro926f2ae2010-03-23 13:35:32 -07002981 goto out;
2982
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08002983 /*
2984 * Save nodes for mpol_to_str() to show the tmpfs mount options
2985 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2986 */
Ben Widawsky269fbe72021-06-30 18:51:10 -07002987 if (mode != MPOL_PREFERRED) {
2988 new->nodes = nodes;
2989 } else if (nodelist) {
2990 nodes_clear(new->nodes);
2991 node_set(first_node(nodes), new->nodes);
2992 } else {
Feng Tang7858d7b2021-06-30 18:51:00 -07002993 new->mode = MPOL_LOCAL;
Ben Widawsky269fbe72021-06-30 18:51:10 -07002994 }
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08002995
2996 /*
2997 * Save nodes for contextualization: this will be used to "clone"
2998 * the mempolicy in a specific context [cpuset] at a later time.
2999 */
3000 new->w.user_nodemask = nodes;
3001
KOSAKI Motohiro926f2ae2010-03-23 13:35:32 -07003002 err = 0;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07003003
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07003004out:
3005 /* Restore string for error message */
3006 if (nodelist)
3007 *--nodelist = ':';
3008 if (flags)
3009 *--flags = '=';
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07003010 if (!err)
3011 *mpol = new;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07003012 return err;
3013}
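/*
 * Illustrative sketch (not part of the original file): parsing a tmpfs
 * "mpol=" mount option. Valid inputs include "interleave:0-3",
 * "bind=static:0,2" and "local"; the parser writes NUL bytes into the
 * string, so it needs a writable buffer. The helper below is hypothetical.
 */
static struct mempolicy *example_parse_mount_option(void)
{
	char buf[] = "interleave:0-3";	/* mutable copy for the parser */
	struct mempolicy *mpol = NULL;

	if (mpol_parse_str(buf, &mpol))
		return NULL;		/* non-zero return means parse error */

	return mpol;			/* caller now owns a reference */
}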
3014#endif /* CONFIG_TMPFS */
3015
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07003016/**
3017 * mpol_to_str - format a mempolicy structure for printing
3018 * @buffer: to contain formatted mempolicy string
3019 * @maxlen: length of @buffer
3020 * @pol: pointer to mempolicy to be formatted
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07003021 *
David Rientjes948927e2013-11-12 15:07:28 -08003022 * Convert @pol into a string. If @buffer is too short, truncate the string.
3023 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
3024 * longest flag, "relative", and to display at least a few node ids.
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003025 */
David Rientjes948927e2013-11-12 15:07:28 -08003026void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003027{
3028 char *p = buffer;
David Rientjes948927e2013-11-12 15:07:28 -08003029 nodemask_t nodes = NODE_MASK_NONE;
3030 unsigned short mode = MPOL_DEFAULT;
3031 unsigned short flags = 0;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003032
David Rientjes8790c71a2014-01-30 15:46:08 -08003033 if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
Lee Schermerhornbea904d2008-04-28 02:13:18 -07003034 mode = pol->mode;
David Rientjes948927e2013-11-12 15:07:28 -08003035 flags = pol->flags;
3036 }
Lee Schermerhornbea904d2008-04-28 02:13:18 -07003037
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003038 switch (mode) {
3039 case MPOL_DEFAULT:
Feng Tang7858d7b2021-06-30 18:51:00 -07003040 case MPOL_LOCAL:
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003041 break;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003042 case MPOL_PREFERRED:
Dave Hansenb27abac2021-09-02 15:00:06 -07003043 case MPOL_PREFERRED_MANY:
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003044 case MPOL_BIND:
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003045 case MPOL_INTERLEAVE:
Ben Widawsky269fbe72021-06-30 18:51:10 -07003046 nodes = pol->nodes;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003047 break;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003048 default:
David Rientjes948927e2013-11-12 15:07:28 -08003049 WARN_ON_ONCE(1);
3050 snprintf(p, maxlen, "unknown");
3051 return;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003052 }
3053
David Rientjesb7a9f422013-11-21 14:32:06 -08003054 p += snprintf(p, maxlen, "%s", policy_modes[mode]);
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003055
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07003056 if (flags & MPOL_MODE_FLAGS) {
David Rientjes948927e2013-11-12 15:07:28 -08003057 p += snprintf(p, buffer + maxlen - p, "=");
David Rientjesf5b087b2008-04-28 02:12:27 -07003058
Lee Schermerhorn22919902008-04-28 02:13:22 -07003059 /*
3060 * Currently, the only defined flags are mutually exclusive
3061 */
David Rientjesf5b087b2008-04-28 02:12:27 -07003062 if (flags & MPOL_F_STATIC_NODES)
Lee Schermerhorn22919902008-04-28 02:13:22 -07003063 p += snprintf(p, buffer + maxlen - p, "static");
3064 else if (flags & MPOL_F_RELATIVE_NODES)
3065 p += snprintf(p, buffer + maxlen - p, "relative");
David Rientjesf5b087b2008-04-28 02:12:27 -07003066 }
3067
Tejun Heo9e763e02015-02-13 14:38:02 -08003068 if (!nodes_empty(nodes))
3069 p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
3070 nodemask_pr_args(&nodes));
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08003071}
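/*
 * Illustrative sketch (not part of the original file): formatting a policy
 * the way /proc/<pid>/numa_maps does. For an interleave policy over nodes
 * 0-3 the buffer ends up holding "interleave:0-3", or
 * "interleave=static:0-3" when MPOL_F_STATIC_NODES is set. The helper
 * below is hypothetical.
 */
static void example_show_policy(struct seq_file *m, struct mempolicy *pol)
{
	char buf[64];	/* comfortably above the recommended 32 bytes */

	mpol_to_str(buf, sizeof(buf), pol);
	seq_printf(m, "policy=%s\n", buf);
}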