// SPDX-License-Identifier: GPL-2.0
/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/compaction.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/pfn_t.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/balloon_compaction.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>
#include <linux/sched/mm.h>
#include <linux/ptrace.h>

#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/migrate.h>

#include "internal.h"

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
 * undesirable, use migrate_prep_local()
 */
int migrate_prep(void)
{
	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

/* Do the necessary work of migrate_prep but not if it involves other CPUs */
int migrate_prep_local(void)
{
	lru_add_drain();

	return 0;
}

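/*
 * Attempt to isolate a non-LRU movable page for migration.
 *
 * Returns 0 when the page has been isolated (PG_isolated set and the extra
 * reference taken below still held), or -EBUSY when isolation is not
 * possible; every failure path drops any reference it took.
 */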
int isolate_movable_page(struct page *page, isolate_mode_t mode)
{
	struct address_space *mapping;

	/*
	 * Avoid burning cycles with pages that are still under __free_pages(),
	 * or just got freed under us.
	 *
	 * In case we 'win' a race for a movable page being freed under us and
	 * raise its refcount preventing __free_pages() from doing its job,
	 * the put_page() at the end of this block will take care of
	 * releasing this page, thus avoiding a nasty leak.
	 */
	if (unlikely(!get_page_unless_zero(page)))
		goto out;

	/*
	 * Check PageMovable before taking the page lock because the page's
	 * owner assumes that nobody touches the PG_locked bit of a newly
	 * allocated page, so unconditionally grabbing the lock would break
	 * the owner's side.
	 */
	if (unlikely(!__PageMovable(page)))
		goto out_putpage;
	/*
	 * As movable pages are not isolated from LRU lists, concurrent
	 * compaction threads can race against page migration functions
	 * as well as against the release of a page.
	 *
	 * In order to avoid having an already isolated movable page
	 * being (wrongly) re-isolated while it is under migration,
	 * or to avoid attempting to isolate pages being released,
	 * let's make sure we have the page lock
	 * before proceeding with the movable page isolation steps.
	 */
	if (unlikely(!trylock_page(page)))
		goto out_putpage;

	if (!PageMovable(page) || PageIsolated(page))
		goto out_no_isolated;

	mapping = page_mapping(page);
	VM_BUG_ON_PAGE(!mapping, page);

	if (!mapping->a_ops->isolate_page(page, mode))
		goto out_no_isolated;

	/* Driver shouldn't use the PG_isolated bit of page->flags */
	WARN_ON_ONCE(PageIsolated(page));
	__SetPageIsolated(page);
	unlock_page(page);

	return 0;

out_no_isolated:
	unlock_page(page);
out_putpage:
	put_page(page);
out:
	return -EBUSY;
}

/* This should be called on a page which is PG_movable */
void putback_movable_page(struct page *page)
{
	struct address_space *mapping;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);

	mapping = page_mapping(page);
	mapping->a_ops->putback_page(page);
	__ClearPageIsolated(page);
}

/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used whenever the isolated pageset has been
 * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
 * and isolate_huge_page().
 */
void putback_movable_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		if (unlikely(PageHuge(page))) {
			putback_active_hugepage(page);
			continue;
		}
		list_del(&page->lru);
		/*
		 * We isolated a non-LRU movable page, so here we can use
		 * __PageMovable because an LRU page's mapping cannot have
		 * PAGE_MAPPING_MOVABLE.
		 */
		if (unlikely(__PageMovable(page))) {
			VM_BUG_ON_PAGE(!PageIsolated(page), page);
			lock_page(page);
			if (PageMovable(page))
				putback_movable_page(page);
			else
				__ClearPageIsolated(page);
			unlock_page(page);
			put_page(page);
		} else {
			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
					page_is_file_cache(page), -hpage_nr_pages(page));
			putback_lru_page(page);
		}
	}
}

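/*
 * While a page is being migrated, its PTEs are replaced with migration
 * entries: non-present swap-style entries that record the old page and
 * whether the mapping was writable. remove_migration_pte() below turns
 * such an entry back into a present PTE pointing at the new page.
 */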
/*
 * Restore a potential migration pte to a working pte entry
 */
static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
				 unsigned long addr, void *old)
{
	struct page_vma_mapped_walk pvmw = {
		.page = old,
		.vma = vma,
		.address = addr,
		.flags = PVMW_SYNC | PVMW_MIGRATION,
	};
	struct page *new;
	pte_t pte;
	swp_entry_t entry;

	VM_BUG_ON_PAGE(PageTail(page), page);
	while (page_vma_mapped_walk(&pvmw)) {
		if (PageKsm(page))
			new = page;
		else
			new = page - pvmw.page->index +
				linear_page_index(vma, pvmw.address);

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		/* PMD-mapped THP migration entry */
		if (!pvmw.pte) {
			VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
			remove_migration_pmd(&pvmw, new);
			continue;
		}
#endif

		get_page(new);
		pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
		if (pte_swp_soft_dirty(*pvmw.pte))
			pte = pte_mksoft_dirty(pte);

		/*
		 * Recheck VMA as permissions can change since migration started
		 */
		entry = pte_to_swp_entry(*pvmw.pte);
		if (is_write_migration_entry(entry))
			pte = maybe_mkwrite(pte, vma);

		if (unlikely(is_zone_device_page(new))) {
			if (is_device_private_page(new)) {
				entry = make_device_private_entry(new, pte_write(pte));
				pte = swp_entry_to_pte(entry);
			} else if (is_device_public_page(new)) {
				pte = pte_mkdevmap(pte);
				flush_dcache_page(new);
			}
		} else
			flush_dcache_page(new);

#ifdef CONFIG_HUGETLB_PAGE
		if (PageHuge(new)) {
			pte = pte_mkhuge(pte);
			pte = arch_make_huge_pte(pte, vma, new, 0);
			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
			if (PageAnon(new))
				hugepage_add_anon_rmap(new, vma, pvmw.address);
			else
				page_dup_rmap(new, true);
		} else
#endif
		{
			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);

			if (PageAnon(new))
				page_add_anon_rmap(new, vma, pvmw.address, false);
			else
				page_add_file_rmap(new, false);
		}
		if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
			mlock_vma_page(new);

		/* No need to invalidate - it was non-present before */
		update_mmu_cache(vma, pvmw.address, pvmw.pte);
	}

	return true;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
void remove_migration_ptes(struct page *old, struct page *new, bool locked)
{
	struct rmap_walk_control rwc = {
		.rmap_one = remove_migration_pte,
		.arg = old,
	};

	if (locked)
		rmap_walk_locked(new, &rwc);
	else
		rmap_walk(new, &rwc);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
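/*
 * The wait helpers below take the page table lock, check that the entry is
 * still a migration entry, pin the old page with get_page_unless_zero(),
 * drop the lock, and then sleep in wait_on_page_locked() until the migration
 * path unlocks the page.
 */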
void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
				spinlock_t *ptl)
{
	pte_t pte;
	swp_entry_t entry;
	struct page *page;

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	page = migration_entry_to_page(entry);

	/*
	 * Once the radix-tree replacement of page migration has started,
	 * page_count *must* be zero. And we don't want to call
	 * wait_on_page_locked() against a page without get_page().
	 * So we use get_page_unless_zero() here. Even if it fails, the
	 * page fault will simply occur again.
	 */
	if (!get_page_unless_zero(page))
		goto out;
	pte_unmap_unlock(ptep, ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}

void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	spinlock_t *ptl = pte_lockptr(mm, pmd);
	pte_t *ptep = pte_offset_map(pmd, address);
	__migration_entry_wait(mm, ptep, ptl);
}

void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
	__migration_entry_wait(mm, pte, ptl);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl;
	struct page *page;

	ptl = pmd_lock(mm, pmd);
	if (!is_pmd_migration_entry(*pmd))
		goto unlock;
	page = migration_entry_to_page(pmd_to_swp_entry(*pmd));
	if (!get_page_unless_zero(page))
		goto unlock;
	spin_unlock(ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
unlock:
	spin_unlock(ptl);
}
#endif

#ifdef CONFIG_BLOCK
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	struct buffer_head *bh = head;

	/* Simple case, sync compaction */
	if (mode != MIGRATE_ASYNC) {
		do {
			get_bh(bh);
			lock_buffer(bh);
			bh = bh->b_this_page;

		} while (bh != head);

		return true;
	}

	/* async case, we cannot block on lock_buffer so use trylock_buffer */
	do {
		get_bh(bh);
		if (!trylock_buffer(bh)) {
			/*
			 * We failed to lock the buffer and cannot stall in
			 * async migration. Release the taken locks.
			 */
			struct buffer_head *failed_bh = bh;
			put_bh(failed_bh);
			bh = head;
			while (bh != failed_bh) {
				unlock_buffer(bh);
				put_bh(bh);
				bh = bh->b_this_page;
			}
			return false;
		}

		bh = bh->b_this_page;
	} while (bh != head);
	return true;
}
#else
static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	return true;
}
#endif /* CONFIG_BLOCK */

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page,
		struct buffer_head *head, enum migrate_mode mode,
		int extra_count)
{
	struct zone *oldzone, *newzone;
	int dirty;
	int expected_count = 1 + extra_count;
	void **pslot;

	/*
	 * Device public or private pages have an extra refcount as they are
	 * ZONE_DEVICE pages.
	 */
	expected_count += is_device_private_page(page);
	expected_count += is_device_public_page(page);

	if (!mapping) {
		/* Anonymous page without mapping */
		if (page_count(page) != expected_count)
			return -EAGAIN;

		/* No turning back from here */
		newpage->index = page->index;
		newpage->mapping = page->mapping;
		if (PageSwapBacked(page))
			__SetPageSwapBacked(newpage);

		return MIGRATEPAGE_SUCCESS;
	}

	oldzone = page_zone(page);
	newzone = page_zone(newpage);

	xa_lock_irq(&mapping->i_pages);

	pslot = radix_tree_lookup_slot(&mapping->i_pages,
					page_index(page));

	expected_count += hpage_nr_pages(page) + page_has_private(page);
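	/*
	 * At this point expected_count covers the caller's reference
	 * (1 + extra_count), one reference per subpage held by the page
	 * cache, one more if buffer heads / private data are attached,
	 * and an extra one for ZONE_DEVICE pages. Any other reference
	 * means someone else still uses the page and we must back off.
	 */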
	if (page_count(page) != expected_count ||
		radix_tree_deref_slot_protected(pslot,
					&mapping->i_pages.xa_lock) != page) {
		xa_unlock_irq(&mapping->i_pages);
		return -EAGAIN;
	}

	if (!page_ref_freeze(page, expected_count)) {
		xa_unlock_irq(&mapping->i_pages);
		return -EAGAIN;
	}

	/*
	 * In the async migration case of moving a page with buffers, lock the
	 * buffers using trylock before the mapping is moved. If the mapping
	 * were moved and we only later failed to lock the buffers, we could
	 * not move the mapping back due to an elevated page count, and would
	 * have to block waiting on other references to be dropped.
	 */
	if (mode == MIGRATE_ASYNC && head &&
			!buffer_migrate_lock_buffers(head, mode)) {
		page_ref_unfreeze(page, expected_count);
		xa_unlock_irq(&mapping->i_pages);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page:
	 * no turning back from here.
	 */
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	page_ref_add(newpage, hpage_nr_pages(page)); /* add cache reference */
	if (PageSwapBacked(page)) {
		__SetPageSwapBacked(newpage);
		if (PageSwapCache(page)) {
			SetPageSwapCache(newpage);
			set_page_private(newpage, page_private(page));
		}
	} else {
		VM_BUG_ON_PAGE(PageSwapCache(page), page);
	}

	/* Move dirty while page refs frozen and newpage not yet exposed */
	dirty = PageDirty(page);
	if (dirty) {
		ClearPageDirty(page);
		SetPageDirty(newpage);
	}

	radix_tree_replace_slot(&mapping->i_pages, pslot, newpage);
	if (PageTransHuge(page)) {
		int i;
		int index = page_index(page);

		for (i = 1; i < HPAGE_PMD_NR; i++) {
			pslot = radix_tree_lookup_slot(&mapping->i_pages,
						       index + i);
			radix_tree_replace_slot(&mapping->i_pages, pslot,
						newpage + i);
		}
	}

	/*
	 * Drop the cache reference from the old page by unfreezing
	 * to one less reference.
	 * We know this isn't the last reference.
	 */
	page_ref_unfreeze(page, expected_count - hpage_nr_pages(page));

	xa_unlock(&mapping->i_pages);
	/* Leave irq disabled to prevent preemption while updating stats */

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
	 * are mapped to swap space.
	 */
	if (newzone != oldzone) {
		__dec_node_state(oldzone->zone_pgdat, NR_FILE_PAGES);
		__inc_node_state(newzone->zone_pgdat, NR_FILE_PAGES);
		if (PageSwapBacked(page) && !PageSwapCache(page)) {
			__dec_node_state(oldzone->zone_pgdat, NR_SHMEM);
			__inc_node_state(newzone->zone_pgdat, NR_SHMEM);
		}
		if (dirty && mapping_cap_account_dirty(mapping)) {
			__dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY);
			__dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
			__inc_node_state(newzone->zone_pgdat, NR_FILE_DIRTY);
			__inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
		}
	}
	local_irq_enable();

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page_move_mapping);

/*
 * The expected number of remaining references is the same as that
 * of migrate_page_move_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct page *newpage, struct page *page)
{
	int expected_count;
	void **pslot;

	xa_lock_irq(&mapping->i_pages);

	pslot = radix_tree_lookup_slot(&mapping->i_pages, page_index(page));

	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count ||
		radix_tree_deref_slot_protected(pslot, &mapping->i_pages.xa_lock) != page) {
		xa_unlock_irq(&mapping->i_pages);
		return -EAGAIN;
	}

	if (!page_ref_freeze(page, expected_count)) {
		xa_unlock_irq(&mapping->i_pages);
		return -EAGAIN;
	}

	newpage->index = page->index;
	newpage->mapping = page->mapping;

	get_page(newpage);

	radix_tree_replace_slot(&mapping->i_pages, pslot, newpage);

	page_ref_unfreeze(page, expected_count - 1);

	xa_unlock_irq(&mapping->i_pages);

	return MIGRATEPAGE_SUCCESS;
}

/*
 * Gigantic pages are so large that we do not guarantee that page++ pointer
 * arithmetic will work across the entire page. We need something more
 * specialized.
 */
static void __copy_gigantic_page(struct page *dst, struct page *src,
				int nr_pages)
{
	int i;
	struct page *dst_base = dst;
	struct page *src_base = src;

	for (i = 0; i < nr_pages; ) {
		cond_resched();
		copy_highpage(dst, src);

		i++;
		dst = mem_map_next(dst, dst_base, i);
		src = mem_map_next(src, src_base, i);
	}
}

static void copy_huge_page(struct page *dst, struct page *src)
{
	int i;
	int nr_pages;

	if (PageHuge(src)) {
		/* hugetlbfs page */
		struct hstate *h = page_hstate(src);
		nr_pages = pages_per_huge_page(h);

		if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
			__copy_gigantic_page(dst, src, nr_pages);
			return;
		}
	} else {
		/* thp page */
		BUG_ON(!PageTransHuge(src));
		nr_pages = hpage_nr_pages(src);
	}

	for (i = 0; i < nr_pages; i++) {
		cond_resched();
		copy_highpage(dst + i, src + i);
	}
}

/*
 * Copy the page to its new location
 */
void migrate_page_states(struct page *newpage, struct page *page)
{
	int cpupid;

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (TestClearPageActive(page)) {
		VM_BUG_ON_PAGE(PageUnevictable(page), page);
		SetPageActive(newpage);
	} else if (TestClearPageUnevictable(page))
		SetPageUnevictable(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	/* Move dirty on pages not done by migrate_page_move_mapping() */
	if (PageDirty(page))
		SetPageDirty(newpage);

	if (page_is_young(page))
		set_page_young(newpage);
	if (page_is_idle(page))
		set_page_idle(newpage);

	/*
	 * Copy NUMA information to the new page, to prevent over-eager
	 * future migrations of this same page.
	 */
	cpupid = page_cpupid_xchg_last(page, -1);
	page_cpupid_xchg_last(newpage, cpupid);

	ksm_migrate_page(newpage, page);
	/*
	 * Please do not reorder this without considering how mm/ksm.c's
	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
	 */
	if (PageSwapCache(page))
		ClearPageSwapCache(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);

	copy_page_owner(page, newpage);

	mem_cgroup_migrate(page, newpage);
}
EXPORT_SYMBOL(migrate_page_states);

void migrate_page_copy(struct page *newpage, struct page *page)
{
	if (PageHuge(page) || PageTransHuge(page))
		copy_huge_page(newpage, page);
	else
		copy_highpage(newpage, page);

	migrate_page_states(newpage, page);
}
EXPORT_SYMBOL(migrate_page_copy);

/************************************************************
 *                    Migration functions
 ***********************************************************/

/*
 * Common logic to directly migrate a single LRU page suitable for
 * pages that do not use PagePrivate/PagePrivate2.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page);

#ifdef CONFIG_BLOCK
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	struct buffer_head *bh, *head;
	int rc;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page, mode);

	head = page_buffers(page);

	rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	/*
	 * In the async case, migrate_page_move_mapping locked the buffers
	 * with an IRQ-safe spinlock held. In the sync case, the buffers
	 * need to be locked now.
	 */
	if (mode != MIGRATE_ASYNC)
		BUG_ON(!buffer_migrate_lock_buffers(head, mode));

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

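	/*
	 * Repoint every buffer_head at the new page, keeping each buffer's
	 * offset within the page unchanged; the buffers stay locked until
	 * the copy below has finished.
	 */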
	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(buffer_migrate_page);
#endif

/*
 * Write back a page to clean its dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!clear_page_dirty_for_io(page))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty page may imply that the underlying filesystem has
	 * the page on some queue. So the page must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * page state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(page, page, false);

	rc = mapping->a_ops->writepage(page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		lock_page(page);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page, enum migrate_mode mode)
{
	if (PageDirty(page)) {
		/* Only write back pages in full synchronous migration */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			return -EBUSY;
		}
		return writeout(mapping, page);
	}

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;

	return migrate_page(mapping, newpage, page, mode);
}

/*
 * Move a page to a newly allocated page.
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  MIGRATEPAGE_SUCCESS - success
 */
static int move_to_new_page(struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	struct address_space *mapping;
	int rc = -EAGAIN;
	bool is_lru = !__PageMovable(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	mapping = page_mapping(page);

	if (likely(is_lru)) {
		if (!mapping)
			rc = migrate_page(mapping, newpage, page, mode);
		else if (mapping->a_ops->migratepage)
			/*
			 * Most pages have a mapping and most filesystems
			 * provide a migratepage callback. Anonymous pages
			 * are part of swap space which also has its own
			 * migratepage callback. This is the most common path
			 * for page migration.
			 */
			rc = mapping->a_ops->migratepage(mapping, newpage,
							page, mode);
		else
			rc = fallback_migrate_page(mapping, newpage,
							page, mode);
	} else {
		/*
		 * A non-LRU page could have been released after the
		 * isolation step. In that case, we shouldn't try migration.
		 */
		VM_BUG_ON_PAGE(!PageIsolated(page), page);
		if (!PageMovable(page)) {
			rc = MIGRATEPAGE_SUCCESS;
			__ClearPageIsolated(page);
			goto out;
		}

		rc = mapping->a_ops->migratepage(mapping, newpage,
						page, mode);
		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
			!PageIsolated(page));
	}

	/*
	 * When successful, the old pagecache page->mapping must be cleared
	 * before the page is freed; but stats require that PageAnon be left
	 * as PageAnon.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		if (__PageMovable(page)) {
			VM_BUG_ON_PAGE(!PageIsolated(page), page);

			/*
			 * We clear PG_movable under the page lock so that no
			 * compactor can try to migrate this page.
			 */
			__ClearPageIsolated(page);
		}

		/*
		 * Anonymous and movable page->mapping will be cleared by
		 * free_pages_prepare(), so don't reset it here; keeping it
		 * lets type checks such as PageAnon() still work.
		 */
		if (!PageMappingFlags(page))
			page->mapping = NULL;
	}
out:
	return rc;
}

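/*
 * __unmap_and_move() below returns MIGRATEPAGE_SUCCESS when the page has
 * been moved, -EAGAIN when the attempt should be retried later, -EBUSY when
 * writeback blocks a non-synchronous mode, or another negative error code
 * propagated from the address_space's migratepage callback.
 */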
static int __unmap_and_move(struct page *page, struct page *newpage,
				int force, enum migrate_mode mode)
{
	int rc = -EAGAIN;
	int page_was_mapped = 0;
	struct anon_vma *anon_vma = NULL;
	bool is_lru = !__PageMovable(page);

	if (!trylock_page(page)) {
		if (!force || mode == MIGRATE_ASYNC)
			goto out;

		/*
		 * It's not safe for direct compaction to call lock_page.
		 * For example, during page readahead pages are added locked
		 * to the LRU. Later, when the IO completes the pages are
		 * marked uptodate and unlocked. However, the queueing
		 * could be merging multiple pages for one bio (e.g.
		 * mpage_readpages). If an allocation happens for the
		 * second or third page, the process can end up locking
		 * the same page twice and deadlocking. Rather than
		 * trying to be clever about what pages can be locked,
		 * avoid the use of lock_page for direct compaction
		 * altogether.
		 */
		if (current->flags & PF_MEMALLOC)
			goto out;

		lock_page(page);
	}

	if (PageWriteback(page)) {
		/*
		 * Only in the case of a full synchronous migration is it
		 * necessary to wait for PageWriteback. In the async case,
		 * the retry loop is too short and in the sync-light case,
		 * the overhead of stalling is too much.
		 */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			rc = -EBUSY;
			goto out_unlock;
		}
		if (!force)
			goto out_unlock;
		wait_on_page_writeback(page);
	}

	/*
	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
	 * we cannot notice that the anon_vma is freed while we migrate a page.
	 * This get_anon_vma() delays freeing the anon_vma pointer until the end
	 * of migration. File cache pages are no problem because of page_lock():
	 * file caches may use write_page() or lock_page() during migration,
	 * so we only need to care about anon pages here.
	 *
	 * Only page_get_anon_vma() understands the subtleties of
	 * getting a hold on an anon_vma from outside one of its mms.
	 * But if we cannot get anon_vma, then we won't need it anyway,
	 * because that implies that the anon page is no longer mapped
	 * (and cannot be remapped so long as we hold the page lock).
	 */
	if (PageAnon(page) && !PageKsm(page))
		anon_vma = page_get_anon_vma(page);

	/*
	 * Block others from accessing the new page when we get around to
	 * establishing additional references. We are usually the only one
	 * holding a reference to newpage at this point. We used to have a BUG
	 * here if trylock_page(newpage) fails, but would like to allow for
	 * cases where there might be a race with the previous use of newpage.
	 * This is much like races on refcount of oldpage: just don't BUG().
	 */
	if (unlikely(!trylock_page(newpage)))
		goto out_unlock;

	if (unlikely(!is_lru)) {
		rc = move_to_new_page(newpage, page, mode);
		goto out_unlock_both;
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read in, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a page->mapping==NULL page will
	 * trigger a BUG. So handle it here.
	 * 2. An orphaned page (see truncate_complete_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining. Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated. So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!page->mapping) {
		VM_BUG_ON_PAGE(PageAnon(page), page);
		if (page_has_private(page)) {
			try_to_free_buffers(page);
			goto out_unlock_both;
		}
	} else if (page_mapped(page)) {
		/* Establish migration ptes */
		VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
				page);
		try_to_unmap(page,
			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
		page_was_mapped = 1;
	}

	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page, mode);

	if (page_was_mapped)
		remove_migration_ptes(page,
			rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);

out_unlock_both:
	unlock_page(newpage);
out_unlock:
	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		put_anon_vma(anon_vma);
	unlock_page(page);
out:
	/*
	 * If migration is successful, drop the refcount on newpage taken
	 * here; this will not free the page because the new page owner has
	 * taken its own reference. Also, if it is an LRU page, put it back
	 * on the LRU list here.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		if (unlikely(__PageMovable(newpage)))
			put_page(newpage);
		else
			putback_lru_page(newpage);
	}

	return rc;
}

/*
 * gcc 4.7 and 4.8 on arm hit ICEs (internal compiler errors) when inlining
 * unmap_and_move(). Work around it.
 */
#if (GCC_VERSION >= 40700 && GCC_VERSION < 40900) && defined(CONFIG_ARM)
#define ICE_noinline noinline
#else
#define ICE_noinline
#endif

/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static ICE_noinline int unmap_and_move(new_page_t get_new_page,
				   free_page_t put_new_page,
				   unsigned long private, struct page *page,
				   int force, enum migrate_mode mode,
				   enum migrate_reason reason)
{
	int rc = MIGRATEPAGE_SUCCESS;
	struct page *newpage;

	if (!thp_migration_supported() && PageTransHuge(page))
		return -ENOMEM;

	newpage = get_new_page(page, private);
	if (!newpage)
		return -ENOMEM;

	if (page_count(page) == 1) {
		/* page was freed from under us. So we are done. */
		ClearPageActive(page);
		ClearPageUnevictable(page);
		if (unlikely(__PageMovable(page))) {
			lock_page(page);
			if (!PageMovable(page))
				__ClearPageIsolated(page);
			unlock_page(page);
		}
		if (put_new_page)
			put_new_page(newpage, private);
		else
			put_page(newpage);
		goto out;
	}

	rc = __unmap_and_move(page, newpage, force, mode);
	if (rc == MIGRATEPAGE_SUCCESS)
		set_page_owner_migrate_reason(newpage, reason);

out:
	if (rc != -EAGAIN) {
		/*
		 * A page that has been migrated has all references
		 * removed and will be freed. A page that has not been
		 * migrated will have kept its references and be
		 * restored.
		 */
		list_del(&page->lru);

		/*
		 * Compaction can also migrate non-LRU pages which are
		 * not accounted to NR_ISOLATED_*. They can be recognized
		 * as __PageMovable.
		 */
		if (likely(!__PageMovable(page)))
			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
					page_is_file_cache(page), -hpage_nr_pages(page));
	}

	/*
	 * If migration is successful, release the reference grabbed during
	 * isolation. Otherwise, restore the page to the right list unless
	 * we want to retry.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		put_page(page);
		if (reason == MR_MEMORY_FAILURE) {
			/*
			 * Set PG_HWPoison on the just freed page
			 * intentionally. Although it's rather weird,
			 * it's how the HWPoison flag works at the moment.
			 */
			if (!test_set_page_hwpoison(page))
				num_poisoned_pages_inc();
		}
	} else {
		if (rc != -EAGAIN) {
			if (likely(!__PageMovable(page))) {
				putback_lru_page(page);
				goto put_new;
			}

			lock_page(page);
			if (PageMovable(page))
				putback_movable_page(page);
			else
				__ClearPageIsolated(page);
			unlock_page(page);
			put_page(page);
		}
put_new:
		if (put_new_page)
			put_new_page(newpage, private);
		else
			put_page(newpage);
	}

	return rc;
}

1242/*
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001243 * Counterpart of unmap_and_move_page() for hugepage migration.
1244 *
1245 * This function doesn't wait the completion of hugepage I/O
1246 * because there is no race between I/O and migration for hugepage.
1247 * Note that currently hugepage I/O occurs only in direct I/O
1248 * where no lock is held and PG_writeback is irrelevant,
1249 * and writeback status of all subpages are counted in the reference
1250 * count of the head page (i.e. if all subpages of a 2MB hugepage are
1251 * under direct I/O, the reference of the head page is 512 and a bit more.)
1252 * This means that when we try to migrate hugepage whose subpages are
1253 * doing direct I/O, some references remain after try_to_unmap() and
1254 * hugepage migration fails without data corruption.
1255 *
1256 * There is also no race when direct I/O is issued on the page under migration,
 1257 * because then the pte is replaced with a migration swap entry and direct I/O code
1258 * will wait in the page fault for migration to complete.
1259 */
1260static int unmap_and_move_huge_page(new_page_t get_new_page,
David Rientjes68711a72014-06-04 16:08:25 -07001261 free_page_t put_new_page, unsigned long private,
1262 struct page *hpage, int force,
Vlastimil Babka7cd12b42016-03-15 14:56:18 -07001263 enum migrate_mode mode, int reason)
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001264{
Hugh Dickins2def7422015-11-05 18:49:46 -08001265 int rc = -EAGAIN;
Hugh Dickins2ebba6b2014-12-12 16:56:19 -08001266 int page_was_mapped = 0;
Joonsoo Kim32665f22014-01-21 15:51:15 -08001267 struct page *new_hpage;
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001268 struct anon_vma *anon_vma = NULL;
1269
Naoya Horiguchi83467ef2013-09-11 14:22:11 -07001270 /*
1271 * Movability of hugepages depends on architectures and hugepage size.
1272 * This check is necessary because some callers of hugepage migration
1273 * like soft offline and memory hotremove don't walk through page
1274 * tables or check whether the hugepage is pmd-based or not before
1275 * kicking migration.
1276 */
Naoya Horiguchi100873d2014-06-04 16:10:56 -07001277 if (!hugepage_migration_supported(page_hstate(hpage))) {
Joonsoo Kim32665f22014-01-21 15:51:15 -08001278 putback_active_hugepage(hpage);
Naoya Horiguchi83467ef2013-09-11 14:22:11 -07001279 return -ENOSYS;
Joonsoo Kim32665f22014-01-21 15:51:15 -08001280 }
Naoya Horiguchi83467ef2013-09-11 14:22:11 -07001281
Michal Hocko666feb22018-04-10 16:30:03 -07001282 new_hpage = get_new_page(hpage, private);
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001283 if (!new_hpage)
1284 return -ENOMEM;
1285
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001286 if (!trylock_page(hpage)) {
Jérôme Glisse2916ecc2017-09-08 16:12:06 -07001287 if (!force)
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001288 goto out;
Jérôme Glisse2916ecc2017-09-08 16:12:06 -07001289 switch (mode) {
1290 case MIGRATE_SYNC:
1291 case MIGRATE_SYNC_NO_COPY:
1292 break;
1293 default:
1294 goto out;
1295 }
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001296 lock_page(hpage);
1297 }
1298
Peter Zijlstra746b18d2011-05-24 17:12:10 -07001299 if (PageAnon(hpage))
1300 anon_vma = page_get_anon_vma(hpage);
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001301
Hugh Dickins7db76712015-11-05 18:49:49 -08001302 if (unlikely(!trylock_page(new_hpage)))
1303 goto put_anon;
1304
Hugh Dickins2ebba6b2014-12-12 16:56:19 -08001305 if (page_mapped(hpage)) {
1306 try_to_unmap(hpage,
1307 TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
1308 page_was_mapped = 1;
1309 }
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001310
1311 if (!page_mapped(hpage))
Hugh Dickins5c3f9a62015-11-05 18:49:53 -08001312 rc = move_to_new_page(new_hpage, hpage, mode);
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001313
Hugh Dickins5c3f9a62015-11-05 18:49:53 -08001314 if (page_was_mapped)
1315 remove_migration_ptes(hpage,
Kirill A. Shutemove3884662016-03-17 14:20:07 -07001316 rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001317
Hugh Dickins7db76712015-11-05 18:49:49 -08001318 unlock_page(new_hpage);
1319
1320put_anon:
Hugh Dickinsfd4a4662011-01-13 15:47:31 -08001321 if (anon_vma)
Peter Zijlstra9e601092011-03-22 16:32:46 -07001322 put_anon_vma(anon_vma);
Aneesh Kumar K.V8e6ac7f2012-07-31 16:42:27 -07001323
Hugh Dickins2def7422015-11-05 18:49:46 -08001324 if (rc == MIGRATEPAGE_SUCCESS) {
Michal Hockoab5ac902018-01-31 16:20:48 -08001325 move_hugetlb_state(hpage, new_hpage, reason);
Hugh Dickins2def7422015-11-05 18:49:46 -08001326 put_new_page = NULL;
1327 }
Aneesh Kumar K.V8e6ac7f2012-07-31 16:42:27 -07001328
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001329 unlock_page(hpage);
Hillf Danton09761332011-12-08 14:34:20 -08001330out:
Naoya Horiguchib8ec1ce2013-09-11 14:22:01 -07001331 if (rc != -EAGAIN)
1332 putback_active_hugepage(hpage);
Anshuman Khandualc3114a82017-07-10 15:47:41 -07001333 if (reason == MR_MEMORY_FAILURE && !test_set_page_hwpoison(hpage))
1334 num_poisoned_pages_inc();
David Rientjes68711a72014-06-04 16:08:25 -07001335
1336 /*
1337 * If migration was not successful and there's a freeing callback, use
1338 * it. Otherwise, put_page() will drop the reference grabbed during
1339 * isolation.
1340 */
Hugh Dickins2def7422015-11-05 18:49:46 -08001341 if (put_new_page)
David Rientjes68711a72014-06-04 16:08:25 -07001342 put_new_page(new_hpage, private);
1343 else
Naoya Horiguchi3aaa76e2015-09-22 14:59:14 -07001344 putback_active_hugepage(new_hpage);
David Rientjes68711a72014-06-04 16:08:25 -07001345
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001346 return rc;
1347}
1348
1349/*
Srivatsa S. Bhatc73e5c92013-04-29 15:08:16 -07001350 * migrate_pages - migrate the pages specified in a list, to the free pages
1351 * supplied as the target for the page migration
Christoph Lameterb20a3502006-03-22 00:09:12 -08001352 *
Srivatsa S. Bhatc73e5c92013-04-29 15:08:16 -07001353 * @from: The list of pages to be migrated.
1354 * @get_new_page: The function used to allocate free pages to be used
1355 * as the target of the page migration.
David Rientjes68711a72014-06-04 16:08:25 -07001356 * @put_new_page: The function used to free target pages if migration
1357 * fails, or NULL if no special handling is necessary.
Srivatsa S. Bhatc73e5c92013-04-29 15:08:16 -07001358 * @private: Private data to be passed on to get_new_page()
1359 * @mode: The migration mode that specifies the constraints for
1360 * page migration, if any.
1361 * @reason: The reason for page migration.
Christoph Lameterb20a3502006-03-22 00:09:12 -08001362 *
Srivatsa S. Bhatc73e5c92013-04-29 15:08:16 -07001363 * The function returns after 10 attempts or if no pages are movable any more
 1364 * because the list has become empty or no retryable pages remain on it.
Hugh Dickins14e0f9b2015-11-05 18:49:43 -08001365 * The caller should call putback_movable_pages() to return pages to the LRU
Minchan Kim28bd6572011-01-25 15:07:26 -08001366 * or free list only if ret != 0.
Christoph Lameterb20a3502006-03-22 00:09:12 -08001367 *
Srivatsa S. Bhatc73e5c92013-04-29 15:08:16 -07001368 * Returns the number of pages that were not migrated, or an error code.
Christoph Lameterb20a3502006-03-22 00:09:12 -08001369 */
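/*
 * Illustrative caller sketch (not taken from this file; the allocator
 * callback name is hypothetical). It follows the contract documented
 * above: pages already isolated onto a list go in, and on a non-zero
 * return the caller hands the leftovers back with putback_movable_pages():
 *
 *	LIST_HEAD(pagelist);
 *	int ret;
 *
 *	(isolate candidate pages onto &pagelist first)
 *	ret = migrate_pages(&pagelist, my_alloc_target_page, NULL, 0,
 *			    MIGRATE_SYNC, MR_SYSCALL);
 *	if (ret)
 *		putback_movable_pages(&pagelist);
 */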
Hugh Dickins9c620e22013-02-22 16:35:14 -08001370int migrate_pages(struct list_head *from, new_page_t get_new_page,
David Rientjes68711a72014-06-04 16:08:25 -07001371 free_page_t put_new_page, unsigned long private,
1372 enum migrate_mode mode, int reason)
Christoph Lameterb20a3502006-03-22 00:09:12 -08001373{
Christoph Lametere24f0b82006-06-23 02:03:51 -07001374 int retry = 1;
Christoph Lameterb20a3502006-03-22 00:09:12 -08001375 int nr_failed = 0;
Mel Gorman5647bc22012-10-19 10:46:20 +01001376 int nr_succeeded = 0;
Christoph Lameterb20a3502006-03-22 00:09:12 -08001377 int pass = 0;
1378 struct page *page;
1379 struct page *page2;
1380 int swapwrite = current->flags & PF_SWAPWRITE;
1381 int rc;
1382
1383 if (!swapwrite)
1384 current->flags |= PF_SWAPWRITE;
1385
Christoph Lametere24f0b82006-06-23 02:03:51 -07001386 for(pass = 0; pass < 10 && retry; pass++) {
1387 retry = 0;
Christoph Lameterb20a3502006-03-22 00:09:12 -08001388
Christoph Lametere24f0b82006-06-23 02:03:51 -07001389 list_for_each_entry_safe(page, page2, from, lru) {
Michal Hocko94723aa2018-04-10 16:30:07 -07001390retry:
Christoph Lametere24f0b82006-06-23 02:03:51 -07001391 cond_resched();
Christoph Lameterb20a3502006-03-22 00:09:12 -08001392
Naoya Horiguchi31caf662013-09-11 14:21:59 -07001393 if (PageHuge(page))
1394 rc = unmap_and_move_huge_page(get_new_page,
David Rientjes68711a72014-06-04 16:08:25 -07001395 put_new_page, private, page,
Vlastimil Babka7cd12b42016-03-15 14:56:18 -07001396 pass > 2, mode, reason);
Naoya Horiguchi31caf662013-09-11 14:21:59 -07001397 else
David Rientjes68711a72014-06-04 16:08:25 -07001398 rc = unmap_and_move(get_new_page, put_new_page,
Naoya Horiguchiadd05ce2015-06-24 16:56:50 -07001399 private, page, pass > 2, mode,
1400 reason);
Christoph Lameterb20a3502006-03-22 00:09:12 -08001401
Christoph Lametere24f0b82006-06-23 02:03:51 -07001402 switch(rc) {
Christoph Lameter95a402c2006-06-23 02:03:53 -07001403 case -ENOMEM:
Michal Hocko94723aa2018-04-10 16:30:07 -07001404 /*
1405 * THP migration might be unsupported or the
1406 * allocation could've failed so we should
1407 * retry on the same page with the THP split
1408 * to base pages.
1409 *
1410 * Head page is retried immediately and tail
1411 * pages are added to the tail of the list so
1412 * we encounter them after the rest of the list
1413 * is processed.
1414 */
1415 if (PageTransHuge(page)) {
1416 lock_page(page);
1417 rc = split_huge_page_to_list(page, from);
1418 unlock_page(page);
1419 if (!rc) {
1420 list_safe_reset_next(page, page2, lru);
1421 goto retry;
1422 }
1423 }
David Rientjesdfef2ef2016-05-20 16:59:05 -07001424 nr_failed++;
Christoph Lameter95a402c2006-06-23 02:03:53 -07001425 goto out;
Christoph Lametere24f0b82006-06-23 02:03:51 -07001426 case -EAGAIN:
Christoph Lameter2d1db3b2006-06-23 02:03:33 -07001427 retry++;
Christoph Lametere24f0b82006-06-23 02:03:51 -07001428 break;
Rafael Aquini78bd5202012-12-11 16:02:31 -08001429 case MIGRATEPAGE_SUCCESS:
Mel Gorman5647bc22012-10-19 10:46:20 +01001430 nr_succeeded++;
Christoph Lametere24f0b82006-06-23 02:03:51 -07001431 break;
1432 default:
Naoya Horiguchi354a3362014-01-21 15:51:14 -08001433 /*
1434 * Permanent failure (-EBUSY, -ENOSYS, etc.):
 1435			 * unlike the -EAGAIN case, the failed page is
 1436			 * removed from the migration page list and not
1437 * retried in the next outer loop.
1438 */
Christoph Lameter2d1db3b2006-06-23 02:03:33 -07001439 nr_failed++;
Christoph Lametere24f0b82006-06-23 02:03:51 -07001440 break;
Christoph Lameter2d1db3b2006-06-23 02:03:33 -07001441 }
Christoph Lameterb20a3502006-03-22 00:09:12 -08001442 }
1443 }
Vlastimil Babkaf2f81fb2015-11-05 18:47:03 -08001444 nr_failed += retry;
1445 rc = nr_failed;
Christoph Lameter95a402c2006-06-23 02:03:53 -07001446out:
Mel Gorman5647bc22012-10-19 10:46:20 +01001447 if (nr_succeeded)
1448 count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
1449 if (nr_failed)
1450 count_vm_events(PGMIGRATE_FAIL, nr_failed);
Mel Gorman7b2a2d42012-10-19 14:07:31 +01001451 trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason);
1452
Christoph Lameterb20a3502006-03-22 00:09:12 -08001453 if (!swapwrite)
1454 current->flags &= ~PF_SWAPWRITE;
1455
Rafael Aquini78bd5202012-12-11 16:02:31 -08001456 return rc;
Christoph Lameterb20a3502006-03-22 00:09:12 -08001457}
1458
Christoph Lameter742755a2006-06-23 02:03:55 -07001459#ifdef CONFIG_NUMA
Christoph Lameter742755a2006-06-23 02:03:55 -07001460
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001461static int store_status(int __user *status, int start, int value, int nr)
Christoph Lameter742755a2006-06-23 02:03:55 -07001462{
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001463 while (nr-- > 0) {
1464 if (put_user(value, status + start))
1465 return -EFAULT;
1466 start++;
1467 }
Christoph Lameter742755a2006-06-23 02:03:55 -07001468
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001469 return 0;
1470}
Christoph Lameter742755a2006-06-23 02:03:55 -07001471
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001472static int do_move_pages_to_node(struct mm_struct *mm,
1473 struct list_head *pagelist, int node)
1474{
1475 int err;
Christoph Lameter742755a2006-06-23 02:03:55 -07001476
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001477 if (list_empty(pagelist))
1478 return 0;
Christoph Lameter742755a2006-06-23 02:03:55 -07001479
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001480 err = migrate_pages(pagelist, alloc_new_node_page, NULL, node,
1481 MIGRATE_SYNC, MR_SYSCALL);
1482 if (err)
1483 putback_movable_pages(pagelist);
1484 return err;
Christoph Lameter742755a2006-06-23 02:03:55 -07001485}
1486
1487/*
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001488 * Resolves the given address to a struct page, isolates it from the LRU and
 1489 * puts it on the given pagelist.
 1490 * Returns -errno if the page cannot be found/isolated or 0 when it has been
 1491 * queued or the page doesn't need to be migrated because it is already on
 1492 * the target node.
Christoph Lameter742755a2006-06-23 02:03:55 -07001493 */
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001494static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
1495 int node, struct list_head *pagelist, bool migrate_all)
Christoph Lameter742755a2006-06-23 02:03:55 -07001496{
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001497 struct vm_area_struct *vma;
1498 struct page *page;
1499 unsigned int follflags;
Christoph Lameter742755a2006-06-23 02:03:55 -07001500 int err;
Christoph Lameter742755a2006-06-23 02:03:55 -07001501
1502 down_read(&mm->mmap_sem);
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001503 err = -EFAULT;
1504 vma = find_vma(mm, addr);
1505 if (!vma || addr < vma->vm_start || !vma_migratable(vma))
1506 goto out;
Christoph Lameter742755a2006-06-23 02:03:55 -07001507
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001508 /* FOLL_DUMP to ignore special (like zero) pages */
1509 follflags = FOLL_GET | FOLL_DUMP;
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001510 page = follow_page(vma, addr, follflags);
Christoph Lameter742755a2006-06-23 02:03:55 -07001511
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001512 err = PTR_ERR(page);
1513 if (IS_ERR(page))
1514 goto out;
Christoph Lameter742755a2006-06-23 02:03:55 -07001515
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001516 err = -ENOENT;
1517 if (!page)
1518 goto out;
Christoph Lameter742755a2006-06-23 02:03:55 -07001519
Brice Gogline78bbfa2008-10-18 20:27:15 -07001520 err = 0;
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001521 if (page_to_nid(page) == node)
1522 goto out_putpage;
Christoph Lameter742755a2006-06-23 02:03:55 -07001523
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001524 err = -EACCES;
1525 if (page_mapcount(page) > 1 && !migrate_all)
1526 goto out_putpage;
1527
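	/*
	 * Hugetlb pages are isolated via their dedicated helper; everything
	 * else is isolated from the LRU by its compound head and accounted
	 * in NR_ISOLATED_* so the vmstat counters stay balanced.
	 */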
1528 if (PageHuge(page)) {
1529 if (PageHead(page)) {
1530 isolate_huge_page(page, pagelist);
1531 err = 0;
1532 }
1533 } else {
1534 struct page *head;
1535
1536 head = compound_head(page);
1537 err = isolate_lru_page(head);
1538 if (err)
1539 goto out_putpage;
1540
1541 err = 0;
1542 list_add_tail(&head->lru, pagelist);
1543 mod_node_page_state(page_pgdat(head),
1544 NR_ISOLATED_ANON + page_is_file_cache(head),
1545 hpage_nr_pages(head));
1546 }
1547out_putpage:
1548 /*
1549 * Either remove the duplicate refcount from
1550 * isolate_lru_page() or drop the page ref if it was
1551 * not isolated.
1552 */
1553 put_page(page);
1554out:
Christoph Lameter742755a2006-06-23 02:03:55 -07001555 up_read(&mm->mmap_sem);
1556 return err;
1557}
1558
1559/*
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001560 * Migrate an array of page addresses onto an array of nodes and fill
1561 * the corresponding array of status.
1562 */
Christoph Lameter3268c632012-03-21 16:34:06 -07001563static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001564 unsigned long nr_pages,
1565 const void __user * __user *pages,
1566 const int __user *nodes,
1567 int __user *status, int flags)
1568{
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001569 int current_node = NUMA_NO_NODE;
1570 LIST_HEAD(pagelist);
1571 int start, i;
1572 int err = 0, err1;
Brice Goglin35282a22009-06-16 15:32:43 -07001573
1574 migrate_prep();
1575
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001576 for (i = start = 0; i < nr_pages; i++) {
1577 const void __user *p;
1578 unsigned long addr;
1579 int node;
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001580
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001581 err = -EFAULT;
1582 if (get_user(p, pages + i))
1583 goto out_flush;
1584 if (get_user(node, nodes + i))
1585 goto out_flush;
1586 addr = (unsigned long)p;
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001587
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001588 err = -ENODEV;
1589 if (node < 0 || node >= MAX_NUMNODES)
1590 goto out_flush;
1591 if (!node_state(node, N_MEMORY))
1592 goto out_flush;
Brice Goglin3140a222009-01-06 14:38:57 -08001593
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001594 err = -EACCES;
1595 if (!node_isset(node, task_nodes))
1596 goto out_flush;
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001597
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001598 if (current_node == NUMA_NO_NODE) {
1599 current_node = node;
1600 start = i;
1601 } else if (node != current_node) {
1602 err = do_move_pages_to_node(mm, &pagelist, current_node);
1603 if (err)
1604 goto out;
1605 err = store_status(status, start, current_node, i - start);
1606 if (err)
1607 goto out;
1608 start = i;
1609 current_node = node;
Brice Goglin3140a222009-01-06 14:38:57 -08001610 }
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001611
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001612 /*
1613 * Errors in the page lookup or isolation are not fatal and we simply
 1614		 * report them via status.
1615 */
1616 err = add_page_for_migration(mm, addr, current_node,
1617 &pagelist, flags & MPOL_MF_MOVE_ALL);
1618 if (!err)
1619 continue;
Brice Goglin3140a222009-01-06 14:38:57 -08001620
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001621 err = store_status(status, i, err, 1);
1622 if (err)
1623 goto out_flush;
Brice Goglin3140a222009-01-06 14:38:57 -08001624
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001625 err = do_move_pages_to_node(mm, &pagelist, current_node);
1626 if (err)
1627 goto out;
1628 if (i > start) {
1629 err = store_status(status, start, current_node, i - start);
1630 if (err)
1631 goto out;
1632 }
1633 current_node = NUMA_NO_NODE;
Brice Goglin3140a222009-01-06 14:38:57 -08001634 }
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001635out_flush:
Michal Hocko8f175cf2018-04-20 14:55:35 -07001636 if (list_empty(&pagelist))
1637 return err;
1638
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001639 /* Make sure we do not overwrite the existing error */
1640 err1 = do_move_pages_to_node(mm, &pagelist, current_node);
1641 if (!err1)
1642 err1 = store_status(status, start, current_node, i - start);
1643 if (!err)
1644 err = err1;
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001645out:
1646 return err;
1647}
1648
1649/*
Brice Goglin2f007e72008-10-18 20:27:16 -07001650 * Determine the nodes of an array of pages and store them in an array of status values.
Christoph Lameter742755a2006-06-23 02:03:55 -07001651 */
Brice Goglin80bba122008-12-09 13:14:23 -08001652static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
1653 const void __user **pages, int *status)
Christoph Lameter742755a2006-06-23 02:03:55 -07001654{
Brice Goglin2f007e72008-10-18 20:27:16 -07001655 unsigned long i;
Brice Goglin2f007e72008-10-18 20:27:16 -07001656
Christoph Lameter742755a2006-06-23 02:03:55 -07001657 down_read(&mm->mmap_sem);
1658
Brice Goglin2f007e72008-10-18 20:27:16 -07001659 for (i = 0; i < nr_pages; i++) {
Brice Goglin80bba122008-12-09 13:14:23 -08001660 unsigned long addr = (unsigned long)(*pages);
Christoph Lameter742755a2006-06-23 02:03:55 -07001661 struct vm_area_struct *vma;
1662 struct page *page;
KOSAKI Motohiroc095adb2008-12-16 16:06:43 +09001663 int err = -EFAULT;
Brice Goglin2f007e72008-10-18 20:27:16 -07001664
1665 vma = find_vma(mm, addr);
Gleb Natapov70384dc2010-10-26 14:22:07 -07001666 if (!vma || addr < vma->vm_start)
Christoph Lameter742755a2006-06-23 02:03:55 -07001667 goto set_status;
1668
Kirill A. Shutemovd8998442015-09-04 15:47:53 -07001669 /* FOLL_DUMP to ignore special (like zero) pages */
1670 page = follow_page(vma, addr, FOLL_DUMP);
Linus Torvalds89f5b7d2008-06-20 11:18:25 -07001671
1672 err = PTR_ERR(page);
1673 if (IS_ERR(page))
1674 goto set_status;
1675
Kirill A. Shutemovd8998442015-09-04 15:47:53 -07001676 err = page ? page_to_nid(page) : -ENOENT;
Christoph Lameter742755a2006-06-23 02:03:55 -07001677set_status:
Brice Goglin80bba122008-12-09 13:14:23 -08001678 *status = err;
1679
1680 pages++;
1681 status++;
1682 }
1683
1684 up_read(&mm->mmap_sem);
1685}
1686
1687/*
 1688 * Determine the nodes of a user array of pages and store them in
 1689 * a user array of status values.
1690 */
1691static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
1692 const void __user * __user *pages,
1693 int __user *status)
1694{
1695#define DO_PAGES_STAT_CHUNK_NR 16
1696 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
1697 int chunk_status[DO_PAGES_STAT_CHUNK_NR];
Brice Goglin80bba122008-12-09 13:14:23 -08001698
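	/*
	 * Work through the user arrays in fixed-size chunks so the on-stack
	 * scratch buffers above stay small no matter how large nr_pages is.
	 */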
H. Peter Anvin87b8d1a2010-02-18 16:13:40 -08001699 while (nr_pages) {
1700 unsigned long chunk_nr;
Brice Goglin80bba122008-12-09 13:14:23 -08001701
H. Peter Anvin87b8d1a2010-02-18 16:13:40 -08001702 chunk_nr = nr_pages;
1703 if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
1704 chunk_nr = DO_PAGES_STAT_CHUNK_NR;
1705
1706 if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
1707 break;
Brice Goglin80bba122008-12-09 13:14:23 -08001708
1709 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
1710
H. Peter Anvin87b8d1a2010-02-18 16:13:40 -08001711 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
1712 break;
Christoph Lameter742755a2006-06-23 02:03:55 -07001713
H. Peter Anvin87b8d1a2010-02-18 16:13:40 -08001714 pages += chunk_nr;
1715 status += chunk_nr;
1716 nr_pages -= chunk_nr;
1717 }
1718 return nr_pages ? -EFAULT : 0;
Christoph Lameter742755a2006-06-23 02:03:55 -07001719}
1720
1721/*
1722 * Move a list of pages in the address space of the currently executing
1723 * process.
1724 */
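/*
 * Userspace view, as an illustrative sketch only (see the move_pages(2)
 * man page for the authoritative description): the syscall takes parallel
 * arrays, and each status slot receives either the node the page ended up
 * on or a negative errno for that page:
 *
 *	void *pages[1] = { some_addr };
 *	int nodes[1] = { 1 };
 *	int status[1];
 *	long ret = move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);
 */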
Dominik Brodowski7addf442018-03-17 16:08:03 +01001725static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
1726 const void __user * __user *pages,
1727 const int __user *nodes,
1728 int __user *status, int flags)
Christoph Lameter742755a2006-06-23 02:03:55 -07001729{
Christoph Lameter742755a2006-06-23 02:03:55 -07001730 struct task_struct *task;
Christoph Lameter742755a2006-06-23 02:03:55 -07001731 struct mm_struct *mm;
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001732 int err;
Christoph Lameter3268c632012-03-21 16:34:06 -07001733 nodemask_t task_nodes;
Christoph Lameter742755a2006-06-23 02:03:55 -07001734
1735 /* Check flags */
1736 if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
1737 return -EINVAL;
1738
1739 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1740 return -EPERM;
1741
1742 /* Find the mm_struct */
Greg Thelena879bf52011-02-25 14:44:13 -08001743 rcu_read_lock();
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -07001744 task = pid ? find_task_by_vpid(pid) : current;
Christoph Lameter742755a2006-06-23 02:03:55 -07001745 if (!task) {
Greg Thelena879bf52011-02-25 14:44:13 -08001746 rcu_read_unlock();
Christoph Lameter742755a2006-06-23 02:03:55 -07001747 return -ESRCH;
1748 }
Christoph Lameter3268c632012-03-21 16:34:06 -07001749 get_task_struct(task);
Christoph Lameter742755a2006-06-23 02:03:55 -07001750
1751 /*
1752 * Check if this process has the right to modify the specified
Linus Torvalds197e7e52017-08-20 13:26:27 -07001753 * process. Use the regular "ptrace_may_access()" checks.
Christoph Lameter742755a2006-06-23 02:03:55 -07001754 */
Linus Torvalds197e7e52017-08-20 13:26:27 -07001755 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
David Howellsc69e8d92008-11-14 10:39:19 +11001756 rcu_read_unlock();
Christoph Lameter742755a2006-06-23 02:03:55 -07001757 err = -EPERM;
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001758 goto out;
Christoph Lameter742755a2006-06-23 02:03:55 -07001759 }
David Howellsc69e8d92008-11-14 10:39:19 +11001760 rcu_read_unlock();
Christoph Lameter742755a2006-06-23 02:03:55 -07001761
David Quigley86c3a762006-06-23 02:04:02 -07001762 err = security_task_movememory(task);
1763 if (err)
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001764 goto out;
David Quigley86c3a762006-06-23 02:04:02 -07001765
Christoph Lameter3268c632012-03-21 16:34:06 -07001766 task_nodes = cpuset_mems_allowed(task);
1767 mm = get_task_mm(task);
1768 put_task_struct(task);
1769
Sasha Levin6e8b09e2012-04-25 16:01:53 -07001770 if (!mm)
1771 return -EINVAL;
1772
1773 if (nodes)
1774 err = do_pages_move(mm, task_nodes, nr_pages, pages,
1775 nodes, status, flags);
1776 else
1777 err = do_pages_stat(mm, nr_pages, pages, status);
Christoph Lameter3268c632012-03-21 16:34:06 -07001778
1779 mmput(mm);
1780 return err;
David Quigley86c3a762006-06-23 02:04:02 -07001781
Christoph Lameter742755a2006-06-23 02:03:55 -07001782out:
Christoph Lameter3268c632012-03-21 16:34:06 -07001783 put_task_struct(task);
Christoph Lameter742755a2006-06-23 02:03:55 -07001784 return err;
1785}
Christoph Lameter742755a2006-06-23 02:03:55 -07001786
Dominik Brodowski7addf442018-03-17 16:08:03 +01001787SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
1788 const void __user * __user *, pages,
1789 const int __user *, nodes,
1790 int __user *, status, int, flags)
1791{
1792 return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
1793}
1794
1795#ifdef CONFIG_COMPAT
1796COMPAT_SYSCALL_DEFINE6(move_pages, pid_t, pid, compat_ulong_t, nr_pages,
1797 compat_uptr_t __user *, pages32,
1798 const int __user *, nodes,
1799 int __user *, status,
1800 int, flags)
1801{
1802 const void __user * __user *pages;
1803 int i;
1804
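	/*
	 * Widen the 32-bit user pointers into a native-width array staged
	 * on the user stack, then reuse the regular implementation as-is.
	 */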
1805 pages = compat_alloc_user_space(nr_pages * sizeof(void *));
1806 for (i = 0; i < nr_pages; i++) {
1807 compat_uptr_t p;
1808
1809 if (get_user(p, pages32 + i) ||
1810 put_user(compat_ptr(p), pages + i))
1811 return -EFAULT;
1812 }
1813 return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
1814}
1815#endif /* CONFIG_COMPAT */
1816
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001817#ifdef CONFIG_NUMA_BALANCING
1818/*
1819 * Returns true if this is a safe migration target node for misplaced NUMA
 1820 * pages. Currently it only checks the watermarks, which is crude.
1821 */
1822static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
Mel Gorman3abef4e2013-02-22 16:34:27 -08001823 unsigned long nr_migrate_pages)
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001824{
1825 int z;
Mel Gorman599d0c92016-07-28 15:45:31 -07001826
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001827 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
1828 struct zone *zone = pgdat->node_zones + z;
1829
1830 if (!populated_zone(zone))
1831 continue;
1832
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001833 /* Avoid waking kswapd by allocating pages_to_migrate pages. */
1834 if (!zone_watermark_ok(zone, 0,
1835 high_wmark_pages(zone) +
1836 nr_migrate_pages,
1837 0, 0))
1838 continue;
1839 return true;
1840 }
1841 return false;
1842}
1843
1844static struct page *alloc_misplaced_dst_page(struct page *page,
Michal Hocko666feb22018-04-10 16:30:03 -07001845 unsigned long data)
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001846{
1847 int nid = (int) data;
1848 struct page *newpage;
1849
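	/*
	 * Allocate strictly on the target node and fail fast (no reclaim,
	 * no retries, no warning): NUMA-balancing migration is purely
	 * opportunistic, so a failed allocation just means no migration.
	 */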
Vlastimil Babka96db8002015-09-08 15:03:50 -07001850 newpage = __alloc_pages_node(nid,
Johannes Weinere97ca8e52014-03-10 15:49:43 -07001851 (GFP_HIGHUSER_MOVABLE |
1852 __GFP_THISNODE | __GFP_NOMEMALLOC |
1853 __GFP_NORETRY | __GFP_NOWARN) &
Mel Gorman8479eba2016-02-26 15:19:31 -08001854 ~__GFP_RECLAIM, 0);
Hillf Dantonbac03822012-11-27 14:46:24 +00001855
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001856 return newpage;
1857}
1858
1859/*
Mel Gormana8f60772012-11-14 21:41:46 +00001860 * page migration rate limiting control.
1861 * Do not migrate more than @pages_to_migrate in a @migrate_interval_millisecs
1862 * window of time. Default here says do not migrate more than 1280M per second.
1863 */
1864static unsigned int migrate_interval_millisecs __read_mostly = 100;
1865static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT);
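/*
 * Worked example: with 4KB pages, ratelimit_pages is 128 << 8 = 32768
 * pages, i.e. 128MB per 100ms window, which is the 1280M per second
 * figure quoted above.
 */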
1866
Mel Gormanb32967f2012-11-19 12:35:47 +00001867/* Returns true if the node is migrate rate-limited after the update */
Mel Gorman1c30e012014-01-21 15:50:58 -08001868static bool numamigrate_update_ratelimit(pg_data_t *pgdat,
1869 unsigned long nr_pages)
Mel Gormanb32967f2012-11-19 12:35:47 +00001870{
Mel Gormanb32967f2012-11-19 12:35:47 +00001871 /*
1872 * Rate-limit the amount of data that is being migrated to a node.
1873 * Optimal placement is no good if the memory bus is saturated and
1874 * all the time is being spent migrating!
1875 */
Mel Gormanb32967f2012-11-19 12:35:47 +00001876 if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
Mel Gorman1c5e9c22014-01-21 15:50:59 -08001877 spin_lock(&pgdat->numabalancing_migrate_lock);
Mel Gormanb32967f2012-11-19 12:35:47 +00001878 pgdat->numabalancing_migrate_nr_pages = 0;
1879 pgdat->numabalancing_migrate_next_window = jiffies +
1880 msecs_to_jiffies(migrate_interval_millisecs);
Mel Gorman1c5e9c22014-01-21 15:50:59 -08001881 spin_unlock(&pgdat->numabalancing_migrate_lock);
Mel Gormanb32967f2012-11-19 12:35:47 +00001882 }
Mel Gormanaf1839d2014-01-21 15:51:01 -08001883 if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages) {
1884 trace_mm_numa_migrate_ratelimit(current, pgdat->node_id,
1885 nr_pages);
Mel Gorman1c5e9c22014-01-21 15:50:59 -08001886 return true;
Mel Gormanaf1839d2014-01-21 15:51:01 -08001887 }
Mel Gorman1c5e9c22014-01-21 15:50:59 -08001888
1889 /*
1890 * This is an unlocked non-atomic update so errors are possible.
 1891	 * The consequence is failing to migrate when we potentially should
 1892	 * have, which is not severe enough to warrant locking. If it is ever
1893 * a problem, it can be converted to a per-cpu counter.
1894 */
1895 pgdat->numabalancing_migrate_nr_pages += nr_pages;
1896 return false;
Mel Gormanb32967f2012-11-19 12:35:47 +00001897}
1898
Mel Gorman1c30e012014-01-21 15:50:58 -08001899static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
Mel Gormanb32967f2012-11-19 12:35:47 +00001900{
Hugh Dickins340ef392013-02-22 16:34:33 -08001901 int page_lru;
Mel Gormanb32967f2012-11-19 12:35:47 +00001902
Sasha Levin309381fea2014-01-23 15:52:54 -08001903 VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);
Mel Gorman3abef4e2013-02-22 16:34:27 -08001904
Mel Gormanb32967f2012-11-19 12:35:47 +00001905 /* Avoid migrating to a node that is nearly full */
Hugh Dickins340ef392013-02-22 16:34:33 -08001906 if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page)))
1907 return 0;
Mel Gormanb32967f2012-11-19 12:35:47 +00001908
Hugh Dickins340ef392013-02-22 16:34:33 -08001909 if (isolate_lru_page(page))
1910 return 0;
Mel Gormanb32967f2012-11-19 12:35:47 +00001911
1912 /*
Hugh Dickins340ef392013-02-22 16:34:33 -08001913 * migrate_misplaced_transhuge_page() skips page migration's usual
1914 * check on page_count(), so we must do it here, now that the page
1915 * has been isolated: a GUP pin, or any other pin, prevents migration.
 1916	 * The expected page count is 3: 1 for the page's mapcount, 1 for the
 1917	 * caller's pin, and 1 for the reference taken by isolate_lru_page().
1918 */
1919 if (PageTransHuge(page) && page_count(page) != 3) {
1920 putback_lru_page(page);
1921 return 0;
1922 }
1923
1924 page_lru = page_is_file_cache(page);
Mel Gorman599d0c92016-07-28 15:45:31 -07001925 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
Hugh Dickins340ef392013-02-22 16:34:33 -08001926 hpage_nr_pages(page));
1927
1928 /*
1929 * Isolating the page has taken another reference, so the
1930 * caller's reference can be safely dropped without the page
1931 * disappearing underneath us during migration.
Mel Gormanb32967f2012-11-19 12:35:47 +00001932 */
1933 put_page(page);
Hugh Dickins340ef392013-02-22 16:34:33 -08001934 return 1;
Mel Gormanb32967f2012-11-19 12:35:47 +00001935}
1936
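/*
 * A THP stays locked for the whole of migrate_misplaced_transhuge_page(),
 * so the page lock doubles as the "migration in progress" indicator here.
 */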
Mel Gormande466bd2013-12-18 17:08:42 -08001937bool pmd_trans_migrating(pmd_t pmd)
1938{
1939 struct page *page = pmd_page(pmd);
1940 return PageLocked(page);
1941}
1942
Mel Gormana8f60772012-11-14 21:41:46 +00001943/*
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001944 * Attempt to migrate a misplaced page to the specified destination
1945 * node. Caller is expected to have an elevated reference count on
1946 * the page that will be dropped by this function before returning.
1947 */
Mel Gorman1bc115d2013-10-07 11:29:05 +01001948int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
1949 int node)
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001950{
Mel Gormana8f60772012-11-14 21:41:46 +00001951 pg_data_t *pgdat = NODE_DATA(node);
Hugh Dickins340ef392013-02-22 16:34:33 -08001952 int isolated;
Mel Gormanb32967f2012-11-19 12:35:47 +00001953 int nr_remaining;
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001954 LIST_HEAD(migratepages);
1955
1956 /*
Mel Gorman1bc115d2013-10-07 11:29:05 +01001957 * Don't migrate file pages that are mapped in multiple processes
1958 * with execute permissions as they are probably shared libraries.
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001959 */
Mel Gorman1bc115d2013-10-07 11:29:05 +01001960 if (page_mapcount(page) != 1 && page_is_file_cache(page) &&
1961 (vma->vm_flags & VM_EXEC))
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001962 goto out;
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001963
Mel Gormana8f60772012-11-14 21:41:46 +00001964 /*
Mel Gorman09a913a2018-04-10 16:29:20 -07001965 * Also do not migrate dirty pages as not all filesystems can move
1966 * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles.
1967 */
1968 if (page_is_file_cache(page) && PageDirty(page))
1969 goto out;
1970
1971 /*
Mel Gormana8f60772012-11-14 21:41:46 +00001972 * Rate-limit the amount of data that is being migrated to a node.
1973 * Optimal placement is no good if the memory bus is saturated and
1974 * all the time is being spent migrating!
1975 */
Hugh Dickins340ef392013-02-22 16:34:33 -08001976 if (numamigrate_update_ratelimit(pgdat, 1))
Mel Gormana8f60772012-11-14 21:41:46 +00001977 goto out;
Mel Gormana8f60772012-11-14 21:41:46 +00001978
Mel Gormanb32967f2012-11-19 12:35:47 +00001979 isolated = numamigrate_isolate_page(pgdat, page);
1980 if (!isolated)
1981 goto out;
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001982
Mel Gormanb32967f2012-11-19 12:35:47 +00001983 list_add(&page->lru, &migratepages);
Hugh Dickins9c620e22013-02-22 16:35:14 -08001984 nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
David Rientjes68711a72014-06-04 16:08:25 -07001985 NULL, node, MIGRATE_ASYNC,
1986 MR_NUMA_MISPLACED);
Mel Gormanb32967f2012-11-19 12:35:47 +00001987 if (nr_remaining) {
Joonsoo Kim59c82b72014-01-21 15:51:17 -08001988 if (!list_empty(&migratepages)) {
1989 list_del(&page->lru);
Mel Gorman599d0c92016-07-28 15:45:31 -07001990 dec_node_page_state(page, NR_ISOLATED_ANON +
Joonsoo Kim59c82b72014-01-21 15:51:17 -08001991 page_is_file_cache(page));
1992 putback_lru_page(page);
1993 }
Mel Gormanb32967f2012-11-19 12:35:47 +00001994 isolated = 0;
1995 } else
1996 count_vm_numa_event(NUMA_PAGE_MIGRATE);
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001997 BUG_ON(!list_empty(&migratepages));
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001998 return isolated;
Hugh Dickins340ef392013-02-22 16:34:33 -08001999
2000out:
2001 put_page(page);
2002 return 0;
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02002003}
Mel Gorman220018d2012-12-05 09:32:56 +00002004#endif /* CONFIG_NUMA_BALANCING */
Mel Gormanb32967f2012-11-19 12:35:47 +00002005
Mel Gorman220018d2012-12-05 09:32:56 +00002006#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
Hugh Dickins340ef392013-02-22 16:34:33 -08002007/*
2008 * Migrates a THP to a given target node. page must be locked and is unlocked
2009 * before returning.
2010 */
Mel Gormanb32967f2012-11-19 12:35:47 +00002011int migrate_misplaced_transhuge_page(struct mm_struct *mm,
2012 struct vm_area_struct *vma,
2013 pmd_t *pmd, pmd_t entry,
2014 unsigned long address,
2015 struct page *page, int node)
2016{
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08002017 spinlock_t *ptl;
Mel Gormanb32967f2012-11-19 12:35:47 +00002018 pg_data_t *pgdat = NODE_DATA(node);
2019 int isolated = 0;
2020 struct page *new_page = NULL;
Mel Gormanb32967f2012-11-19 12:35:47 +00002021 int page_lru = page_is_file_cache(page);
Mel Gormanf714f4f2013-12-18 17:08:33 -08002022 unsigned long mmun_start = address & HPAGE_PMD_MASK;
2023 unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
Mel Gormanb32967f2012-11-19 12:35:47 +00002024
2025 /*
Mel Gormanb32967f2012-11-19 12:35:47 +00002026 * Rate-limit the amount of data that is being migrated to a node.
2027 * Optimal placement is no good if the memory bus is saturated and
2028 * all the time is being spent migrating!
2029 */
Mel Gormand28d43352012-11-29 09:24:36 +00002030 if (numamigrate_update_ratelimit(pgdat, HPAGE_PMD_NR))
Mel Gormanb32967f2012-11-19 12:35:47 +00002031 goto out_dropref;
2032
2033 new_page = alloc_pages_node(node,
Vlastimil Babka25160352016-07-28 15:49:25 -07002034 (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
Johannes Weinere97ca8e52014-03-10 15:49:43 -07002035 HPAGE_PMD_ORDER);
Hugh Dickins340ef392013-02-22 16:34:33 -08002036 if (!new_page)
2037 goto out_fail;
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002038 prep_transhuge_page(new_page);
Hugh Dickins340ef392013-02-22 16:34:33 -08002039
Mel Gormanb32967f2012-11-19 12:35:47 +00002040 isolated = numamigrate_isolate_page(pgdat, page);
Hugh Dickins340ef392013-02-22 16:34:33 -08002041 if (!isolated) {
Mel Gormanb32967f2012-11-19 12:35:47 +00002042 put_page(new_page);
Hugh Dickins340ef392013-02-22 16:34:33 -08002043 goto out_fail;
Mel Gormanb32967f2012-11-19 12:35:47 +00002044 }
Mel Gormanb0943d62013-12-18 17:08:46 -08002045
Mel Gormanb32967f2012-11-19 12:35:47 +00002046 /* Prepare a page as a migration target */
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08002047 __SetPageLocked(new_page);
Shaohua Lid44d3632017-05-03 14:52:26 -07002048 if (PageSwapBacked(page))
2049 __SetPageSwapBacked(new_page);
Mel Gormanb32967f2012-11-19 12:35:47 +00002050
2051 /* anon mapping, we can simply copy page->mapping to the new page: */
2052 new_page->mapping = page->mapping;
2053 new_page->index = page->index;
2054 migrate_page_copy(new_page, page);
2055 WARN_ON(PageLRU(new_page));
2056
2057 /* Recheck the target PMD */
Mel Gormanf714f4f2013-12-18 17:08:33 -08002058 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08002059 ptl = pmd_lock(mm, pmd);
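	/*
	 * page_ref_freeze(page, 2) only succeeds if the refcount is exactly
	 * two (the isolation reference plus the caller's pin); any other
	 * reference means someone else may still use the page, so abort.
	 */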
Will Deaconf4e177d2017-07-10 15:48:31 -07002060 if (unlikely(!pmd_same(*pmd, entry) || !page_ref_freeze(page, 2))) {
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08002061 spin_unlock(ptl);
Mel Gormanf714f4f2013-12-18 17:08:33 -08002062 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
Mel Gormanb32967f2012-11-19 12:35:47 +00002063
2064 /* Reverse changes made by migrate_page_copy() */
2065 if (TestClearPageActive(new_page))
2066 SetPageActive(page);
2067 if (TestClearPageUnevictable(new_page))
2068 SetPageUnevictable(page);
Mel Gormanb32967f2012-11-19 12:35:47 +00002069
2070 unlock_page(new_page);
2071 put_page(new_page); /* Free it */
2072
Mel Gormana54a4072013-10-07 11:28:46 +01002073 /* Retake the callers reference and putback on LRU */
2074 get_page(page);
Mel Gormanb32967f2012-11-19 12:35:47 +00002075 putback_lru_page(page);
Mel Gorman599d0c92016-07-28 15:45:31 -07002076 mod_node_page_state(page_pgdat(page),
Mel Gormana54a4072013-10-07 11:28:46 +01002077 NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
Mel Gormaneb4489f62013-12-18 17:08:39 -08002078
2079 goto out_unlock;
Mel Gormanb32967f2012-11-19 12:35:47 +00002080 }
2081
Kirill A. Shutemov10102452016-07-26 15:25:29 -07002082 entry = mk_huge_pmd(new_page, vma->vm_page_prot);
Linus Torvaldsf55e1012017-11-29 09:01:01 -08002083 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
Mel Gormanb32967f2012-11-19 12:35:47 +00002084
Mel Gorman2b4847e2013-12-18 17:08:32 -08002085 /*
2086 * Clear the old entry under pagetable lock and establish the new PTE.
2087 * Any parallel GUP will either observe the old page blocking on the
2088 * page lock, block on the page table lock or observe the new page.
2089 * The SetPageUptodate on the new page and page_add_new_anon_rmap
2090 * guarantee the copy is visible before the pagetable update.
2091 */
Mel Gormanf714f4f2013-12-18 17:08:33 -08002092 flush_cache_range(vma, mmun_start, mmun_end);
Kirill A. Shutemovd281ee62016-01-15 16:52:16 -08002093 page_add_anon_rmap(new_page, vma, mmun_start, true);
Aneesh Kumar K.V8809aa22015-06-24 16:57:44 -07002094 pmdp_huge_clear_flush_notify(vma, mmun_start, pmd);
Mel Gormanf714f4f2013-12-18 17:08:33 -08002095 set_pmd_at(mm, mmun_start, pmd, entry);
Stephen Rothwellce4a9cc2012-12-10 19:50:57 +11002096 update_mmu_cache_pmd(vma, address, &entry);
Mel Gorman2b4847e2013-12-18 17:08:32 -08002097
Will Deaconf4e177d2017-07-10 15:48:31 -07002098 page_ref_unfreeze(page, 2);
Hugh Dickins51afb122015-11-05 18:49:37 -08002099 mlock_migrate_page(new_page, page);
Kirill A. Shutemovd281ee62016-01-15 16:52:16 -08002100 page_remove_rmap(page, true);
Vlastimil Babka7cd12b42016-03-15 14:56:18 -07002101 set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);
Mel Gorman2b4847e2013-12-18 17:08:32 -08002102
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08002103 spin_unlock(ptl);
Jérôme Glisse4645b9f2017-11-15 17:34:11 -08002104 /*
2105 * No need to double call mmu_notifier->invalidate_range() callback as
2106 * the above pmdp_huge_clear_flush_notify() did already call it.
2107 */
2108 mmu_notifier_invalidate_range_only_end(mm, mmun_start, mmun_end);
Mel Gormanb32967f2012-11-19 12:35:47 +00002109
Mel Gorman11de9922014-06-04 16:07:41 -07002110 /* Take an "isolate" reference and put new page on the LRU. */
2111 get_page(new_page);
2112 putback_lru_page(new_page);
2113
Mel Gormanb32967f2012-11-19 12:35:47 +00002114 unlock_page(new_page);
2115 unlock_page(page);
2116 put_page(page); /* Drop the rmap reference */
2117 put_page(page); /* Drop the LRU isolation reference */
2118
2119 count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
2120 count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
2121
Mel Gorman599d0c92016-07-28 15:45:31 -07002122 mod_node_page_state(page_pgdat(page),
Mel Gormanb32967f2012-11-19 12:35:47 +00002123 NR_ISOLATED_ANON + page_lru,
2124 -HPAGE_PMD_NR);
2125 return isolated;
2126
Hugh Dickins340ef392013-02-22 16:34:33 -08002127out_fail:
2128 count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
Mel Gormanb32967f2012-11-19 12:35:47 +00002129out_dropref:
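	/*
	 * Migration failed: restore a regular, accessible PMD (dropping the
	 * NUMA hinting protection) so the faulting access can make progress.
	 */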
Mel Gorman2b4847e2013-12-18 17:08:32 -08002130 ptl = pmd_lock(mm, pmd);
2131 if (pmd_same(*pmd, entry)) {
Mel Gorman4d942462015-02-12 14:58:28 -08002132 entry = pmd_modify(entry, vma->vm_page_prot);
Mel Gormanf714f4f2013-12-18 17:08:33 -08002133 set_pmd_at(mm, mmun_start, pmd, entry);
Mel Gorman2b4847e2013-12-18 17:08:32 -08002134 update_mmu_cache_pmd(vma, address, &entry);
2135 }
2136 spin_unlock(ptl);
Mel Gormana54a4072013-10-07 11:28:46 +01002137
Mel Gormaneb4489f62013-12-18 17:08:39 -08002138out_unlock:
Hugh Dickins340ef392013-02-22 16:34:33 -08002139 unlock_page(page);
Mel Gormanb32967f2012-11-19 12:35:47 +00002140 put_page(page);
Mel Gormanb32967f2012-11-19 12:35:47 +00002141 return 0;
2142}
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02002143#endif /* CONFIG_NUMA_BALANCING */
2144
2145#endif /* CONFIG_NUMA */
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002146
Jérôme Glisse6b368cd2017-09-08 16:12:32 -07002147#if defined(CONFIG_MIGRATE_VMA_HELPER)
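/*
 * Book-keeping for one migrate_vma() request. Field meanings below are a
 * summary inferred from the collection/unmap code that follows:
 * @vma:    the VMA whose range is being migrated
 * @dst:    per-page destination pfns, encoded with MIGRATE_PFN_* flags
 * @src:    per-page source pfns, encoded with MIGRATE_PFN_* flags
 * @cpages: number of pages collected as migration candidates
 * @npages: total number of pages in the range
 * @start:  first virtual address of the range
 * @end:    end of the range (exclusive)
 */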
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002148struct migrate_vma {
2149 struct vm_area_struct *vma;
2150 unsigned long *dst;
2151 unsigned long *src;
2152 unsigned long cpages;
2153 unsigned long npages;
2154 unsigned long start;
2155 unsigned long end;
2156};
2157
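/*
 * Unpopulated ranges are still reported as migration candidates (source
 * pfn 0 with MIGRATE_PFN_MIGRATE set), so a device driver may choose to
 * back the hole with freshly allocated device memory.
 */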
2158static int migrate_vma_collect_hole(unsigned long start,
2159 unsigned long end,
2160 struct mm_walk *walk)
2161{
2162 struct migrate_vma *migrate = walk->private;
2163 unsigned long addr;
2164
2165 for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
Mark Hairgrovee20d1032017-10-13 15:57:30 -07002166 migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002167 migrate->dst[migrate->npages] = 0;
Mark Hairgrovee20d1032017-10-13 15:57:30 -07002168 migrate->npages++;
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002169 migrate->cpages++;
2170 }
2171
2172 return 0;
2173}
2174
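/*
 * Unlike the hole case above, skipped ranges get a zero src entry and are
 * therefore not candidates for migration at all.
 */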
2175static int migrate_vma_collect_skip(unsigned long start,
2176 unsigned long end,
2177 struct mm_walk *walk)
2178{
2179 struct migrate_vma *migrate = walk->private;
2180 unsigned long addr;
2181
2182 for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002183 migrate->dst[migrate->npages] = 0;
2184 migrate->src[migrate->npages++] = 0;
2185 }
2186
2187 return 0;
2188}
2189
2190static int migrate_vma_collect_pmd(pmd_t *pmdp,
2191 unsigned long start,
2192 unsigned long end,
2193 struct mm_walk *walk)
2194{
2195 struct migrate_vma *migrate = walk->private;
2196 struct vm_area_struct *vma = walk->vma;
2197 struct mm_struct *mm = vma->vm_mm;
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002198 unsigned long addr = start, unmapped = 0;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002199 spinlock_t *ptl;
2200 pte_t *ptep;
2201
2202again:
2203 if (pmd_none(*pmdp))
2204 return migrate_vma_collect_hole(start, end, walk);
2205
2206 if (pmd_trans_huge(*pmdp)) {
2207 struct page *page;
2208
2209 ptl = pmd_lock(mm, pmdp);
2210 if (unlikely(!pmd_trans_huge(*pmdp))) {
2211 spin_unlock(ptl);
2212 goto again;
2213 }
2214
2215 page = pmd_page(*pmdp);
2216 if (is_huge_zero_page(page)) {
2217 spin_unlock(ptl);
2218 split_huge_pmd(vma, pmdp, addr);
2219 if (pmd_trans_unstable(pmdp))
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002220 return migrate_vma_collect_skip(start, end,
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002221 walk);
2222 } else {
2223 int ret;
2224
2225 get_page(page);
2226 spin_unlock(ptl);
2227 if (unlikely(!trylock_page(page)))
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002228 return migrate_vma_collect_skip(start, end,
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002229 walk);
2230 ret = split_huge_page(page);
2231 unlock_page(page);
2232 put_page(page);
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002233 if (ret)
2234 return migrate_vma_collect_skip(start, end,
2235 walk);
2236 if (pmd_none(*pmdp))
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002237 return migrate_vma_collect_hole(start, end,
2238 walk);
2239 }
2240 }
2241
2242 if (unlikely(pmd_bad(*pmdp)))
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002243 return migrate_vma_collect_skip(start, end, walk);
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002244
2245 ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002246 arch_enter_lazy_mmu_mode();
2247
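	/*
	 * Record one encoded entry per pte in migrate->src: the source page
	 * frame number plus MIGRATE_PFN_* flags saying whether the page is
	 * a migration candidate, writable, and already locked by us.
	 */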
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002248 for (; addr < end; addr += PAGE_SIZE, ptep++) {
2249 unsigned long mpfn, pfn;
2250 struct page *page;
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002251 swp_entry_t entry;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002252 pte_t pte;
2253
2254 pte = *ptep;
2255 pfn = pte_pfn(pte);
2256
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002257 if (pte_none(pte)) {
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002258 mpfn = MIGRATE_PFN_MIGRATE;
2259 migrate->cpages++;
2260 pfn = 0;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002261 goto next;
2262 }
2263
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002264 if (!pte_present(pte)) {
2265 mpfn = pfn = 0;
2266
2267 /*
 2268			 * Only care about unaddressable device page special
 2269			 * page table entries. Other special swap entries are not
 2270			 * migratable, and we ignore regular swapped pages.
2271 */
2272 entry = pte_to_swp_entry(pte);
2273 if (!is_device_private_entry(entry))
2274 goto next;
2275
2276 page = device_private_entry_to_page(entry);
2277 mpfn = migrate_pfn(page_to_pfn(page))|
2278 MIGRATE_PFN_DEVICE | MIGRATE_PFN_MIGRATE;
2279 if (is_write_device_private_entry(entry))
2280 mpfn |= MIGRATE_PFN_WRITE;
2281 } else {
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002282 if (is_zero_pfn(pfn)) {
2283 mpfn = MIGRATE_PFN_MIGRATE;
2284 migrate->cpages++;
2285 pfn = 0;
2286 goto next;
2287 }
Jérôme Glissedf6ad692017-09-08 16:12:24 -07002288 page = _vm_normal_page(migrate->vma, addr, pte, true);
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002289 mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
2290 mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
2291 }
2292
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002293 /* FIXME support THP */
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002294 if (!page || !page->mapping || PageTransCompound(page)) {
2295 mpfn = pfn = 0;
2296 goto next;
2297 }
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002298 pfn = page_to_pfn(page);
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002299
2300 /*
2301 * By getting a reference on the page we pin it and that blocks
 2302		 * any kind of migration. A side effect is that it "freezes" the
2303 * pte.
2304 *
2305 * We drop this reference after isolating the page from the lru
 2306		 * for non-device pages (device pages are not on the lru and thus
2307 * can't be dropped from it).
2308 */
2309 get_page(page);
2310 migrate->cpages++;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002311
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002312 /*
 2313		 * Optimize for the common case where the page is only mapped once
2314 * in one process. If we can lock the page, then we can safely
2315 * set up a special migration page table entry now.
2316 */
2317 if (trylock_page(page)) {
2318 pte_t swp_pte;
2319
2320 mpfn |= MIGRATE_PFN_LOCKED;
2321 ptep_get_and_clear(mm, addr, ptep);
2322
2323 /* Setup special migration page table entry */
Ralph Campbell07707122018-04-10 16:29:27 -07002324 entry = make_migration_entry(page, mpfn &
2325 MIGRATE_PFN_WRITE);
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002326 swp_pte = swp_entry_to_pte(entry);
2327 if (pte_soft_dirty(pte))
2328 swp_pte = pte_swp_mksoft_dirty(swp_pte);
2329 set_pte_at(mm, addr, ptep, swp_pte);
2330
2331 /*
2332 * This is like regular unmap: we remove the rmap and
 2333			 * drop the page refcount. The page won't be freed, as we took
2334 * a reference just above.
2335 */
2336 page_remove_rmap(page, false);
2337 put_page(page);
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002338
2339 if (pte_present(pte))
2340 unmapped++;
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002341 }
2342
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002343next:
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002344 migrate->dst[migrate->npages] = 0;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002345 migrate->src[migrate->npages++] = mpfn;
2346 }
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002347 arch_leave_lazy_mmu_mode();
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002348 pte_unmap_unlock(ptep - 1, ptl);
2349
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002350 /* Only flush the TLB if we actually modified any entries */
2351 if (unmapped)
2352 flush_tlb_range(walk->vma, start, end);
2353
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002354 return 0;
2355}
2356
2357/*
2358 * migrate_vma_collect() - collect pages over a range of virtual addresses
2359 * @migrate: migrate struct containing all migration information
2360 *
2361 * This will walk the CPU page table. For each virtual address backed by a
2362 * valid page, it updates the src array and takes a reference on the page, in
2363 * order to pin the page until we lock it and unmap it.
2364 */
2365static void migrate_vma_collect(struct migrate_vma *migrate)
2366{
2367 struct mm_walk mm_walk;
2368
2369 mm_walk.pmd_entry = migrate_vma_collect_pmd;
2370 mm_walk.pte_entry = NULL;
2371 mm_walk.pte_hole = migrate_vma_collect_hole;
2372 mm_walk.hugetlb_entry = NULL;
2373 mm_walk.test_walk = NULL;
2374 mm_walk.vma = migrate->vma;
2375 mm_walk.mm = migrate->vma->vm_mm;
2376 mm_walk.private = migrate;
2377
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002378 mmu_notifier_invalidate_range_start(mm_walk.mm,
2379 migrate->start,
2380 migrate->end);
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002381 walk_page_range(migrate->start, migrate->end, &mm_walk);
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002382 mmu_notifier_invalidate_range_end(mm_walk.mm,
2383 migrate->start,
2384 migrate->end);
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002385
2386 migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
2387}
2388
2389/*
2390 * migrate_vma_check_page() - check if page is pinned or not
2391 * @page: struct page to check
2392 *
2393 * Pinned pages cannot be migrated. This is the same test as in
2394 * migrate_page_move_mapping(), except that here we allow migration of a
2395 * ZONE_DEVICE page.
2396 */
2397static bool migrate_vma_check_page(struct page *page)
2398{
2399 /*
2400 * One extra ref because caller holds an extra reference, either from
2401 * isolate_lru_page() for a regular page, or migrate_vma_collect() for
2402 * a device page.
2403 */
2404 int extra = 1;
2405
2406 /*
 2407	 * FIXME support THP (transparent huge page), it is a bit more complex to
2408 * check them than regular pages, because they can be mapped with a pmd
2409 * or with a pte (split pte mapping).
2410 */
2411 if (PageCompound(page))
2412 return false;
2413
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002414	/* Pages from ZONE_DEVICE have one extra reference */
2415 if (is_zone_device_page(page)) {
2416 /*
 2417		 * Private pages can never be pinned as they have no valid pte
 2418		 * and GUP will fail for them. Yet if there is a pending migration,
 2419		 * a thread might try to wait on the pte migration entry and
 2420		 * will bump the page reference count. Sadly there is no way to
 2421		 * differentiate a regular pin from a migration wait. Hence, to
 2422		 * avoid 2 racing threads trying to migrate back to the CPU and
 2423		 * entering an infinite loop (one stopping migration because the
 2424		 * other is waiting on the pte migration entry), we always return true.
2425 *
2426 * FIXME proper solution is to rework migration_entry_wait() so
 2427		 * it does not need to take a reference on the page.
2428 */
2429 if (is_device_private_page(page))
2430 return true;
2431
Jérôme Glissedf6ad692017-09-08 16:12:24 -07002432 /*
 2433		 * Only allow device public pages to be migrated and account for
 2434		 * the extra reference count implied by ZONE_DEVICE pages.
2435 */
2436 if (!is_device_public_page(page))
2437 return false;
2438 extra++;
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002439 }
2440
Jérôme Glissedf6ad692017-09-08 16:12:24 -07002441	/* For file-backed pages */
2442 if (page_mapping(page))
2443 extra += 1 + page_has_private(page);
2444
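	/*
	 * Worked example: an anonymous page mapped once, with only the
	 * caller's extra pin, has page_count == 2, extra == 1 and
	 * page_mapcount == 1, so 2 - 1 > 1 is false and the page is
	 * considered unpinned and migratable.
	 */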
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002445 if ((page_count(page) - extra) > page_mapcount(page))
2446 return false;
2447
2448 return true;
2449}
2450
2451/*
2452 * migrate_vma_prepare() - lock pages and isolate them from the lru
2453 * @migrate: migrate struct containing all migration information
2454 *
2455 * This locks pages that have been collected by migrate_vma_collect(). Once each
2456 * page is locked it is isolated from the lru (for non-device pages). Finally,
2457 * the ref taken by migrate_vma_collect() is dropped, as locked pages cannot be
2458 * migrated by concurrent kernel threads.
2459 */
2460static void migrate_vma_prepare(struct migrate_vma *migrate)
2461{
2462 const unsigned long npages = migrate->npages;
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002463 const unsigned long start = migrate->start;
2464 unsigned long addr, i, restore = 0;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002465 bool allow_drain = true;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002466
2467 lru_add_drain();
2468
2469 for (i = 0; (i < npages) && migrate->cpages; i++) {
2470 struct page *page = migrate_pfn_to_page(migrate->src[i]);
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002471 bool remap = true;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002472
2473 if (!page)
2474 continue;
2475
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002476 if (!(migrate->src[i] & MIGRATE_PFN_LOCKED)) {
2477 /*
 2478			 * a deadlock between 2 concurrent migrations where each
 2479			 * is waiting on the other's page lock.
 2480			 *
 2481			 * Make migrate_vma() a best effort thing and back off
 2482			 * for any page we cannot lock right away.
2483 * for any page we can not lock right away.
2484 */
2485 if (!trylock_page(page)) {
2486 migrate->src[i] = 0;
2487 migrate->cpages--;
2488 put_page(page);
2489 continue;
2490 }
2491 remap = false;
2492 migrate->src[i] |= MIGRATE_PFN_LOCKED;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002493 }
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002494
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002495 /* ZONE_DEVICE pages are not on LRU */
2496 if (!is_zone_device_page(page)) {
2497 if (!PageLRU(page) && allow_drain) {
2498 /* Drain CPU's pagevec */
2499 lru_add_drain_all();
2500 allow_drain = false;
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002501 }
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002502
2503 if (isolate_lru_page(page)) {
2504 if (remap) {
2505 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2506 migrate->cpages--;
2507 restore++;
2508 } else {
2509 migrate->src[i] = 0;
2510 unlock_page(page);
2511 migrate->cpages--;
2512 put_page(page);
2513 }
2514 continue;
2515 }
2516
2517 /* Drop the reference we took in collect */
2518 put_page(page);
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002519 }
2520
2521 if (!migrate_vma_check_page(page)) {
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002522 if (remap) {
2523 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2524 migrate->cpages--;
2525 restore++;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002526
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002527 if (!is_zone_device_page(page)) {
2528 get_page(page);
2529 putback_lru_page(page);
2530 }
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002531 } else {
2532 migrate->src[i] = 0;
2533 unlock_page(page);
2534 migrate->cpages--;
2535
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002536 if (!is_zone_device_page(page))
2537 putback_lru_page(page);
2538 else
2539 put_page(page);
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002540 }
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002541 }
2542 }
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002543
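	/*
	 * Second pass: pages flagged for restore above had a migration entry
	 * installed during collection; put the original pte back, then unlock
	 * and release the page.
	 */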
2544 for (i = 0, addr = start; i < npages && restore; i++, addr += PAGE_SIZE) {
2545 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2546
2547 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
2548 continue;
2549
2550 remove_migration_pte(page, migrate->vma, addr, page);
2551
2552 migrate->src[i] = 0;
2553 unlock_page(page);
2554 put_page(page);
2555 restore--;
2556 }
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002557}
2558
2559/*
2560 * migrate_vma_unmap() - replace page mapping with special migration pte entry
2561 * @migrate: migrate struct containing all migration information
2562 *
2563 * Replace page mapping (CPU page table pte) with a special migration pte entry
2564 * and check again if it has been pinned. Pinned pages are restored because we
2565 * cannot migrate them.
2566 *
2567 * This is the last step before we call the device driver callback to allocate
2568	 * destination memory and copy the contents of the original page over to the new page.
2569 */
2570static void migrate_vma_unmap(struct migrate_vma *migrate)
2571{
2572 int flags = TTU_MIGRATION | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
2573 const unsigned long npages = migrate->npages;
2574 const unsigned long start = migrate->start;
2575 unsigned long addr, i, restore = 0;
2576
2577 for (i = 0; i < npages; i++) {
2578 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2579
2580 if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
2581 continue;
2582
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002583 if (page_mapped(page)) {
2584 try_to_unmap(page, flags);
2585 if (page_mapped(page))
2586 goto restore;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002587 }
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002588
2589 if (migrate_vma_check_page(page))
2590 continue;
2591
2592restore:
2593 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2594 migrate->cpages--;
2595 restore++;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002596 }
2597
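	/*
	 * Pages that are still mapped or turned out to be pinned cannot be
	 * migrated: put the original ptes back, unlock the pages and return
	 * them to the LRU (or drop the ZONE_DEVICE reference).
	 */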
2598 for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) {
2599 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2600
2601 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
2602 continue;
2603
2604 remove_migration_ptes(page, page, false);
2605
2606 migrate->src[i] = 0;
2607 unlock_page(page);
2608 restore--;
2609
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002610 if (is_zone_device_page(page))
2611 put_page(page);
2612 else
2613 putback_lru_page(page);
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002614 }
2615}
2616
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002617static void migrate_vma_insert_page(struct migrate_vma *migrate,
2618 unsigned long addr,
2619 struct page *page,
2620 unsigned long *src,
2621 unsigned long *dst)
2622{
2623 struct vm_area_struct *vma = migrate->vma;
2624 struct mm_struct *mm = vma->vm_mm;
2625 struct mem_cgroup *memcg;
2626 bool flush = false;
2627 spinlock_t *ptl;
2628 pte_t entry;
2629 pgd_t *pgdp;
2630 p4d_t *p4dp;
2631 pud_t *pudp;
2632 pmd_t *pmdp;
2633 pte_t *ptep;
2634
2635 /* Only allow populating anonymous memory */
2636 if (!vma_is_anonymous(vma))
2637 goto abort;
2638
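	/*
	 * Walk the page table, allocating intermediate levels as needed; a
	 * huge or devmap pmd at this address cannot be handled here.
	 */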
2639 pgdp = pgd_offset(mm, addr);
2640 p4dp = p4d_alloc(mm, pgdp, addr);
2641 if (!p4dp)
2642 goto abort;
2643 pudp = pud_alloc(mm, p4dp, addr);
2644 if (!pudp)
2645 goto abort;
2646 pmdp = pmd_alloc(mm, pudp, addr);
2647 if (!pmdp)
2648 goto abort;
2649
2650 if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp))
2651 goto abort;
2652
2653 /*
2654 * Use pte_alloc() instead of pte_alloc_map(). We can't run
2655 * pte_offset_map() on pmds where a huge pmd might be created
2656 * from a different thread.
2657 *
2658 * pte_alloc_map() is safe to use under down_write(mmap_sem) or when
2659 * parallel threads are excluded by other means.
2660 *
2661 * Here we only have down_read(mmap_sem).
2662 */
2663 if (pte_alloc(mm, pmdp, addr))
2664 goto abort;
2665
2666 /* See the comment in pte_alloc_one_map() */
2667 if (unlikely(pmd_trans_unstable(pmdp)))
2668 goto abort;
2669
2670 if (unlikely(anon_vma_prepare(vma)))
2671 goto abort;
2672 if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false))
2673 goto abort;
2674
2675 /*
2676 * The memory barrier inside __SetPageUptodate makes sure that
2677 * preceding stores to the page contents become visible before
2678 * the set_pte_at() write.
2679 */
2680 __SetPageUptodate(page);
2681
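	/*
	 * Build the pte for the new page: device private memory is mapped via
	 * a special swap entry, while device public and regular pages get a
	 * normal present pte (writable if the vma allows it).
	 */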
Jérôme Glissedf6ad692017-09-08 16:12:24 -07002682 if (is_zone_device_page(page)) {
2683 if (is_device_private_page(page)) {
2684 swp_entry_t swp_entry;
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002685
Jérôme Glissedf6ad692017-09-08 16:12:24 -07002686 swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE);
2687 entry = swp_entry_to_pte(swp_entry);
2688 } else if (is_device_public_page(page)) {
2689 entry = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
2690 if (vma->vm_flags & VM_WRITE)
2691 entry = pte_mkwrite(pte_mkdirty(entry));
2692 entry = pte_mkdevmap(entry);
2693 }
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002694 } else {
2695 entry = mk_pte(page, vma->vm_page_prot);
2696 if (vma->vm_flags & VM_WRITE)
2697 entry = pte_mkwrite(pte_mkdirty(entry));
2698 }
2699
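	/*
	 * Only an empty pte or a mapping of the zero page may be replaced
	 * here; any other entry means the address was populated behind our
	 * back, so abort.
	 */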
2700 ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
2701
2702 if (pte_present(*ptep)) {
2703 unsigned long pfn = pte_pfn(*ptep);
2704
2705 if (!is_zero_pfn(pfn)) {
2706 pte_unmap_unlock(ptep, ptl);
2707 mem_cgroup_cancel_charge(page, memcg, false);
2708 goto abort;
2709 }
2710 flush = true;
2711 } else if (!pte_none(*ptep)) {
2712 pte_unmap_unlock(ptep, ptl);
2713 mem_cgroup_cancel_charge(page, memcg, false);
2714 goto abort;
2715 }
2716
2717 /*
2718	 * Check for userfaultfd but do not deliver the fault. Instead,
2719 * just back off.
2720 */
2721 if (userfaultfd_missing(vma)) {
2722 pte_unmap_unlock(ptep, ptl);
2723 mem_cgroup_cancel_charge(page, memcg, false);
2724 goto abort;
2725 }
2726
2727 inc_mm_counter(mm, MM_ANONPAGES);
2728 page_add_new_anon_rmap(page, vma, addr, false);
2729 mem_cgroup_commit_charge(page, memcg, false, false);
2730 if (!is_zone_device_page(page))
2731 lru_cache_add_active_or_unevictable(page, vma);
2732 get_page(page);
2733
2734 if (flush) {
2735 flush_cache_page(vma, addr, pte_pfn(*ptep));
2736 ptep_clear_flush_notify(vma, addr, ptep);
2737 set_pte_at_notify(mm, addr, ptep, entry);
2738 update_mmu_cache(vma, addr, ptep);
2739 } else {
2740 /* No need to invalidate - it was non-present before */
2741 set_pte_at(mm, addr, ptep, entry);
2742 update_mmu_cache(vma, addr, ptep);
2743 }
2744
2745 pte_unmap_unlock(ptep, ptl);
2746 *src = MIGRATE_PFN_MIGRATE;
2747 return;
2748
2749abort:
2750 *src &= ~MIGRATE_PFN_MIGRATE;
2751}
2752
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002753/*
2754 * migrate_vma_pages() - migrate meta-data from src page to dst page
2755 * @migrate: migrate struct containing all migration information
2756 *
2757 * This migrates struct page meta-data from source struct page to destination
2758 * struct page. This effectively finishes the migration from source page to the
2759 * destination page.
2760 */
2761static void migrate_vma_pages(struct migrate_vma *migrate)
2762{
2763 const unsigned long npages = migrate->npages;
2764 const unsigned long start = migrate->start;
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002765 struct vm_area_struct *vma = migrate->vma;
2766 struct mm_struct *mm = vma->vm_mm;
2767 unsigned long addr, i, mmu_start;
2768 bool notified = false;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002769
2770 for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) {
2771 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
2772 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2773 struct address_space *mapping;
2774 int r;
2775
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002776 if (!newpage) {
2777 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002778 continue;
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002779 }
2780
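		/*
		 * No source page: the address was unpopulated when it was
		 * collected.  If the driver supplied a destination page,
		 * insert it as a brand new anonymous page, starting the mmu
		 * notifier range invalidation on first use.
		 */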
2781 if (!page) {
2782 if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE)) {
2783 continue;
2784 }
2785 if (!notified) {
2786 mmu_start = addr;
2787 notified = true;
2788 mmu_notifier_invalidate_range_start(mm,
2789 mmu_start,
2790 migrate->end);
2791 }
2792 migrate_vma_insert_page(migrate, addr, newpage,
2793 &migrate->src[i],
2794 &migrate->dst[i]);
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002795 continue;
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002796 }
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002797
2798 mapping = page_mapping(page);
2799
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002800 if (is_zone_device_page(newpage)) {
2801 if (is_device_private_page(newpage)) {
2802 /*
2803	 * For now we only support migrating private anonymous
2804	 * memory to un-addressable device memory.
2805 */
2806 if (mapping) {
2807 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2808 continue;
2809 }
Jérôme Glissedf6ad692017-09-08 16:12:24 -07002810 } else if (!is_device_public_page(newpage)) {
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002811 /*
2812	 * Other types of ZONE_DEVICE pages are not
2813 * supported.
2814 */
2815 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2816 continue;
2817 }
2818 }
2819
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002820 r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY);
2821 if (r != MIGRATEPAGE_SUCCESS)
2822 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2823 }
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002824
Jérôme Glisse4645b9f2017-11-15 17:34:11 -08002825 /*
2826	 * There is no need to call the mmu_notifier->invalidate_range() callback
2827	 * again, as the ptep_clear_flush_notify() inside migrate_vma_insert_page()
2828	 * has already done so.
2829 */
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002830 if (notified)
Jérôme Glisse4645b9f2017-11-15 17:34:11 -08002831 mmu_notifier_invalidate_range_only_end(mm, mmu_start,
2832 migrate->end);
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002833}
2834
2835/*
2836 * migrate_vma_finalize() - restore CPU page table entry
2837 * @migrate: migrate struct containing all migration information
2838 *
2839 * This replaces the special migration pte entry with either a mapping to the
2840 * new page if migration was successful for that page, or to the original page
2841 * otherwise.
2842 *
2843 * This also unlocks the pages and puts them back on the lru, or drops the extra
2844 * refcount, for device pages.
2845 */
2846static void migrate_vma_finalize(struct migrate_vma *migrate)
2847{
2848 const unsigned long npages = migrate->npages;
2849 unsigned long i;
2850
2851 for (i = 0; i < npages; i++) {
2852 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
2853 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2854
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002855 if (!page) {
2856 if (newpage) {
2857 unlock_page(newpage);
2858 put_page(newpage);
2859 }
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002860 continue;
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002861 }
2862
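		/*
		 * If migration failed, or the driver did not provide a
		 * destination page, fall back to remapping the original page.
		 */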
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002863 if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
2864 if (newpage) {
2865 unlock_page(newpage);
2866 put_page(newpage);
2867 }
2868 newpage = page;
2869 }
2870
2871 remove_migration_ptes(page, newpage, false);
2872 unlock_page(page);
2873 migrate->cpages--;
2874
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002875 if (is_zone_device_page(page))
2876 put_page(page);
2877 else
2878 putback_lru_page(page);
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002879
2880 if (newpage != page) {
2881 unlock_page(newpage);
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002882 if (is_zone_device_page(newpage))
2883 put_page(newpage);
2884 else
2885 putback_lru_page(newpage);
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002886 }
2887 }
2888}
2889
2890/*
2891 * migrate_vma() - migrate a range of memory inside vma
2892 *
2893 * @ops: migration callback for allocating destination memory and copying
2894 * @vma: virtual memory area containing the range to be migrated
2895 * @start: start address of the range to migrate (inclusive)
2896 * @end: end address of the range to migrate (exclusive)
2897	 * @src: array of unsigned long (MIGRATE_PFN_* encoded) source pfns
2898	 * @dst: array of unsigned long (MIGRATE_PFN_* encoded) destination pfns
2899	 * @private: pointer passed back to each of the callbacks
2900 * Returns: 0 on success, error code otherwise
2901 *
2902	 * This function tries to migrate a virtual address range of memory, using
2903	 * callbacks to allocate and copy memory from source to destination. First it
2904	 * collects all the pages backing each virtual address in the range, saving them
2905 * inside the src array. Then it locks those pages and unmaps them. Once the pages
2906 * are locked and unmapped, it checks whether each page is pinned or not. Pages
2907 * that aren't pinned have the MIGRATE_PFN_MIGRATE flag set (by this function)
2908 * in the corresponding src array entry. It then restores any pages that are
2909 * pinned, by remapping and unlocking those pages.
2910 *
2911 * At this point it calls the alloc_and_copy() callback. For documentation on
2912 * what is expected from that callback, see struct migrate_vma_ops comments in
2913 * include/linux/migrate.h
2914 *
2915 * After the alloc_and_copy() callback, this function goes over each entry in
2916 * the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag
2917 * set. If the corresponding entry in dst array has MIGRATE_PFN_VALID flag set,
2918 * then the function tries to migrate struct page information from the source
2919 * struct page to the destination struct page. If it fails to migrate the struct
2920 * page information, then it clears the MIGRATE_PFN_MIGRATE flag in the src
2921 * array.
2922 *
2923 * At this point all successfully migrated pages have an entry in the src
2924 * array with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set and the dst
2925 * array entry with MIGRATE_PFN_VALID flag set.
2926 *
2927 * It then calls the finalize_and_map() callback. See comments for "struct
2928 * migrate_vma_ops", in include/linux/migrate.h for details about
2929 * finalize_and_map() behavior.
2930 *
2931 * After the finalize_and_map() callback, for successfully migrated pages, this
2932 * function updates the CPU page table to point to new pages, otherwise it
2933 * restores the CPU page table to point to the original source pages.
2934 *
2935 * Function returns 0 after the above steps, even if no pages were migrated
2936 * (The function only returns an error if any of the arguments are invalid.)
2937 *
2938 * Both src and dst array must be big enough for (end - start) >> PAGE_SHIFT
2939 * unsigned long entries.
2940 */
2941int migrate_vma(const struct migrate_vma_ops *ops,
2942 struct vm_area_struct *vma,
2943 unsigned long start,
2944 unsigned long end,
2945 unsigned long *src,
2946 unsigned long *dst,
2947 void *private)
2948{
2949 struct migrate_vma migrate;
2950
2951 /* Sanity check the arguments */
2952 start &= PAGE_MASK;
2953 end &= PAGE_MASK;
Dave Jiange1fb4a02018-08-17 15:43:40 -07002954 if (!vma || is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) ||
2955 vma_is_dax(vma))
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002956 return -EINVAL;
2957 if (start < vma->vm_start || start >= vma->vm_end)
2958 return -EINVAL;
2959 if (end <= vma->vm_start || end > vma->vm_end)
2960 return -EINVAL;
2961 if (!ops || !src || !dst || start >= end)
2962 return -EINVAL;
2963
2964 memset(src, 0, sizeof(*src) * ((end - start) >> PAGE_SHIFT));
2965 migrate.src = src;
2966 migrate.dst = dst;
2967 migrate.start = start;
2968 migrate.npages = 0;
2969 migrate.cpages = 0;
2970 migrate.end = end;
2971 migrate.vma = vma;
2972
2973 /* Collect, and try to unmap source pages */
2974 migrate_vma_collect(&migrate);
2975 if (!migrate.cpages)
2976 return 0;
2977
2978 /* Lock and isolate page */
2979 migrate_vma_prepare(&migrate);
2980 if (!migrate.cpages)
2981 return 0;
2982
2983 /* Unmap pages */
2984 migrate_vma_unmap(&migrate);
2985 if (!migrate.cpages)
2986 return 0;
2987
2988 /*
2989 * At this point pages are locked and unmapped, and thus they have
2990 * stable content and can safely be copied to destination memory that
2991 * is allocated by the callback.
2992 *
2993	 * Note that migration can fail in migrate_vma_pages() for each
2994 * individual page.
2995 */
2996 ops->alloc_and_copy(vma, src, dst, start, end, private);
2997
2998 /* This does the real migration of struct page */
2999 migrate_vma_pages(&migrate);
3000
3001 ops->finalize_and_map(vma, src, dst, start, end, private);
3002
3003 /* Unlock and remap pages */
3004 migrate_vma_finalize(&migrate);
3005
3006 return 0;
3007}
3008EXPORT_SYMBOL(migrate_vma);
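
/*
 * Illustrative driver-side sketch of how migrate_vma() is intended to be
 * used; it is not part of this file.  The example_* names are made up for
 * the example, and the callback prototypes are assumed to match struct
 * migrate_vma_ops in include/linux/migrate.h.  A real driver would allocate
 * device memory in its alloc_and_copy() callback; this sketch simply
 * migrates to freshly allocated system pages.
 */
static void example_alloc_and_copy(struct vm_area_struct *vma,
				   const unsigned long *src,
				   unsigned long *dst,
				   unsigned long start,
				   unsigned long end,
				   void *private)
{
	unsigned long addr, i;

	for (i = 0, addr = start; addr < end; addr += PAGE_SIZE, i++) {
		struct page *spage = migrate_pfn_to_page(src[i]);
		struct page *dpage;

		/* Skip entries the core code decided not to migrate */
		if (!(src[i] & MIGRATE_PFN_MIGRATE))
			continue;

		/* Stand-in for a device memory allocation */
		dpage = alloc_page_vma(GFP_HIGHUSER, vma, addr);
		if (!dpage)
			continue;

		/* Destination pages must be handed back locked */
		lock_page(dpage);
		if (spage)
			copy_highpage(dpage, spage);

		dst[i] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
	}
}

static void example_finalize_and_map(struct vm_area_struct *vma,
				     const unsigned long *src,
				     const unsigned long *dst,
				     unsigned long start,
				     unsigned long end,
				     void *private)
{
	/*
	 * Inspect src[i] & MIGRATE_PFN_MIGRATE to learn which pages actually
	 * moved; device page tables can be updated here.
	 */
}

static const struct migrate_vma_ops example_migrate_ops = {
	.alloc_and_copy		= example_alloc_and_copy,
	.finalize_and_map	= example_finalize_and_map,
};

/* Caller holds mmap_sem for read; arrays have (end - start) >> PAGE_SHIFT entries */
static int example_migrate_range(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end,
				 unsigned long *src_pfns, unsigned long *dst_pfns)
{
	return migrate_vma(&example_migrate_ops, vma, start, end,
			   src_pfns, dst_pfns, NULL);
}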
Jérôme Glisse6b368cd2017-09-08 16:12:32 -07003009#endif /* defined(MIGRATE_VMA_HELPER) */