/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/compaction.h>
#include <linux/syscalls.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/balloon_compaction.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>
#include <linux/sched/mm.h>
#include <linux/ptrace.h>

#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/migrate.h>

#include "internal.h"

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
 * undesirable, use migrate_prep_local()
 */
int migrate_prep(void)
{
	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

/* Do the necessary work of migrate_prep but not if it involves other CPUs */
int migrate_prep_local(void)
{
	lru_add_drain();

	return 0;
}

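/*
 * Attempt to isolate a non-LRU movable page for migration. The page must
 * advertise movability via __PageMovable() and its driver's
 * a_ops->isolate_page() callback; on success the page is marked
 * PG_isolated and 0 is returned, otherwise -EBUSY.
 */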
int isolate_movable_page(struct page *page, isolate_mode_t mode)
{
	struct address_space *mapping;

	/*
	 * Avoid burning cycles with pages that are yet under __free_pages(),
	 * or just got freed under us.
	 *
	 * In case we 'win' a race for a movable page being freed under us and
	 * raise its refcount preventing __free_pages() from doing its job,
	 * the put_page() at the end of this block will take care of
	 * releasing this page, thus avoiding a nasty leakage.
	 */
	if (unlikely(!get_page_unless_zero(page)))
		goto out;

	/*
	 * Check PageMovable before holding a PG_lock because page's owner
	 * assumes that nobody touches the PG_lock of a newly allocated page,
	 * so unconditionally grabbing the lock ruins the page owner's side.
	 */
	if (unlikely(!__PageMovable(page)))
		goto out_putpage;
	/*
	 * As movable pages are not isolated from LRU lists, concurrent
	 * compaction threads can race against page migration functions
	 * as well as against the release of a page.
	 *
	 * In order to avoid having an already isolated movable page
	 * being (wrongly) re-isolated while it is under migration,
	 * or to avoid attempting to isolate pages being released,
	 * let's be sure we have the page lock
	 * before proceeding with the movable page isolation steps.
	 */
	if (unlikely(!trylock_page(page)))
		goto out_putpage;

	if (!PageMovable(page) || PageIsolated(page))
		goto out_no_isolated;

	mapping = page_mapping(page);
	VM_BUG_ON_PAGE(!mapping, page);

	if (!mapping->a_ops->isolate_page(page, mode))
		goto out_no_isolated;

	/* Driver shouldn't use PG_isolated bit of page->flags */
	WARN_ON_ONCE(PageIsolated(page));
	__SetPageIsolated(page);
	unlock_page(page);

	return 0;

out_no_isolated:
	unlock_page(page);
out_putpage:
	put_page(page);
out:
	return -EBUSY;
}

/* This should be called on a page which is PG_movable */
void putback_movable_page(struct page *page)
{
	struct address_space *mapping;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);

	mapping = page_mapping(page);
	mapping->a_ops->putback_page(page);
	__ClearPageIsolated(page);
}

/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used whenever the isolated pageset has been
 * built from lru, balloon or hugetlbfs pages. See isolate_migratepages_range()
 * and isolate_huge_page().
 */
void putback_movable_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		if (unlikely(PageHuge(page))) {
			putback_active_hugepage(page);
			continue;
		}
		list_del(&page->lru);
		/*
		 * We isolated a non-lru movable page, so here we can use
		 * __PageMovable because an LRU page's mapping cannot have
		 * PAGE_MAPPING_MOVABLE set.
		 */
		if (unlikely(__PageMovable(page))) {
			VM_BUG_ON_PAGE(!PageIsolated(page), page);
			lock_page(page);
			if (PageMovable(page))
				putback_movable_page(page);
			else
				__ClearPageIsolated(page);
			unlock_page(page);
			put_page(page);
		} else {
			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
					page_is_file_cache(page), -hpage_nr_pages(page));
			putback_lru_page(page);
		}
	}
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
				 unsigned long addr, void *old)
{
	struct page_vma_mapped_walk pvmw = {
		.page = old,
		.vma = vma,
		.address = addr,
		.flags = PVMW_SYNC | PVMW_MIGRATION,
	};
	struct page *new;
	pte_t pte;
	swp_entry_t entry;

	VM_BUG_ON_PAGE(PageTail(page), page);
	while (page_vma_mapped_walk(&pvmw)) {
		if (PageKsm(page))
			new = page;
		else
			new = page - pvmw.page->index +
				linear_page_index(vma, pvmw.address);

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		/* PMD-mapped THP migration entry */
		if (!pvmw.pte) {
			VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
			remove_migration_pmd(&pvmw, new);
			continue;
		}
#endif

		get_page(new);
		pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
		if (pte_swp_soft_dirty(*pvmw.pte))
			pte = pte_mksoft_dirty(pte);

		/*
		 * Recheck VMA as permissions can change since migration started
		 */
		entry = pte_to_swp_entry(*pvmw.pte);
		if (is_write_migration_entry(entry))
			pte = maybe_mkwrite(pte, vma);

		if (unlikely(is_zone_device_page(new)) &&
		    is_device_private_page(new)) {
			entry = make_device_private_entry(new, pte_write(pte));
			pte = swp_entry_to_pte(entry);
		} else
			flush_dcache_page(new);

#ifdef CONFIG_HUGETLB_PAGE
		if (PageHuge(new)) {
			pte = pte_mkhuge(pte);
			pte = arch_make_huge_pte(pte, vma, new, 0);
			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
			if (PageAnon(new))
				hugepage_add_anon_rmap(new, vma, pvmw.address);
			else
				page_dup_rmap(new, true);
		} else
#endif
		{
			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);

			if (PageAnon(new))
				page_add_anon_rmap(new, vma, pvmw.address, false);
			else
				page_add_file_rmap(new, false);
		}
		if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
			mlock_vma_page(new);

		/* No need to invalidate - it was non-present before */
		update_mmu_cache(vma, pvmw.address, pvmw.pte);
	}

	return true;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
void remove_migration_ptes(struct page *old, struct page *new, bool locked)
{
	struct rmap_walk_control rwc = {
		.rmap_one = remove_migration_pte,
		.arg = old,
	};

	if (locked)
		rmap_walk_locked(new, &rwc);
	else
		rmap_walk(new, &rwc);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
				spinlock_t *ptl)
{
	pte_t pte;
	swp_entry_t entry;
	struct page *page;

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	page = migration_entry_to_page(entry);

	/*
	 * Once radix-tree replacement of page migration started, page_count
	 * *must* be zero. And, we don't want to call wait_on_page_locked()
	 * against a page without get_page().
	 * So, we use get_page_unless_zero(), here. Even if this fails, the
	 * page fault will occur again.
	 */
	if (!get_page_unless_zero(page))
		goto out;
	pte_unmap_unlock(ptep, ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}

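/*
 * Wait on the migration entry mapped at @address: look up the pte and its
 * lock for the given pmd and let __migration_entry_wait() do the work.
 */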
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	spinlock_t *ptl = pte_lockptr(mm, pmd);
	pte_t *ptep = pte_offset_map(pmd, address);
	__migration_entry_wait(mm, ptep, ptl);
}

void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
	__migration_entry_wait(mm, pte, ptl);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
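/*
 * Wait for a PMD-mapped THP migration entry to be resolved: grab a
 * reference on the page under the pmd lock, then wait for the page lock,
 * mirroring __migration_entry_wait() for pte-level entries.
 */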
void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl;
	struct page *page;

	ptl = pmd_lock(mm, pmd);
	if (!is_pmd_migration_entry(*pmd))
		goto unlock;
	page = migration_entry_to_page(pmd_to_swp_entry(*pmd));
	if (!get_page_unless_zero(page))
		goto unlock;
	spin_unlock(ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
unlock:
	spin_unlock(ptl);
}
#endif

#ifdef CONFIG_BLOCK
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	struct buffer_head *bh = head;

	/* Simple case, sync compaction */
	if (mode != MIGRATE_ASYNC) {
		do {
			get_bh(bh);
			lock_buffer(bh);
			bh = bh->b_this_page;

		} while (bh != head);

		return true;
	}

	/* async case, we cannot block on lock_buffer so use trylock_buffer */
	do {
		get_bh(bh);
		if (!trylock_buffer(bh)) {
			/*
			 * We failed to lock the buffer and cannot stall in
			 * async migration. Release the taken locks
			 */
			struct buffer_head *failed_bh = bh;
			put_bh(failed_bh);
			bh = head;
			while (bh != failed_bh) {
				unlock_buffer(bh);
				put_bh(bh);
				bh = bh->b_this_page;
			}
			return false;
		}

		bh = bh->b_this_page;
	} while (bh != head);
	return true;
}
#else
static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	return true;
}
#endif /* CONFIG_BLOCK */

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page,
		struct buffer_head *head, enum migrate_mode mode,
		int extra_count)
{
	struct zone *oldzone, *newzone;
	int dirty;
	int expected_count = 1 + extra_count;
	void **pslot;

	/*
	 * ZONE_DEVICE pages have 1 refcount always held by their device
	 *
	 * Note that DAX memory will never reach that point as it does not have
	 * the MEMORY_DEVICE_ALLOW_MIGRATE flag set (see memory_hotplug.h).
	 */
	expected_count += is_zone_device_page(page);

	if (!mapping) {
		/* Anonymous page without mapping */
		if (page_count(page) != expected_count)
			return -EAGAIN;

		/* No turning back from here */
		newpage->index = page->index;
		newpage->mapping = page->mapping;
		if (PageSwapBacked(page))
			__SetPageSwapBacked(newpage);

		return MIGRATEPAGE_SUCCESS;
	}

	oldzone = page_zone(page);
	newzone = page_zone(newpage);

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count += 1 + page_has_private(page);
	if (page_count(page) != expected_count ||
		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_ref_freeze(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * In the async migration case of moving a page with buffers, lock the
	 * buffers using trylock before the mapping is moved. If the mapping
	 * were moved and we later failed to lock the buffers, we could not
	 * move the mapping back due to an elevated page count and would have
	 * to block waiting on other references to be dropped.
	 */
	if (mode == MIGRATE_ASYNC && head &&
			!buffer_migrate_lock_buffers(head, mode)) {
		page_ref_unfreeze(page, expected_count);
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page:
	 * no turning back from here.
	 */
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	get_page(newpage);	/* add cache reference */
	if (PageSwapBacked(page)) {
		__SetPageSwapBacked(newpage);
		if (PageSwapCache(page)) {
			SetPageSwapCache(newpage);
			set_page_private(newpage, page_private(page));
		}
	} else {
		VM_BUG_ON_PAGE(PageSwapCache(page), page);
	}

	/* Move dirty while page refs frozen and newpage not yet exposed */
	dirty = PageDirty(page);
	if (dirty) {
		ClearPageDirty(page);
		SetPageDirty(newpage);
	}

	radix_tree_replace_slot(&mapping->page_tree, pslot, newpage);

	/*
	 * Drop cache reference from old page by unfreezing
	 * to one less reference.
	 * We know this isn't the last reference.
	 */
	page_ref_unfreeze(page, expected_count - 1);

	spin_unlock(&mapping->tree_lock);
	/* Leave irq disabled to prevent preemption while updating stats */

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
	 * are mapped to swap space.
	 */
	if (newzone != oldzone) {
		__dec_node_state(oldzone->zone_pgdat, NR_FILE_PAGES);
		__inc_node_state(newzone->zone_pgdat, NR_FILE_PAGES);
		if (PageSwapBacked(page) && !PageSwapCache(page)) {
			__dec_node_state(oldzone->zone_pgdat, NR_SHMEM);
			__inc_node_state(newzone->zone_pgdat, NR_SHMEM);
		}
		if (dirty && mapping_cap_account_dirty(mapping)) {
			__dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY);
			__dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
			__inc_node_state(newzone->zone_pgdat, NR_FILE_DIRTY);
			__inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
		}
	}
	local_irq_enable();

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page_move_mapping);

/*
 * The expected number of remaining references is the same as that
 * of migrate_page_move_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct page *newpage, struct page *page)
{
	int expected_count;
	void **pslot;

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count ||
		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_ref_freeze(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	newpage->index = page->index;
	newpage->mapping = page->mapping;

	get_page(newpage);

	radix_tree_replace_slot(&mapping->page_tree, pslot, newpage);

	page_ref_unfreeze(page, expected_count - 1);

	spin_unlock_irq(&mapping->tree_lock);

	return MIGRATEPAGE_SUCCESS;
}

/*
 * Gigantic pages are so large that we do not guarantee that page++ pointer
 * arithmetic will work across the entire page. We need something more
 * specialized.
 */
static void __copy_gigantic_page(struct page *dst, struct page *src,
				int nr_pages)
{
	int i;
	struct page *dst_base = dst;
	struct page *src_base = src;

	for (i = 0; i < nr_pages; ) {
		cond_resched();
		copy_highpage(dst, src);

		i++;
		dst = mem_map_next(dst, dst_base, i);
		src = mem_map_next(src, src_base, i);
	}
}

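/*
 * Copy the contents of a huge page (hugetlbfs or transparent huge page)
 * to its new location, falling back to __copy_gigantic_page() when the
 * page spans more than MAX_ORDER_NR_PAGES.
 */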
static void copy_huge_page(struct page *dst, struct page *src)
{
	int i;
	int nr_pages;

	if (PageHuge(src)) {
		/* hugetlbfs page */
		struct hstate *h = page_hstate(src);
		nr_pages = pages_per_huge_page(h);

		if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
			__copy_gigantic_page(dst, src, nr_pages);
			return;
		}
	} else {
		/* thp page */
		BUG_ON(!PageTransHuge(src));
		nr_pages = hpage_nr_pages(src);
	}

	for (i = 0; i < nr_pages; i++) {
		cond_resched();
		copy_highpage(dst + i, src + i);
	}
}

/*
 * Copy the flags and some other ancillary information of the page
 * to the new page.
 */
void migrate_page_states(struct page *newpage, struct page *page)
{
	int cpupid;

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (TestClearPageActive(page)) {
		VM_BUG_ON_PAGE(PageUnevictable(page), page);
		SetPageActive(newpage);
	} else if (TestClearPageUnevictable(page))
		SetPageUnevictable(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	/* Move dirty on pages not done by migrate_page_move_mapping() */
	if (PageDirty(page))
		SetPageDirty(newpage);

	if (page_is_young(page))
		set_page_young(newpage);
	if (page_is_idle(page))
		set_page_idle(newpage);

	/*
	 * Copy NUMA information to the new page, to prevent over-eager
	 * future migrations of this same page.
	 */
	cpupid = page_cpupid_xchg_last(page, -1);
	page_cpupid_xchg_last(newpage, cpupid);

	ksm_migrate_page(newpage, page);
	/*
	 * Please do not reorder this without considering how mm/ksm.c's
	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
	 */
	if (PageSwapCache(page))
		ClearPageSwapCache(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);

	copy_page_owner(page, newpage);

	mem_cgroup_migrate(page, newpage);
}
EXPORT_SYMBOL(migrate_page_states);

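/*
 * Copy both the data and the state of @page to @newpage: the page
 * contents are copied first, then migrate_page_states() transfers flags
 * and other ancillary information.
 */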
void migrate_page_copy(struct page *newpage, struct page *page)
{
	if (PageHuge(page) || PageTransHuge(page))
		copy_huge_page(newpage, page);
	else
		copy_highpage(newpage, page);

	migrate_page_states(newpage, page);
}
EXPORT_SYMBOL(migrate_page_copy);

/************************************************************
 * Migration functions
 ***********************************************************/

/*
 * Common logic to directly migrate a single LRU page suitable for
 * pages that do not use PagePrivate/PagePrivate2.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page);

#ifdef CONFIG_BLOCK
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	struct buffer_head *bh, *head;
	int rc;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page, mode);

	head = page_buffers(page);

	rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	/*
	 * In the async case, migrate_page_move_mapping locked the buffers
	 * with an IRQ-safe spinlock held. In the sync case, the buffers
	 * need to be locked now
	 */
	if (mode != MIGRATE_ASYNC)
		BUG_ON(!buffer_migrate_lock_buffers(head, mode));

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(buffer_migrate_page);
#endif

/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!clear_page_dirty_for_io(page))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty page may imply that the underlying filesystem has
	 * the page on some queue. So the page must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * page state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(page, page, false);

	rc = mapping->a_ops->writepage(page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		lock_page(page);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page, enum migrate_mode mode)
{
	if (PageDirty(page)) {
		/* Only writeback pages in full synchronous migration */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			return -EBUSY;
		}
		return writeout(mapping, page);
	}

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;

	return migrate_page(mapping, newpage, page, mode);
}

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  MIGRATEPAGE_SUCCESS - success
 */
static int move_to_new_page(struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	struct address_space *mapping;
	int rc = -EAGAIN;
	bool is_lru = !__PageMovable(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	mapping = page_mapping(page);

	if (likely(is_lru)) {
		if (!mapping)
			rc = migrate_page(mapping, newpage, page, mode);
		else if (mapping->a_ops->migratepage)
			/*
			 * Most pages have a mapping and most filesystems
			 * provide a migratepage callback. Anonymous pages
			 * are part of swap space which also has its own
			 * migratepage callback. This is the most common path
			 * for page migration.
			 */
			rc = mapping->a_ops->migratepage(mapping, newpage,
							page, mode);
		else
			rc = fallback_migrate_page(mapping, newpage,
							page, mode);
	} else {
		/*
		 * In case of a non-lru page, it could be released after
		 * the isolation step. In that case, we shouldn't try migration.
		 */
		VM_BUG_ON_PAGE(!PageIsolated(page), page);
		if (!PageMovable(page)) {
			rc = MIGRATEPAGE_SUCCESS;
			__ClearPageIsolated(page);
			goto out;
		}

		rc = mapping->a_ops->migratepage(mapping, newpage,
						page, mode);
		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
			!PageIsolated(page));
	}

	/*
	 * When successful, old pagecache page->mapping must be cleared before
	 * page is freed; but stats require that PageAnon be left as PageAnon.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		if (__PageMovable(page)) {
			VM_BUG_ON_PAGE(!PageIsolated(page), page);

			/*
			 * We clear PG_movable under page_lock so any compactor
			 * cannot try to migrate this page.
			 */
			__ClearPageIsolated(page);
		}

		/*
		 * Anonymous and movable page->mapping will be cleared by
		 * free_pages_prepare so don't reset it here in order to keep
		 * the type checks (e.g. PageAnon) working.
		 */
		if (!PageMappingFlags(page))
			page->mapping = NULL;
	}
out:
	return rc;
}

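/*
 * Lock the old and new pages, replace the old page's mappings with
 * migration entries, and move the page over via move_to_new_page().
 * Returns MIGRATEPAGE_SUCCESS or a negative error such as -EAGAIN; on
 * success the new page is released to the LRU (or simply put for non-LRU
 * movable pages).
 */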
static int __unmap_and_move(struct page *page, struct page *newpage,
				int force, enum migrate_mode mode)
{
	int rc = -EAGAIN;
	int page_was_mapped = 0;
	struct anon_vma *anon_vma = NULL;
	bool is_lru = !__PageMovable(page);

	if (!trylock_page(page)) {
		if (!force || mode == MIGRATE_ASYNC)
			goto out;

		/*
		 * It's not safe for direct compaction to call lock_page.
		 * For example, during page readahead pages are added locked
		 * to the LRU. Later, when the IO completes the pages are
		 * marked uptodate and unlocked. However, the queueing
		 * could be merging multiple pages for one bio (e.g.
		 * mpage_readpages). If an allocation happens for the
		 * second or third page, the process can end up locking
		 * the same page twice and deadlocking. Rather than
		 * trying to be clever about what pages can be locked,
		 * avoid the use of lock_page for direct compaction
		 * altogether.
		 */
		if (current->flags & PF_MEMALLOC)
			goto out;

		lock_page(page);
	}

	if (PageWriteback(page)) {
		/*
		 * Only in the case of a full synchronous migration is it
		 * necessary to wait for PageWriteback. In the async case,
		 * the retry loop is too short and in the sync-light case,
		 * the overhead of stalling is too much
		 */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			rc = -EBUSY;
			goto out_unlock;
		}
		if (!force)
			goto out_unlock;
		wait_on_page_writeback(page);
	}

	/*
	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
	 * we cannot notice that anon_vma is freed while we migrate a page.
	 * This get_anon_vma() delays freeing the anon_vma pointer until the
	 * end of migration. File cache pages are no problem because of
	 * page_lock(). File caches may use write_page() or lock_page() in
	 * migration, so just take care of anon pages here.
	 *
	 * Only page_get_anon_vma() understands the subtleties of
	 * getting a hold on an anon_vma from outside one of its mms.
	 * But if we cannot get anon_vma, then we won't need it anyway,
	 * because that implies that the anon page is no longer mapped
	 * (and cannot be remapped so long as we hold the page lock).
	 */
	if (PageAnon(page) && !PageKsm(page))
		anon_vma = page_get_anon_vma(page);

	/*
	 * Block others from accessing the new page when we get around to
	 * establishing additional references. We are usually the only one
	 * holding a reference to newpage at this point. We used to have a BUG
	 * here if trylock_page(newpage) fails, but would like to allow for
	 * cases where there might be a race with the previous use of newpage.
	 * This is much like races on refcount of oldpage: just don't BUG().
	 */
	if (unlikely(!trylock_page(newpage)))
		goto out_unlock;

	if (unlikely(!is_lru)) {
		rc = move_to_new_page(newpage, page, mode);
		goto out_unlock_both;
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read into, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a page->mapping==NULL page will
	 * trigger a BUG. So handle it here.
	 * 2. An orphaned page (see truncate_complete_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining. Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated. So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!page->mapping) {
		VM_BUG_ON_PAGE(PageAnon(page), page);
		if (page_has_private(page)) {
			try_to_free_buffers(page);
			goto out_unlock_both;
		}
	} else if (page_mapped(page)) {
		/* Establish migration ptes */
		VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
				page);
		try_to_unmap(page,
			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
		page_was_mapped = 1;
	}

	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page, mode);

	if (page_was_mapped)
		remove_migration_ptes(page,
			rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);

out_unlock_both:
	unlock_page(newpage);
out_unlock:
	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		put_anon_vma(anon_vma);
	unlock_page(page);
out:
	/*
	 * If migration is successful, decrease the refcount of newpage,
	 * which will not free the page because the new page owner increased
	 * the refcount. Also, if it is an LRU page, add the page to the
	 * LRU list here.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		if (unlikely(__PageMovable(newpage)))
			put_page(newpage);
		else
			putback_lru_page(newpage);
	}

	return rc;
}

/*
 * gcc 4.7 and 4.8 on arm get ICEs when inlining unmap_and_move(). Work
 * around it.
 */
#if (GCC_VERSION >= 40700 && GCC_VERSION < 40900) && defined(CONFIG_ARM)
#define ICE_noinline noinline
#else
#define ICE_noinline
#endif

/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static ICE_noinline int unmap_and_move(new_page_t get_new_page,
				   free_page_t put_new_page,
				   unsigned long private, struct page *page,
				   int force, enum migrate_mode mode,
				   enum migrate_reason reason)
{
	int rc = MIGRATEPAGE_SUCCESS;
	int *result = NULL;
	struct page *newpage;

	newpage = get_new_page(page, private, &result);
	if (!newpage)
		return -ENOMEM;

	if (page_count(page) == 1) {
		/* page was freed from under us. So we are done. */
		ClearPageActive(page);
		ClearPageUnevictable(page);
		if (unlikely(__PageMovable(page))) {
			lock_page(page);
			if (!PageMovable(page))
				__ClearPageIsolated(page);
			unlock_page(page);
		}
		if (put_new_page)
			put_new_page(newpage, private);
		else
			put_page(newpage);
		goto out;
	}

	if (unlikely(PageTransHuge(page) && !PageTransHuge(newpage))) {
		lock_page(page);
		rc = split_huge_page(page);
		unlock_page(page);
		if (rc)
			goto out;
	}

	rc = __unmap_and_move(page, newpage, force, mode);
	if (rc == MIGRATEPAGE_SUCCESS)
		set_page_owner_migrate_reason(newpage, reason);

out:
	if (rc != -EAGAIN) {
		/*
		 * A page that has been migrated has all references
		 * removed and will be freed. A page that has not been
		 * migrated will have kept its references and be
		 * restored.
		 */
		list_del(&page->lru);

		/*
		 * Compaction can migrate also non-LRU pages which are
		 * not accounted to NR_ISOLATED_*. They can be recognized
		 * as __PageMovable
		 */
		if (likely(!__PageMovable(page)))
			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
					page_is_file_cache(page), -hpage_nr_pages(page));
	}

	/*
	 * If migration is successful, release the reference grabbed during
	 * isolation. Otherwise, restore the page to the right list unless
	 * we want to retry.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		put_page(page);
		if (reason == MR_MEMORY_FAILURE) {
			/*
			 * Set PG_HWPoison on the just freed page
			 * intentionally. Although it's rather weird,
			 * it's how the HWPoison flag works at the moment.
			 */
			if (!test_set_page_hwpoison(page))
				num_poisoned_pages_inc();
		}
	} else {
		if (rc != -EAGAIN) {
			if (likely(!__PageMovable(page))) {
				putback_lru_page(page);
				goto put_new;
			}

			lock_page(page);
			if (PageMovable(page))
				putback_movable_page(page);
			else
				__ClearPageIsolated(page);
			unlock_page(page);
			put_page(page);
		}
put_new:
		if (put_new_page)
			put_new_page(newpage, private);
		else
			put_page(newpage);
	}

	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(newpage);
	}
	return rc;
}

/*
 * Counterpart of unmap_and_move_page() for hugepage migration.
 *
 * This function doesn't wait for the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepage.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and the writeback status of all subpages is counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference of the head page is 512 and a bit more.)
 * This means that when we try to migrate a hugepage whose subpages are
 * doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on the page under migration,
 * because then the pte is replaced with a migration swap entry and the direct
 * I/O code will wait in the page fault for migration to complete.
 */
static int unmap_and_move_huge_page(new_page_t get_new_page,
				free_page_t put_new_page, unsigned long private,
				struct page *hpage, int force,
				enum migrate_mode mode, int reason)
{
	int rc = -EAGAIN;
	int *result = NULL;
	int page_was_mapped = 0;
	struct page *new_hpage;
	struct anon_vma *anon_vma = NULL;

Naoya Horiguchi83467ef2013-09-11 14:22:11 -07001266 /*
1267 * Movability of hugepages depends on architectures and hugepage size.
1268 * This check is necessary because some callers of hugepage migration
1269 * like soft offline and memory hotremove don't walk through page
1270 * tables or check whether the hugepage is pmd-based or not before
1271 * kicking migration.
1272 */
Naoya Horiguchi100873d2014-06-04 16:10:56 -07001273 if (!hugepage_migration_supported(page_hstate(hpage))) {
Joonsoo Kim32665f22014-01-21 15:51:15 -08001274 putback_active_hugepage(hpage);
Naoya Horiguchi83467ef2013-09-11 14:22:11 -07001275 return -ENOSYS;
Joonsoo Kim32665f22014-01-21 15:51:15 -08001276 }
Naoya Horiguchi83467ef2013-09-11 14:22:11 -07001277
Joonsoo Kim32665f22014-01-21 15:51:15 -08001278 new_hpage = get_new_page(hpage, private, &result);
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001279 if (!new_hpage)
1280 return -ENOMEM;
1281
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001282 if (!trylock_page(hpage)) {
Jérôme Glisse2916ecc2017-09-08 16:12:06 -07001283 if (!force)
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001284 goto out;
Jérôme Glisse2916ecc2017-09-08 16:12:06 -07001285 switch (mode) {
1286 case MIGRATE_SYNC:
1287 case MIGRATE_SYNC_NO_COPY:
1288 break;
1289 default:
1290 goto out;
1291 }
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001292 lock_page(hpage);
1293 }
1294
Peter Zijlstra746b18d2011-05-24 17:12:10 -07001295 if (PageAnon(hpage))
1296 anon_vma = page_get_anon_vma(hpage);
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001297
Hugh Dickins7db76712015-11-05 18:49:49 -08001298 if (unlikely(!trylock_page(new_hpage)))
1299 goto put_anon;
1300
Hugh Dickins2ebba6b2014-12-12 16:56:19 -08001301 if (page_mapped(hpage)) {
1302 try_to_unmap(hpage,
1303 TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
1304 page_was_mapped = 1;
1305 }
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001306
1307 if (!page_mapped(hpage))
Hugh Dickins5c3f9a62015-11-05 18:49:53 -08001308 rc = move_to_new_page(new_hpage, hpage, mode);
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001309
Hugh Dickins5c3f9a62015-11-05 18:49:53 -08001310 if (page_was_mapped)
1311 remove_migration_ptes(hpage,
Kirill A. Shutemove3884662016-03-17 14:20:07 -07001312 rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001313
Hugh Dickins7db76712015-11-05 18:49:49 -08001314 unlock_page(new_hpage);
1315
1316put_anon:
Hugh Dickinsfd4a4662011-01-13 15:47:31 -08001317 if (anon_vma)
Peter Zijlstra9e601092011-03-22 16:32:46 -07001318 put_anon_vma(anon_vma);
Aneesh Kumar K.V8e6ac7f2012-07-31 16:42:27 -07001319
Hugh Dickins2def7422015-11-05 18:49:46 -08001320 if (rc == MIGRATEPAGE_SUCCESS) {
Aneesh Kumar K.V8e6ac7f2012-07-31 16:42:27 -07001321 hugetlb_cgroup_migrate(hpage, new_hpage);
Hugh Dickins2def7422015-11-05 18:49:46 -08001322 put_new_page = NULL;
Vlastimil Babka7cd12b42016-03-15 14:56:18 -07001323 set_page_owner_migrate_reason(new_hpage, reason);
Hugh Dickins2def7422015-11-05 18:49:46 -08001324 }
Aneesh Kumar K.V8e6ac7f2012-07-31 16:42:27 -07001325
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001326 unlock_page(hpage);
Hillf Danton09761332011-12-08 14:34:20 -08001327out:
Naoya Horiguchib8ec1ce2013-09-11 14:22:01 -07001328 if (rc != -EAGAIN)
1329 putback_active_hugepage(hpage);
Anshuman Khandualc3114a82017-07-10 15:47:41 -07001330 if (reason == MR_MEMORY_FAILURE && !test_set_page_hwpoison(hpage))
1331 num_poisoned_pages_inc();
David Rientjes68711a72014-06-04 16:08:25 -07001332
1333 /*
1334 * If migration was not successful and there's a freeing callback, use
1335 * it. Otherwise, put_page() will drop the reference grabbed during
1336 * isolation.
1337 */
Hugh Dickins2def7422015-11-05 18:49:46 -08001338 if (put_new_page)
David Rientjes68711a72014-06-04 16:08:25 -07001339 put_new_page(new_hpage, private);
1340 else
Naoya Horiguchi3aaa76e2015-09-22 14:59:14 -07001341 putback_active_hugepage(new_hpage);
David Rientjes68711a72014-06-04 16:08:25 -07001342
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001343 if (result) {
1344 if (rc)
1345 *result = rc;
1346 else
1347 *result = page_to_nid(new_hpage);
1348 }
1349 return rc;
1350}
1351
1352/*
Srivatsa S. Bhatc73e5c92013-04-29 15:08:16 -07001353 * migrate_pages - migrate the pages specified in a list, to the free pages
1354 * supplied as the target for the page migration
Christoph Lameterb20a3502006-03-22 00:09:12 -08001355 *
Srivatsa S. Bhatc73e5c92013-04-29 15:08:16 -07001356 * @from: The list of pages to be migrated.
1357 * @get_new_page: The function used to allocate free pages to be used
1358 * as the target of the page migration.
David Rientjes68711a72014-06-04 16:08:25 -07001359 * @put_new_page: The function used to free target pages if migration
1360 * fails, or NULL if no special handling is necessary.
Srivatsa S. Bhatc73e5c92013-04-29 15:08:16 -07001361 * @private: Private data to be passed on to get_new_page()
1362 * @mode: The migration mode that specifies the constraints for
1363 * page migration, if any.
1364 * @reason: The reason for page migration.
Christoph Lameterb20a3502006-03-22 00:09:12 -08001365 *
Srivatsa S. Bhatc73e5c92013-04-29 15:08:16 -07001366 * The function returns after 10 attempts or if no pages are movable any more,
1367 * either because the list has become empty or because no retryable pages remain.
Hugh Dickins14e0f9b2015-11-05 18:49:43 -08001368 * The caller should call putback_movable_pages() to return pages to the LRU
Minchan Kim28bd6572011-01-25 15:07:26 -08001369 * or free list only if ret != 0.
Christoph Lameterb20a3502006-03-22 00:09:12 -08001370 *
Srivatsa S. Bhatc73e5c92013-04-29 15:08:16 -07001371 * Returns the number of pages that were not migrated, or an error code.
Christoph Lameterb20a3502006-03-22 00:09:12 -08001372 */
Hugh Dickins9c620e22013-02-22 16:35:14 -08001373int migrate_pages(struct list_head *from, new_page_t get_new_page,
David Rientjes68711a72014-06-04 16:08:25 -07001374 free_page_t put_new_page, unsigned long private,
1375 enum migrate_mode mode, int reason)
Christoph Lameterb20a3502006-03-22 00:09:12 -08001376{
Christoph Lametere24f0b82006-06-23 02:03:51 -07001377 int retry = 1;
Christoph Lameterb20a3502006-03-22 00:09:12 -08001378 int nr_failed = 0;
Mel Gorman5647bc22012-10-19 10:46:20 +01001379 int nr_succeeded = 0;
Christoph Lameterb20a3502006-03-22 00:09:12 -08001380 int pass = 0;
1381 struct page *page;
1382 struct page *page2;
1383 int swapwrite = current->flags & PF_SWAPWRITE;
1384 int rc;
1385
1386 if (!swapwrite)
1387 current->flags |= PF_SWAPWRITE;
1388
Christoph Lametere24f0b82006-06-23 02:03:51 -07001389 for(pass = 0; pass < 10 && retry; pass++) {
1390 retry = 0;
Christoph Lameterb20a3502006-03-22 00:09:12 -08001391
Christoph Lametere24f0b82006-06-23 02:03:51 -07001392 list_for_each_entry_safe(page, page2, from, lru) {
Christoph Lametere24f0b82006-06-23 02:03:51 -07001393 cond_resched();
Christoph Lameterb20a3502006-03-22 00:09:12 -08001394
Naoya Horiguchi31caf662013-09-11 14:21:59 -07001395 if (PageHuge(page))
1396 rc = unmap_and_move_huge_page(get_new_page,
David Rientjes68711a72014-06-04 16:08:25 -07001397 put_new_page, private, page,
Vlastimil Babka7cd12b42016-03-15 14:56:18 -07001398 pass > 2, mode, reason);
Naoya Horiguchi31caf662013-09-11 14:21:59 -07001399 else
David Rientjes68711a72014-06-04 16:08:25 -07001400 rc = unmap_and_move(get_new_page, put_new_page,
Naoya Horiguchiadd05ce2015-06-24 16:56:50 -07001401 private, page, pass > 2, mode,
1402 reason);
Christoph Lameterb20a3502006-03-22 00:09:12 -08001403
Christoph Lametere24f0b82006-06-23 02:03:51 -07001404 switch(rc) {
Christoph Lameter95a402c2006-06-23 02:03:53 -07001405 case -ENOMEM:
David Rientjesdfef2ef2016-05-20 16:59:05 -07001406 nr_failed++;
Christoph Lameter95a402c2006-06-23 02:03:53 -07001407 goto out;
Christoph Lametere24f0b82006-06-23 02:03:51 -07001408 case -EAGAIN:
Christoph Lameter2d1db3b2006-06-23 02:03:33 -07001409 retry++;
Christoph Lametere24f0b82006-06-23 02:03:51 -07001410 break;
Rafael Aquini78bd5202012-12-11 16:02:31 -08001411 case MIGRATEPAGE_SUCCESS:
Mel Gorman5647bc22012-10-19 10:46:20 +01001412 nr_succeeded++;
Christoph Lametere24f0b82006-06-23 02:03:51 -07001413 break;
1414 default:
Naoya Horiguchi354a3362014-01-21 15:51:14 -08001415 /*
1416 * Permanent failure (-EBUSY, -ENOSYS, etc.):
1417				 * unlike the -EAGAIN case, the failed page is
1418				 * removed from the migration page list and not
1419 * retried in the next outer loop.
1420 */
Christoph Lameter2d1db3b2006-06-23 02:03:33 -07001421 nr_failed++;
Christoph Lametere24f0b82006-06-23 02:03:51 -07001422 break;
Christoph Lameter2d1db3b2006-06-23 02:03:33 -07001423 }
Christoph Lameterb20a3502006-03-22 00:09:12 -08001424 }
1425 }
Vlastimil Babkaf2f81fb2015-11-05 18:47:03 -08001426 nr_failed += retry;
1427 rc = nr_failed;
Christoph Lameter95a402c2006-06-23 02:03:53 -07001428out:
Mel Gorman5647bc22012-10-19 10:46:20 +01001429 if (nr_succeeded)
1430 count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
1431 if (nr_failed)
1432 count_vm_events(PGMIGRATE_FAIL, nr_failed);
Mel Gorman7b2a2d42012-10-19 14:07:31 +01001433 trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason);
1434
Christoph Lameterb20a3502006-03-22 00:09:12 -08001435 if (!swapwrite)
1436 current->flags &= ~PF_SWAPWRITE;
1437
Rafael Aquini78bd5202012-12-11 16:02:31 -08001438 return rc;
Christoph Lameterb20a3502006-03-22 00:09:12 -08001439}
1440
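/*
 * Illustrative sketch, not part of the original file: one way a caller could
 * drive migrate_pages() above.  The callback signatures mirror new_page_node()
 * and alloc_misplaced_dst_page() further down; every identifier prefixed with
 * example_ and the choice of mode/reason are assumptions made for this example
 * only.
 */
static struct page *example_new_page(struct page *page, unsigned long private,
				     int **result)
{
	int nid = (int)private;		/* target node passed via @private */

	/* Like alloc_misplaced_dst_page(), this sketch ignores @result. */
	return __alloc_pages_node(nid, GFP_HIGHUSER_MOVABLE, 0);
}

static void example_free_page(struct page *page, unsigned long private)
{
	put_page(page);			/* unused target page, drop it */
}

static int example_migrate_list(struct list_head *pagelist, int nid)
{
	int err;

	err = migrate_pages(pagelist, example_new_page, example_free_page,
			    (unsigned long)nid, MIGRATE_SYNC, MR_SYSCALL);
	if (err)
		/* Some pages were not migrated; return them to their lists. */
		putback_movable_pages(pagelist);
	return err;
}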
Christoph Lameter742755a2006-06-23 02:03:55 -07001441#ifdef CONFIG_NUMA
1442/*
1443 * Move a list of individual pages
1444 */
1445struct page_to_node {
1446 unsigned long addr;
1447 struct page *page;
1448 int node;
1449 int status;
1450};
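/*
 * Illustrative layout, not from the original source: do_pages_move() below
 * fills one page worth of these entries at a time and reserves the last used
 * slot as an end marker, e.g. for a chunk of two user requests:
 *
 *	pm[0] = { .addr = 0x7f2a10000000, .node = 1 };	// user request
 *	pm[1] = { .addr = 0x7f2a10001000, .node = 0 };	// user request
 *	pm[2] = { .node = MAX_NUMNODES };		// end marker
 *
 * The addresses and node numbers are made up.  On a typical 64-bit build with
 * 4KB pages an entry is 24 bytes, so a full chunk holds (4096 / 24) - 1 = 169
 * requests plus the marker.
 */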
1451
1452static struct page *new_page_node(struct page *p, unsigned long private,
1453 int **result)
1454{
1455 struct page_to_node *pm = (struct page_to_node *)private;
1456
1457 while (pm->node != MAX_NUMNODES && pm->page != p)
1458 pm++;
1459
1460 if (pm->node == MAX_NUMNODES)
1461 return NULL;
1462
1463 *result = &pm->status;
1464
Naoya Horiguchie632a932013-09-11 14:22:04 -07001465 if (PageHuge(p))
1466 return alloc_huge_page_node(page_hstate(compound_head(p)),
1467 pm->node);
Naoya Horiguchie8db67e2017-09-08 16:11:12 -07001468 else if (thp_migration_supported() && PageTransHuge(p)) {
1469 struct page *thp;
1470
1471 thp = alloc_pages_node(pm->node,
1472 (GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_RECLAIM,
1473 HPAGE_PMD_ORDER);
1474 if (!thp)
1475 return NULL;
1476 prep_transhuge_page(thp);
1477 return thp;
1478 } else
Vlastimil Babka96db8002015-09-08 15:03:50 -07001479 return __alloc_pages_node(pm->node,
Johannes Weinere97ca8e52014-03-10 15:49:43 -07001480 GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0);
Christoph Lameter742755a2006-06-23 02:03:55 -07001481}
1482
1483/*
1484 * Move a set of pages as indicated in the pm array. The addr
1485 * field must be set to the virtual address of the page to be moved
1486 * and the node number must contain a valid target node.
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001487 * The pm array ends with node = MAX_NUMNODES.
Christoph Lameter742755a2006-06-23 02:03:55 -07001488 */
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001489static int do_move_page_to_node_array(struct mm_struct *mm,
1490 struct page_to_node *pm,
1491 int migrate_all)
Christoph Lameter742755a2006-06-23 02:03:55 -07001492{
1493 int err;
1494 struct page_to_node *pp;
1495 LIST_HEAD(pagelist);
1496
1497 down_read(&mm->mmap_sem);
1498
1499 /*
1500 * Build a list of pages to migrate
1501 */
Christoph Lameter742755a2006-06-23 02:03:55 -07001502 for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
1503 struct vm_area_struct *vma;
1504 struct page *page;
Naoya Horiguchie8db67e2017-09-08 16:11:12 -07001505 struct page *head;
1506 unsigned int follflags;
Christoph Lameter742755a2006-06-23 02:03:55 -07001507
Christoph Lameter742755a2006-06-23 02:03:55 -07001508 err = -EFAULT;
1509 vma = find_vma(mm, pp->addr);
Gleb Natapov70384dc2010-10-26 14:22:07 -07001510 if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma))
Christoph Lameter742755a2006-06-23 02:03:55 -07001511 goto set_status;
1512
Kirill A. Shutemovd8998442015-09-04 15:47:53 -07001513 /* FOLL_DUMP to ignore special (like zero) pages */
Naoya Horiguchie8db67e2017-09-08 16:11:12 -07001514 follflags = FOLL_GET | FOLL_DUMP;
1515 if (!thp_migration_supported())
1516 follflags |= FOLL_SPLIT;
1517 page = follow_page(vma, pp->addr, follflags);
Linus Torvalds89f5b7d2008-06-20 11:18:25 -07001518
1519 err = PTR_ERR(page);
1520 if (IS_ERR(page))
1521 goto set_status;
1522
Christoph Lameter742755a2006-06-23 02:03:55 -07001523 err = -ENOENT;
1524 if (!page)
1525 goto set_status;
1526
Christoph Lameter742755a2006-06-23 02:03:55 -07001527 err = page_to_nid(page);
1528
1529 if (err == pp->node)
1530 /*
1531 * Node already in the right place
1532 */
1533 goto put_and_set;
1534
1535 err = -EACCES;
1536 if (page_mapcount(page) > 1 &&
1537 !migrate_all)
1538 goto put_and_set;
1539
Naoya Horiguchie632a932013-09-11 14:22:04 -07001540 if (PageHuge(page)) {
Naoya Horiguchie8db67e2017-09-08 16:11:12 -07001541 if (PageHead(page)) {
Naoya Horiguchie66f17f2015-02-11 15:25:22 -08001542 isolate_huge_page(page, &pagelist);
Naoya Horiguchie8db67e2017-09-08 16:11:12 -07001543 err = 0;
1544 pp->page = page;
1545 }
Naoya Horiguchie632a932013-09-11 14:22:04 -07001546 goto put_and_set;
1547 }
1548
Naoya Horiguchie8db67e2017-09-08 16:11:12 -07001549 pp->page = compound_head(page);
1550 head = compound_head(page);
1551 err = isolate_lru_page(head);
KOSAKI Motohiro6d9c2852009-12-14 17:58:11 -08001552 if (!err) {
Naoya Horiguchie8db67e2017-09-08 16:11:12 -07001553 list_add_tail(&head->lru, &pagelist);
1554 mod_node_page_state(page_pgdat(head),
1555 NR_ISOLATED_ANON + page_is_file_cache(head),
1556 hpage_nr_pages(head));
KOSAKI Motohiro6d9c2852009-12-14 17:58:11 -08001557 }
Christoph Lameter742755a2006-06-23 02:03:55 -07001558put_and_set:
1559 /*
1560 * Either remove the duplicate refcount from
1561 * isolate_lru_page() or drop the page ref if it was
1562 * not isolated.
1563 */
1564 put_page(page);
1565set_status:
1566 pp->status = err;
1567 }
1568
Brice Gogline78bbfa2008-10-18 20:27:15 -07001569 err = 0;
Minchan Kimcf608ac2010-10-26 14:21:29 -07001570 if (!list_empty(&pagelist)) {
David Rientjes68711a72014-06-04 16:08:25 -07001571 err = migrate_pages(&pagelist, new_page_node, NULL,
Hugh Dickins9c620e22013-02-22 16:35:14 -08001572 (unsigned long)pm, MIGRATE_SYNC, MR_SYSCALL);
Minchan Kimcf608ac2010-10-26 14:21:29 -07001573 if (err)
Naoya Horiguchie632a932013-09-11 14:22:04 -07001574 putback_movable_pages(&pagelist);
Minchan Kimcf608ac2010-10-26 14:21:29 -07001575 }
Christoph Lameter742755a2006-06-23 02:03:55 -07001576
1577 up_read(&mm->mmap_sem);
1578 return err;
1579}
1580
1581/*
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001582 * Migrate an array of page addresses onto an array of nodes and fill
1583 * the corresponding array of status.
1584 */
Christoph Lameter3268c632012-03-21 16:34:06 -07001585static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001586 unsigned long nr_pages,
1587 const void __user * __user *pages,
1588 const int __user *nodes,
1589 int __user *status, int flags)
1590{
Brice Goglin3140a222009-01-06 14:38:57 -08001591 struct page_to_node *pm;
Brice Goglin3140a222009-01-06 14:38:57 -08001592 unsigned long chunk_nr_pages;
1593 unsigned long chunk_start;
1594 int err;
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001595
Brice Goglin3140a222009-01-06 14:38:57 -08001596 err = -ENOMEM;
1597 pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
1598 if (!pm)
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001599 goto out;
Brice Goglin35282a22009-06-16 15:32:43 -07001600
1601 migrate_prep();
1602
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001603 /*
Brice Goglin3140a222009-01-06 14:38:57 -08001604 * Store a chunk of page_to_node array in a page,
1605 * but keep the last one as a marker
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001606 */
Brice Goglin3140a222009-01-06 14:38:57 -08001607 chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1;
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001608
Brice Goglin3140a222009-01-06 14:38:57 -08001609 for (chunk_start = 0;
1610 chunk_start < nr_pages;
1611 chunk_start += chunk_nr_pages) {
1612 int j;
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001613
Brice Goglin3140a222009-01-06 14:38:57 -08001614 if (chunk_start + chunk_nr_pages > nr_pages)
1615 chunk_nr_pages = nr_pages - chunk_start;
1616
1617 /* fill the chunk pm with addrs and nodes from user-space */
1618 for (j = 0; j < chunk_nr_pages; j++) {
1619 const void __user *p;
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001620 int node;
1621
Brice Goglin3140a222009-01-06 14:38:57 -08001622 err = -EFAULT;
1623 if (get_user(p, pages + j + chunk_start))
1624 goto out_pm;
1625 pm[j].addr = (unsigned long) p;
1626
1627 if (get_user(node, nodes + j + chunk_start))
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001628 goto out_pm;
1629
1630 err = -ENODEV;
Linus Torvalds6f5a55f2010-02-05 16:16:50 -08001631 if (node < 0 || node >= MAX_NUMNODES)
1632 goto out_pm;
1633
Lai Jiangshan389162c2012-12-12 13:51:30 -08001634 if (!node_state(node, N_MEMORY))
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001635 goto out_pm;
1636
1637 err = -EACCES;
1638 if (!node_isset(node, task_nodes))
1639 goto out_pm;
1640
Brice Goglin3140a222009-01-06 14:38:57 -08001641 pm[j].node = node;
1642 }
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001643
Brice Goglin3140a222009-01-06 14:38:57 -08001644 /* End marker for this chunk */
1645 pm[chunk_nr_pages].node = MAX_NUMNODES;
1646
1647 /* Migrate this chunk */
1648 err = do_move_page_to_node_array(mm, pm,
1649 flags & MPOL_MF_MOVE_ALL);
1650 if (err < 0)
1651 goto out_pm;
1652
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001653 /* Return status information */
Brice Goglin3140a222009-01-06 14:38:57 -08001654 for (j = 0; j < chunk_nr_pages; j++)
1655 if (put_user(pm[j].status, status + j + chunk_start)) {
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001656 err = -EFAULT;
Brice Goglin3140a222009-01-06 14:38:57 -08001657 goto out_pm;
1658 }
1659 }
1660 err = 0;
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001661
1662out_pm:
Brice Goglin3140a222009-01-06 14:38:57 -08001663 free_page((unsigned long)pm);
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001664out:
1665 return err;
1666}
1667
1668/*
Brice Goglin2f007e72008-10-18 20:27:16 -07001669 * Determine the nodes of an array of pages and store it in an array of status.
Christoph Lameter742755a2006-06-23 02:03:55 -07001670 */
Brice Goglin80bba122008-12-09 13:14:23 -08001671static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
1672 const void __user **pages, int *status)
Christoph Lameter742755a2006-06-23 02:03:55 -07001673{
Brice Goglin2f007e72008-10-18 20:27:16 -07001674 unsigned long i;
Brice Goglin2f007e72008-10-18 20:27:16 -07001675
Christoph Lameter742755a2006-06-23 02:03:55 -07001676 down_read(&mm->mmap_sem);
1677
Brice Goglin2f007e72008-10-18 20:27:16 -07001678 for (i = 0; i < nr_pages; i++) {
Brice Goglin80bba122008-12-09 13:14:23 -08001679 unsigned long addr = (unsigned long)(*pages);
Christoph Lameter742755a2006-06-23 02:03:55 -07001680 struct vm_area_struct *vma;
1681 struct page *page;
KOSAKI Motohiroc095adb2008-12-16 16:06:43 +09001682 int err = -EFAULT;
Brice Goglin2f007e72008-10-18 20:27:16 -07001683
1684 vma = find_vma(mm, addr);
Gleb Natapov70384dc2010-10-26 14:22:07 -07001685 if (!vma || addr < vma->vm_start)
Christoph Lameter742755a2006-06-23 02:03:55 -07001686 goto set_status;
1687
Kirill A. Shutemovd8998442015-09-04 15:47:53 -07001688 /* FOLL_DUMP to ignore special (like zero) pages */
1689 page = follow_page(vma, addr, FOLL_DUMP);
Linus Torvalds89f5b7d2008-06-20 11:18:25 -07001690
1691 err = PTR_ERR(page);
1692 if (IS_ERR(page))
1693 goto set_status;
1694
Kirill A. Shutemovd8998442015-09-04 15:47:53 -07001695 err = page ? page_to_nid(page) : -ENOENT;
Christoph Lameter742755a2006-06-23 02:03:55 -07001696set_status:
Brice Goglin80bba122008-12-09 13:14:23 -08001697 *status = err;
1698
1699 pages++;
1700 status++;
1701 }
1702
1703 up_read(&mm->mmap_sem);
1704}
1705
1706/*
1707 * Determine the nodes of a user array of pages and store it in
1708 * a user array of status.
1709 */
1710static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
1711 const void __user * __user *pages,
1712 int __user *status)
1713{
1714#define DO_PAGES_STAT_CHUNK_NR 16
1715 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
1716 int chunk_status[DO_PAGES_STAT_CHUNK_NR];
Brice Goglin80bba122008-12-09 13:14:23 -08001717
H. Peter Anvin87b8d1a2010-02-18 16:13:40 -08001718 while (nr_pages) {
1719 unsigned long chunk_nr;
Brice Goglin80bba122008-12-09 13:14:23 -08001720
H. Peter Anvin87b8d1a2010-02-18 16:13:40 -08001721 chunk_nr = nr_pages;
1722 if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
1723 chunk_nr = DO_PAGES_STAT_CHUNK_NR;
1724
1725 if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
1726 break;
Brice Goglin80bba122008-12-09 13:14:23 -08001727
1728 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
1729
H. Peter Anvin87b8d1a2010-02-18 16:13:40 -08001730 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
1731 break;
Christoph Lameter742755a2006-06-23 02:03:55 -07001732
H. Peter Anvin87b8d1a2010-02-18 16:13:40 -08001733 pages += chunk_nr;
1734 status += chunk_nr;
1735 nr_pages -= chunk_nr;
1736 }
1737 return nr_pages ? -EFAULT : 0;
Christoph Lameter742755a2006-06-23 02:03:55 -07001738}
1739
1740/*
1741 * Move a list of pages in the address space of the currently executing
1742 * process.
1743 */
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001744SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
1745 const void __user * __user *, pages,
1746 const int __user *, nodes,
1747 int __user *, status, int, flags)
Christoph Lameter742755a2006-06-23 02:03:55 -07001748{
Christoph Lameter742755a2006-06-23 02:03:55 -07001749 struct task_struct *task;
Christoph Lameter742755a2006-06-23 02:03:55 -07001750 struct mm_struct *mm;
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001751 int err;
Christoph Lameter3268c632012-03-21 16:34:06 -07001752 nodemask_t task_nodes;
Christoph Lameter742755a2006-06-23 02:03:55 -07001753
1754 /* Check flags */
1755 if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
1756 return -EINVAL;
1757
1758 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1759 return -EPERM;
1760
1761 /* Find the mm_struct */
Greg Thelena879bf52011-02-25 14:44:13 -08001762 rcu_read_lock();
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -07001763 task = pid ? find_task_by_vpid(pid) : current;
Christoph Lameter742755a2006-06-23 02:03:55 -07001764 if (!task) {
Greg Thelena879bf52011-02-25 14:44:13 -08001765 rcu_read_unlock();
Christoph Lameter742755a2006-06-23 02:03:55 -07001766 return -ESRCH;
1767 }
Christoph Lameter3268c632012-03-21 16:34:06 -07001768 get_task_struct(task);
Christoph Lameter742755a2006-06-23 02:03:55 -07001769
1770 /*
1771 * Check if this process has the right to modify the specified
Linus Torvalds197e7e52017-08-20 13:26:27 -07001772 * process. Use the regular "ptrace_may_access()" checks.
Christoph Lameter742755a2006-06-23 02:03:55 -07001773 */
Linus Torvalds197e7e52017-08-20 13:26:27 -07001774 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
David Howellsc69e8d92008-11-14 10:39:19 +11001775 rcu_read_unlock();
Christoph Lameter742755a2006-06-23 02:03:55 -07001776 err = -EPERM;
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001777 goto out;
Christoph Lameter742755a2006-06-23 02:03:55 -07001778 }
David Howellsc69e8d92008-11-14 10:39:19 +11001779 rcu_read_unlock();
Christoph Lameter742755a2006-06-23 02:03:55 -07001780
David Quigley86c3a762006-06-23 02:04:02 -07001781 err = security_task_movememory(task);
1782 if (err)
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001783 goto out;
David Quigley86c3a762006-06-23 02:04:02 -07001784
Christoph Lameter3268c632012-03-21 16:34:06 -07001785 task_nodes = cpuset_mems_allowed(task);
1786 mm = get_task_mm(task);
1787 put_task_struct(task);
1788
Sasha Levin6e8b09e2012-04-25 16:01:53 -07001789 if (!mm)
1790 return -EINVAL;
1791
1792 if (nodes)
1793 err = do_pages_move(mm, task_nodes, nr_pages, pages,
1794 nodes, status, flags);
1795 else
1796 err = do_pages_stat(mm, nr_pages, pages, status);
Christoph Lameter3268c632012-03-21 16:34:06 -07001797
1798 mmput(mm);
1799 return err;
David Quigley86c3a762006-06-23 02:04:02 -07001800
Christoph Lameter742755a2006-06-23 02:03:55 -07001801out:
Christoph Lameter3268c632012-03-21 16:34:06 -07001802 put_task_struct(task);
Christoph Lameter742755a2006-06-23 02:03:55 -07001803 return err;
1804}
Christoph Lameter742755a2006-06-23 02:03:55 -07001805
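/*
 * Illustrative user-space sketch, not part of the kernel: how the syscall
 * defined above is usually reached through libnuma's move_pages() wrapper.
 * The buffer, the target node and the flag choice are assumptions for the
 * example only; pid 0 means "the calling process".
 *
 *	#include <numaif.h>
 *
 *	void *pages[1] = { buf };	// page-aligned address owned by us
 *	int nodes[1]   = { 1 };		// requested target node
 *	int status[1];
 *
 *	long rc = move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);
 *	// rc != 0 signals an error; otherwise status[0] holds the node the
 *	// page now lives on, or a negative errno for that page.
 */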
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001806#ifdef CONFIG_NUMA_BALANCING
1807/*
1808 * Returns true if this is a safe migration target node for misplaced NUMA
1809 * pages. Currently it only checks the watermarks, which is a crude check.
1810 */
1811static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
Mel Gorman3abef4e2013-02-22 16:34:27 -08001812 unsigned long nr_migrate_pages)
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001813{
1814 int z;
Mel Gorman599d0c92016-07-28 15:45:31 -07001815
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001816 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
1817 struct zone *zone = pgdat->node_zones + z;
1818
1819 if (!populated_zone(zone))
1820 continue;
1821
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001822 /* Avoid waking kswapd by allocating pages_to_migrate pages. */
1823 if (!zone_watermark_ok(zone, 0,
1824 high_wmark_pages(zone) +
1825 nr_migrate_pages,
1826 0, 0))
1827 continue;
1828 return true;
1829 }
1830 return false;
1831}
1832
1833static struct page *alloc_misplaced_dst_page(struct page *page,
1834 unsigned long data,
1835 int **result)
1836{
1837 int nid = (int) data;
1838 struct page *newpage;
1839
Vlastimil Babka96db8002015-09-08 15:03:50 -07001840 newpage = __alloc_pages_node(nid,
Johannes Weinere97ca8e52014-03-10 15:49:43 -07001841 (GFP_HIGHUSER_MOVABLE |
1842 __GFP_THISNODE | __GFP_NOMEMALLOC |
1843 __GFP_NORETRY | __GFP_NOWARN) &
Mel Gorman8479eba2016-02-26 15:19:31 -08001844 ~__GFP_RECLAIM, 0);
Hillf Dantonbac03822012-11-27 14:46:24 +00001845
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001846 return newpage;
1847}
1848
1849/*
Mel Gormana8f60772012-11-14 21:41:46 +00001850 * page migration rate limiting control.
1851 * Do not migrate more than @ratelimit_pages in a @migrate_interval_millisecs
1852 * window of time. Default here says do not migrate more than 1280M per second.
1853 */
1854static unsigned int migrate_interval_millisecs __read_mostly = 100;
1855static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT);
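/*
 * Worked example of the defaults above (illustrative, assuming 4KB pages,
 * i.e. PAGE_SHIFT == 12):
 *
 *	ratelimit_pages = 128 << (20 - 12) = 32768 pages = 128MB per window
 *	window length   = migrate_interval_millisecs  = 100ms
 *	=> at most 128MB / 0.1s = 1280MB migrated per second per node
 */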
1856
Mel Gormanb32967f2012-11-19 12:35:47 +00001857/* Returns true if the node is migrate rate-limited after the update */
Mel Gorman1c30e012014-01-21 15:50:58 -08001858static bool numamigrate_update_ratelimit(pg_data_t *pgdat,
1859 unsigned long nr_pages)
Mel Gormanb32967f2012-11-19 12:35:47 +00001860{
Mel Gormanb32967f2012-11-19 12:35:47 +00001861 /*
1862 * Rate-limit the amount of data that is being migrated to a node.
1863 * Optimal placement is no good if the memory bus is saturated and
1864 * all the time is being spent migrating!
1865 */
Mel Gormanb32967f2012-11-19 12:35:47 +00001866 if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
Mel Gorman1c5e9c22014-01-21 15:50:59 -08001867 spin_lock(&pgdat->numabalancing_migrate_lock);
Mel Gormanb32967f2012-11-19 12:35:47 +00001868 pgdat->numabalancing_migrate_nr_pages = 0;
1869 pgdat->numabalancing_migrate_next_window = jiffies +
1870 msecs_to_jiffies(migrate_interval_millisecs);
Mel Gorman1c5e9c22014-01-21 15:50:59 -08001871 spin_unlock(&pgdat->numabalancing_migrate_lock);
Mel Gormanb32967f2012-11-19 12:35:47 +00001872 }
Mel Gormanaf1839d2014-01-21 15:51:01 -08001873 if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages) {
1874 trace_mm_numa_migrate_ratelimit(current, pgdat->node_id,
1875 nr_pages);
Mel Gorman1c5e9c22014-01-21 15:50:59 -08001876 return true;
Mel Gormanaf1839d2014-01-21 15:51:01 -08001877 }
Mel Gorman1c5e9c22014-01-21 15:50:59 -08001878
1879 /*
1880 * This is an unlocked non-atomic update so errors are possible.
1881	 * The consequences are failing to migrate when we potentially should
1882	 * have, which is not severe enough to warrant locking. If it is ever
1883 * a problem, it can be converted to a per-cpu counter.
1884 */
1885 pgdat->numabalancing_migrate_nr_pages += nr_pages;
1886 return false;
Mel Gormanb32967f2012-11-19 12:35:47 +00001887}
1888
Mel Gorman1c30e012014-01-21 15:50:58 -08001889static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
Mel Gormanb32967f2012-11-19 12:35:47 +00001890{
Hugh Dickins340ef392013-02-22 16:34:33 -08001891 int page_lru;
Mel Gormanb32967f2012-11-19 12:35:47 +00001892
Sasha Levin309381fea2014-01-23 15:52:54 -08001893 VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);
Mel Gorman3abef4e2013-02-22 16:34:27 -08001894
Mel Gormanb32967f2012-11-19 12:35:47 +00001895 /* Avoid migrating to a node that is nearly full */
Hugh Dickins340ef392013-02-22 16:34:33 -08001896 if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page)))
1897 return 0;
Mel Gormanb32967f2012-11-19 12:35:47 +00001898
Hugh Dickins340ef392013-02-22 16:34:33 -08001899 if (isolate_lru_page(page))
1900 return 0;
Mel Gormanb32967f2012-11-19 12:35:47 +00001901
1902 /*
Hugh Dickins340ef392013-02-22 16:34:33 -08001903 * migrate_misplaced_transhuge_page() skips page migration's usual
1904 * check on page_count(), so we must do it here, now that the page
1905 * has been isolated: a GUP pin, or any other pin, prevents migration.
1906	 * The expected page count is 3: 1 for the page's mapcount, 1 for the
1907	 * caller's pin, and 1 for the reference taken by isolate_lru_page().
1908 */
1909 if (PageTransHuge(page) && page_count(page) != 3) {
1910 putback_lru_page(page);
1911 return 0;
1912 }
1913
1914 page_lru = page_is_file_cache(page);
Mel Gorman599d0c92016-07-28 15:45:31 -07001915 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
Hugh Dickins340ef392013-02-22 16:34:33 -08001916 hpage_nr_pages(page));
1917
1918 /*
1919 * Isolating the page has taken another reference, so the
1920 * caller's reference can be safely dropped without the page
1921 * disappearing underneath us during migration.
Mel Gormanb32967f2012-11-19 12:35:47 +00001922 */
1923 put_page(page);
Hugh Dickins340ef392013-02-22 16:34:33 -08001924 return 1;
Mel Gormanb32967f2012-11-19 12:35:47 +00001925}
1926
Mel Gormande466bd2013-12-18 17:08:42 -08001927bool pmd_trans_migrating(pmd_t pmd)
1928{
1929 struct page *page = pmd_page(pmd);
1930 return PageLocked(page);
1931}
1932
Mel Gormana8f60772012-11-14 21:41:46 +00001933/*
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001934 * Attempt to migrate a misplaced page to the specified destination
1935 * node. Caller is expected to have an elevated reference count on
1936 * the page that will be dropped by this function before returning.
1937 */
Mel Gorman1bc115d2013-10-07 11:29:05 +01001938int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
1939 int node)
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001940{
Mel Gormana8f60772012-11-14 21:41:46 +00001941 pg_data_t *pgdat = NODE_DATA(node);
Hugh Dickins340ef392013-02-22 16:34:33 -08001942 int isolated;
Mel Gormanb32967f2012-11-19 12:35:47 +00001943 int nr_remaining;
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001944 LIST_HEAD(migratepages);
1945
1946 /*
Mel Gorman1bc115d2013-10-07 11:29:05 +01001947 * Don't migrate file pages that are mapped in multiple processes
1948 * with execute permissions as they are probably shared libraries.
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001949 */
Mel Gorman1bc115d2013-10-07 11:29:05 +01001950 if (page_mapcount(page) != 1 && page_is_file_cache(page) &&
1951 (vma->vm_flags & VM_EXEC))
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001952 goto out;
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001953
Mel Gormana8f60772012-11-14 21:41:46 +00001954 /*
1955 * Rate-limit the amount of data that is being migrated to a node.
1956 * Optimal placement is no good if the memory bus is saturated and
1957 * all the time is being spent migrating!
1958 */
Hugh Dickins340ef392013-02-22 16:34:33 -08001959 if (numamigrate_update_ratelimit(pgdat, 1))
Mel Gormana8f60772012-11-14 21:41:46 +00001960 goto out;
Mel Gormana8f60772012-11-14 21:41:46 +00001961
Mel Gormanb32967f2012-11-19 12:35:47 +00001962 isolated = numamigrate_isolate_page(pgdat, page);
1963 if (!isolated)
1964 goto out;
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001965
Mel Gormanb32967f2012-11-19 12:35:47 +00001966 list_add(&page->lru, &migratepages);
Hugh Dickins9c620e22013-02-22 16:35:14 -08001967 nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
David Rientjes68711a72014-06-04 16:08:25 -07001968 NULL, node, MIGRATE_ASYNC,
1969 MR_NUMA_MISPLACED);
Mel Gormanb32967f2012-11-19 12:35:47 +00001970 if (nr_remaining) {
Joonsoo Kim59c82b72014-01-21 15:51:17 -08001971 if (!list_empty(&migratepages)) {
1972 list_del(&page->lru);
Mel Gorman599d0c92016-07-28 15:45:31 -07001973 dec_node_page_state(page, NR_ISOLATED_ANON +
Joonsoo Kim59c82b72014-01-21 15:51:17 -08001974 page_is_file_cache(page));
1975 putback_lru_page(page);
1976 }
Mel Gormanb32967f2012-11-19 12:35:47 +00001977 isolated = 0;
1978 } else
1979 count_vm_numa_event(NUMA_PAGE_MIGRATE);
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001980 BUG_ON(!list_empty(&migratepages));
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001981 return isolated;
Hugh Dickins340ef392013-02-22 16:34:33 -08001982
1983out:
1984 put_page(page);
1985 return 0;
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001986}
Mel Gorman220018d2012-12-05 09:32:56 +00001987#endif /* CONFIG_NUMA_BALANCING */
Mel Gormanb32967f2012-11-19 12:35:47 +00001988
Mel Gorman220018d2012-12-05 09:32:56 +00001989#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
Hugh Dickins340ef392013-02-22 16:34:33 -08001990/*
1991 * Migrates a THP to a given target node. page must be locked and is unlocked
1992 * before returning.
1993 */
Mel Gormanb32967f2012-11-19 12:35:47 +00001994int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1995 struct vm_area_struct *vma,
1996 pmd_t *pmd, pmd_t entry,
1997 unsigned long address,
1998 struct page *page, int node)
1999{
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08002000 spinlock_t *ptl;
Mel Gormanb32967f2012-11-19 12:35:47 +00002001 pg_data_t *pgdat = NODE_DATA(node);
2002 int isolated = 0;
2003 struct page *new_page = NULL;
Mel Gormanb32967f2012-11-19 12:35:47 +00002004 int page_lru = page_is_file_cache(page);
Mel Gormanf714f4f2013-12-18 17:08:33 -08002005 unsigned long mmun_start = address & HPAGE_PMD_MASK;
2006 unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
Mel Gormanb32967f2012-11-19 12:35:47 +00002007
2008 /*
Mel Gormanb32967f2012-11-19 12:35:47 +00002009 * Rate-limit the amount of data that is being migrated to a node.
2010 * Optimal placement is no good if the memory bus is saturated and
2011 * all the time is being spent migrating!
2012 */
Mel Gormand28d43352012-11-29 09:24:36 +00002013 if (numamigrate_update_ratelimit(pgdat, HPAGE_PMD_NR))
Mel Gormanb32967f2012-11-19 12:35:47 +00002014 goto out_dropref;
2015
2016 new_page = alloc_pages_node(node,
Vlastimil Babka25160352016-07-28 15:49:25 -07002017 (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
Johannes Weinere97ca8e52014-03-10 15:49:43 -07002018 HPAGE_PMD_ORDER);
Hugh Dickins340ef392013-02-22 16:34:33 -08002019 if (!new_page)
2020 goto out_fail;
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002021 prep_transhuge_page(new_page);
Hugh Dickins340ef392013-02-22 16:34:33 -08002022
Mel Gormanb32967f2012-11-19 12:35:47 +00002023 isolated = numamigrate_isolate_page(pgdat, page);
Hugh Dickins340ef392013-02-22 16:34:33 -08002024 if (!isolated) {
Mel Gormanb32967f2012-11-19 12:35:47 +00002025 put_page(new_page);
Hugh Dickins340ef392013-02-22 16:34:33 -08002026 goto out_fail;
Mel Gormanb32967f2012-11-19 12:35:47 +00002027 }
Mel Gormanb0943d62013-12-18 17:08:46 -08002028
Mel Gormanb32967f2012-11-19 12:35:47 +00002029 /* Prepare a page as a migration target */
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08002030 __SetPageLocked(new_page);
Shaohua Lid44d3632017-05-03 14:52:26 -07002031 if (PageSwapBacked(page))
2032 __SetPageSwapBacked(new_page);
Mel Gormanb32967f2012-11-19 12:35:47 +00002033
2034 /* anon mapping, we can simply copy page->mapping to the new page: */
2035 new_page->mapping = page->mapping;
2036 new_page->index = page->index;
2037 migrate_page_copy(new_page, page);
2038 WARN_ON(PageLRU(new_page));
2039
2040 /* Recheck the target PMD */
Mel Gormanf714f4f2013-12-18 17:08:33 -08002041 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08002042 ptl = pmd_lock(mm, pmd);
Will Deaconf4e177d2017-07-10 15:48:31 -07002043 if (unlikely(!pmd_same(*pmd, entry) || !page_ref_freeze(page, 2))) {
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08002044 spin_unlock(ptl);
Mel Gormanf714f4f2013-12-18 17:08:33 -08002045 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
Mel Gormanb32967f2012-11-19 12:35:47 +00002046
2047 /* Reverse changes made by migrate_page_copy() */
2048 if (TestClearPageActive(new_page))
2049 SetPageActive(page);
2050 if (TestClearPageUnevictable(new_page))
2051 SetPageUnevictable(page);
Mel Gormanb32967f2012-11-19 12:35:47 +00002052
2053 unlock_page(new_page);
2054 put_page(new_page); /* Free it */
2055
Mel Gormana54a4072013-10-07 11:28:46 +01002056 /* Retake the callers reference and putback on LRU */
2057 get_page(page);
Mel Gormanb32967f2012-11-19 12:35:47 +00002058 putback_lru_page(page);
Mel Gorman599d0c92016-07-28 15:45:31 -07002059 mod_node_page_state(page_pgdat(page),
Mel Gormana54a4072013-10-07 11:28:46 +01002060 NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
Mel Gormaneb4489f62013-12-18 17:08:39 -08002061
2062 goto out_unlock;
Mel Gormanb32967f2012-11-19 12:35:47 +00002063 }
2064
Kirill A. Shutemov10102452016-07-26 15:25:29 -07002065 entry = mk_huge_pmd(new_page, vma->vm_page_prot);
Mel Gorman2b4847e2013-12-18 17:08:32 -08002066 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
Mel Gormanb32967f2012-11-19 12:35:47 +00002067
Mel Gorman2b4847e2013-12-18 17:08:32 -08002068 /*
2069 * Clear the old entry under pagetable lock and establish the new PTE.
2070 * Any parallel GUP will either observe the old page blocking on the
2071 * page lock, block on the page table lock or observe the new page.
2072 * The SetPageUptodate on the new page and page_add_new_anon_rmap
2073 * guarantee the copy is visible before the pagetable update.
2074 */
Mel Gormanf714f4f2013-12-18 17:08:33 -08002075 flush_cache_range(vma, mmun_start, mmun_end);
Kirill A. Shutemovd281ee62016-01-15 16:52:16 -08002076 page_add_anon_rmap(new_page, vma, mmun_start, true);
Aneesh Kumar K.V8809aa22015-06-24 16:57:44 -07002077 pmdp_huge_clear_flush_notify(vma, mmun_start, pmd);
Mel Gormanf714f4f2013-12-18 17:08:33 -08002078 set_pmd_at(mm, mmun_start, pmd, entry);
Stephen Rothwellce4a9cc2012-12-10 19:50:57 +11002079 update_mmu_cache_pmd(vma, address, &entry);
Mel Gorman2b4847e2013-12-18 17:08:32 -08002080
Will Deaconf4e177d2017-07-10 15:48:31 -07002081 page_ref_unfreeze(page, 2);
Hugh Dickins51afb122015-11-05 18:49:37 -08002082 mlock_migrate_page(new_page, page);
Kirill A. Shutemovd281ee62016-01-15 16:52:16 -08002083 page_remove_rmap(page, true);
Vlastimil Babka7cd12b42016-03-15 14:56:18 -07002084 set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);
Mel Gorman2b4847e2013-12-18 17:08:32 -08002085
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08002086 spin_unlock(ptl);
Mel Gormanf714f4f2013-12-18 17:08:33 -08002087 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
Mel Gormanb32967f2012-11-19 12:35:47 +00002088
Mel Gorman11de9922014-06-04 16:07:41 -07002089 /* Take an "isolate" reference and put new page on the LRU. */
2090 get_page(new_page);
2091 putback_lru_page(new_page);
2092
Mel Gormanb32967f2012-11-19 12:35:47 +00002093 unlock_page(new_page);
2094 unlock_page(page);
2095 put_page(page); /* Drop the rmap reference */
2096 put_page(page); /* Drop the LRU isolation reference */
2097
2098 count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
2099 count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
2100
Mel Gorman599d0c92016-07-28 15:45:31 -07002101 mod_node_page_state(page_pgdat(page),
Mel Gormanb32967f2012-11-19 12:35:47 +00002102 NR_ISOLATED_ANON + page_lru,
2103 -HPAGE_PMD_NR);
2104 return isolated;
2105
Hugh Dickins340ef392013-02-22 16:34:33 -08002106out_fail:
2107 count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
Mel Gormanb32967f2012-11-19 12:35:47 +00002108out_dropref:
Mel Gorman2b4847e2013-12-18 17:08:32 -08002109 ptl = pmd_lock(mm, pmd);
2110 if (pmd_same(*pmd, entry)) {
Mel Gorman4d942462015-02-12 14:58:28 -08002111 entry = pmd_modify(entry, vma->vm_page_prot);
Mel Gormanf714f4f2013-12-18 17:08:33 -08002112 set_pmd_at(mm, mmun_start, pmd, entry);
Mel Gorman2b4847e2013-12-18 17:08:32 -08002113 update_mmu_cache_pmd(vma, address, &entry);
2114 }
2115 spin_unlock(ptl);
Mel Gormana54a4072013-10-07 11:28:46 +01002116
Mel Gormaneb4489f62013-12-18 17:08:39 -08002117out_unlock:
Hugh Dickins340ef392013-02-22 16:34:33 -08002118 unlock_page(page);
Mel Gormanb32967f2012-11-19 12:35:47 +00002119 put_page(page);
Mel Gormanb32967f2012-11-19 12:35:47 +00002120 return 0;
2121}
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02002122#endif /* CONFIG_NUMA_BALANCING */
2123
2124#endif /* CONFIG_NUMA */
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002125
2126
2127struct migrate_vma {
2128 struct vm_area_struct *vma;
2129 unsigned long *dst;
2130 unsigned long *src;
2131 unsigned long cpages;
2132 unsigned long npages;
2133 unsigned long start;
2134 unsigned long end;
2135};
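/*
 * Illustrative note, not from the original source: each slot of @src and @dst
 * is an encoded pfn plus MIGRATE_PFN_* flag bits built by migrate_pfn(), and
 * migrate_pfn_to_page() is the reverse lookup.  A hypothetical device driver
 * allocating destination memory would fill a dst slot roughly like:
 *
 *	migrate->dst[i] = migrate_pfn(page_to_pfn(newpage)) |
 *			  MIGRATE_PFN_LOCKED;
 *
 * where newpage is a page it allocated and locked itself.
 */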
2136
2137static int migrate_vma_collect_hole(unsigned long start,
2138 unsigned long end,
2139 struct mm_walk *walk)
2140{
2141 struct migrate_vma *migrate = walk->private;
2142 unsigned long addr;
2143
2144 for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002145		migrate->dst[migrate->npages] = 0;
2146		migrate->src[migrate->npages++] = MIGRATE_PFN_MIGRATE;
2147		migrate->cpages++;
2148 }
2149
2150 return 0;
2151}
2152
2153static int migrate_vma_collect_skip(unsigned long start,
2154 unsigned long end,
2155 struct mm_walk *walk)
2156{
2157 struct migrate_vma *migrate = walk->private;
2158 unsigned long addr;
2159
2160 for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002161 migrate->dst[migrate->npages] = 0;
2162 migrate->src[migrate->npages++] = 0;
2163 }
2164
2165 return 0;
2166}
2167
2168static int migrate_vma_collect_pmd(pmd_t *pmdp,
2169 unsigned long start,
2170 unsigned long end,
2171 struct mm_walk *walk)
2172{
2173 struct migrate_vma *migrate = walk->private;
2174 struct vm_area_struct *vma = walk->vma;
2175 struct mm_struct *mm = vma->vm_mm;
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002176 unsigned long addr = start, unmapped = 0;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002177 spinlock_t *ptl;
2178 pte_t *ptep;
2179
2180again:
2181 if (pmd_none(*pmdp))
2182 return migrate_vma_collect_hole(start, end, walk);
2183
2184 if (pmd_trans_huge(*pmdp)) {
2185 struct page *page;
2186
2187 ptl = pmd_lock(mm, pmdp);
2188 if (unlikely(!pmd_trans_huge(*pmdp))) {
2189 spin_unlock(ptl);
2190 goto again;
2191 }
2192
2193 page = pmd_page(*pmdp);
2194 if (is_huge_zero_page(page)) {
2195 spin_unlock(ptl);
2196 split_huge_pmd(vma, pmdp, addr);
2197 if (pmd_trans_unstable(pmdp))
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002198 return migrate_vma_collect_skip(start, end,
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002199 walk);
2200 } else {
2201 int ret;
2202
2203 get_page(page);
2204 spin_unlock(ptl);
2205 if (unlikely(!trylock_page(page)))
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002206 return migrate_vma_collect_skip(start, end,
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002207 walk);
2208 ret = split_huge_page(page);
2209 unlock_page(page);
2210 put_page(page);
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002211 if (ret)
2212 return migrate_vma_collect_skip(start, end,
2213 walk);
2214 if (pmd_none(*pmdp))
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002215 return migrate_vma_collect_hole(start, end,
2216 walk);
2217 }
2218 }
2219
2220 if (unlikely(pmd_bad(*pmdp)))
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002221 return migrate_vma_collect_skip(start, end, walk);
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002222
2223 ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002224 arch_enter_lazy_mmu_mode();
2225
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002226 for (; addr < end; addr += PAGE_SIZE, ptep++) {
2227 unsigned long mpfn, pfn;
2228 struct page *page;
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002229 swp_entry_t entry;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002230 pte_t pte;
2231
2232 pte = *ptep;
2233 pfn = pte_pfn(pte);
2234
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002235 if (pte_none(pte)) {
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002236 mpfn = MIGRATE_PFN_MIGRATE;
2237 migrate->cpages++;
2238 pfn = 0;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002239 goto next;
2240 }
2241
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002242 if (!pte_present(pte)) {
2243 mpfn = pfn = 0;
2244
2245 /*
2246 * Only care about unaddressable device page special
2247 * page table entry. Other special swap entries are not
2248			 * migratable, and we ignore regular swapped pages.
2249 */
2250 entry = pte_to_swp_entry(pte);
2251 if (!is_device_private_entry(entry))
2252 goto next;
2253
2254 page = device_private_entry_to_page(entry);
2255 mpfn = migrate_pfn(page_to_pfn(page))|
2256 MIGRATE_PFN_DEVICE | MIGRATE_PFN_MIGRATE;
2257 if (is_write_device_private_entry(entry))
2258 mpfn |= MIGRATE_PFN_WRITE;
2259 } else {
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002260 if (is_zero_pfn(pfn)) {
2261 mpfn = MIGRATE_PFN_MIGRATE;
2262 migrate->cpages++;
2263 pfn = 0;
2264 goto next;
2265 }
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002266 page = vm_normal_page(migrate->vma, addr, pte);
2267 mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
2268 mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
2269 }
2270
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002271 /* FIXME support THP */
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002272 if (!page || !page->mapping || PageTransCompound(page)) {
2273 mpfn = pfn = 0;
2274 goto next;
2275 }
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002276 pfn = page_to_pfn(page);
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002277
2278 /*
2279 * By getting a reference on the page we pin it and that blocks
2280 * any kind of migration. Side effect is that it "freezes" the
2281 * pte.
2282 *
2283 * We drop this reference after isolating the page from the lru
2284		 * for non-device pages (device pages are not on the lru and thus
2285		 * can't be dropped from it).
2286 */
2287 get_page(page);
2288 migrate->cpages++;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002289
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002290 /*
2291 * Optimize for the common case where page is only mapped once
2292 * in one process. If we can lock the page, then we can safely
2293 * set up a special migration page table entry now.
2294 */
2295 if (trylock_page(page)) {
2296 pte_t swp_pte;
2297
2298 mpfn |= MIGRATE_PFN_LOCKED;
2299 ptep_get_and_clear(mm, addr, ptep);
2300
2301 /* Setup special migration page table entry */
2302 entry = make_migration_entry(page, pte_write(pte));
2303 swp_pte = swp_entry_to_pte(entry);
2304 if (pte_soft_dirty(pte))
2305 swp_pte = pte_swp_mksoft_dirty(swp_pte);
2306 set_pte_at(mm, addr, ptep, swp_pte);
2307
2308 /*
2309 * This is like regular unmap: we remove the rmap and
2310 * drop page refcount. Page won't be freed, as we took
2311 * a reference just above.
2312 */
2313 page_remove_rmap(page, false);
2314 put_page(page);
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002315
2316 if (pte_present(pte))
2317 unmapped++;
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002318 }
2319
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002320next:
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002321 migrate->dst[migrate->npages] = 0;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002322 migrate->src[migrate->npages++] = mpfn;
2323 }
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002324 arch_leave_lazy_mmu_mode();
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002325 pte_unmap_unlock(ptep - 1, ptl);
2326
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002327 /* Only flush the TLB if we actually modified any entries */
2328 if (unmapped)
2329 flush_tlb_range(walk->vma, start, end);
2330
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002331 return 0;
2332}
2333
2334/*
2335 * migrate_vma_collect() - collect pages over a range of virtual addresses
2336 * @migrate: migrate struct containing all migration information
2337 *
2338 * This will walk the CPU page table. For each virtual address backed by a
2339 * valid page, it updates the src array and takes a reference on the page, in
2340 * order to pin the page until we lock it and unmap it.
2341 */
2342static void migrate_vma_collect(struct migrate_vma *migrate)
2343{
2344 struct mm_walk mm_walk;
2345
2346 mm_walk.pmd_entry = migrate_vma_collect_pmd;
2347 mm_walk.pte_entry = NULL;
2348 mm_walk.pte_hole = migrate_vma_collect_hole;
2349 mm_walk.hugetlb_entry = NULL;
2350 mm_walk.test_walk = NULL;
2351 mm_walk.vma = migrate->vma;
2352 mm_walk.mm = migrate->vma->vm_mm;
2353 mm_walk.private = migrate;
2354
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002355 mmu_notifier_invalidate_range_start(mm_walk.mm,
2356 migrate->start,
2357 migrate->end);
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002358 walk_page_range(migrate->start, migrate->end, &mm_walk);
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002359 mmu_notifier_invalidate_range_end(mm_walk.mm,
2360 migrate->start,
2361 migrate->end);
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002362
2363 migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
2364}
2365
2366/*
2367 * migrate_vma_check_page() - check if page is pinned or not
2368 * @page: struct page to check
2369 *
2370 * Pinned pages cannot be migrated. This is the same test as in
2371 * migrate_page_move_mapping(), except that here we allow migration of a
2372 * ZONE_DEVICE page.
2373 */
2374static bool migrate_vma_check_page(struct page *page)
2375{
2376 /*
2377 * One extra ref because caller holds an extra reference, either from
2378 * isolate_lru_page() for a regular page, or migrate_vma_collect() for
2379 * a device page.
2380 */
2381 int extra = 1;
2382
2383 /*
2384 * FIXME support THP (transparent huge page), it is bit more complex to
2385 * check them than regular pages, because they can be mapped with a pmd
2386 * or with a pte (split pte mapping).
2387 */
2388 if (PageCompound(page))
2389 return false;
2390
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002391 /* Page from ZONE_DEVICE have one extra reference */
2392 if (is_zone_device_page(page)) {
2393 /*
2394		 * Private pages can never be pinned as they have no valid pte and
2395		 * GUP will fail for those. Yet if there is a pending migration
2396		 * a thread might try to wait on the pte migration entry and
2397		 * will bump the page reference count. Sadly there is no way to
2398		 * differentiate a regular pin from a migration wait. Hence, to
2399		 * avoid two racing threads trying to migrate back to the CPU and
2400		 * entering an infinite loop (one stopping migration because the
2401		 * other is waiting on the pte migration entry), we always return true here.
2402 *
2403 * FIXME proper solution is to rework migration_entry_wait() so
2404 * it does not need to take a reference on page.
2405 */
2406 if (is_device_private_page(page))
2407 return true;
2408
2409 /* Other ZONE_DEVICE memory type are not supported */
2410 return false;
2411 }
2412
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002413 if ((page_count(page) - extra) > page_mapcount(page))
2414 return false;
2415
2416 return true;
2417}
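/*
 * Worked example for the test above (illustrative, not from the original
 * source): an anonymous page mapped in a single process, with only the
 * reference the migration code holds on top of that mapping, has
 *
 *	page_mapcount(page) == 1
 *	page_count(page)    == 2	(mapping + migration code, extra == 1)
 *
 * so page_count() - extra == page_mapcount() and the page is treated as
 * migratable.  Any further reference (a GUP pin, O_DIRECT in flight, ...)
 * raises page_count() without raising page_mapcount() and the page is
 * reported as pinned.
 */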
2418
2419/*
2420 * migrate_vma_prepare() - lock pages and isolate them from the lru
2421 * @migrate: migrate struct containing all migration information
2422 *
2423 * This locks pages that have been collected by migrate_vma_collect(). Once each
2424 * page is locked it is isolated from the lru (for non-device pages). Finally,
2425 * the ref taken by migrate_vma_collect() is dropped, as locked pages cannot be
2426 * migrated by concurrent kernel threads.
2427 */
2428static void migrate_vma_prepare(struct migrate_vma *migrate)
2429{
2430 const unsigned long npages = migrate->npages;
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002431 const unsigned long start = migrate->start;
2432 unsigned long addr, i, restore = 0;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002433 bool allow_drain = true;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002434
2435 lru_add_drain();
2436
2437 for (i = 0; (i < npages) && migrate->cpages; i++) {
2438 struct page *page = migrate_pfn_to_page(migrate->src[i]);
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002439 bool remap = true;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002440
2441 if (!page)
2442 continue;
2443
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002444 if (!(migrate->src[i] & MIGRATE_PFN_LOCKED)) {
2445 /*
2446 * Because we are migrating several pages there can be
2447			 * a deadlock between two concurrent migrations where each
2448			 * is waiting on the other's page lock.
2449			 *
2450			 * Make migrate_vma() a best-effort thing and back off
2451			 * for any page we cannot lock right away.
2452 */
2453 if (!trylock_page(page)) {
2454 migrate->src[i] = 0;
2455 migrate->cpages--;
2456 put_page(page);
2457 continue;
2458 }
2459 remap = false;
2460 migrate->src[i] |= MIGRATE_PFN_LOCKED;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002461 }
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002462
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002463 /* ZONE_DEVICE pages are not on LRU */
2464 if (!is_zone_device_page(page)) {
2465 if (!PageLRU(page) && allow_drain) {
2466 /* Drain CPU's pagevec */
2467 lru_add_drain_all();
2468 allow_drain = false;
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002469 }
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002470
2471 if (isolate_lru_page(page)) {
2472 if (remap) {
2473 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2474 migrate->cpages--;
2475 restore++;
2476 } else {
2477 migrate->src[i] = 0;
2478 unlock_page(page);
2479 migrate->cpages--;
2480 put_page(page);
2481 }
2482 continue;
2483 }
2484
2485 /* Drop the reference we took in collect */
2486 put_page(page);
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002487 }
2488
2489 if (!migrate_vma_check_page(page)) {
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002490 if (remap) {
2491 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2492 migrate->cpages--;
2493 restore++;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002494
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002495 if (!is_zone_device_page(page)) {
2496 get_page(page);
2497 putback_lru_page(page);
2498 }
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002499 } else {
2500 migrate->src[i] = 0;
2501 unlock_page(page);
2502 migrate->cpages--;
2503
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002504 if (!is_zone_device_page(page))
2505 putback_lru_page(page);
2506 else
2507 put_page(page);
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002508 }
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002509 }
2510 }
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002511
2512 for (i = 0, addr = start; i < npages && restore; i++, addr += PAGE_SIZE) {
2513 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2514
2515 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
2516 continue;
2517
2518 remove_migration_pte(page, migrate->vma, addr, page);
2519
2520 migrate->src[i] = 0;
2521 unlock_page(page);
2522 put_page(page);
2523 restore--;
2524 }
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002525}
2526
/*
 * migrate_vma_unmap() - replace page mapping with special migration pte entry
 * @migrate: migrate struct containing all migration information
 *
 * Replace page mapping (CPU page table pte) with a special migration pte entry
 * and check again if it has been pinned. Pinned pages are restored because we
 * cannot migrate them.
 *
 * This is the last step before we call the device driver callback to allocate
 * destination memory and copy contents of original page over to new page.
 */
static void migrate_vma_unmap(struct migrate_vma *migrate)
{
	int flags = TTU_MIGRATION | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
	const unsigned long npages = migrate->npages;
	const unsigned long start = migrate->start;
	unsigned long addr, i, restore = 0;

	for (i = 0; i < npages; i++) {
		struct page *page = migrate_pfn_to_page(migrate->src[i]);

		if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
			continue;

		if (page_mapped(page)) {
			try_to_unmap(page, flags);
			if (page_mapped(page))
				goto restore;
		}

		if (migrate_vma_check_page(page))
			continue;

restore:
		migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
		migrate->cpages--;
		restore++;
	}

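	/*
	 * Re-map the pages that could not be unmapped or that turned out to
	 * be pinned: the original mapping is restored, and the pages are
	 * unlocked and put back.
	 */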
	for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) {
		struct page *page = migrate_pfn_to_page(migrate->src[i]);

		if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
			continue;

		remove_migration_ptes(page, page, false);

		migrate->src[i] = 0;
		unlock_page(page);
		restore--;

		if (is_zone_device_page(page))
			put_page(page);
		else
			putback_lru_page(page);
	}
}

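/*
 * migrate_vma_insert_page() - populate an empty CPU page table entry with a
 * brand new page
 *
 * This is used when there is no source page backing the address (for instance
 * the pte was none or pointed to the zero page) but the driver still provided
 * a destination page: the new page (regular or device private) is charged,
 * added to the anonymous rmap and mapped directly. Only anonymous vmas are
 * supported; on any failure the MIGRATE_PFN_MIGRATE flag is cleared in *src
 * so that the entry is skipped.
 */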
static void migrate_vma_insert_page(struct migrate_vma *migrate,
				    unsigned long addr,
				    struct page *page,
				    unsigned long *src,
				    unsigned long *dst)
{
	struct vm_area_struct *vma = migrate->vma;
	struct mm_struct *mm = vma->vm_mm;
	struct mem_cgroup *memcg;
	bool flush = false;
	spinlock_t *ptl;
	pte_t entry;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	/* Only allow populating anonymous memory */
	if (!vma_is_anonymous(vma))
		goto abort;

	pgdp = pgd_offset(mm, addr);
	p4dp = p4d_alloc(mm, pgdp, addr);
	if (!p4dp)
		goto abort;
	pudp = pud_alloc(mm, p4dp, addr);
	if (!pudp)
		goto abort;
	pmdp = pmd_alloc(mm, pudp, addr);
	if (!pmdp)
		goto abort;

	if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp))
		goto abort;

	/*
	 * Use pte_alloc() instead of pte_alloc_map(). We can't run
	 * pte_offset_map() on pmds where a huge pmd might be created
	 * from a different thread.
	 *
	 * pte_alloc_map() is safe to use under down_write(mmap_sem) or when
	 * parallel threads are excluded by other means.
	 *
	 * Here we only have down_read(mmap_sem).
	 */
	if (pte_alloc(mm, pmdp, addr))
		goto abort;

	/* See the comment in pte_alloc_one_map() */
	if (unlikely(pmd_trans_unstable(pmdp)))
		goto abort;

	if (unlikely(anon_vma_prepare(vma)))
		goto abort;
	if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false))
		goto abort;

	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__SetPageUptodate(page);

	if (is_zone_device_page(page) && is_device_private_page(page)) {
		swp_entry_t swp_entry;

		swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE);
		entry = swp_entry_to_pte(swp_entry);
	} else {
		entry = mk_pte(page, vma->vm_page_prot);
		if (vma->vm_flags & VM_WRITE)
			entry = pte_mkwrite(pte_mkdirty(entry));
	}

	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);

	if (pte_present(*ptep)) {
		unsigned long pfn = pte_pfn(*ptep);

		if (!is_zero_pfn(pfn)) {
			pte_unmap_unlock(ptep, ptl);
			mem_cgroup_cancel_charge(page, memcg, false);
			goto abort;
		}
		flush = true;
	} else if (!pte_none(*ptep)) {
		pte_unmap_unlock(ptep, ptl);
		mem_cgroup_cancel_charge(page, memcg, false);
		goto abort;
	}

	/*
	 * Check for userfaultfd but do not deliver the fault. Instead,
	 * just back off.
	 */
	if (userfaultfd_missing(vma)) {
		pte_unmap_unlock(ptep, ptl);
		mem_cgroup_cancel_charge(page, memcg, false);
		goto abort;
	}

	inc_mm_counter(mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, vma, addr, false);
	mem_cgroup_commit_charge(page, memcg, false, false);
	if (!is_zone_device_page(page))
		lru_cache_add_active_or_unevictable(page, vma);
	get_page(page);

	if (flush) {
		flush_cache_page(vma, addr, pte_pfn(*ptep));
		ptep_clear_flush_notify(vma, addr, ptep);
		set_pte_at_notify(mm, addr, ptep, entry);
		update_mmu_cache(vma, addr, ptep);
	} else {
		/* No need to invalidate - it was non-present before */
		set_pte_at(mm, addr, ptep, entry);
		update_mmu_cache(vma, addr, ptep);
	}

	pte_unmap_unlock(ptep, ptl);
	*src = MIGRATE_PFN_MIGRATE;
	return;

abort:
	*src &= ~MIGRATE_PFN_MIGRATE;
}

/*
 * migrate_vma_pages() - migrate meta-data from src page to dst page
 * @migrate: migrate struct containing all migration information
 *
 * This migrates struct page meta-data from source struct page to destination
 * struct page. This effectively finishes the migration from source page to the
 * destination page.
 */
static void migrate_vma_pages(struct migrate_vma *migrate)
{
	const unsigned long npages = migrate->npages;
	const unsigned long start = migrate->start;
	struct vm_area_struct *vma = migrate->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr, i, mmu_start;
	bool notified = false;

	for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) {
		struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
		struct page *page = migrate_pfn_to_page(migrate->src[i]);
		struct address_space *mapping;
		int r;

		if (!newpage) {
			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
			continue;
		}

		if (!page) {
			if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE)) {
				continue;
			}
			if (!notified) {
				mmu_start = addr;
				notified = true;
				mmu_notifier_invalidate_range_start(mm,
								    mmu_start,
								    migrate->end);
			}
			migrate_vma_insert_page(migrate, addr, newpage,
						&migrate->src[i],
						&migrate->dst[i]);
			continue;
		}

		mapping = page_mapping(page);

		if (is_zone_device_page(newpage)) {
			if (is_device_private_page(newpage)) {
				/*
				 * For now only support private anonymous
				 * memory when migrating to un-addressable
				 * device memory.
				 */
				if (mapping) {
					migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
					continue;
				}
			} else {
				/*
				 * Other types of ZONE_DEVICE page are not
				 * supported.
				 */
				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
				continue;
			}
		}

		r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY);
		if (r != MIGRATEPAGE_SUCCESS)
			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
	}

	if (notified)
		mmu_notifier_invalidate_range_end(mm, mmu_start,
						  migrate->end);
}

/*
 * migrate_vma_finalize() - restore CPU page table entries
 * @migrate: migrate struct containing all migration information
 *
 * This replaces the special migration pte entry with either a mapping to the
 * new page if migration was successful for that page, or to the original page
 * otherwise.
 *
 * This also unlocks the pages and puts them back on the LRU, or drops the
 * extra refcount for device pages.
 */
static void migrate_vma_finalize(struct migrate_vma *migrate)
{
	const unsigned long npages = migrate->npages;
	unsigned long i;

	for (i = 0; i < npages; i++) {
		struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
		struct page *page = migrate_pfn_to_page(migrate->src[i]);

		if (!page) {
			if (newpage) {
				unlock_page(newpage);
				put_page(newpage);
			}
			continue;
		}

		if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
			if (newpage) {
				unlock_page(newpage);
				put_page(newpage);
			}
			newpage = page;
		}

		remove_migration_ptes(page, newpage, false);
		unlock_page(page);
		migrate->cpages--;

		if (is_zone_device_page(page))
			put_page(page);
		else
			putback_lru_page(page);

		if (newpage != page) {
			unlock_page(newpage);
			if (is_zone_device_page(newpage))
				put_page(newpage);
			else
				putback_lru_page(newpage);
		}
	}
}

/*
 * migrate_vma() - migrate a range of memory inside vma
 *
 * @ops: migration callback for allocating destination memory and copying
 * @vma: virtual memory area containing the range to be migrated
 * @start: start address of the range to migrate (inclusive)
 * @end: end address of the range to migrate (exclusive)
 * @src: array of unsigned long holding the source pfns (encoded migrate pfns
 *       with MIGRATE_PFN_* flags)
 * @dst: array of unsigned long holding the destination pfns (encoded the
 *       same way)
 * @private: pointer passed back to each of the callbacks
 * Returns: 0 on success, error code otherwise
 *
 * This function tries to migrate a range of virtual addresses, using callbacks
 * to allocate and copy memory from source to destination. First it collects
 * all the pages backing each virtual address in the range, saving them in the
 * src array. Then it locks those pages and unmaps them. Once the pages are
 * locked and unmapped, it checks whether each page is pinned or not. Pages
 * that aren't pinned have the MIGRATE_PFN_MIGRATE flag set (by this function)
 * in the corresponding src array entry. Pages that are pinned are restored by
 * remapping and unlocking them.
 *
 * At this point it calls the alloc_and_copy() callback. For documentation on
 * what is expected from that callback, see struct migrate_vma_ops comments in
 * include/linux/migrate.h
 *
 * After the alloc_and_copy() callback, this function goes over each entry in
 * the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag
 * set. If the corresponding entry in dst array has MIGRATE_PFN_VALID flag set,
 * then the function tries to migrate struct page information from the source
 * struct page to the destination struct page. If it fails to migrate the
 * struct page information, then it clears the MIGRATE_PFN_MIGRATE flag in the
 * src array.
 *
 * At this point all successfully migrated pages have an entry in the src
 * array with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set and the dst
 * array entry with MIGRATE_PFN_VALID flag set.
 *
 * It then calls the finalize_and_map() callback. See comments for "struct
 * migrate_vma_ops", in include/linux/migrate.h for details about
 * finalize_and_map() behavior.
 *
 * After the finalize_and_map() callback, for successfully migrated pages, this
 * function updates the CPU page table to point to new pages, otherwise it
 * restores the CPU page table to point to the original source pages.
 *
 * The function returns 0 after the above steps, even if no pages were migrated
 * (it only returns an error if any of the arguments are invalid).
 *
 * Both src and dst array must be big enough for (end - start) >> PAGE_SHIFT
 * unsigned long entries. An illustrative usage sketch follows the function
 * body below.
 */
int migrate_vma(const struct migrate_vma_ops *ops,
		struct vm_area_struct *vma,
		unsigned long start,
		unsigned long end,
		unsigned long *src,
		unsigned long *dst,
		void *private)
{
	struct migrate_vma migrate;

	/* Sanity check the arguments */
	start &= PAGE_MASK;
	end &= PAGE_MASK;
	if (!vma || is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL))
		return -EINVAL;
	if (start < vma->vm_start || start >= vma->vm_end)
		return -EINVAL;
	if (end <= vma->vm_start || end > vma->vm_end)
		return -EINVAL;
	if (!ops || !src || !dst || start >= end)
		return -EINVAL;

	memset(src, 0, sizeof(*src) * ((end - start) >> PAGE_SHIFT));
	migrate.src = src;
	migrate.dst = dst;
	migrate.start = start;
	migrate.npages = 0;
	migrate.cpages = 0;
	migrate.end = end;
	migrate.vma = vma;

	/* Collect, and try to unmap source pages */
	migrate_vma_collect(&migrate);
	if (!migrate.cpages)
		return 0;

	/* Lock and isolate page */
	migrate_vma_prepare(&migrate);
	if (!migrate.cpages)
		return 0;

	/* Unmap pages */
	migrate_vma_unmap(&migrate);
	if (!migrate.cpages)
		return 0;

	/*
	 * At this point pages are locked and unmapped, and thus they have
	 * stable content and can safely be copied to destination memory that
	 * is allocated by the callback.
	 *
	 * Note that migration can still fail in migrate_vma_pages() for each
	 * individual page.
	 */
	ops->alloc_and_copy(vma, src, dst, start, end, private);

	/* This does the real migration of struct page */
	migrate_vma_pages(&migrate);

	ops->finalize_and_map(vma, src, dst, start, end, private);

	/* Unlock and remap pages */
	migrate_vma_finalize(&migrate);

	return 0;
}
EXPORT_SYMBOL(migrate_vma);
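
/*
 * Illustrative usage sketch (editorial example, not compiled as part of this
 * file): a driver migrating a range of anonymous memory with migrate_vma()
 * would provide the two struct migrate_vma_ops callbacks roughly as below.
 * This sketch allocates ordinary system pages with alloc_page_vma() and
 * copies with copy_highpage(); a real device driver would instead allocate
 * device private pages and use its own copy engine. The dst encoding assumes
 * the migrate_pfn() helper and MIGRATE_PFN_* flags from
 * include/linux/migrate.h; all "example_*" identifiers are hypothetical.
 *
 *	static void example_alloc_and_copy(struct vm_area_struct *vma,
 *					   const unsigned long *src,
 *					   unsigned long *dst,
 *					   unsigned long start,
 *					   unsigned long end,
 *					   void *private)
 *	{
 *		unsigned long addr, i;
 *
 *		for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, i++) {
 *			struct page *spage = migrate_pfn_to_page(src[i]);
 *			struct page *dpage;
 *
 *			if (!(src[i] & MIGRATE_PFN_MIGRATE))
 *				continue;
 *
 *			dpage = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr);
 *			if (!dpage)
 *				continue;	// entry is then skipped by migrate_vma_pages()
 *
 *			lock_page(dpage);
 *			if (spage)
 *				copy_highpage(dpage, spage);
 *			dst[i] = migrate_pfn(page_to_pfn(dpage)) |
 *				 MIGRATE_PFN_LOCKED;
 *		}
 *	}
 *
 *	static void example_finalize_and_map(struct vm_area_struct *vma,
 *					     const unsigned long *src,
 *					     const unsigned long *dst,
 *					     unsigned long start,
 *					     unsigned long end,
 *					     void *private)
 *	{
 *		// entries that still have MIGRATE_PFN_MIGRATE set in src[]
 *		// were migrated; update driver private state here
 *	}
 *
 *	static const struct migrate_vma_ops example_migrate_ops = {
 *		.alloc_and_copy		= example_alloc_and_copy,
 *		.finalize_and_map	= example_finalize_and_map,
 *	};
 *
 *	ret = migrate_vma(&example_migrate_ops, vma, start, end,
 *			  src_pfns, dst_pfns, NULL);
 *
 * where src_pfns and dst_pfns are caller-allocated arrays of
 * (end - start) >> PAGE_SHIFT unsigned long entries, as documented above.
 */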