/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/syscalls.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/balloon_compaction.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>

#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/migrate.h>

#include "internal.h"

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
 * undesirable, use migrate_prep_local()
 */
int migrate_prep(void)
{
	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

/* Do the necessary work of migrate_prep but not if it involves other CPUs */
int migrate_prep_local(void)
{
	lru_add_drain();

	return 0;
}

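/*
 * Typical caller sequence (an illustrative sketch only, loosely modelled on
 * existing callers such as the move_pages() path; it is not lifted verbatim
 * from any of them):
 *
 *	LIST_HEAD(pagelist);
 *	int err;
 *
 *	migrate_prep();
 *	// isolate each candidate page, e.g. with isolate_lru_page(),
 *	// and collect it on &pagelist
 *	err = migrate_pages(&pagelist, get_new_page, NULL, private,
 *			MIGRATE_SYNC, MR_SYSCALL);
 *	if (err)
 *		putback_movable_pages(&pagelist);
 */
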
/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used whenever the isolated pageset has been
 * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
 * and isolate_huge_page().
 */
void putback_movable_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		if (unlikely(PageHuge(page))) {
			putback_active_hugepage(page);
			continue;
		}
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		if (unlikely(isolated_balloon_page(page)))
			balloon_page_putback(page);
		else
			putback_lru_page(page);
	}
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
		 unsigned long addr, void *old)
{
	struct mm_struct *mm = vma->vm_mm;
	swp_entry_t entry;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;

	if (unlikely(PageHuge(new))) {
		ptep = huge_pte_offset(mm, addr);
		if (!ptep)
			goto out;
		ptl = huge_pte_lockptr(hstate_vma(vma), mm, ptep);
	} else {
		pmd = mm_find_pmd(mm, addr);
		if (!pmd)
			goto out;

		ptep = pte_offset_map(pmd, addr);

		/*
		 * Peek to check is_swap_pte() before taking ptlock?  No, we
		 * can race mremap's move_ptes(), which skips anon_vma lock.
		 */

		ptl = pte_lockptr(mm, pmd);
	}

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto unlock;

	entry = pte_to_swp_entry(pte);

	if (!is_migration_entry(entry) ||
	    migration_entry_to_page(entry) != old)
		goto unlock;

	get_page(new);
	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
	if (pte_swp_soft_dirty(*ptep))
		pte = pte_mksoft_dirty(pte);

	/* Recheck VMA as permissions can change since migration started */
	if (is_write_migration_entry(entry))
		pte = maybe_mkwrite(pte, vma);

#ifdef CONFIG_HUGETLB_PAGE
	if (PageHuge(new)) {
		pte = pte_mkhuge(pte);
		pte = arch_make_huge_pte(pte, vma, new, 0);
	}
#endif
	flush_dcache_page(new);
	set_pte_at(mm, addr, ptep, pte);

	if (PageHuge(new)) {
		if (PageAnon(new))
			hugepage_add_anon_rmap(new, vma, addr);
		else
			page_dup_rmap(new, true);
	} else if (PageAnon(new))
		page_add_anon_rmap(new, vma, addr, false);
	else
		page_add_file_rmap(new);

	if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
		mlock_vma_page(new);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, addr, ptep);
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return SWAP_AGAIN;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
void remove_migration_ptes(struct page *old, struct page *new, bool locked)
{
	struct rmap_walk_control rwc = {
		.rmap_one = remove_migration_pte,
		.arg = old,
	};

	if (locked)
		rmap_walk_locked(new, &rwc);
	else
		rmap_walk(new, &rwc);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
				spinlock_t *ptl)
{
	pte_t pte;
	swp_entry_t entry;
	struct page *page;

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	page = migration_entry_to_page(entry);

	/*
	 * Once radix-tree replacement of page migration started, page_count
	 * *must* be zero. And, we don't want to call wait_on_page_locked()
	 * against a page without get_page().
	 * So, we use get_page_unless_zero() here. Even if it fails, the page
	 * fault will occur again.
	 */
	if (!get_page_unless_zero(page))
		goto out;
	pte_unmap_unlock(ptep, ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}

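/*
 * Wait for migration of the page mapped by the pte under @pmd at @address
 * to finish: look up the pte and defer to __migration_entry_wait().
 */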
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	spinlock_t *ptl = pte_lockptr(mm, pmd);
	pte_t *ptep = pte_offset_map(pmd, address);
	__migration_entry_wait(mm, ptep, ptl);
}

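/*
 * As migration_entry_wait(), but for a hugetlb entry, which must be waited
 * on under the hugetlb variant of the page table lock.
 */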
void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
	__migration_entry_wait(mm, pte, ptl);
}

#ifdef CONFIG_BLOCK
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	struct buffer_head *bh = head;

	/* Simple case, sync compaction */
	if (mode != MIGRATE_ASYNC) {
		do {
			get_bh(bh);
			lock_buffer(bh);
			bh = bh->b_this_page;

		} while (bh != head);

		return true;
	}

	/* async case, we cannot block on lock_buffer so use trylock_buffer */
	do {
		get_bh(bh);
		if (!trylock_buffer(bh)) {
			/*
			 * We failed to lock the buffer and cannot stall in
			 * async migration. Release the taken locks
			 */
			struct buffer_head *failed_bh = bh;
			put_bh(failed_bh);
			bh = head;
			while (bh != failed_bh) {
				unlock_buffer(bh);
				put_bh(bh);
				bh = bh->b_this_page;
			}
			return false;
		}

		bh = bh->b_this_page;
	} while (bh != head);
	return true;
}
#else
static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	return true;
}
#endif /* CONFIG_BLOCK */

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page,
		struct buffer_head *head, enum migrate_mode mode,
		int extra_count)
{
	struct zone *oldzone, *newzone;
	int dirty;
	int expected_count = 1 + extra_count;
	void **pslot;

	if (!mapping) {
		/* Anonymous page without mapping */
		if (page_count(page) != expected_count)
			return -EAGAIN;

		/* No turning back from here */
		newpage->index = page->index;
		newpage->mapping = page->mapping;
		if (PageSwapBacked(page))
			__SetPageSwapBacked(newpage);

		return MIGRATEPAGE_SUCCESS;
	}

	oldzone = page_zone(page);
	newzone = page_zone(newpage);

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count += 1 + page_has_private(page);
	if (page_count(page) != expected_count ||
		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_ref_freeze(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * In the async migration case of moving a page with buffers, lock the
	 * buffers using trylock before the mapping is moved. If the mapping
	 * were moved first and we then failed to lock the buffers, we could
	 * not move the mapping back due to an elevated page count and would
	 * have to block waiting on other references to be dropped.
	 */
	if (mode == MIGRATE_ASYNC && head &&
			!buffer_migrate_lock_buffers(head, mode)) {
		page_ref_unfreeze(page, expected_count);
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page:
	 * no turning back from here.
	 */
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	if (PageSwapBacked(page))
		__SetPageSwapBacked(newpage);

	get_page(newpage);	/* add cache reference */
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}

	/* Move dirty while page refs frozen and newpage not yet exposed */
	dirty = PageDirty(page);
	if (dirty) {
		ClearPageDirty(page);
		SetPageDirty(newpage);
	}

	radix_tree_replace_slot(pslot, newpage);

	/*
	 * Drop cache reference from old page by unfreezing
	 * to one less reference.
	 * We know this isn't the last reference.
	 */
	page_ref_unfreeze(page, expected_count - 1);

	spin_unlock(&mapping->tree_lock);
	/* Leave irq disabled to prevent preemption while updating stats */

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_PAGES if they
	 * are mapped to swap space.
	 */
	if (newzone != oldzone) {
		__dec_zone_state(oldzone, NR_FILE_PAGES);
		__inc_zone_state(newzone, NR_FILE_PAGES);
		if (PageSwapBacked(page) && !PageSwapCache(page)) {
			__dec_zone_state(oldzone, NR_SHMEM);
			__inc_zone_state(newzone, NR_SHMEM);
		}
		if (dirty && mapping_cap_account_dirty(mapping)) {
			__dec_zone_state(oldzone, NR_FILE_DIRTY);
			__inc_zone_state(newzone, NR_FILE_DIRTY);
		}
	}
	local_irq_enable();

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page_move_mapping);

/*
 * The expected number of remaining references is the same as that
 * of migrate_page_move_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct page *newpage, struct page *page)
{
	int expected_count;
	void **pslot;

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count ||
		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_ref_freeze(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	newpage->index = page->index;
	newpage->mapping = page->mapping;

	get_page(newpage);

	radix_tree_replace_slot(pslot, newpage);

	page_ref_unfreeze(page, expected_count - 1);

	spin_unlock_irq(&mapping->tree_lock);

	return MIGRATEPAGE_SUCCESS;
}

/*
 * Gigantic pages are so large that we do not guarantee that page++ pointer
 * arithmetic will work across the entire page.  We need something more
 * specialized.
 */
static void __copy_gigantic_page(struct page *dst, struct page *src,
				int nr_pages)
{
	int i;
	struct page *dst_base = dst;
	struct page *src_base = src;

	for (i = 0; i < nr_pages; ) {
		cond_resched();
		copy_highpage(dst, src);

		i++;
		dst = mem_map_next(dst, dst_base, i);
		src = mem_map_next(src, src_base, i);
	}
}

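/*
 * Copy a hugetlbfs or transparent huge page subpage by subpage, falling
 * back to __copy_gigantic_page() when the source spans more than
 * MAX_ORDER_NR_PAGES.
 */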
static void copy_huge_page(struct page *dst, struct page *src)
{
	int i;
	int nr_pages;

	if (PageHuge(src)) {
		/* hugetlbfs page */
		struct hstate *h = page_hstate(src);
		nr_pages = pages_per_huge_page(h);

		if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
			__copy_gigantic_page(dst, src, nr_pages);
			return;
		}
	} else {
		/* thp page */
		BUG_ON(!PageTransHuge(src));
		nr_pages = hpage_nr_pages(src);
	}

	for (i = 0; i < nr_pages; i++) {
		cond_resched();
		copy_highpage(dst + i, src + i);
	}
}

/*
 * Copy the page to its new location
 */
void migrate_page_copy(struct page *newpage, struct page *page)
{
	int cpupid;

	if (PageHuge(page) || PageTransHuge(page))
		copy_huge_page(newpage, page);
	else
		copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (TestClearPageActive(page)) {
		VM_BUG_ON_PAGE(PageUnevictable(page), page);
		SetPageActive(newpage);
	} else if (TestClearPageUnevictable(page))
		SetPageUnevictable(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	/* Move dirty on pages not done by migrate_page_move_mapping() */
	if (PageDirty(page))
		SetPageDirty(newpage);

	if (page_is_young(page))
		set_page_young(newpage);
	if (page_is_idle(page))
		set_page_idle(newpage);

	/*
	 * Copy NUMA information to the new page, to prevent over-eager
	 * future migrations of this same page.
	 */
	cpupid = page_cpupid_xchg_last(page, -1);
	page_cpupid_xchg_last(newpage, cpupid);

	ksm_migrate_page(newpage, page);
	/*
	 * Please do not reorder this without considering how mm/ksm.c's
	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
	 */
	if (PageSwapCache(page))
		ClearPageSwapCache(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);

	copy_page_owner(page, newpage);

	mem_cgroup_migrate(page, newpage);
}
EXPORT_SYMBOL(migrate_page_copy);

/************************************************************
 *                    Migration functions
 ***********************************************************/

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate/PagePrivate2.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	migrate_page_copy(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page);

#ifdef CONFIG_BLOCK
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	struct buffer_head *bh, *head;
	int rc;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page, mode);

	head = page_buffers(page);

	rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	/*
	 * In the async case, migrate_page_move_mapping locked the buffers
	 * with an IRQ-safe spinlock held. In the sync case, the buffers
	 * need to be locked now
	 */
	if (mode != MIGRATE_ASYNC)
		BUG_ON(!buffer_migrate_lock_buffers(head, mode));

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	migrate_page_copy(newpage, page);

	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(buffer_migrate_page);
#endif

/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!clear_page_dirty_for_io(page))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty page may imply that the underlying filesystem has
	 * the page on some queue. So the page must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * page state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(page, page, false);

	rc = mapping->a_ops->writepage(page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		lock_page(page);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page, enum migrate_mode mode)
{
	if (PageDirty(page)) {
		/* Only writeback pages in full synchronous migration */
		if (mode != MIGRATE_SYNC)
			return -EBUSY;
		return writeout(mapping, page);
	}

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;

	return migrate_page(mapping, newpage, page, mode);
}

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  MIGRATEPAGE_SUCCESS - success
 */
static int move_to_new_page(struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	struct address_space *mapping;
	int rc;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	mapping = page_mapping(page);
	if (!mapping)
		rc = migrate_page(mapping, newpage, page, mode);
	else if (mapping->a_ops->migratepage)
		/*
		 * Most pages have a mapping and most filesystems provide a
		 * migratepage callback. Anonymous pages are part of swap
		 * space which also has its own migratepage callback. This
		 * is the most common path for page migration.
		 */
		rc = mapping->a_ops->migratepage(mapping, newpage, page, mode);
	else
		rc = fallback_migrate_page(mapping, newpage, page, mode);

	/*
	 * When successful, old pagecache page->mapping must be cleared before
	 * page is freed; but stats require that PageAnon be left as PageAnon.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		if (!PageAnon(page))
			page->mapping = NULL;
	}
	return rc;
}

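/*
 * Lock @page and @newpage, replace the old page's ptes with migration
 * entries, move the contents and mapping over via move_to_new_page(), and
 * finally remove the migration entries, pointing them at @newpage on
 * success or back at @page on failure. Returns MIGRATEPAGE_SUCCESS or an
 * error code such as -EAGAIN.
 */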
static int __unmap_and_move(struct page *page, struct page *newpage,
				int force, enum migrate_mode mode)
{
	int rc = -EAGAIN;
	int page_was_mapped = 0;
	struct anon_vma *anon_vma = NULL;

	if (!trylock_page(page)) {
		if (!force || mode == MIGRATE_ASYNC)
			goto out;

		/*
		 * It's not safe for direct compaction to call lock_page.
		 * For example, during page readahead pages are added locked
		 * to the LRU. Later, when the IO completes the pages are
		 * marked uptodate and unlocked. However, the queueing
		 * could be merging multiple pages for one bio (e.g.
		 * mpage_readpages). If an allocation happens for the
		 * second or third page, the process can end up locking
		 * the same page twice and deadlocking. Rather than
		 * trying to be clever about what pages can be locked,
		 * avoid the use of lock_page for direct compaction
		 * altogether.
		 */
		if (current->flags & PF_MEMALLOC)
			goto out;

		lock_page(page);
	}

	if (PageWriteback(page)) {
		/*
		 * Only in the case of a full synchronous migration is it
		 * necessary to wait for PageWriteback. In the async case,
		 * the retry loop is too short and in the sync-light case,
		 * the overhead of stalling is too much.
		 */
		if (mode != MIGRATE_SYNC) {
			rc = -EBUSY;
			goto out_unlock;
		}
		if (!force)
			goto out_unlock;
		wait_on_page_writeback(page);
	}

	/*
	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
	 * we cannot notice that anon_vma is freed while we migrate a page.
	 * This get_anon_vma() delays freeing the anon_vma pointer until the
	 * end of migration. File cache pages are no problem because of
	 * page_lock(): file caches may use write_page() or lock_page() in
	 * migration, so only anon pages need this care here.
	 *
	 * Only page_get_anon_vma() understands the subtleties of
	 * getting a hold on an anon_vma from outside one of its mms.
	 * But if we cannot get anon_vma, then we won't need it anyway,
	 * because that implies that the anon page is no longer mapped
	 * (and cannot be remapped so long as we hold the page lock).
	 */
	if (PageAnon(page) && !PageKsm(page))
		anon_vma = page_get_anon_vma(page);

	/*
	 * Block others from accessing the new page when we get around to
	 * establishing additional references. We are usually the only one
	 * holding a reference to newpage at this point. We used to have a BUG
	 * here if trylock_page(newpage) fails, but would like to allow for
	 * cases where there might be a race with the previous use of newpage.
	 * This is much like races on refcount of oldpage: just don't BUG().
	 */
	if (unlikely(!trylock_page(newpage)))
		goto out_unlock;

	if (unlikely(isolated_balloon_page(page))) {
		/*
		 * A ballooned page does not need any special attention from
		 * physical to virtual reverse mapping procedures.
		 * Skip any attempt to unmap PTEs or to remap swap cache,
		 * in order to avoid burning cycles at rmap level, and perform
		 * the page migration right away (protected by page lock).
		 */
		rc = balloon_page_migrate(newpage, page, mode);
		goto out_unlock_both;
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read into, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a page->mapping==NULL page will
	 * trigger a BUG.  So handle it here.
	 * 2. An orphaned page (see truncate_complete_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining.  Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated.  So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!page->mapping) {
		VM_BUG_ON_PAGE(PageAnon(page), page);
		if (page_has_private(page)) {
			try_to_free_buffers(page);
			goto out_unlock_both;
		}
	} else if (page_mapped(page)) {
		/* Establish migration ptes */
		VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
				page);
		try_to_unmap(page,
			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
		page_was_mapped = 1;
	}

	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page, mode);

	if (page_was_mapped)
		remove_migration_ptes(page,
			rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);

out_unlock_both:
	unlock_page(newpage);
out_unlock:
	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		put_anon_vma(anon_vma);
	unlock_page(page);
out:
	/*
	 * If migration is successful, decrease refcount of the newpage
	 * which will not free the page because new page owner increased
	 * refcounter. As well, if it is LRU page, add the page to LRU
	 * list in here.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		if (unlikely(__is_movable_balloon_page(newpage)))
			put_page(newpage);
		else
			putback_lru_page(newpage);
	}

	return rc;
}

/*
 * gcc 4.7 and 4.8 on arm get an ICE when inlining unmap_and_move().  Work
 * around it.
 */
#if (GCC_VERSION >= 40700 && GCC_VERSION < 40900) && defined(CONFIG_ARM)
#define ICE_noinline noinline
#else
#define ICE_noinline
#endif

/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static ICE_noinline int unmap_and_move(new_page_t get_new_page,
				   free_page_t put_new_page,
				   unsigned long private, struct page *page,
				   int force, enum migrate_mode mode,
				   enum migrate_reason reason)
{
	int rc = MIGRATEPAGE_SUCCESS;
	int *result = NULL;
	struct page *newpage;

	newpage = get_new_page(page, private, &result);
	if (!newpage)
		return -ENOMEM;

	if (page_count(page) == 1) {
		/* page was freed from under us. So we are done. */
		ClearPageActive(page);
		ClearPageUnevictable(page);
		if (put_new_page)
			put_new_page(newpage, private);
		else
			put_page(newpage);
		goto out;
	}

	if (unlikely(PageTransHuge(page))) {
		lock_page(page);
		rc = split_huge_page(page);
		unlock_page(page);
		if (rc)
			goto out;
	}

	rc = __unmap_and_move(page, newpage, force, mode);
	if (rc == MIGRATEPAGE_SUCCESS)
		set_page_owner_migrate_reason(newpage, reason);

out:
	if (rc != -EAGAIN) {
		/*
		 * A page that has been migrated has all references
		 * removed and will be freed. A page that has not been
		 * migrated will have kept its references and be
		 * restored.
		 */
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
	}

	/*
	 * If migration is successful, releases reference grabbed during
	 * isolation. Otherwise, restore the page to the right list unless
	 * we want to retry.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		put_page(page);
		if (reason == MR_MEMORY_FAILURE) {
			/*
			 * Set PG_HWPoison on just freed page
			 * intentionally. Although it's rather weird,
			 * it's how HWPoison flag works at the moment.
			 */
			if (!test_set_page_hwpoison(page))
				num_poisoned_pages_inc();
		}
	} else {
		if (rc != -EAGAIN)
			putback_lru_page(page);
		if (put_new_page)
			put_new_page(newpage, private);
		else
			put_page(newpage);
	}

	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(newpage);
	}
	return rc;
}

/*
 * Counterpart of unmap_and_move() for hugepage migration.
 *
 * This function doesn't wait for the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepage.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and the writeback status of all subpages is counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference of the head page is 512 and a bit more.)
 * This means that when we try to migrate a hugepage whose subpages are
 * doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on the page under migration,
 * because then pte is replaced with migration swap entry and direct I/O code
 * will wait in the page fault for migration to complete.
 */
static int unmap_and_move_huge_page(new_page_t get_new_page,
				free_page_t put_new_page, unsigned long private,
				struct page *hpage, int force,
				enum migrate_mode mode, int reason)
{
	int rc = -EAGAIN;
	int *result = NULL;
	int page_was_mapped = 0;
	struct page *new_hpage;
	struct anon_vma *anon_vma = NULL;

	/*
	 * Movability of hugepages depends on architectures and hugepage size.
	 * This check is necessary because some callers of hugepage migration
	 * like soft offline and memory hotremove don't walk through page
	 * tables or check whether the hugepage is pmd-based or not before
	 * kicking migration.
	 */
	if (!hugepage_migration_supported(page_hstate(hpage))) {
		putback_active_hugepage(hpage);
		return -ENOSYS;
	}

	new_hpage = get_new_page(hpage, private, &result);
	if (!new_hpage)
		return -ENOMEM;

	if (!trylock_page(hpage)) {
		if (!force || mode != MIGRATE_SYNC)
			goto out;
		lock_page(hpage);
	}

	if (PageAnon(hpage))
		anon_vma = page_get_anon_vma(hpage);

	if (unlikely(!trylock_page(new_hpage)))
		goto put_anon;

	if (page_mapped(hpage)) {
		try_to_unmap(hpage,
			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
		page_was_mapped = 1;
	}

	if (!page_mapped(hpage))
		rc = move_to_new_page(new_hpage, hpage, mode);

	if (page_was_mapped)
		remove_migration_ptes(hpage,
			rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);

	unlock_page(new_hpage);

put_anon:
	if (anon_vma)
		put_anon_vma(anon_vma);

	if (rc == MIGRATEPAGE_SUCCESS) {
		hugetlb_cgroup_migrate(hpage, new_hpage);
		put_new_page = NULL;
		set_page_owner_migrate_reason(new_hpage, reason);
	}

	unlock_page(hpage);
out:
	if (rc != -EAGAIN)
		putback_active_hugepage(hpage);

	/*
	 * If migration was not successful and there's a freeing callback, use
	 * it.  Otherwise, put_page() will drop the reference grabbed during
	 * isolation.
	 */
	if (put_new_page)
		put_new_page(new_hpage, private);
	else
		putback_active_hugepage(new_hpage);

	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(new_hpage);
	}
	return rc;
}

1138/*
Srivatsa S. Bhatc73e5c92013-04-29 15:08:16 -07001139 * migrate_pages - migrate the pages specified in a list, to the free pages
1140 * supplied as the target for the page migration
Christoph Lameterb20a3502006-03-22 00:09:12 -08001141 *
Srivatsa S. Bhatc73e5c92013-04-29 15:08:16 -07001142 * @from: The list of pages to be migrated.
1143 * @get_new_page: The function used to allocate free pages to be used
1144 * as the target of the page migration.
David Rientjes68711a72014-06-04 16:08:25 -07001145 * @put_new_page: The function used to free target pages if migration
1146 * fails, or NULL if no special handling is necessary.
Srivatsa S. Bhatc73e5c92013-04-29 15:08:16 -07001147 * @private: Private data to be passed on to get_new_page()
1148 * @mode: The migration mode that specifies the constraints for
1149 * page migration, if any.
1150 * @reason: The reason for page migration.
Christoph Lameterb20a3502006-03-22 00:09:12 -08001151 *
Srivatsa S. Bhatc73e5c92013-04-29 15:08:16 -07001152 * The function returns after 10 attempts or if no pages are movable any more
1153 * because the list has become empty or no retryable pages exist any more.
Hugh Dickins14e0f9b2015-11-05 18:49:43 -08001154 * The caller should call putback_movable_pages() to return pages to the LRU
Minchan Kim28bd6572011-01-25 15:07:26 -08001155 * or free list only if ret != 0.
Christoph Lameterb20a3502006-03-22 00:09:12 -08001156 *
Srivatsa S. Bhatc73e5c92013-04-29 15:08:16 -07001157 * Returns the number of pages that were not migrated, or an error code.
Christoph Lameterb20a3502006-03-22 00:09:12 -08001158 */
Hugh Dickins9c620e22013-02-22 16:35:14 -08001159int migrate_pages(struct list_head *from, new_page_t get_new_page,
David Rientjes68711a72014-06-04 16:08:25 -07001160 free_page_t put_new_page, unsigned long private,
1161 enum migrate_mode mode, int reason)
Christoph Lameterb20a3502006-03-22 00:09:12 -08001162{
Christoph Lametere24f0b82006-06-23 02:03:51 -07001163 int retry = 1;
Christoph Lameterb20a3502006-03-22 00:09:12 -08001164 int nr_failed = 0;
Mel Gorman5647bc22012-10-19 10:46:20 +01001165 int nr_succeeded = 0;
Christoph Lameterb20a3502006-03-22 00:09:12 -08001166 int pass = 0;
1167 struct page *page;
1168 struct page *page2;
1169 int swapwrite = current->flags & PF_SWAPWRITE;
1170 int rc;
1171
1172 if (!swapwrite)
1173 current->flags |= PF_SWAPWRITE;
1174
Christoph Lametere24f0b82006-06-23 02:03:51 -07001175 for(pass = 0; pass < 10 && retry; pass++) {
1176 retry = 0;
Christoph Lameterb20a3502006-03-22 00:09:12 -08001177
Christoph Lametere24f0b82006-06-23 02:03:51 -07001178 list_for_each_entry_safe(page, page2, from, lru) {
Christoph Lametere24f0b82006-06-23 02:03:51 -07001179 cond_resched();
Christoph Lameterb20a3502006-03-22 00:09:12 -08001180
Naoya Horiguchi31caf662013-09-11 14:21:59 -07001181 if (PageHuge(page))
1182 rc = unmap_and_move_huge_page(get_new_page,
David Rientjes68711a72014-06-04 16:08:25 -07001183 put_new_page, private, page,
Vlastimil Babka7cd12b42016-03-15 14:56:18 -07001184 pass > 2, mode, reason);
Naoya Horiguchi31caf662013-09-11 14:21:59 -07001185 else
David Rientjes68711a72014-06-04 16:08:25 -07001186 rc = unmap_and_move(get_new_page, put_new_page,
Naoya Horiguchiadd05ce2015-06-24 16:56:50 -07001187 private, page, pass > 2, mode,
1188 reason);
Christoph Lameterb20a3502006-03-22 00:09:12 -08001189
Christoph Lametere24f0b82006-06-23 02:03:51 -07001190 switch(rc) {
Christoph Lameter95a402c2006-06-23 02:03:53 -07001191 case -ENOMEM:
David Rientjesdfef2ef2016-05-20 16:59:05 -07001192 nr_failed++;
Christoph Lameter95a402c2006-06-23 02:03:53 -07001193 goto out;
Christoph Lametere24f0b82006-06-23 02:03:51 -07001194 case -EAGAIN:
Christoph Lameter2d1db3b2006-06-23 02:03:33 -07001195 retry++;
Christoph Lametere24f0b82006-06-23 02:03:51 -07001196 break;
Rafael Aquini78bd5202012-12-11 16:02:31 -08001197 case MIGRATEPAGE_SUCCESS:
Mel Gorman5647bc22012-10-19 10:46:20 +01001198 nr_succeeded++;
Christoph Lametere24f0b82006-06-23 02:03:51 -07001199 break;
1200 default:
Naoya Horiguchi354a3362014-01-21 15:51:14 -08001201 /*
1202 * Permanent failure (-EBUSY, -ENOSYS, etc.):
1203 * unlike -EAGAIN case, the failed page is
1204 * removed from migration page list and not
1205 * retried in the next outer loop.
1206 */
Christoph Lameter2d1db3b2006-06-23 02:03:33 -07001207 nr_failed++;
Christoph Lametere24f0b82006-06-23 02:03:51 -07001208 break;
Christoph Lameter2d1db3b2006-06-23 02:03:33 -07001209 }
Christoph Lameterb20a3502006-03-22 00:09:12 -08001210 }
1211 }
Vlastimil Babkaf2f81fb2015-11-05 18:47:03 -08001212 nr_failed += retry;
1213 rc = nr_failed;
Christoph Lameter95a402c2006-06-23 02:03:53 -07001214out:
Mel Gorman5647bc22012-10-19 10:46:20 +01001215 if (nr_succeeded)
1216 count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
1217 if (nr_failed)
1218 count_vm_events(PGMIGRATE_FAIL, nr_failed);
Mel Gorman7b2a2d42012-10-19 14:07:31 +01001219 trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason);
1220
Christoph Lameterb20a3502006-03-22 00:09:12 -08001221 if (!swapwrite)
1222 current->flags &= ~PF_SWAPWRITE;
1223
Rafael Aquini78bd5202012-12-11 16:02:31 -08001224 return rc;
Christoph Lameterb20a3502006-03-22 00:09:12 -08001225}
1226
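/*
 * Illustrative sketch only, not part of the original file: a minimal
 * caller pattern for migrate_pages() as described in the comment above.
 * The helper names alloc_target_page() and migrate_list_to_node() are
 * hypothetical placeholders; migrate_pages(), putback_movable_pages()
 * and __alloc_pages_node() are real symbols used elsewhere in this file.
 */
#if 0	/* sketch, not compiled */
static struct page *alloc_target_page(struct page *page, unsigned long private,
					int **result)
{
	/* Allocate the destination page on the node passed via @private. */
	return __alloc_pages_node((int)private,
				GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0);
}

static int migrate_list_to_node(struct list_head *pagelist, int nid)
{
	int rc;

	rc = migrate_pages(pagelist, alloc_target_page, NULL,
			(unsigned long)nid, MIGRATE_SYNC, MR_SYSCALL);
	if (rc)
		/* Non-zero return: put the remaining pages back. */
		putback_movable_pages(pagelist);
	return rc;
}
#endif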
Christoph Lameter742755a2006-06-23 02:03:55 -07001227#ifdef CONFIG_NUMA
1228/*
1229 * Move a list of individual pages
1230 */
1231struct page_to_node {
1232 unsigned long addr;
1233 struct page *page;
1234 int node;
1235 int status;
1236};
1237
1238static struct page *new_page_node(struct page *p, unsigned long private,
1239 int **result)
1240{
1241 struct page_to_node *pm = (struct page_to_node *)private;
1242
1243 while (pm->node != MAX_NUMNODES && pm->page != p)
1244 pm++;
1245
1246 if (pm->node == MAX_NUMNODES)
1247 return NULL;
1248
1249 *result = &pm->status;
1250
Naoya Horiguchie632a932013-09-11 14:22:04 -07001251 if (PageHuge(p))
1252 return alloc_huge_page_node(page_hstate(compound_head(p)),
1253 pm->node);
1254 else
Vlastimil Babka96db8002015-09-08 15:03:50 -07001255 return __alloc_pages_node(pm->node,
Johannes Weinere97ca8e52014-03-10 15:49:43 -07001256 GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0);
Christoph Lameter742755a2006-06-23 02:03:55 -07001257}
1258
1259/*
1260 * Move a set of pages as indicated in the pm array. The addr
1261 * field must be set to the virtual address of the page to be moved
1262 * and the node number must contain a valid target node.
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001263 * The pm array ends with node = MAX_NUMNODES.
Christoph Lameter742755a2006-06-23 02:03:55 -07001264 */
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001265static int do_move_page_to_node_array(struct mm_struct *mm,
1266 struct page_to_node *pm,
1267 int migrate_all)
Christoph Lameter742755a2006-06-23 02:03:55 -07001268{
1269 int err;
1270 struct page_to_node *pp;
1271 LIST_HEAD(pagelist);
1272
1273 down_read(&mm->mmap_sem);
1274
1275 /*
1276 * Build a list of pages to migrate
1277 */
Christoph Lameter742755a2006-06-23 02:03:55 -07001278 for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
1279 struct vm_area_struct *vma;
1280 struct page *page;
1281
Christoph Lameter742755a2006-06-23 02:03:55 -07001282 err = -EFAULT;
1283 vma = find_vma(mm, pp->addr);
Gleb Natapov70384dc2010-10-26 14:22:07 -07001284 if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma))
Christoph Lameter742755a2006-06-23 02:03:55 -07001285 goto set_status;
1286
Kirill A. Shutemovd8998442015-09-04 15:47:53 -07001287 /* FOLL_DUMP to ignore special (like zero) pages */
1288 page = follow_page(vma, pp->addr,
1289 FOLL_GET | FOLL_SPLIT | FOLL_DUMP);
Linus Torvalds89f5b7d2008-06-20 11:18:25 -07001290
1291 err = PTR_ERR(page);
1292 if (IS_ERR(page))
1293 goto set_status;
1294
Christoph Lameter742755a2006-06-23 02:03:55 -07001295 err = -ENOENT;
1296 if (!page)
1297 goto set_status;
1298
Christoph Lameter742755a2006-06-23 02:03:55 -07001299 pp->page = page;
1300 err = page_to_nid(page);
1301
1302 if (err == pp->node)
1303 /*
1304 * Node already in the right place
1305 */
1306 goto put_and_set;
1307
1308 err = -EACCES;
1309 if (page_mapcount(page) > 1 &&
1310 !migrate_all)
1311 goto put_and_set;
1312
Naoya Horiguchie632a932013-09-11 14:22:04 -07001313 if (PageHuge(page)) {
Naoya Horiguchie66f17f2015-02-11 15:25:22 -08001314 if (PageHead(page))
1315 isolate_huge_page(page, &pagelist);
Naoya Horiguchie632a932013-09-11 14:22:04 -07001316 goto put_and_set;
1317 }
1318
Nick Piggin62695a82008-10-18 20:26:09 -07001319 err = isolate_lru_page(page);
KOSAKI Motohiro6d9c2852009-12-14 17:58:11 -08001320 if (!err) {
Nick Piggin62695a82008-10-18 20:26:09 -07001321 list_add_tail(&page->lru, &pagelist);
KOSAKI Motohiro6d9c2852009-12-14 17:58:11 -08001322 inc_zone_page_state(page, NR_ISOLATED_ANON +
1323 page_is_file_cache(page));
1324 }
Christoph Lameter742755a2006-06-23 02:03:55 -07001325put_and_set:
1326 /*
1327 * Either remove the duplicate refcount from
1328 * isolate_lru_page() or drop the page ref if it was
1329 * not isolated.
1330 */
1331 put_page(page);
1332set_status:
1333 pp->status = err;
1334 }
1335
Brice Gogline78bbfa2008-10-18 20:27:15 -07001336 err = 0;
Minchan Kimcf608ac2010-10-26 14:21:29 -07001337 if (!list_empty(&pagelist)) {
David Rientjes68711a72014-06-04 16:08:25 -07001338 err = migrate_pages(&pagelist, new_page_node, NULL,
Hugh Dickins9c620e22013-02-22 16:35:14 -08001339 (unsigned long)pm, MIGRATE_SYNC, MR_SYSCALL);
Minchan Kimcf608ac2010-10-26 14:21:29 -07001340 if (err)
Naoya Horiguchie632a932013-09-11 14:22:04 -07001341 putback_movable_pages(&pagelist);
Minchan Kimcf608ac2010-10-26 14:21:29 -07001342 }
Christoph Lameter742755a2006-06-23 02:03:55 -07001343
1344 up_read(&mm->mmap_sem);
1345 return err;
1346}
1347
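/*
 * Illustrative sketch only, not part of the original file: how a caller
 * lays out the page_to_node array consumed by do_move_page_to_node_array(),
 * with the node field of the final entry set to MAX_NUMNODES as the
 * end-of-array marker.  The addresses user_addr0/user_addr1 and the node
 * numbers are hypothetical.
 */
#if 0	/* sketch, not compiled */
	struct page_to_node pm[3] = {
		{ .addr = user_addr0, .node = 1 },	/* move this page to node 1 */
		{ .addr = user_addr1, .node = 0 },	/* move this page to node 0 */
		{ .node = MAX_NUMNODES },		/* end marker, stops the scan */
	};

	err = do_move_page_to_node_array(mm, pm, flags & MPOL_MF_MOVE_ALL);
#endif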
1348/*
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001349 * Migrate an array of page addresses onto an array of nodes and fill
1350 * in the corresponding array of status values.
1351 */
Christoph Lameter3268c632012-03-21 16:34:06 -07001352static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001353 unsigned long nr_pages,
1354 const void __user * __user *pages,
1355 const int __user *nodes,
1356 int __user *status, int flags)
1357{
Brice Goglin3140a222009-01-06 14:38:57 -08001358 struct page_to_node *pm;
Brice Goglin3140a222009-01-06 14:38:57 -08001359 unsigned long chunk_nr_pages;
1360 unsigned long chunk_start;
1361 int err;
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001362
Brice Goglin3140a222009-01-06 14:38:57 -08001363 err = -ENOMEM;
1364 pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
1365 if (!pm)
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001366 goto out;
Brice Goglin35282a22009-06-16 15:32:43 -07001367
1368 migrate_prep();
1369
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001370 /*
Brice Goglin3140a222009-01-06 14:38:57 -08001371 * Store a chunk of the page_to_node array in a single page,
1372 * but keep the last entry as an end-of-chunk marker
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001373 */
Brice Goglin3140a222009-01-06 14:38:57 -08001374 chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1;
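	/*
	 * Illustrative arithmetic, not in the original source: on a typical
	 * 64-bit build with 4KB pages and a 24-byte struct page_to_node,
	 * PAGE_SIZE / sizeof(struct page_to_node) is 170, so each chunk
	 * holds 169 user entries plus the final MAX_NUMNODES end marker
	 * written below.
	 */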
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001375
Brice Goglin3140a222009-01-06 14:38:57 -08001376 for (chunk_start = 0;
1377 chunk_start < nr_pages;
1378 chunk_start += chunk_nr_pages) {
1379 int j;
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001380
Brice Goglin3140a222009-01-06 14:38:57 -08001381 if (chunk_start + chunk_nr_pages > nr_pages)
1382 chunk_nr_pages = nr_pages - chunk_start;
1383
1384 /* fill the chunk pm with addrs and nodes from user-space */
1385 for (j = 0; j < chunk_nr_pages; j++) {
1386 const void __user *p;
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001387 int node;
1388
Brice Goglin3140a222009-01-06 14:38:57 -08001389 err = -EFAULT;
1390 if (get_user(p, pages + j + chunk_start))
1391 goto out_pm;
1392 pm[j].addr = (unsigned long) p;
1393
1394 if (get_user(node, nodes + j + chunk_start))
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001395 goto out_pm;
1396
1397 err = -ENODEV;
Linus Torvalds6f5a55f2010-02-05 16:16:50 -08001398 if (node < 0 || node >= MAX_NUMNODES)
1399 goto out_pm;
1400
Lai Jiangshan389162c2012-12-12 13:51:30 -08001401 if (!node_state(node, N_MEMORY))
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001402 goto out_pm;
1403
1404 err = -EACCES;
1405 if (!node_isset(node, task_nodes))
1406 goto out_pm;
1407
Brice Goglin3140a222009-01-06 14:38:57 -08001408 pm[j].node = node;
1409 }
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001410
Brice Goglin3140a222009-01-06 14:38:57 -08001411 /* End marker for this chunk */
1412 pm[chunk_nr_pages].node = MAX_NUMNODES;
1413
1414 /* Migrate this chunk */
1415 err = do_move_page_to_node_array(mm, pm,
1416 flags & MPOL_MF_MOVE_ALL);
1417 if (err < 0)
1418 goto out_pm;
1419
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001420 /* Return status information */
Brice Goglin3140a222009-01-06 14:38:57 -08001421 for (j = 0; j < chunk_nr_pages; j++)
1422 if (put_user(pm[j].status, status + j + chunk_start)) {
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001423 err = -EFAULT;
Brice Goglin3140a222009-01-06 14:38:57 -08001424 goto out_pm;
1425 }
1426 }
1427 err = 0;
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001428
1429out_pm:
Brice Goglin3140a222009-01-06 14:38:57 -08001430 free_page((unsigned long)pm);
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001431out:
1432 return err;
1433}
1434
1435/*
Brice Goglin2f007e72008-10-18 20:27:16 -07001436 * Determine the nodes of an array of pages and store them in an array of status values.
Christoph Lameter742755a2006-06-23 02:03:55 -07001437 */
Brice Goglin80bba122008-12-09 13:14:23 -08001438static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
1439 const void __user **pages, int *status)
Christoph Lameter742755a2006-06-23 02:03:55 -07001440{
Brice Goglin2f007e72008-10-18 20:27:16 -07001441 unsigned long i;
Brice Goglin2f007e72008-10-18 20:27:16 -07001442
Christoph Lameter742755a2006-06-23 02:03:55 -07001443 down_read(&mm->mmap_sem);
1444
Brice Goglin2f007e72008-10-18 20:27:16 -07001445 for (i = 0; i < nr_pages; i++) {
Brice Goglin80bba122008-12-09 13:14:23 -08001446 unsigned long addr = (unsigned long)(*pages);
Christoph Lameter742755a2006-06-23 02:03:55 -07001447 struct vm_area_struct *vma;
1448 struct page *page;
KOSAKI Motohiroc095adb2008-12-16 16:06:43 +09001449 int err = -EFAULT;
Brice Goglin2f007e72008-10-18 20:27:16 -07001450
1451 vma = find_vma(mm, addr);
Gleb Natapov70384dc2010-10-26 14:22:07 -07001452 if (!vma || addr < vma->vm_start)
Christoph Lameter742755a2006-06-23 02:03:55 -07001453 goto set_status;
1454
Kirill A. Shutemovd8998442015-09-04 15:47:53 -07001455 /* FOLL_DUMP to ignore special (like zero) pages */
1456 page = follow_page(vma, addr, FOLL_DUMP);
Linus Torvalds89f5b7d2008-06-20 11:18:25 -07001457
1458 err = PTR_ERR(page);
1459 if (IS_ERR(page))
1460 goto set_status;
1461
Kirill A. Shutemovd8998442015-09-04 15:47:53 -07001462 err = page ? page_to_nid(page) : -ENOENT;
Christoph Lameter742755a2006-06-23 02:03:55 -07001463set_status:
Brice Goglin80bba122008-12-09 13:14:23 -08001464 *status = err;
1465
1466 pages++;
1467 status++;
1468 }
1469
1470 up_read(&mm->mmap_sem);
1471}
1472
1473/*
1474 * Determine the nodes of a user array of pages and store them in
1475 * a user array of status values.
1476 */
1477static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
1478 const void __user * __user *pages,
1479 int __user *status)
1480{
1481#define DO_PAGES_STAT_CHUNK_NR 16
1482 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
1483 int chunk_status[DO_PAGES_STAT_CHUNK_NR];
Brice Goglin80bba122008-12-09 13:14:23 -08001484
H. Peter Anvin87b8d1a2010-02-18 16:13:40 -08001485 while (nr_pages) {
1486 unsigned long chunk_nr;
Brice Goglin80bba122008-12-09 13:14:23 -08001487
H. Peter Anvin87b8d1a2010-02-18 16:13:40 -08001488 chunk_nr = nr_pages;
1489 if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
1490 chunk_nr = DO_PAGES_STAT_CHUNK_NR;
1491
1492 if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
1493 break;
Brice Goglin80bba122008-12-09 13:14:23 -08001494
1495 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
1496
H. Peter Anvin87b8d1a2010-02-18 16:13:40 -08001497 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
1498 break;
Christoph Lameter742755a2006-06-23 02:03:55 -07001499
H. Peter Anvin87b8d1a2010-02-18 16:13:40 -08001500 pages += chunk_nr;
1501 status += chunk_nr;
1502 nr_pages -= chunk_nr;
1503 }
1504 return nr_pages ? -EFAULT : 0;
Christoph Lameter742755a2006-06-23 02:03:55 -07001505}
1506
1507/*
1508 * Move a list of pages in the address space of the currently executing
1509 * process.
1510 */
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001511SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
1512 const void __user * __user *, pages,
1513 const int __user *, nodes,
1514 int __user *, status, int, flags)
Christoph Lameter742755a2006-06-23 02:03:55 -07001515{
David Howellsc69e8d92008-11-14 10:39:19 +11001516 const struct cred *cred = current_cred(), *tcred;
Christoph Lameter742755a2006-06-23 02:03:55 -07001517 struct task_struct *task;
Christoph Lameter742755a2006-06-23 02:03:55 -07001518 struct mm_struct *mm;
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001519 int err;
Christoph Lameter3268c632012-03-21 16:34:06 -07001520 nodemask_t task_nodes;
Christoph Lameter742755a2006-06-23 02:03:55 -07001521
1522 /* Check flags */
1523 if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
1524 return -EINVAL;
1525
1526 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1527 return -EPERM;
1528
1529 /* Find the mm_struct */
Greg Thelena879bf52011-02-25 14:44:13 -08001530 rcu_read_lock();
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -07001531 task = pid ? find_task_by_vpid(pid) : current;
Christoph Lameter742755a2006-06-23 02:03:55 -07001532 if (!task) {
Greg Thelena879bf52011-02-25 14:44:13 -08001533 rcu_read_unlock();
Christoph Lameter742755a2006-06-23 02:03:55 -07001534 return -ESRCH;
1535 }
Christoph Lameter3268c632012-03-21 16:34:06 -07001536 get_task_struct(task);
Christoph Lameter742755a2006-06-23 02:03:55 -07001537
1538 /*
1539 * Check if this process has the right to modify the specified
1540 * process. The right exists if the process has administrative
1541 * capabilities, superuser privileges or the same
1542 * userid as the target process.
1543 */
David Howellsc69e8d92008-11-14 10:39:19 +11001544 tcred = __task_cred(task);
Eric W. Biedermanb38a86e2012-03-12 15:48:24 -07001545 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1546 !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
Christoph Lameter742755a2006-06-23 02:03:55 -07001547 !capable(CAP_SYS_NICE)) {
David Howellsc69e8d92008-11-14 10:39:19 +11001548 rcu_read_unlock();
Christoph Lameter742755a2006-06-23 02:03:55 -07001549 err = -EPERM;
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001550 goto out;
Christoph Lameter742755a2006-06-23 02:03:55 -07001551 }
David Howellsc69e8d92008-11-14 10:39:19 +11001552 rcu_read_unlock();
Christoph Lameter742755a2006-06-23 02:03:55 -07001553
David Quigley86c3a762006-06-23 02:04:02 -07001554 err = security_task_movememory(task);
1555 if (err)
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001556 goto out;
David Quigley86c3a762006-06-23 02:04:02 -07001557
Christoph Lameter3268c632012-03-21 16:34:06 -07001558 task_nodes = cpuset_mems_allowed(task);
1559 mm = get_task_mm(task);
1560 put_task_struct(task);
1561
Sasha Levin6e8b09e2012-04-25 16:01:53 -07001562 if (!mm)
1563 return -EINVAL;
1564
1565 if (nodes)
1566 err = do_pages_move(mm, task_nodes, nr_pages, pages,
1567 nodes, status, flags);
1568 else
1569 err = do_pages_stat(mm, nr_pages, pages, status);
Christoph Lameter3268c632012-03-21 16:34:06 -07001570
1571 mmput(mm);
1572 return err;
David Quigley86c3a762006-06-23 02:04:02 -07001573
Christoph Lameter742755a2006-06-23 02:03:55 -07001574out:
Christoph Lameter3268c632012-03-21 16:34:06 -07001575 put_task_struct(task);
Christoph Lameter742755a2006-06-23 02:03:55 -07001576 return err;
1577}
Christoph Lameter742755a2006-06-23 02:03:55 -07001578
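/*
 * Illustrative sketch only, not part of the original file: how the
 * move_pages() system call defined above is typically used from user
 * space through the libnuma wrapper declared in <numaif.h> (link with
 * -lnuma).  The buffer, page count and target node below are hypothetical.
 */
#if 0	/* user-space sketch, not kernel code */
#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	void *pages[1];
	int nodes[1] = { 1 };	/* hypothetical target node */
	int status[1];
	void *buf;

	if (posix_memalign(&buf, page_size, page_size))
		return 1;
	((char *)buf)[0] = 0;	/* touch the page so it is actually allocated */
	pages[0] = buf;

	/* With nodes == NULL, move_pages() only reports the current node. */
	if (move_pages(0 /* self */, 1, pages, NULL, status, 0) == 0)
		printf("page is on node %d\n", status[0]);

	/* With a nodes array, the page is migrated to the requested node. */
	if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) == 0)
		printf("after move, status is %d\n", status[0]);

	free(buf);
	return 0;
}
#endif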
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001579#ifdef CONFIG_NUMA_BALANCING
1580/*
1581 * Returns true if this is a safe migration target node for misplaced NUMA
1582 * pages. Currently it only checks the watermarks, which is a crude check.
1583 */
1584static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
Mel Gorman3abef4e2013-02-22 16:34:27 -08001585 unsigned long nr_migrate_pages)
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001586{
1587 int z;
1588 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
1589 struct zone *zone = pgdat->node_zones + z;
1590
1591 if (!populated_zone(zone))
1592 continue;
1593
Lisa Du6e543d52013-09-11 14:22:36 -07001594 if (!zone_reclaimable(zone))
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001595 continue;
1596
1597 /* Avoid waking kswapd by allocating nr_migrate_pages pages. */
1598 if (!zone_watermark_ok(zone, 0,
1599 high_wmark_pages(zone) +
1600 nr_migrate_pages,
1601 0, 0))
1602 continue;
1603 return true;
1604 }
1605 return false;
1606}
1607
1608static struct page *alloc_misplaced_dst_page(struct page *page,
1609 unsigned long data,
1610 int **result)
1611{
1612 int nid = (int) data;
1613 struct page *newpage;
1614
Vlastimil Babka96db8002015-09-08 15:03:50 -07001615 newpage = __alloc_pages_node(nid,
Johannes Weinere97ca8e52014-03-10 15:49:43 -07001616 (GFP_HIGHUSER_MOVABLE |
1617 __GFP_THISNODE | __GFP_NOMEMALLOC |
1618 __GFP_NORETRY | __GFP_NOWARN) &
Mel Gorman8479eba2016-02-26 15:19:31 -08001619 ~__GFP_RECLAIM, 0);
Hillf Dantonbac03822012-11-27 14:46:24 +00001620
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001621 return newpage;
1622}
1623
1624/*
Mel Gormana8f60772012-11-14 21:41:46 +00001625 * page migration rate limiting control.
1626 * Do not migrate more than @ratelimit_pages in a @migrate_interval_millisecs
1627 * window of time. The defaults below cap migration at 1280MB per second.
1628 */
1629static unsigned int migrate_interval_millisecs __read_mostly = 100;
1630static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT);
1631
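/*
 * Illustrative arithmetic, not in the original source: with the defaults
 * above, ratelimit_pages corresponds to 128MB worth of pages per 100ms
 * window, i.e. 128MB / 0.1s = 1280MB/s, which is the figure quoted in the
 * comment above.
 */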
Mel Gormanb32967f2012-11-19 12:35:47 +00001632/* Returns true if the node is migrate rate-limited after the update */
Mel Gorman1c30e012014-01-21 15:50:58 -08001633static bool numamigrate_update_ratelimit(pg_data_t *pgdat,
1634 unsigned long nr_pages)
Mel Gormanb32967f2012-11-19 12:35:47 +00001635{
Mel Gormanb32967f2012-11-19 12:35:47 +00001636 /*
1637 * Rate-limit the amount of data that is being migrated to a node.
1638 * Optimal placement is no good if the memory bus is saturated and
1639 * all the time is being spent migrating!
1640 */
Mel Gormanb32967f2012-11-19 12:35:47 +00001641 if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
Mel Gorman1c5e9c22014-01-21 15:50:59 -08001642 spin_lock(&pgdat->numabalancing_migrate_lock);
Mel Gormanb32967f2012-11-19 12:35:47 +00001643 pgdat->numabalancing_migrate_nr_pages = 0;
1644 pgdat->numabalancing_migrate_next_window = jiffies +
1645 msecs_to_jiffies(migrate_interval_millisecs);
Mel Gorman1c5e9c22014-01-21 15:50:59 -08001646 spin_unlock(&pgdat->numabalancing_migrate_lock);
Mel Gormanb32967f2012-11-19 12:35:47 +00001647 }
Mel Gormanaf1839d2014-01-21 15:51:01 -08001648 if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages) {
1649 trace_mm_numa_migrate_ratelimit(current, pgdat->node_id,
1650 nr_pages);
Mel Gorman1c5e9c22014-01-21 15:50:59 -08001651 return true;
Mel Gormanaf1839d2014-01-21 15:51:01 -08001652 }
Mel Gorman1c5e9c22014-01-21 15:50:59 -08001653
1654 /*
1655 * This is an unlocked non-atomic update so errors are possible.
1656 * The consequence is failing to migrate when we potentially should
1657 * have, which is not severe enough to warrant locking. If it is ever
1658 * a problem, it can be converted to a per-cpu counter.
1659 */
1660 pgdat->numabalancing_migrate_nr_pages += nr_pages;
1661 return false;
Mel Gormanb32967f2012-11-19 12:35:47 +00001662}
1663
Mel Gorman1c30e012014-01-21 15:50:58 -08001664static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
Mel Gormanb32967f2012-11-19 12:35:47 +00001665{
Hugh Dickins340ef392013-02-22 16:34:33 -08001666 int page_lru;
Mel Gormanb32967f2012-11-19 12:35:47 +00001667
Sasha Levin309381fea2014-01-23 15:52:54 -08001668 VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);
Mel Gorman3abef4e2013-02-22 16:34:27 -08001669
Mel Gormanb32967f2012-11-19 12:35:47 +00001670 /* Avoid migrating to a node that is nearly full */
Hugh Dickins340ef392013-02-22 16:34:33 -08001671 if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page)))
1672 return 0;
Mel Gormanb32967f2012-11-19 12:35:47 +00001673
Hugh Dickins340ef392013-02-22 16:34:33 -08001674 if (isolate_lru_page(page))
1675 return 0;
Mel Gormanb32967f2012-11-19 12:35:47 +00001676
1677 /*
Hugh Dickins340ef392013-02-22 16:34:33 -08001678 * migrate_misplaced_transhuge_page() skips page migration's usual
1679 * check on page_count(), so we must do it here, now that the page
1680 * has been isolated: a GUP pin, or any other pin, prevents migration.
1681 * The expected page count is 3: 1 for the page's mapcount, 1 for the
1682 * caller's pin, and 1 for the reference taken by isolate_lru_page().
1683 */
1684 if (PageTransHuge(page) && page_count(page) != 3) {
1685 putback_lru_page(page);
1686 return 0;
1687 }
1688
1689 page_lru = page_is_file_cache(page);
1690 mod_zone_page_state(page_zone(page), NR_ISOLATED_ANON + page_lru,
1691 hpage_nr_pages(page));
1692
1693 /*
1694 * Isolating the page has taken another reference, so the
1695 * caller's reference can be safely dropped without the page
1696 * disappearing underneath us during migration.
Mel Gormanb32967f2012-11-19 12:35:47 +00001697 */
1698 put_page(page);
Hugh Dickins340ef392013-02-22 16:34:33 -08001699 return 1;
Mel Gormanb32967f2012-11-19 12:35:47 +00001700}
1701
Mel Gormande466bd2013-12-18 17:08:42 -08001702bool pmd_trans_migrating(pmd_t pmd)
1703{
1704 struct page *page = pmd_page(pmd);
1705 return PageLocked(page);
1706}
1707
Mel Gormana8f60772012-11-14 21:41:46 +00001708/*
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001709 * Attempt to migrate a misplaced page to the specified destination
1710 * node. Caller is expected to have an elevated reference count on
1711 * the page that will be dropped by this function before returning.
1712 */
Mel Gorman1bc115d2013-10-07 11:29:05 +01001713int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
1714 int node)
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001715{
Mel Gormana8f60772012-11-14 21:41:46 +00001716 pg_data_t *pgdat = NODE_DATA(node);
Hugh Dickins340ef392013-02-22 16:34:33 -08001717 int isolated;
Mel Gormanb32967f2012-11-19 12:35:47 +00001718 int nr_remaining;
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001719 LIST_HEAD(migratepages);
1720
1721 /*
Mel Gorman1bc115d2013-10-07 11:29:05 +01001722 * Don't migrate file pages that are mapped in multiple processes
1723 * with execute permissions as they are probably shared libraries.
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001724 */
Mel Gorman1bc115d2013-10-07 11:29:05 +01001725 if (page_mapcount(page) != 1 && page_is_file_cache(page) &&
1726 (vma->vm_flags & VM_EXEC))
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001727 goto out;
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001728
Mel Gormana8f60772012-11-14 21:41:46 +00001729 /*
1730 * Rate-limit the amount of data that is being migrated to a node.
1731 * Optimal placement is no good if the memory bus is saturated and
1732 * all the time is being spent migrating!
1733 */
Hugh Dickins340ef392013-02-22 16:34:33 -08001734 if (numamigrate_update_ratelimit(pgdat, 1))
Mel Gormana8f60772012-11-14 21:41:46 +00001735 goto out;
Mel Gormana8f60772012-11-14 21:41:46 +00001736
Mel Gormanb32967f2012-11-19 12:35:47 +00001737 isolated = numamigrate_isolate_page(pgdat, page);
1738 if (!isolated)
1739 goto out;
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001740
Mel Gormanb32967f2012-11-19 12:35:47 +00001741 list_add(&page->lru, &migratepages);
Hugh Dickins9c620e22013-02-22 16:35:14 -08001742 nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
David Rientjes68711a72014-06-04 16:08:25 -07001743 NULL, node, MIGRATE_ASYNC,
1744 MR_NUMA_MISPLACED);
Mel Gormanb32967f2012-11-19 12:35:47 +00001745 if (nr_remaining) {
Joonsoo Kim59c82b72014-01-21 15:51:17 -08001746 if (!list_empty(&migratepages)) {
1747 list_del(&page->lru);
1748 dec_zone_page_state(page, NR_ISOLATED_ANON +
1749 page_is_file_cache(page));
1750 putback_lru_page(page);
1751 }
Mel Gormanb32967f2012-11-19 12:35:47 +00001752 isolated = 0;
1753 } else
1754 count_vm_numa_event(NUMA_PAGE_MIGRATE);
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001755 BUG_ON(!list_empty(&migratepages));
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001756 return isolated;
Hugh Dickins340ef392013-02-22 16:34:33 -08001757
1758out:
1759 put_page(page);
1760 return 0;
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001761}
Mel Gorman220018d2012-12-05 09:32:56 +00001762#endif /* CONFIG_NUMA_BALANCING */
Mel Gormanb32967f2012-11-19 12:35:47 +00001763
Mel Gorman220018d2012-12-05 09:32:56 +00001764#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
Hugh Dickins340ef392013-02-22 16:34:33 -08001765/*
1766 * Migrates a THP to a given target node. page must be locked and is unlocked
1767 * before returning.
1768 */
Mel Gormanb32967f2012-11-19 12:35:47 +00001769int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1770 struct vm_area_struct *vma,
1771 pmd_t *pmd, pmd_t entry,
1772 unsigned long address,
1773 struct page *page, int node)
1774{
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08001775 spinlock_t *ptl;
Mel Gormanb32967f2012-11-19 12:35:47 +00001776 pg_data_t *pgdat = NODE_DATA(node);
1777 int isolated = 0;
1778 struct page *new_page = NULL;
Mel Gormanb32967f2012-11-19 12:35:47 +00001779 int page_lru = page_is_file_cache(page);
Mel Gormanf714f4f2013-12-18 17:08:33 -08001780 unsigned long mmun_start = address & HPAGE_PMD_MASK;
1781 unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
Mel Gorman2b4847e2013-12-18 17:08:32 -08001782 pmd_t orig_entry;
Mel Gormanb32967f2012-11-19 12:35:47 +00001783
1784 /*
Mel Gormanb32967f2012-11-19 12:35:47 +00001785 * Rate-limit the amount of data that is being migrated to a node.
1786 * Optimal placement is no good if the memory bus is saturated and
1787 * all the time is being spent migrating!
1788 */
Mel Gormand28d43352012-11-29 09:24:36 +00001789 if (numamigrate_update_ratelimit(pgdat, HPAGE_PMD_NR))
Mel Gormanb32967f2012-11-19 12:35:47 +00001790 goto out_dropref;
1791
1792 new_page = alloc_pages_node(node,
Mel Gorman71baba42015-11-06 16:28:28 -08001793 (GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_RECLAIM,
Johannes Weinere97ca8e52014-03-10 15:49:43 -07001794 HPAGE_PMD_ORDER);
Hugh Dickins340ef392013-02-22 16:34:33 -08001795 if (!new_page)
1796 goto out_fail;
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08001797 prep_transhuge_page(new_page);
Hugh Dickins340ef392013-02-22 16:34:33 -08001798
Mel Gormanb32967f2012-11-19 12:35:47 +00001799 isolated = numamigrate_isolate_page(pgdat, page);
Hugh Dickins340ef392013-02-22 16:34:33 -08001800 if (!isolated) {
Mel Gormanb32967f2012-11-19 12:35:47 +00001801 put_page(new_page);
Hugh Dickins340ef392013-02-22 16:34:33 -08001802 goto out_fail;
Mel Gormanb32967f2012-11-19 12:35:47 +00001803 }
Aneesh Kumar K.V458aa762016-03-17 14:18:56 -07001804 /*
1805 * We are not sure whether a pending tlb flush here is for a huge page
1806 * mapping or not. Hence use the tlb range variant.
1807 */
Mel Gormanb0943d62013-12-18 17:08:46 -08001808 if (mm_tlb_flush_pending(mm))
1809 flush_tlb_range(vma, mmun_start, mmun_end);
1810
Mel Gormanb32967f2012-11-19 12:35:47 +00001811 /* Prepare a page as a migration target */
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08001812 __SetPageLocked(new_page);
Hugh Dickinsfa9949d2016-05-19 17:12:41 -07001813 __SetPageSwapBacked(new_page);
Mel Gormanb32967f2012-11-19 12:35:47 +00001814
1815 /* anon mapping, we can simply copy page->mapping to the new page: */
1816 new_page->mapping = page->mapping;
1817 new_page->index = page->index;
1818 migrate_page_copy(new_page, page);
1819 WARN_ON(PageLRU(new_page));
1820
1821 /* Recheck the target PMD */
Mel Gormanf714f4f2013-12-18 17:08:33 -08001822 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08001823 ptl = pmd_lock(mm, pmd);
Mel Gorman2b4847e2013-12-18 17:08:32 -08001824 if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) {
1825fail_putback:
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08001826 spin_unlock(ptl);
Mel Gormanf714f4f2013-12-18 17:08:33 -08001827 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
Mel Gormanb32967f2012-11-19 12:35:47 +00001828
1829 /* Reverse changes made by migrate_page_copy() */
1830 if (TestClearPageActive(new_page))
1831 SetPageActive(page);
1832 if (TestClearPageUnevictable(new_page))
1833 SetPageUnevictable(page);
Mel Gormanb32967f2012-11-19 12:35:47 +00001834
1835 unlock_page(new_page);
1836 put_page(new_page); /* Free it */
1837
Mel Gormana54a4072013-10-07 11:28:46 +01001838 /* Retake the callers reference and putback on LRU */
1839 get_page(page);
Mel Gormanb32967f2012-11-19 12:35:47 +00001840 putback_lru_page(page);
Mel Gormana54a4072013-10-07 11:28:46 +01001841 mod_zone_page_state(page_zone(page),
1842 NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
Mel Gormaneb4489f62013-12-18 17:08:39 -08001843
1844 goto out_unlock;
Mel Gormanb32967f2012-11-19 12:35:47 +00001845 }
1846
Mel Gorman2b4847e2013-12-18 17:08:32 -08001847 orig_entry = *pmd;
Mel Gormanb32967f2012-11-19 12:35:47 +00001848 entry = mk_pmd(new_page, vma->vm_page_prot);
Mel Gormanb32967f2012-11-19 12:35:47 +00001849 entry = pmd_mkhuge(entry);
Mel Gorman2b4847e2013-12-18 17:08:32 -08001850 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
Mel Gormanb32967f2012-11-19 12:35:47 +00001851
Mel Gorman2b4847e2013-12-18 17:08:32 -08001852 /*
1853 * Clear the old entry under pagetable lock and establish the new PTE.
1854 * Any parallel GUP will either observe the old page blocking on the
1855 * page lock, block on the page table lock or observe the new page.
1856 * The SetPageUptodate on the new page and page_add_new_anon_rmap
1857 * guarantee the copy is visible before the pagetable update.
1858 */
Mel Gormanf714f4f2013-12-18 17:08:33 -08001859 flush_cache_range(vma, mmun_start, mmun_end);
Kirill A. Shutemovd281ee62016-01-15 16:52:16 -08001860 page_add_anon_rmap(new_page, vma, mmun_start, true);
Aneesh Kumar K.V8809aa22015-06-24 16:57:44 -07001861 pmdp_huge_clear_flush_notify(vma, mmun_start, pmd);
Mel Gormanf714f4f2013-12-18 17:08:33 -08001862 set_pmd_at(mm, mmun_start, pmd, entry);
Stephen Rothwellce4a9cc2012-12-10 19:50:57 +11001863 update_mmu_cache_pmd(vma, address, &entry);
Mel Gorman2b4847e2013-12-18 17:08:32 -08001864
1865 if (page_count(page) != 2) {
Mel Gormanf714f4f2013-12-18 17:08:33 -08001866 set_pmd_at(mm, mmun_start, pmd, orig_entry);
Aneesh Kumar K.V458aa762016-03-17 14:18:56 -07001867 flush_pmd_tlb_range(vma, mmun_start, mmun_end);
Joerg Roedel34ee6452014-11-13 13:46:09 +11001868 mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
Mel Gorman2b4847e2013-12-18 17:08:32 -08001869 update_mmu_cache_pmd(vma, address, &entry);
Kirill A. Shutemovd281ee62016-01-15 16:52:16 -08001870 page_remove_rmap(new_page, true);
Mel Gorman2b4847e2013-12-18 17:08:32 -08001871 goto fail_putback;
1872 }
1873
Hugh Dickins51afb122015-11-05 18:49:37 -08001874 mlock_migrate_page(new_page, page);
Kirill A. Shutemovd281ee62016-01-15 16:52:16 -08001875 page_remove_rmap(page, true);
Vlastimil Babka7cd12b42016-03-15 14:56:18 -07001876 set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);
Mel Gorman2b4847e2013-12-18 17:08:32 -08001877
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08001878 spin_unlock(ptl);
Mel Gormanf714f4f2013-12-18 17:08:33 -08001879 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
Mel Gormanb32967f2012-11-19 12:35:47 +00001880
Mel Gorman11de9922014-06-04 16:07:41 -07001881 /* Take an "isolate" reference and put new page on the LRU. */
1882 get_page(new_page);
1883 putback_lru_page(new_page);
1884
Mel Gormanb32967f2012-11-19 12:35:47 +00001885 unlock_page(new_page);
1886 unlock_page(page);
1887 put_page(page); /* Drop the rmap reference */
1888 put_page(page); /* Drop the LRU isolation reference */
1889
1890 count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
1891 count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
1892
Mel Gormanb32967f2012-11-19 12:35:47 +00001893 mod_zone_page_state(page_zone(page),
1894 NR_ISOLATED_ANON + page_lru,
1895 -HPAGE_PMD_NR);
1896 return isolated;
1897
Hugh Dickins340ef392013-02-22 16:34:33 -08001898out_fail:
1899 count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
Mel Gormanb32967f2012-11-19 12:35:47 +00001900out_dropref:
Mel Gorman2b4847e2013-12-18 17:08:32 -08001901 ptl = pmd_lock(mm, pmd);
1902 if (pmd_same(*pmd, entry)) {
Mel Gorman4d942462015-02-12 14:58:28 -08001903 entry = pmd_modify(entry, vma->vm_page_prot);
Mel Gormanf714f4f2013-12-18 17:08:33 -08001904 set_pmd_at(mm, mmun_start, pmd, entry);
Mel Gorman2b4847e2013-12-18 17:08:32 -08001905 update_mmu_cache_pmd(vma, address, &entry);
1906 }
1907 spin_unlock(ptl);
Mel Gormana54a4072013-10-07 11:28:46 +01001908
Mel Gormaneb4489f62013-12-18 17:08:39 -08001909out_unlock:
Hugh Dickins340ef392013-02-22 16:34:33 -08001910 unlock_page(page);
Mel Gormanb32967f2012-11-19 12:35:47 +00001911 put_page(page);
Mel Gormanb32967f2012-11-19 12:35:47 +00001912 return 0;
1913}
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001914#endif /* CONFIG_NUMA_BALANCING */
1915
1916#endif /* CONFIG_NUMA */