// SPDX-License-Identifier: GPL-2.0
/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/compaction.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/pagewalk.h>
#include <linux/pfn_t.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/balloon_compaction.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>
#include <linux/sched/mm.h>
#include <linux/ptrace.h>

#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/migrate.h>

#include "internal.h"

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
 * undesirable, use migrate_prep_local()
 */
int migrate_prep(void)
{
	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

/* Do the necessary work of migrate_prep but not if it involves other CPUs */
int migrate_prep_local(void)
{
	lru_add_drain();

	return 0;
}

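/*
 * Illustrative caller pattern (the alloc/free/private names below are
 * placeholders, not definitions from this file): code that drives migration
 * usually does roughly
 *
 *	migrate_prep();
 *	...isolate pages with isolate_lru_page()/isolate_movable_page()
 *	   onto a private list...
 *	migrate_pages(&pagelist, alloc, free, private, mode, reason);
 */
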
int isolate_movable_page(struct page *page, isolate_mode_t mode)
{
	struct address_space *mapping;

	/*
	 * Avoid burning cycles with pages that are yet under __free_pages(),
	 * or just got freed under us.
	 *
	 * In case we 'win' a race for a movable page being freed under us and
	 * raise its refcount preventing __free_pages() from doing its job,
	 * the put_page() at the end of this block will take care of
	 * releasing this page, thus avoiding a nasty leakage.
	 */
	if (unlikely(!get_page_unless_zero(page)))
		goto out;

	/*
	 * Check PageMovable before holding a PG_lock because the page's owner
	 * assumes that nobody touches the PG_lock of a newly allocated page,
	 * so unconditionally grabbing the lock would break the owner's
	 * assumptions.
	 */
	if (unlikely(!__PageMovable(page)))
		goto out_putpage;
	/*
	 * As movable pages are not isolated from LRU lists, concurrent
	 * compaction threads can race against page migration functions
	 * as well as against a page being released.
	 *
	 * In order to avoid having an already isolated movable page
	 * being (wrongly) re-isolated while it is under migration,
	 * or to avoid attempting to isolate pages being released,
	 * let's be sure we have the page lock
	 * before proceeding with the movable page isolation steps.
	 */
	if (unlikely(!trylock_page(page)))
		goto out_putpage;

	if (!PageMovable(page) || PageIsolated(page))
		goto out_no_isolated;

	mapping = page_mapping(page);
	VM_BUG_ON_PAGE(!mapping, page);

	if (!mapping->a_ops->isolate_page(page, mode))
		goto out_no_isolated;

	/* Driver shouldn't use PG_isolated bit of page->flags */
	WARN_ON_ONCE(PageIsolated(page));
	__SetPageIsolated(page);
	unlock_page(page);

	return 0;

out_no_isolated:
	unlock_page(page);
out_putpage:
	put_page(page);
out:
	return -EBUSY;
}

/* This should be called on a page which is PG_movable */
void putback_movable_page(struct page *page)
{
	struct address_space *mapping;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);

	mapping = page_mapping(page);
	mapping->a_ops->putback_page(page);
	__ClearPageIsolated(page);
}

/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used whenever the isolated pageset has been
 * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
 * and isolate_huge_page().
 */
void putback_movable_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		if (unlikely(PageHuge(page))) {
			putback_active_hugepage(page);
			continue;
		}
		list_del(&page->lru);
		/*
		 * We isolated a non-lru movable page, so here we can use
		 * __PageMovable because an LRU page's mapping cannot have
		 * PAGE_MAPPING_MOVABLE.
		 */
		if (unlikely(__PageMovable(page))) {
			VM_BUG_ON_PAGE(!PageIsolated(page), page);
			lock_page(page);
			if (PageMovable(page))
				putback_movable_page(page);
			else
				__ClearPageIsolated(page);
			unlock_page(page);
			put_page(page);
		} else {
			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
					page_is_file_cache(page), -hpage_nr_pages(page));
			putback_lru_page(page);
		}
	}
}

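/*
 * Illustrative error-path usage: whatever migrate_pages() could not move
 * (or pages left over when a caller aborts) is expected to be handed back
 * here, e.g.
 *
 *	if (!list_empty(&pagelist))
 *		putback_movable_pages(&pagelist);
 *
 * so that each page returns to the LRU or to its owning driver.
 */
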
/*
 * Restore a potential migration pte to a working pte entry
 */
static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
				 unsigned long addr, void *old)
{
	struct page_vma_mapped_walk pvmw = {
		.page = old,
		.vma = vma,
		.address = addr,
		.flags = PVMW_SYNC | PVMW_MIGRATION,
	};
	struct page *new;
	pte_t pte;
	swp_entry_t entry;

	VM_BUG_ON_PAGE(PageTail(page), page);
	while (page_vma_mapped_walk(&pvmw)) {
		if (PageKsm(page))
			new = page;
		else
			new = page - pvmw.page->index +
				linear_page_index(vma, pvmw.address);

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		/* PMD-mapped THP migration entry */
		if (!pvmw.pte) {
			VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
			remove_migration_pmd(&pvmw, new);
			continue;
		}
#endif

		get_page(new);
		pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
		if (pte_swp_soft_dirty(*pvmw.pte))
			pte = pte_mksoft_dirty(pte);

		/*
		 * Recheck VMA as permissions can change since migration started
		 */
		entry = pte_to_swp_entry(*pvmw.pte);
		if (is_write_migration_entry(entry))
			pte = maybe_mkwrite(pte, vma);

		if (unlikely(is_zone_device_page(new))) {
			if (is_device_private_page(new)) {
				entry = make_device_private_entry(new, pte_write(pte));
				pte = swp_entry_to_pte(entry);
			}
		}

#ifdef CONFIG_HUGETLB_PAGE
		if (PageHuge(new)) {
			pte = pte_mkhuge(pte);
			pte = arch_make_huge_pte(pte, vma, new, 0);
			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
			if (PageAnon(new))
				hugepage_add_anon_rmap(new, vma, pvmw.address);
			else
				page_dup_rmap(new, true);
		} else
#endif
		{
			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);

			if (PageAnon(new))
				page_add_anon_rmap(new, vma, pvmw.address, false);
			else
				page_add_file_rmap(new, false);
		}
		if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
			mlock_vma_page(new);

		if (PageTransHuge(page) && PageMlocked(page))
			clear_page_mlock(page);

		/* No need to invalidate - it was non-present before */
		update_mmu_cache(vma, pvmw.address, pvmw.pte);
	}

	return true;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
void remove_migration_ptes(struct page *old, struct page *new, bool locked)
{
	struct rmap_walk_control rwc = {
		.rmap_one = remove_migration_pte,
		.arg = old,
	};

	if (locked)
		rmap_walk_locked(new, &rwc);
	else
		rmap_walk(new, &rwc);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
				spinlock_t *ptl)
{
	pte_t pte;
	swp_entry_t entry;
	struct page *page;

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	page = migration_entry_to_page(entry);

	/*
	 * Once the page cache replacement step of migration has started,
	 * page_count is zero; but we must not call
	 * put_and_wait_on_page_locked() without a ref. Use
	 * get_page_unless_zero(), and just fault again if it fails.
	 */
	if (!get_page_unless_zero(page))
		goto out;
	pte_unmap_unlock(ptep, ptl);
	put_and_wait_on_page_locked(page);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}

void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	spinlock_t *ptl = pte_lockptr(mm, pmd);
	pte_t *ptep = pte_offset_map(pmd, address);
	__migration_entry_wait(mm, ptep, ptl);
}

void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
	__migration_entry_wait(mm, pte, ptl);
}

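/*
 * Note: these wait helpers are reached from the fault path; for example,
 * do_swap_page() is expected to call migration_entry_wait() when it finds
 * a migration entry, so the faulting task simply retries once migration
 * has completed.
 */
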
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl;
	struct page *page;

	ptl = pmd_lock(mm, pmd);
	if (!is_pmd_migration_entry(*pmd))
		goto unlock;
	page = migration_entry_to_page(pmd_to_swp_entry(*pmd));
	if (!get_page_unless_zero(page))
		goto unlock;
	spin_unlock(ptl);
	put_and_wait_on_page_locked(page);
	return;
unlock:
	spin_unlock(ptl);
}
#endif

static int expected_page_refs(struct address_space *mapping, struct page *page)
{
	int expected_count = 1;

	/*
	 * Device public or private pages have an extra refcount as they are
	 * ZONE_DEVICE pages.
	 */
	expected_count += is_device_private_page(page);
	if (mapping)
		expected_count += hpage_nr_pages(page) + page_has_private(page);

	return expected_count;
}

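/*
 * Worked example of the arithmetic above (illustrative): for an order-0
 * page cache page with buffer heads attached, the expected count is
 * 1 (reference held by the migration caller) + 1 (page cache) +
 * 1 (PagePrivate) = 3, matching the "3 for pages with a mapping and
 * PagePrivate" rule documented below.
 */
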
/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 *   1 for anonymous pages without a mapping
 *   2 for pages with a mapping
 *   3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page, int extra_count)
{
	XA_STATE(xas, &mapping->i_pages, page_index(page));
	struct zone *oldzone, *newzone;
	int dirty;
	int expected_count = expected_page_refs(mapping, page) + extra_count;

	if (!mapping) {
		/* Anonymous page without mapping */
		if (page_count(page) != expected_count)
			return -EAGAIN;

		/* No turning back from here */
		newpage->index = page->index;
		newpage->mapping = page->mapping;
		if (PageSwapBacked(page))
			__SetPageSwapBacked(newpage);

		return MIGRATEPAGE_SUCCESS;
	}

	oldzone = page_zone(page);
	newzone = page_zone(newpage);

	xas_lock_irq(&xas);
	if (page_count(page) != expected_count || xas_load(&xas) != page) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	if (!page_ref_freeze(page, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page:
	 * no turning back from here.
	 */
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	page_ref_add(newpage, hpage_nr_pages(page)); /* add cache reference */
	if (PageSwapBacked(page)) {
		__SetPageSwapBacked(newpage);
		if (PageSwapCache(page)) {
			SetPageSwapCache(newpage);
			set_page_private(newpage, page_private(page));
		}
	} else {
		VM_BUG_ON_PAGE(PageSwapCache(page), page);
	}

	/* Move dirty while page refs frozen and newpage not yet exposed */
	dirty = PageDirty(page);
	if (dirty) {
		ClearPageDirty(page);
		SetPageDirty(newpage);
	}

	xas_store(&xas, newpage);
	if (PageTransHuge(page)) {
		int i;

		for (i = 1; i < HPAGE_PMD_NR; i++) {
			xas_next(&xas);
			xas_store(&xas, newpage);
		}
	}

	/*
	 * Drop cache reference from old page by unfreezing
	 * to one less reference.
	 * We know this isn't the last reference.
	 */
	page_ref_unfreeze(page, expected_count - hpage_nr_pages(page));

	xas_unlock(&xas);
	/* Leave irq disabled to prevent preemption while updating stats */

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
	 * are mapped to swap space.
	 */
	if (newzone != oldzone) {
		__dec_node_state(oldzone->zone_pgdat, NR_FILE_PAGES);
		__inc_node_state(newzone->zone_pgdat, NR_FILE_PAGES);
		if (PageSwapBacked(page) && !PageSwapCache(page)) {
			__dec_node_state(oldzone->zone_pgdat, NR_SHMEM);
			__inc_node_state(newzone->zone_pgdat, NR_SHMEM);
		}
		if (dirty && mapping_cap_account_dirty(mapping)) {
			__dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY);
			__dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
			__inc_node_state(newzone->zone_pgdat, NR_FILE_DIRTY);
			__inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
		}
	}
	local_irq_enable();

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page_move_mapping);

/*
 * The expected number of remaining references is the same as that
 * of migrate_page_move_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct page *newpage, struct page *page)
{
	XA_STATE(xas, &mapping->i_pages, page_index(page));
	int expected_count;

	xas_lock_irq(&xas);
	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count || xas_load(&xas) != page) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	if (!page_ref_freeze(page, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	newpage->index = page->index;
	newpage->mapping = page->mapping;

	get_page(newpage);

	xas_store(&xas, newpage);

	page_ref_unfreeze(page, expected_count - 1);

	xas_unlock_irq(&xas);

	return MIGRATEPAGE_SUCCESS;
}

/*
 * Gigantic pages are so large that we do not guarantee that page++ pointer
 * arithmetic will work across the entire page.  We need something more
 * specialized.
 */
static void __copy_gigantic_page(struct page *dst, struct page *src,
				int nr_pages)
{
	int i;
	struct page *dst_base = dst;
	struct page *src_base = src;

	for (i = 0; i < nr_pages; ) {
		cond_resched();
		copy_highpage(dst, src);

		i++;
		dst = mem_map_next(dst, dst_base, i);
		src = mem_map_next(src, src_base, i);
	}
}

static void copy_huge_page(struct page *dst, struct page *src)
{
	int i;
	int nr_pages;

	if (PageHuge(src)) {
		/* hugetlbfs page */
		struct hstate *h = page_hstate(src);
		nr_pages = pages_per_huge_page(h);

		if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
			__copy_gigantic_page(dst, src, nr_pages);
			return;
		}
	} else {
		/* thp page */
		BUG_ON(!PageTransHuge(src));
		nr_pages = hpage_nr_pages(src);
	}

	for (i = 0; i < nr_pages; i++) {
		cond_resched();
		copy_highpage(dst + i, src + i);
	}
}

/*
 * Copy the page to its new location
 */
void migrate_page_states(struct page *newpage, struct page *page)
{
	int cpupid;

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (TestClearPageActive(page)) {
		VM_BUG_ON_PAGE(PageUnevictable(page), page);
		SetPageActive(newpage);
	} else if (TestClearPageUnevictable(page))
		SetPageUnevictable(newpage);
	if (PageWorkingset(page))
		SetPageWorkingset(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	/* Move dirty on pages not done by migrate_page_move_mapping() */
	if (PageDirty(page))
		SetPageDirty(newpage);

	if (page_is_young(page))
		set_page_young(newpage);
	if (page_is_idle(page))
		set_page_idle(newpage);

	/*
	 * Copy NUMA information to the new page, to prevent over-eager
	 * future migrations of this same page.
	 */
	cpupid = page_cpupid_xchg_last(page, -1);
	page_cpupid_xchg_last(newpage, cpupid);

	ksm_migrate_page(newpage, page);
	/*
	 * Please do not reorder this without considering how mm/ksm.c's
	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
	 */
	if (PageSwapCache(page))
		ClearPageSwapCache(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);

	copy_page_owner(page, newpage);

	mem_cgroup_migrate(page, newpage);
}
EXPORT_SYMBOL(migrate_page_states);

void migrate_page_copy(struct page *newpage, struct page *page)
{
	if (PageHuge(page) || PageTransHuge(page))
		copy_huge_page(newpage, page);
	else
		copy_highpage(newpage, page);

	migrate_page_states(newpage, page);
}
EXPORT_SYMBOL(migrate_page_copy);

/************************************************************
 * Migration functions
 ***********************************************************/

/*
 * Common logic to directly migrate a single LRU page suitable for
 * pages that do not use PagePrivate/PagePrivate2.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_move_mapping(mapping, newpage, page, 0);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page);

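/*
 * Illustrative wiring (example_aops is a made-up name used only here): a
 * filesystem whose pages carry no private data can simply point its
 * address_space_operations at the generic helper above, e.g.
 *
 *	static const struct address_space_operations example_aops = {
 *		...
 *		.migratepage	= migrate_page,
 *	};
 */
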
#ifdef CONFIG_BLOCK
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	struct buffer_head *bh = head;

	/* Simple case, sync compaction */
	if (mode != MIGRATE_ASYNC) {
		do {
			lock_buffer(bh);
			bh = bh->b_this_page;

		} while (bh != head);

		return true;
	}

	/* async case, we cannot block on lock_buffer so use trylock_buffer */
	do {
		if (!trylock_buffer(bh)) {
			/*
			 * We failed to lock the buffer and cannot stall in
			 * async migration. Release the taken locks.
			 */
			struct buffer_head *failed_bh = bh;
			bh = head;
			while (bh != failed_bh) {
				unlock_buffer(bh);
				bh = bh->b_this_page;
			}
			return false;
		}

		bh = bh->b_this_page;
	} while (bh != head);
	return true;
}

static int __buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode,
		bool check_refs)
{
	struct buffer_head *bh, *head;
	int rc;
	int expected_count;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page, mode);

	/* Check that the page does not have extra refs before we do more work */
	expected_count = expected_page_refs(mapping, page);
	if (page_count(page) != expected_count)
		return -EAGAIN;

	head = page_buffers(page);
	if (!buffer_migrate_lock_buffers(head, mode))
		return -EAGAIN;

	if (check_refs) {
		bool busy;
		bool invalidated = false;

recheck_buffers:
		busy = false;
		spin_lock(&mapping->private_lock);
		bh = head;
		do {
			if (atomic_read(&bh->b_count)) {
				busy = true;
				break;
			}
			bh = bh->b_this_page;
		} while (bh != head);
		if (busy) {
			if (invalidated) {
				rc = -EAGAIN;
				goto unlock_buffers;
			}
			spin_unlock(&mapping->private_lock);
			invalidate_bh_lrus();
			invalidated = true;
			goto recheck_buffers;
		}
	}

	rc = migrate_page_move_mapping(mapping, newpage, page, 0);
	if (rc != MIGRATEPAGE_SUCCESS)
		goto unlock_buffers;

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	rc = MIGRATEPAGE_SUCCESS;
unlock_buffers:
	if (check_refs)
		spin_unlock(&mapping->private_lock);
	bh = head;
	do {
		unlock_buffer(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return rc;
}

/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist. For example, attached buffer heads are accessed only under the page
 * lock.
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	return __buffer_migrate_page(mapping, newpage, page, mode, false);
}
EXPORT_SYMBOL(buffer_migrate_page);

/*
 * Same as above except that this variant is more careful and checks that there
 * are also no buffer head references. This function is the right one for
 * mappings where buffer heads are directly looked up and referenced (such as
 * block device mappings).
 */
int buffer_migrate_page_norefs(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	return __buffer_migrate_page(mapping, newpage, page, mode, true);
}
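/*
 * Illustrative guidance based on the two comments above: an ordinary
 * buffer-head filesystem, whose buffers are only reached through the page,
 * can use buffer_migrate_page(); a mapping whose buffer heads are looked up
 * and referenced directly (the block device mapping is the typical case)
 * should use buffer_migrate_page_norefs() so that elevated b_count
 * references are noticed before the page is migrated.
 */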
David Howells93614012006-09-30 20:45:40 +0200844#endif
Christoph Lameter1d8b85c2006-06-23 02:03:28 -0700845
Christoph Lameter04e62a22006-06-23 02:03:38 -0700846/*
847 * Writeback a page to clean the dirty state
848 */
849static int writeout(struct address_space *mapping, struct page *page)
850{
851 struct writeback_control wbc = {
852 .sync_mode = WB_SYNC_NONE,
853 .nr_to_write = 1,
854 .range_start = 0,
855 .range_end = LLONG_MAX,
Christoph Lameter04e62a22006-06-23 02:03:38 -0700856 .for_reclaim = 1
857 };
858 int rc;
859
860 if (!mapping->a_ops->writepage)
861 /* No write method for the address space */
862 return -EINVAL;
863
864 if (!clear_page_dirty_for_io(page))
865 /* Someone else already triggered a write */
866 return -EAGAIN;
867
868 /*
869 * A dirty page may imply that the underlying filesystem has
870 * the page on some queue. So the page must be clean for
871 * migration. Writeout may mean we loose the lock and the
872 * page state is no longer what we checked for earlier.
873 * At this point we know that the migration attempt cannot
874 * be successful.
875 */
Kirill A. Shutemove3884662016-03-17 14:20:07 -0700876 remove_migration_ptes(page, page, false);
Christoph Lameter04e62a22006-06-23 02:03:38 -0700877
878 rc = mapping->a_ops->writepage(page, &wbc);
Christoph Lameter04e62a22006-06-23 02:03:38 -0700879
880 if (rc != AOP_WRITEPAGE_ACTIVATE)
881 /* unlocked. Relock */
882 lock_page(page);
883
Hugh Dickinsbda85502008-11-19 15:36:36 -0800884 return (rc < 0) ? -EIO : -EAGAIN;
Christoph Lameter04e62a22006-06-23 02:03:38 -0700885}
886
887/*
888 * Default handling if a filesystem does not provide a migration function.
889 */
Christoph Lameter8351a6e2006-06-23 02:03:33 -0700890static int fallback_migrate_page(struct address_space *mapping,
Mel Gormana6bc32b2012-01-12 17:19:43 -0800891 struct page *newpage, struct page *page, enum migrate_mode mode)
Christoph Lameter8351a6e2006-06-23 02:03:33 -0700892{
Mel Gormanb969c4ab2012-01-12 17:19:34 -0800893 if (PageDirty(page)) {
Mel Gormana6bc32b2012-01-12 17:19:43 -0800894 /* Only writeback pages in full synchronous migration */
Jérôme Glisse2916ecc2017-09-08 16:12:06 -0700895 switch (mode) {
896 case MIGRATE_SYNC:
897 case MIGRATE_SYNC_NO_COPY:
898 break;
899 default:
Mel Gormanb969c4ab2012-01-12 17:19:34 -0800900 return -EBUSY;
Jérôme Glisse2916ecc2017-09-08 16:12:06 -0700901 }
Christoph Lameter04e62a22006-06-23 02:03:38 -0700902 return writeout(mapping, page);
Mel Gormanb969c4ab2012-01-12 17:19:34 -0800903 }
Christoph Lameter8351a6e2006-06-23 02:03:33 -0700904
905 /*
906 * Buffers may be managed in a filesystem specific way.
907 * We must have no buffers or drop them.
908 */
David Howells266cf652009-04-03 16:42:36 +0100909 if (page_has_private(page) &&
Christoph Lameter8351a6e2006-06-23 02:03:33 -0700910 !try_to_release_page(page, GFP_KERNEL))
Mel Gorman806031b2019-03-05 15:44:43 -0800911 return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
Christoph Lameter8351a6e2006-06-23 02:03:33 -0700912
Mel Gormana6bc32b2012-01-12 17:19:43 -0800913 return migrate_page(mapping, newpage, page, mode);
Christoph Lameter8351a6e2006-06-23 02:03:33 -0700914}
915
Christoph Lameter1d8b85c2006-06-23 02:03:28 -0700916/*
Christoph Lametere24f0b82006-06-23 02:03:51 -0700917 * Move a page to a newly allocated page
918 * The page is locked and all ptes have been successfully removed.
919 *
920 * The new page will have replaced the old page if this function
921 * is successful.
Lee Schermerhorn894bc312008-10-18 20:26:39 -0700922 *
923 * Return value:
924 * < 0 - error code
Rafael Aquini78bd5202012-12-11 16:02:31 -0800925 * MIGRATEPAGE_SUCCESS - success
Christoph Lametere24f0b82006-06-23 02:03:51 -0700926 */
Mel Gorman3fe20112010-05-24 14:32:20 -0700927static int move_to_new_page(struct page *newpage, struct page *page,
Hugh Dickins5c3f9a62015-11-05 18:49:53 -0800928 enum migrate_mode mode)
Christoph Lametere24f0b82006-06-23 02:03:51 -0700929{
930 struct address_space *mapping;
Minchan Kimbda807d2016-07-26 15:23:05 -0700931 int rc = -EAGAIN;
932 bool is_lru = !__PageMovable(page);
Christoph Lametere24f0b82006-06-23 02:03:51 -0700933
Hugh Dickins7db76712015-11-05 18:49:49 -0800934 VM_BUG_ON_PAGE(!PageLocked(page), page);
935 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
Christoph Lametere24f0b82006-06-23 02:03:51 -0700936
Christoph Lametere24f0b82006-06-23 02:03:51 -0700937 mapping = page_mapping(page);
Minchan Kimbda807d2016-07-26 15:23:05 -0700938
939 if (likely(is_lru)) {
940 if (!mapping)
941 rc = migrate_page(mapping, newpage, page, mode);
942 else if (mapping->a_ops->migratepage)
943 /*
944 * Most pages have a mapping and most filesystems
945 * provide a migratepage callback. Anonymous pages
946 * are part of swap space which also has its own
947 * migratepage callback. This is the most common path
948 * for page migration.
949 */
950 rc = mapping->a_ops->migratepage(mapping, newpage,
951 page, mode);
952 else
953 rc = fallback_migrate_page(mapping, newpage,
954 page, mode);
955 } else {
Christoph Lametere24f0b82006-06-23 02:03:51 -0700956 /*
Minchan Kimbda807d2016-07-26 15:23:05 -0700957 * In case of non-lru page, it could be released after
958 * isolation step. In that case, we shouldn't try migration.
Christoph Lametere24f0b82006-06-23 02:03:51 -0700959 */
Minchan Kimbda807d2016-07-26 15:23:05 -0700960 VM_BUG_ON_PAGE(!PageIsolated(page), page);
961 if (!PageMovable(page)) {
962 rc = MIGRATEPAGE_SUCCESS;
963 __ClearPageIsolated(page);
964 goto out;
965 }
966
967 rc = mapping->a_ops->migratepage(mapping, newpage,
968 page, mode);
969 WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
970 !PageIsolated(page));
971 }
Christoph Lametere24f0b82006-06-23 02:03:51 -0700972
Hugh Dickins5c3f9a62015-11-05 18:49:53 -0800973 /*
974 * When successful, old pagecache page->mapping must be cleared before
975 * page is freed; but stats require that PageAnon be left as PageAnon.
976 */
977 if (rc == MIGRATEPAGE_SUCCESS) {
Minchan Kimbda807d2016-07-26 15:23:05 -0700978 if (__PageMovable(page)) {
979 VM_BUG_ON_PAGE(!PageIsolated(page), page);
980
981 /*
982 * We clear PG_movable under page_lock so any compactor
983 * cannot try to migrate this page.
984 */
985 __ClearPageIsolated(page);
986 }
987
988 /*
989 * Anonymous and movable page->mapping will be cleard by
990 * free_pages_prepare so don't reset it here for keeping
991 * the type to work PageAnon, for example.
992 */
993 if (!PageMappingFlags(page))
Hugh Dickins5c3f9a62015-11-05 18:49:53 -0800994 page->mapping = NULL;
Lars Perssond2b2c6dd2019-03-28 20:44:28 -0700995
Christoph Hellwig25b29952019-06-13 22:50:49 +0200996 if (likely(!is_zone_device_page(newpage)))
Lars Perssond2b2c6dd2019-03-28 20:44:28 -0700997 flush_dcache_page(newpage);
998
Mel Gorman3fe20112010-05-24 14:32:20 -0700999 }
Minchan Kimbda807d2016-07-26 15:23:05 -07001000out:
Christoph Lametere24f0b82006-06-23 02:03:51 -07001001 return rc;
1002}
1003
Minchan Kim0dabec92011-10-31 17:06:57 -07001004static int __unmap_and_move(struct page *page, struct page *newpage,
Hugh Dickins9c620e22013-02-22 16:35:14 -08001005 int force, enum migrate_mode mode)
Christoph Lametere24f0b82006-06-23 02:03:51 -07001006{
Minchan Kim0dabec92011-10-31 17:06:57 -07001007 int rc = -EAGAIN;
Hugh Dickins2ebba6b2014-12-12 16:56:19 -08001008 int page_was_mapped = 0;
Mel Gorman3f6c8272010-05-24 14:32:17 -07001009 struct anon_vma *anon_vma = NULL;
Minchan Kimbda807d2016-07-26 15:23:05 -07001010 bool is_lru = !__PageMovable(page);
Christoph Lameter95a402c2006-06-23 02:03:53 -07001011
Nick Piggin529ae9a2008-08-02 12:01:03 +02001012 if (!trylock_page(page)) {
Mel Gormana6bc32b2012-01-12 17:19:43 -08001013 if (!force || mode == MIGRATE_ASYNC)
Minchan Kim0dabec92011-10-31 17:06:57 -07001014 goto out;
Mel Gorman3e7d3442011-01-13 15:45:56 -08001015
1016 /*
1017 * It's not safe for direct compaction to call lock_page.
1018 * For example, during page readahead pages are added locked
1019 * to the LRU. Later, when the IO completes the pages are
1020 * marked uptodate and unlocked. However, the queueing
1021 * could be merging multiple pages for one bio (e.g.
1022 * mpage_readpages). If an allocation happens for the
1023 * second or third page, the process can end up locking
1024 * the same page twice and deadlocking. Rather than
1025 * trying to be clever about what pages can be locked,
1026 * avoid the use of lock_page for direct compaction
1027 * altogether.
1028 */
1029 if (current->flags & PF_MEMALLOC)
Minchan Kim0dabec92011-10-31 17:06:57 -07001030 goto out;
Mel Gorman3e7d3442011-01-13 15:45:56 -08001031
Christoph Lametere24f0b82006-06-23 02:03:51 -07001032 lock_page(page);
1033 }
1034
1035 if (PageWriteback(page)) {
Andrea Arcangeli11bc82d2011-03-22 16:33:11 -07001036 /*
Jianguo Wufed5b642013-04-29 15:07:58 -07001037 * Only in the case of a full synchronous migration is it
Mel Gormana6bc32b2012-01-12 17:19:43 -08001038 * necessary to wait for PageWriteback. In the async case,
1039 * the retry loop is too short and in the sync-light case,
1040 * the overhead of stalling is too much
Andrea Arcangeli11bc82d2011-03-22 16:33:11 -07001041 */
Jérôme Glisse2916ecc2017-09-08 16:12:06 -07001042 switch (mode) {
1043 case MIGRATE_SYNC:
1044 case MIGRATE_SYNC_NO_COPY:
1045 break;
1046 default:
Andrea Arcangeli11bc82d2011-03-22 16:33:11 -07001047 rc = -EBUSY;
Johannes Weiner0a31bc92014-08-08 14:19:22 -07001048 goto out_unlock;
Andrea Arcangeli11bc82d2011-03-22 16:33:11 -07001049 }
1050 if (!force)
Johannes Weiner0a31bc92014-08-08 14:19:22 -07001051 goto out_unlock;
Christoph Lametere24f0b82006-06-23 02:03:51 -07001052 wait_on_page_writeback(page);
1053 }
Hugh Dickins03f15c82015-11-05 18:49:56 -08001054
Christoph Lametere24f0b82006-06-23 02:03:51 -07001055 /*
KAMEZAWA Hiroyukidc386d42007-07-26 10:41:07 -07001056 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
1057 * we cannot notice that anon_vma is freed while we migrates a page.
Hugh Dickins1ce82b62011-01-13 15:47:30 -08001058 * This get_anon_vma() delays freeing anon_vma pointer until the end
KAMEZAWA Hiroyukidc386d42007-07-26 10:41:07 -07001059 * of migration. File cache pages are no problem because of page_lock()
KAMEZAWA Hiroyuki989f89c2007-08-30 23:56:21 -07001060 * File Caches may use write_page() or lock_page() in migration, then,
1061 * just care Anon page here.
Hugh Dickins03f15c82015-11-05 18:49:56 -08001062 *
1063 * Only page_get_anon_vma() understands the subtleties of
1064 * getting a hold on an anon_vma from outside one of its mms.
1065 * But if we cannot get anon_vma, then we won't need it anyway,
1066 * because that implies that the anon page is no longer mapped
1067 * (and cannot be remapped so long as we hold the page lock).
Christoph Lametere24f0b82006-06-23 02:03:51 -07001068 */
Hugh Dickins03f15c82015-11-05 18:49:56 -08001069 if (PageAnon(page) && !PageKsm(page))
Peter Zijlstra746b18d2011-05-24 17:12:10 -07001070 anon_vma = page_get_anon_vma(page);
Shaohua Li62e1c552008-02-04 22:29:33 -08001071
Hugh Dickins7db76712015-11-05 18:49:49 -08001072 /*
1073 * Block others from accessing the new page when we get around to
1074 * establishing additional references. We are usually the only one
1075 * holding a reference to newpage at this point. We used to have a BUG
1076 * here if trylock_page(newpage) fails, but would like to allow for
1077 * cases where there might be a race with the previous use of newpage.
1078 * This is much like races on refcount of oldpage: just don't BUG().
1079 */
1080 if (unlikely(!trylock_page(newpage)))
1081 goto out_unlock;
1082
Minchan Kimbda807d2016-07-26 15:23:05 -07001083 if (unlikely(!is_lru)) {
1084 rc = move_to_new_page(newpage, page, mode);
1085 goto out_unlock_both;
1086 }
1087
KAMEZAWA Hiroyukidc386d42007-07-26 10:41:07 -07001088 /*
Shaohua Li62e1c552008-02-04 22:29:33 -08001089 * Corner case handling:
1090 * 1. When a new swap-cache page is read into, it is added to the LRU
1091 * and treated as swapcache but it has no rmap yet.
1092 * Calling try_to_unmap() against a page->mapping==NULL page will
1093 * trigger a BUG. So handle it here.
1094 * 2. An orphaned page (see truncate_complete_page) might have
1095 * fs-private metadata. The page can be picked up due to memory
1096 * offlining. Everywhere else except page reclaim, the page is
1097 * invisible to the vm, so the page can not be migrated. So try to
1098 * free the metadata, so the page can be freed.
KAMEZAWA Hiroyukidc386d42007-07-26 10:41:07 -07001099 */
Shaohua Li62e1c552008-02-04 22:29:33 -08001100 if (!page->mapping) {
Sasha Levin309381fea2014-01-23 15:52:54 -08001101 VM_BUG_ON_PAGE(PageAnon(page), page);
Hugh Dickins1ce82b62011-01-13 15:47:30 -08001102 if (page_has_private(page)) {
Shaohua Li62e1c552008-02-04 22:29:33 -08001103 try_to_free_buffers(page);
Hugh Dickins7db76712015-11-05 18:49:49 -08001104 goto out_unlock_both;
Shaohua Li62e1c552008-02-04 22:29:33 -08001105 }
Hugh Dickins7db76712015-11-05 18:49:49 -08001106 } else if (page_mapped(page)) {
1107 /* Establish migration ptes */
Hugh Dickins03f15c82015-11-05 18:49:56 -08001108 VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
1109 page);
Hugh Dickins2ebba6b2014-12-12 16:56:19 -08001110 try_to_unmap(page,
Wanpeng Lida1b13c2015-09-08 15:03:27 -07001111 TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
Hugh Dickins2ebba6b2014-12-12 16:56:19 -08001112 page_was_mapped = 1;
1113 }
KAMEZAWA Hiroyukidc386d42007-07-26 10:41:07 -07001114
Christoph Lametere6a15302006-06-25 05:46:49 -07001115 if (!page_mapped(page))
Hugh Dickins5c3f9a62015-11-05 18:49:53 -08001116 rc = move_to_new_page(newpage, page, mode);
Christoph Lametere24f0b82006-06-23 02:03:51 -07001117
Hugh Dickins5c3f9a62015-11-05 18:49:53 -08001118 if (page_was_mapped)
1119 remove_migration_ptes(page,
Kirill A. Shutemove3884662016-03-17 14:20:07 -07001120 rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);
Mel Gorman3f6c8272010-05-24 14:32:17 -07001121
Hugh Dickins7db76712015-11-05 18:49:49 -08001122out_unlock_both:
1123 unlock_page(newpage);
1124out_unlock:
Mel Gorman3f6c8272010-05-24 14:32:17 -07001125 /* Drop an anon_vma reference if we took one */
Rik van Riel76545062010-08-09 17:18:41 -07001126 if (anon_vma)
Peter Zijlstra9e601092011-03-22 16:32:46 -07001127 put_anon_vma(anon_vma);
Christoph Lametere24f0b82006-06-23 02:03:51 -07001128 unlock_page(page);
Minchan Kim0dabec92011-10-31 17:06:57 -07001129out:
Minchan Kimc6c919e2016-07-26 15:23:02 -07001130 /*
1131 * If migration is successful, decrease refcount of the newpage
1132 * which will not free the page because new page owner increased
1133 * refcounter. As well, if it is LRU page, add the page to LRU
David Hildenbrande0a352f2019-02-01 14:21:19 -08001134 * list in here. Use the old state of the isolated source page to
1135 * determine if we migrated a LRU page. newpage was already unlocked
1136 * and possibly modified by its owner - don't rely on the page
1137 * state.
Minchan Kimc6c919e2016-07-26 15:23:02 -07001138 */
1139 if (rc == MIGRATEPAGE_SUCCESS) {
David Hildenbrande0a352f2019-02-01 14:21:19 -08001140 if (unlikely(!is_lru))
Minchan Kimc6c919e2016-07-26 15:23:02 -07001141 put_page(newpage);
1142 else
1143 putback_lru_page(newpage);
1144 }
1145
Minchan Kim0dabec92011-10-31 17:06:57 -07001146 return rc;
1147}
Christoph Lameter95a402c2006-06-23 02:03:53 -07001148
Minchan Kim0dabec92011-10-31 17:06:57 -07001149/*
Geert Uytterhoevenef2a5152015-04-14 15:44:22 -07001150 * gcc 4.7 and 4.8 on arm get an ICEs when inlining unmap_and_move(). Work
1151 * around it.
1152 */
Nick Desaulniers815f0dd2018-08-22 16:37:24 -07001153#if defined(CONFIG_ARM) && \
1154 defined(GCC_VERSION) && GCC_VERSION < 40900 && GCC_VERSION >= 40700
Geert Uytterhoevenef2a5152015-04-14 15:44:22 -07001155#define ICE_noinline noinline
1156#else
1157#define ICE_noinline
1158#endif
1159
1160/*
Minchan Kim0dabec92011-10-31 17:06:57 -07001161 * Obtain the lock on page, remove all ptes and migrate the page
1162 * to the newly allocated page in newpage.
1163 */
Geert Uytterhoevenef2a5152015-04-14 15:44:22 -07001164static ICE_noinline int unmap_and_move(new_page_t get_new_page,
1165 free_page_t put_new_page,
1166 unsigned long private, struct page *page,
Naoya Horiguchiadd05ce2015-06-24 16:56:50 -07001167 int force, enum migrate_mode mode,
1168 enum migrate_reason reason)
Minchan Kim0dabec92011-10-31 17:06:57 -07001169{
Hugh Dickins2def7422015-11-05 18:49:46 -08001170 int rc = MIGRATEPAGE_SUCCESS;
Yang Shi74d4a572019-11-30 17:57:12 -08001171 struct page *newpage = NULL;
Minchan Kim0dabec92011-10-31 17:06:57 -07001172
Michal Hocko94723aa2018-04-10 16:30:07 -07001173 if (!thp_migration_supported() && PageTransHuge(page))
1174 return -ENOMEM;
1175
Minchan Kim0dabec92011-10-31 17:06:57 -07001176 if (page_count(page) == 1) {
1177 /* page was freed from under us. So we are done. */
Minchan Kimc6c919e2016-07-26 15:23:02 -07001178 ClearPageActive(page);
1179 ClearPageUnevictable(page);
Minchan Kimbda807d2016-07-26 15:23:05 -07001180 if (unlikely(__PageMovable(page))) {
1181 lock_page(page);
1182 if (!PageMovable(page))
1183 __ClearPageIsolated(page);
1184 unlock_page(page);
1185 }
Minchan Kim0dabec92011-10-31 17:06:57 -07001186 goto out;
1187 }
1188
Yang Shi74d4a572019-11-30 17:57:12 -08001189 newpage = get_new_page(page, private);
1190 if (!newpage)
1191 return -ENOMEM;
1192
Hugh Dickins9c620e22013-02-22 16:35:14 -08001193 rc = __unmap_and_move(page, newpage, force, mode);
Minchan Kimc6c919e2016-07-26 15:23:02 -07001194 if (rc == MIGRATEPAGE_SUCCESS)
Vlastimil Babka7cd12b42016-03-15 14:56:18 -07001195 set_page_owner_migrate_reason(newpage, reason);
Rafael Aquinibf6bddf12012-12-11 16:02:42 -08001196
Minchan Kim0dabec92011-10-31 17:06:57 -07001197out:
Christoph Lametere24f0b82006-06-23 02:03:51 -07001198 if (rc != -EAGAIN) {
Minchan Kim0dabec92011-10-31 17:06:57 -07001199 /*
1200 * A page that has been migrated has all references
1201 * removed and will be freed. A page that has not been
1202 * migrated will have kepts its references and be
1203 * restored.
1204 */
1205 list_del(&page->lru);
Ming Ling6afcf8e2016-12-12 16:42:26 -08001206
1207 /*
1208 * Compaction can migrate also non-LRU pages which are
1209 * not accounted to NR_ISOLATED_*. They can be recognized
1210 * as __PageMovable
1211 */
1212 if (likely(!__PageMovable(page)))
Naoya Horiguchie8db67e2017-09-08 16:11:12 -07001213 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
1214 page_is_file_cache(page), -hpage_nr_pages(page));
Christoph Lametere24f0b82006-06-23 02:03:51 -07001215 }
David Rientjes68711a72014-06-04 16:08:25 -07001216
Christoph Lameter95a402c2006-06-23 02:03:53 -07001217 /*
Minchan Kimc6c919e2016-07-26 15:23:02 -07001218 * If migration is successful, releases reference grabbed during
1219 * isolation. Otherwise, restore the page to right list unless
1220 * we want to retry.
Christoph Lameter95a402c2006-06-23 02:03:53 -07001221 */
Minchan Kimc6c919e2016-07-26 15:23:02 -07001222 if (rc == MIGRATEPAGE_SUCCESS) {
1223 put_page(page);
1224 if (reason == MR_MEMORY_FAILURE) {
1225 /*
1226 * Set PG_HWPoison on just freed page
1227 * intentionally. Although it's rather weird,
1228 * it's how HWPoison flag works at the moment.
1229 */
Naoya Horiguchid4ae9912018-08-23 17:00:42 -07001230 if (set_hwpoison_free_buddy_page(page))
Minchan Kimc6c919e2016-07-26 15:23:02 -07001231 num_poisoned_pages_inc();
1232 }
1233 } else {
Minchan Kimbda807d2016-07-26 15:23:05 -07001234 if (rc != -EAGAIN) {
1235 if (likely(!__PageMovable(page))) {
1236 putback_lru_page(page);
1237 goto put_new;
1238 }
1239
1240 lock_page(page);
1241 if (PageMovable(page))
1242 putback_movable_page(page);
1243 else
1244 __ClearPageIsolated(page);
1245 unlock_page(page);
1246 put_page(page);
1247 }
1248put_new:
Minchan Kimc6c919e2016-07-26 15:23:02 -07001249 if (put_new_page)
1250 put_new_page(newpage, private);
1251 else
1252 put_page(newpage);
1253 }
David Rientjes68711a72014-06-04 16:08:25 -07001254
Christoph Lametere24f0b82006-06-23 02:03:51 -07001255 return rc;
1256}
1257
1258/*
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001259 * Counterpart of unmap_and_move() for hugepage migration.
1260 *
1261 * This function doesn't wait for the completion of hugepage I/O
1262 * because there is no race between I/O and migration for hugepages.
1263 * Note that currently hugepage I/O occurs only in direct I/O
1264 * where no lock is held and PG_writeback is irrelevant,
1265 * and the writeback status of all subpages is counted in the reference
1266 * count of the head page (i.e. if all subpages of a 2MB hugepage are
1267 * under direct I/O, the reference count of the head page is 512 and a bit more.)
1268 * This means that when we try to migrate a hugepage whose subpages are
1269 * doing direct I/O, some references remain after try_to_unmap() and
1270 * hugepage migration fails without data corruption.
1271 *
1272 * There is also no race when direct I/O is issued on the page under migration,
1273 * because then pte is replaced with migration swap entry and direct I/O code
1274 * will wait in the page fault for migration to complete.
1275 */
1276static int unmap_and_move_huge_page(new_page_t get_new_page,
David Rientjes68711a72014-06-04 16:08:25 -07001277 free_page_t put_new_page, unsigned long private,
1278 struct page *hpage, int force,
Vlastimil Babka7cd12b42016-03-15 14:56:18 -07001279 enum migrate_mode mode, int reason)
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001280{
Hugh Dickins2def7422015-11-05 18:49:46 -08001281 int rc = -EAGAIN;
Hugh Dickins2ebba6b2014-12-12 16:56:19 -08001282 int page_was_mapped = 0;
Joonsoo Kim32665f22014-01-21 15:51:15 -08001283 struct page *new_hpage;
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001284 struct anon_vma *anon_vma = NULL;
1285
Naoya Horiguchi83467ef2013-09-11 14:22:11 -07001286 /*
Anshuman Khandual7ed2c312019-03-05 15:43:44 -08001287 * Migratability of hugepages depends on the architecture and the hugepage size.
Naoya Horiguchi83467ef2013-09-11 14:22:11 -07001288 * This check is necessary because some callers of hugepage migration
1289 * like soft offline and memory hotremove don't walk through page
1290 * tables or check whether the hugepage is pmd-based or not before
1291 * kicking migration.
1292 */
Naoya Horiguchi100873d2014-06-04 16:10:56 -07001293 if (!hugepage_migration_supported(page_hstate(hpage))) {
Joonsoo Kim32665f22014-01-21 15:51:15 -08001294 putback_active_hugepage(hpage);
Naoya Horiguchi83467ef2013-09-11 14:22:11 -07001295 return -ENOSYS;
Joonsoo Kim32665f22014-01-21 15:51:15 -08001296 }
Naoya Horiguchi83467ef2013-09-11 14:22:11 -07001297
Michal Hocko666feb22018-04-10 16:30:03 -07001298 new_hpage = get_new_page(hpage, private);
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001299 if (!new_hpage)
1300 return -ENOMEM;
1301
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001302 if (!trylock_page(hpage)) {
Jérôme Glisse2916ecc2017-09-08 16:12:06 -07001303 if (!force)
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001304 goto out;
Jérôme Glisse2916ecc2017-09-08 16:12:06 -07001305 switch (mode) {
1306 case MIGRATE_SYNC:
1307 case MIGRATE_SYNC_NO_COPY:
1308 break;
1309 default:
1310 goto out;
1311 }
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001312 lock_page(hpage);
1313 }
1314
Mike Kravetzcb6acd02019-02-28 16:22:02 -08001315 /*
1316 * Check for pages which are in the process of being freed. Without
1317 * page_mapping() set, the hugetlbfs-specific move-page routine will not
1318 * be called and we could leak usage counts for subpools.
1319 */
1320 if (page_private(hpage) && !page_mapping(hpage)) {
1321 rc = -EBUSY;
1322 goto out_unlock;
1323 }
1324
Peter Zijlstra746b18d2011-05-24 17:12:10 -07001325 if (PageAnon(hpage))
1326 anon_vma = page_get_anon_vma(hpage);
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001327
Hugh Dickins7db76712015-11-05 18:49:49 -08001328 if (unlikely(!trylock_page(new_hpage)))
1329 goto put_anon;
1330
Hugh Dickins2ebba6b2014-12-12 16:56:19 -08001331 if (page_mapped(hpage)) {
1332 try_to_unmap(hpage,
Mike Kravetzddeaab32019-01-08 15:23:36 -08001333 TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
Hugh Dickins2ebba6b2014-12-12 16:56:19 -08001334 page_was_mapped = 1;
1335 }
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001336
1337 if (!page_mapped(hpage))
Hugh Dickins5c3f9a62015-11-05 18:49:53 -08001338 rc = move_to_new_page(new_hpage, hpage, mode);
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001339
Hugh Dickins5c3f9a62015-11-05 18:49:53 -08001340 if (page_was_mapped)
1341 remove_migration_ptes(hpage,
Kirill A. Shutemove3884662016-03-17 14:20:07 -07001342 rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001343
Hugh Dickins7db76712015-11-05 18:49:49 -08001344 unlock_page(new_hpage);
1345
1346put_anon:
Hugh Dickinsfd4a4662011-01-13 15:47:31 -08001347 if (anon_vma)
Peter Zijlstra9e601092011-03-22 16:32:46 -07001348 put_anon_vma(anon_vma);
Aneesh Kumar K.V8e6ac7f2012-07-31 16:42:27 -07001349
Hugh Dickins2def7422015-11-05 18:49:46 -08001350 if (rc == MIGRATEPAGE_SUCCESS) {
Michal Hockoab5ac902018-01-31 16:20:48 -08001351 move_hugetlb_state(hpage, new_hpage, reason);
Hugh Dickins2def7422015-11-05 18:49:46 -08001352 put_new_page = NULL;
1353 }
Aneesh Kumar K.V8e6ac7f2012-07-31 16:42:27 -07001354
Mike Kravetzcb6acd02019-02-28 16:22:02 -08001355out_unlock:
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001356 unlock_page(hpage);
Hillf Danton09761332011-12-08 14:34:20 -08001357out:
Naoya Horiguchib8ec1ce2013-09-11 14:22:01 -07001358 if (rc != -EAGAIN)
1359 putback_active_hugepage(hpage);
David Rientjes68711a72014-06-04 16:08:25 -07001360
1361 /*
1362 * If migration was not successful and there's a freeing callback, use
1363 * it. Otherwise, putback_active_hugepage() drops the reference
1364 * obtained from get_new_page().
1365 */
Hugh Dickins2def7422015-11-05 18:49:46 -08001366 if (put_new_page)
David Rientjes68711a72014-06-04 16:08:25 -07001367 put_new_page(new_hpage, private);
1368 else
Naoya Horiguchi3aaa76e2015-09-22 14:59:14 -07001369 putback_active_hugepage(new_hpage);
David Rientjes68711a72014-06-04 16:08:25 -07001370
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001371 return rc;
1372}
1373
1374/*
Srivatsa S. Bhatc73e5c92013-04-29 15:08:16 -07001375 * migrate_pages - migrate the pages specified in a list, to the free pages
1376 * supplied as the target for the page migration
Christoph Lameterb20a3502006-03-22 00:09:12 -08001377 *
Srivatsa S. Bhatc73e5c92013-04-29 15:08:16 -07001378 * @from: The list of pages to be migrated.
1379 * @get_new_page: The function used to allocate free pages to be used
1380 * as the target of the page migration.
David Rientjes68711a72014-06-04 16:08:25 -07001381 * @put_new_page: The function used to free target pages if migration
1382 * fails, or NULL if no special handling is necessary.
Srivatsa S. Bhatc73e5c92013-04-29 15:08:16 -07001383 * @private: Private data to be passed on to get_new_page()
1384 * @mode: The migration mode that specifies the constraints for
1385 * page migration, if any.
1386 * @reason: The reason for page migration.
Christoph Lameterb20a3502006-03-22 00:09:12 -08001387 *
Srivatsa S. Bhatc73e5c92013-04-29 15:08:16 -07001388 * The function returns after 10 attempts or when no pages are movable any more,
1389 * either because the list has become empty or because no retryable pages remain.
Hugh Dickins14e0f9b2015-11-05 18:49:43 -08001390 * The caller should call putback_movable_pages() to return pages to the LRU
Minchan Kim28bd6572011-01-25 15:07:26 -08001391 * or free list only if ret != 0.
Christoph Lameterb20a3502006-03-22 00:09:12 -08001392 *
Srivatsa S. Bhatc73e5c92013-04-29 15:08:16 -07001393 * Returns the number of pages that were not migrated, or an error code.
Christoph Lameterb20a3502006-03-22 00:09:12 -08001394 */
Hugh Dickins9c620e22013-02-22 16:35:14 -08001395int migrate_pages(struct list_head *from, new_page_t get_new_page,
David Rientjes68711a72014-06-04 16:08:25 -07001396 free_page_t put_new_page, unsigned long private,
1397 enum migrate_mode mode, int reason)
Christoph Lameterb20a3502006-03-22 00:09:12 -08001398{
Christoph Lametere24f0b82006-06-23 02:03:51 -07001399 int retry = 1;
Christoph Lameterb20a3502006-03-22 00:09:12 -08001400 int nr_failed = 0;
Mel Gorman5647bc22012-10-19 10:46:20 +01001401 int nr_succeeded = 0;
Christoph Lameterb20a3502006-03-22 00:09:12 -08001402 int pass = 0;
1403 struct page *page;
1404 struct page *page2;
1405 int swapwrite = current->flags & PF_SWAPWRITE;
1406 int rc;
1407
1408 if (!swapwrite)
1409 current->flags |= PF_SWAPWRITE;
1410
Christoph Lametere24f0b82006-06-23 02:03:51 -07001411 for(pass = 0; pass < 10 && retry; pass++) {
1412 retry = 0;
Christoph Lameterb20a3502006-03-22 00:09:12 -08001413
Christoph Lametere24f0b82006-06-23 02:03:51 -07001414 list_for_each_entry_safe(page, page2, from, lru) {
Michal Hocko94723aa2018-04-10 16:30:07 -07001415retry:
Christoph Lametere24f0b82006-06-23 02:03:51 -07001416 cond_resched();
Christoph Lameterb20a3502006-03-22 00:09:12 -08001417
Naoya Horiguchi31caf662013-09-11 14:21:59 -07001418 if (PageHuge(page))
1419 rc = unmap_and_move_huge_page(get_new_page,
David Rientjes68711a72014-06-04 16:08:25 -07001420 put_new_page, private, page,
Vlastimil Babka7cd12b42016-03-15 14:56:18 -07001421 pass > 2, mode, reason);
Naoya Horiguchi31caf662013-09-11 14:21:59 -07001422 else
David Rientjes68711a72014-06-04 16:08:25 -07001423 rc = unmap_and_move(get_new_page, put_new_page,
Naoya Horiguchiadd05ce2015-06-24 16:56:50 -07001424 private, page, pass > 2, mode,
1425 reason);
Christoph Lameterb20a3502006-03-22 00:09:12 -08001426
Christoph Lametere24f0b82006-06-23 02:03:51 -07001427 switch(rc) {
Christoph Lameter95a402c2006-06-23 02:03:53 -07001428 case -ENOMEM:
Michal Hocko94723aa2018-04-10 16:30:07 -07001429 /*
1430 * THP migration might be unsupported or the
1431 * allocation could've failed so we should
1432 * retry on the same page with the THP split
1433 * to base pages.
1434 *
1435 * Head page is retried immediately and tail
1436 * pages are added to the tail of the list so
1437 * we encounter them after the rest of the list
1438 * is processed.
1439 */
Anshuman Khanduale6112fc2018-10-05 15:51:51 -07001440 if (PageTransHuge(page) && !PageHuge(page)) {
Michal Hocko94723aa2018-04-10 16:30:07 -07001441 lock_page(page);
1442 rc = split_huge_page_to_list(page, from);
1443 unlock_page(page);
1444 if (!rc) {
1445 list_safe_reset_next(page, page2, lru);
1446 goto retry;
1447 }
1448 }
David Rientjesdfef2ef2016-05-20 16:59:05 -07001449 nr_failed++;
Christoph Lameter95a402c2006-06-23 02:03:53 -07001450 goto out;
Christoph Lametere24f0b82006-06-23 02:03:51 -07001451 case -EAGAIN:
Christoph Lameter2d1db3b2006-06-23 02:03:33 -07001452 retry++;
Christoph Lametere24f0b82006-06-23 02:03:51 -07001453 break;
Rafael Aquini78bd5202012-12-11 16:02:31 -08001454 case MIGRATEPAGE_SUCCESS:
Mel Gorman5647bc22012-10-19 10:46:20 +01001455 nr_succeeded++;
Christoph Lametere24f0b82006-06-23 02:03:51 -07001456 break;
1457 default:
Naoya Horiguchi354a3362014-01-21 15:51:14 -08001458 /*
1459 * Permanent failure (-EBUSY, -ENOSYS, etc.):
1460 * unlike the -EAGAIN case, the failed page is
1461 * removed from the migration page list and is not
1462 * retried in the next outer loop.
1463 */
Christoph Lameter2d1db3b2006-06-23 02:03:33 -07001464 nr_failed++;
Christoph Lametere24f0b82006-06-23 02:03:51 -07001465 break;
Christoph Lameter2d1db3b2006-06-23 02:03:33 -07001466 }
Christoph Lameterb20a3502006-03-22 00:09:12 -08001467 }
1468 }
Vlastimil Babkaf2f81fb2015-11-05 18:47:03 -08001469 nr_failed += retry;
1470 rc = nr_failed;
Christoph Lameter95a402c2006-06-23 02:03:53 -07001471out:
Mel Gorman5647bc22012-10-19 10:46:20 +01001472 if (nr_succeeded)
1473 count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
1474 if (nr_failed)
1475 count_vm_events(PGMIGRATE_FAIL, nr_failed);
Mel Gorman7b2a2d42012-10-19 14:07:31 +01001476 trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason);
1477
Christoph Lameterb20a3502006-03-22 00:09:12 -08001478 if (!swapwrite)
1479 current->flags &= ~PF_SWAPWRITE;
1480
Rafael Aquini78bd5202012-12-11 16:02:31 -08001481 return rc;
Christoph Lameterb20a3502006-03-22 00:09:12 -08001482}
1483
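/*
 * A sketch of the typical caller pattern (compare do_move_pages_to_node()
 * below): pages are isolated onto a private list and the whole list is
 * handed to migrate_pages(); whatever could not be migrated must be put
 * back by the caller. alloc_new_node_page() and target_nid stand in for
 * whichever allocation callback and private data the real caller uses:
 *
 *	LIST_HEAD(pagelist);
 *	int err;
 *
 *	... fill pagelist via isolate_lru_page()/isolate_huge_page() ...
 *
 *	err = migrate_pages(&pagelist, alloc_new_node_page, NULL,
 *			    target_nid, MIGRATE_SYNC, MR_SYSCALL);
 *	if (err)
 *		putback_movable_pages(&pagelist);
 */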
Christoph Lameter742755a2006-06-23 02:03:55 -07001484#ifdef CONFIG_NUMA
Christoph Lameter742755a2006-06-23 02:03:55 -07001485
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001486static int store_status(int __user *status, int start, int value, int nr)
Christoph Lameter742755a2006-06-23 02:03:55 -07001487{
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001488 while (nr-- > 0) {
1489 if (put_user(value, status + start))
1490 return -EFAULT;
1491 start++;
1492 }
Christoph Lameter742755a2006-06-23 02:03:55 -07001493
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001494 return 0;
1495}
Christoph Lameter742755a2006-06-23 02:03:55 -07001496
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001497static int do_move_pages_to_node(struct mm_struct *mm,
1498 struct list_head *pagelist, int node)
1499{
1500 int err;
Christoph Lameter742755a2006-06-23 02:03:55 -07001501
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001502 if (list_empty(pagelist))
1503 return 0;
Christoph Lameter742755a2006-06-23 02:03:55 -07001504
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001505 err = migrate_pages(pagelist, alloc_new_node_page, NULL, node,
1506 MIGRATE_SYNC, MR_SYSCALL);
1507 if (err)
1508 putback_movable_pages(pagelist);
1509 return err;
Christoph Lameter742755a2006-06-23 02:03:55 -07001510}
1511
1512/*
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001513 * Resolves the given address to a struct page, isolates it from the LRU and
1514 * puts it on the given pagelist.
1515 * Returns -errno if the page cannot be found/isolated, or 0 when it has been
1516 * queued or when the page doesn't need to be migrated because it is already on
1517 * the target node.
Christoph Lameter742755a2006-06-23 02:03:55 -07001518 */
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001519static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
1520 int node, struct list_head *pagelist, bool migrate_all)
Christoph Lameter742755a2006-06-23 02:03:55 -07001521{
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001522 struct vm_area_struct *vma;
1523 struct page *page;
1524 unsigned int follflags;
Christoph Lameter742755a2006-06-23 02:03:55 -07001525 int err;
Christoph Lameter742755a2006-06-23 02:03:55 -07001526
1527 down_read(&mm->mmap_sem);
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001528 err = -EFAULT;
1529 vma = find_vma(mm, addr);
1530 if (!vma || addr < vma->vm_start || !vma_migratable(vma))
1531 goto out;
Christoph Lameter742755a2006-06-23 02:03:55 -07001532
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001533 /* FOLL_DUMP to ignore special (like zero) pages */
1534 follflags = FOLL_GET | FOLL_DUMP;
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001535 page = follow_page(vma, addr, follflags);
Christoph Lameter742755a2006-06-23 02:03:55 -07001536
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001537 err = PTR_ERR(page);
1538 if (IS_ERR(page))
1539 goto out;
Christoph Lameter742755a2006-06-23 02:03:55 -07001540
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001541 err = -ENOENT;
1542 if (!page)
1543 goto out;
Christoph Lameter742755a2006-06-23 02:03:55 -07001544
Brice Gogline78bbfa2008-10-18 20:27:15 -07001545 err = 0;
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001546 if (page_to_nid(page) == node)
1547 goto out_putpage;
Christoph Lameter742755a2006-06-23 02:03:55 -07001548
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001549 err = -EACCES;
1550 if (page_mapcount(page) > 1 && !migrate_all)
1551 goto out_putpage;
1552
1553 if (PageHuge(page)) {
1554 if (PageHead(page)) {
1555 isolate_huge_page(page, pagelist);
1556 err = 0;
1557 }
1558 } else {
1559 struct page *head;
1560
1561 head = compound_head(page);
1562 err = isolate_lru_page(head);
1563 if (err)
1564 goto out_putpage;
1565
1566 err = 0;
1567 list_add_tail(&head->lru, pagelist);
1568 mod_node_page_state(page_pgdat(head),
1569 NR_ISOLATED_ANON + page_is_file_cache(head),
1570 hpage_nr_pages(head));
1571 }
1572out_putpage:
1573 /*
1574 * Either way, drop the reference taken by follow_page(): if the
1575 * page was isolated, the pagelist now holds its own reference;
1576 * otherwise the reference is no longer needed.
1577 */
1578 put_page(page);
1579out:
Christoph Lameter742755a2006-06-23 02:03:55 -07001580 up_read(&mm->mmap_sem);
1581 return err;
1582}
1583
1584/*
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001585 * Migrate an array of page addresses to the corresponding array of nodes
1586 * and fill in the array of status values.
1587 */
Christoph Lameter3268c632012-03-21 16:34:06 -07001588static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001589 unsigned long nr_pages,
1590 const void __user * __user *pages,
1591 const int __user *nodes,
1592 int __user *status, int flags)
1593{
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001594 int current_node = NUMA_NO_NODE;
1595 LIST_HEAD(pagelist);
1596 int start, i;
1597 int err = 0, err1;
Brice Goglin35282a22009-06-16 15:32:43 -07001598
1599 migrate_prep();
1600
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001601 for (i = start = 0; i < nr_pages; i++) {
1602 const void __user *p;
1603 unsigned long addr;
1604 int node;
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001605
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001606 err = -EFAULT;
1607 if (get_user(p, pages + i))
1608 goto out_flush;
1609 if (get_user(node, nodes + i))
1610 goto out_flush;
Andrey Konovalov057d33892019-09-25 16:48:30 -07001611 addr = (unsigned long)untagged_addr(p);
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001612
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001613 err = -ENODEV;
1614 if (node < 0 || node >= MAX_NUMNODES)
1615 goto out_flush;
1616 if (!node_state(node, N_MEMORY))
1617 goto out_flush;
Brice Goglin3140a222009-01-06 14:38:57 -08001618
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001619 err = -EACCES;
1620 if (!node_isset(node, task_nodes))
1621 goto out_flush;
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001622
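		/*
		 * Pages destined for the same node are batched on
		 * @pagelist; when the requested node changes, the batch
		 * collected so far is migrated and its status reported
		 * before a new batch is started.
		 */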
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001623 if (current_node == NUMA_NO_NODE) {
1624 current_node = node;
1625 start = i;
1626 } else if (node != current_node) {
1627 err = do_move_pages_to_node(mm, &pagelist, current_node);
1628 if (err)
1629 goto out;
1630 err = store_status(status, start, current_node, i - start);
1631 if (err)
1632 goto out;
1633 start = i;
1634 current_node = node;
Brice Goglin3140a222009-01-06 14:38:57 -08001635 }
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001636
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001637 /*
1638 * Errors in the page lookup or isolation are not fatal and we simply
1639 * report them via the status array.
1640 */
1641 err = add_page_for_migration(mm, addr, current_node,
1642 &pagelist, flags & MPOL_MF_MOVE_ALL);
1643 if (!err)
1644 continue;
Brice Goglin3140a222009-01-06 14:38:57 -08001645
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001646 err = store_status(status, i, err, 1);
1647 if (err)
1648 goto out_flush;
Brice Goglin3140a222009-01-06 14:38:57 -08001649
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001650 err = do_move_pages_to_node(mm, &pagelist, current_node);
1651 if (err)
1652 goto out;
1653 if (i > start) {
1654 err = store_status(status, start, current_node, i - start);
1655 if (err)
1656 goto out;
1657 }
1658 current_node = NUMA_NO_NODE;
Brice Goglin3140a222009-01-06 14:38:57 -08001659 }
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001660out_flush:
Michal Hocko8f175cf2018-04-20 14:55:35 -07001661 if (list_empty(&pagelist))
1662 return err;
1663
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001664 /* Make sure we do not overwrite the existing error */
1665 err1 = do_move_pages_to_node(mm, &pagelist, current_node);
1666 if (!err1)
1667 err1 = store_status(status, start, current_node, i - start);
1668 if (!err)
1669 err = err1;
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001670out:
1671 return err;
1672}
1673
1674/*
Brice Goglin2f007e72008-10-18 20:27:16 -07001675 * Determine the nodes of an array of pages and store them in an array of status values.
Christoph Lameter742755a2006-06-23 02:03:55 -07001676 */
Brice Goglin80bba122008-12-09 13:14:23 -08001677static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
1678 const void __user **pages, int *status)
Christoph Lameter742755a2006-06-23 02:03:55 -07001679{
Brice Goglin2f007e72008-10-18 20:27:16 -07001680 unsigned long i;
Brice Goglin2f007e72008-10-18 20:27:16 -07001681
Christoph Lameter742755a2006-06-23 02:03:55 -07001682 down_read(&mm->mmap_sem);
1683
Brice Goglin2f007e72008-10-18 20:27:16 -07001684 for (i = 0; i < nr_pages; i++) {
Brice Goglin80bba122008-12-09 13:14:23 -08001685 unsigned long addr = (unsigned long)(*pages);
Christoph Lameter742755a2006-06-23 02:03:55 -07001686 struct vm_area_struct *vma;
1687 struct page *page;
KOSAKI Motohiroc095adb2008-12-16 16:06:43 +09001688 int err = -EFAULT;
Brice Goglin2f007e72008-10-18 20:27:16 -07001689
1690 vma = find_vma(mm, addr);
Gleb Natapov70384dc2010-10-26 14:22:07 -07001691 if (!vma || addr < vma->vm_start)
Christoph Lameter742755a2006-06-23 02:03:55 -07001692 goto set_status;
1693
Kirill A. Shutemovd8998442015-09-04 15:47:53 -07001694 /* FOLL_DUMP to ignore special (like zero) pages */
1695 page = follow_page(vma, addr, FOLL_DUMP);
Linus Torvalds89f5b7d2008-06-20 11:18:25 -07001696
1697 err = PTR_ERR(page);
1698 if (IS_ERR(page))
1699 goto set_status;
1700
Kirill A. Shutemovd8998442015-09-04 15:47:53 -07001701 err = page ? page_to_nid(page) : -ENOENT;
Christoph Lameter742755a2006-06-23 02:03:55 -07001702set_status:
Brice Goglin80bba122008-12-09 13:14:23 -08001703 *status = err;
1704
1705 pages++;
1706 status++;
1707 }
1708
1709 up_read(&mm->mmap_sem);
1710}
1711
1712/*
1713 * Determine the nodes of a user array of pages and store them in
1714 * a user array of status values.
1715 */
1716static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
1717 const void __user * __user *pages,
1718 int __user *status)
1719{
1720#define DO_PAGES_STAT_CHUNK_NR 16
1721 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
1722 int chunk_status[DO_PAGES_STAT_CHUNK_NR];
Brice Goglin80bba122008-12-09 13:14:23 -08001723
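	/*
	 * Work through the user arrays in chunks of
	 * DO_PAGES_STAT_CHUNK_NR entries so the temporary arrays above
	 * stay small enough to live on the kernel stack.
	 */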
H. Peter Anvin87b8d1a2010-02-18 16:13:40 -08001724 while (nr_pages) {
1725 unsigned long chunk_nr;
Brice Goglin80bba122008-12-09 13:14:23 -08001726
H. Peter Anvin87b8d1a2010-02-18 16:13:40 -08001727 chunk_nr = nr_pages;
1728 if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
1729 chunk_nr = DO_PAGES_STAT_CHUNK_NR;
1730
1731 if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
1732 break;
Brice Goglin80bba122008-12-09 13:14:23 -08001733
1734 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
1735
H. Peter Anvin87b8d1a2010-02-18 16:13:40 -08001736 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
1737 break;
Christoph Lameter742755a2006-06-23 02:03:55 -07001738
H. Peter Anvin87b8d1a2010-02-18 16:13:40 -08001739 pages += chunk_nr;
1740 status += chunk_nr;
1741 nr_pages -= chunk_nr;
1742 }
1743 return nr_pages ? -EFAULT : 0;
Christoph Lameter742755a2006-06-23 02:03:55 -07001744}
1745
1746/*
1747 * Move a list of pages in the address space of the process identified
1748 * by @pid (or of the current process when @pid is zero).
1749 */
Dominik Brodowski7addf442018-03-17 16:08:03 +01001750static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
1751 const void __user * __user *pages,
1752 const int __user *nodes,
1753 int __user *status, int flags)
Christoph Lameter742755a2006-06-23 02:03:55 -07001754{
Christoph Lameter742755a2006-06-23 02:03:55 -07001755 struct task_struct *task;
Christoph Lameter742755a2006-06-23 02:03:55 -07001756 struct mm_struct *mm;
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001757 int err;
Christoph Lameter3268c632012-03-21 16:34:06 -07001758 nodemask_t task_nodes;
Christoph Lameter742755a2006-06-23 02:03:55 -07001759
1760 /* Check flags */
1761 if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
1762 return -EINVAL;
1763
1764 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1765 return -EPERM;
1766
1767 /* Find the mm_struct */
Greg Thelena879bf52011-02-25 14:44:13 -08001768 rcu_read_lock();
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -07001769 task = pid ? find_task_by_vpid(pid) : current;
Christoph Lameter742755a2006-06-23 02:03:55 -07001770 if (!task) {
Greg Thelena879bf52011-02-25 14:44:13 -08001771 rcu_read_unlock();
Christoph Lameter742755a2006-06-23 02:03:55 -07001772 return -ESRCH;
1773 }
Christoph Lameter3268c632012-03-21 16:34:06 -07001774 get_task_struct(task);
Christoph Lameter742755a2006-06-23 02:03:55 -07001775
1776 /*
1777 * Check if this process has the right to modify the specified
Linus Torvalds197e7e52017-08-20 13:26:27 -07001778 * process. Use the regular "ptrace_may_access()" checks.
Christoph Lameter742755a2006-06-23 02:03:55 -07001779 */
Linus Torvalds197e7e52017-08-20 13:26:27 -07001780 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
David Howellsc69e8d92008-11-14 10:39:19 +11001781 rcu_read_unlock();
Christoph Lameter742755a2006-06-23 02:03:55 -07001782 err = -EPERM;
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001783 goto out;
Christoph Lameter742755a2006-06-23 02:03:55 -07001784 }
David Howellsc69e8d92008-11-14 10:39:19 +11001785 rcu_read_unlock();
Christoph Lameter742755a2006-06-23 02:03:55 -07001786
David Quigley86c3a762006-06-23 02:04:02 -07001787 err = security_task_movememory(task);
1788 if (err)
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001789 goto out;
David Quigley86c3a762006-06-23 02:04:02 -07001790
Christoph Lameter3268c632012-03-21 16:34:06 -07001791 task_nodes = cpuset_mems_allowed(task);
1792 mm = get_task_mm(task);
1793 put_task_struct(task);
1794
Sasha Levin6e8b09e2012-04-25 16:01:53 -07001795 if (!mm)
1796 return -EINVAL;
1797
1798 if (nodes)
1799 err = do_pages_move(mm, task_nodes, nr_pages, pages,
1800 nodes, status, flags);
1801 else
1802 err = do_pages_stat(mm, nr_pages, pages, status);
Christoph Lameter3268c632012-03-21 16:34:06 -07001803
1804 mmput(mm);
1805 return err;
David Quigley86c3a762006-06-23 02:04:02 -07001806
Christoph Lameter742755a2006-06-23 02:03:55 -07001807out:
Christoph Lameter3268c632012-03-21 16:34:06 -07001808 put_task_struct(task);
Christoph Lameter742755a2006-06-23 02:03:55 -07001809 return err;
1810}
Christoph Lameter742755a2006-06-23 02:03:55 -07001811
Dominik Brodowski7addf442018-03-17 16:08:03 +01001812SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
1813 const void __user * __user *, pages,
1814 const int __user *, nodes,
1815 int __user *, status, int, flags)
1816{
1817 return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
1818}
1819
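/*
 * Userspace reaches kernel_move_pages() through the move_pages(2)
 * syscall. A minimal sketch, assuming addr is a page-aligned address in
 * the calling process (pid 0 means "current process"):
 *
 *	void *pages[1] = { addr };
 *	int nodes[1] = { 1 };
 *	int status[1];
 *
 *	long rc = move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);
 *
 * Passing a NULL nodes pointer turns the call into a pure status query
 * handled by do_pages_stat() above.
 */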
1820#ifdef CONFIG_COMPAT
1821COMPAT_SYSCALL_DEFINE6(move_pages, pid_t, pid, compat_ulong_t, nr_pages,
1822 compat_uptr_t __user *, pages32,
1823 const int __user *, nodes,
1824 int __user *, status,
1825 int, flags)
1826{
1827 const void __user * __user *pages;
1828 int i;
1829
1830 pages = compat_alloc_user_space(nr_pages * sizeof(void *));
1831 for (i = 0; i < nr_pages; i++) {
1832 compat_uptr_t p;
1833
1834 if (get_user(p, pages32 + i) ||
1835 put_user(compat_ptr(p), pages + i))
1836 return -EFAULT;
1837 }
1838 return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
1839}
1840#endif /* CONFIG_COMPAT */
1841
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001842#ifdef CONFIG_NUMA_BALANCING
1843/*
1844 * Returns true if this is a safe migration target node for misplaced NUMA
1845 * pages. Currently it only checks the watermarks, which is crude.
1846 */
1847static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
Mel Gorman3abef4e2013-02-22 16:34:27 -08001848 unsigned long nr_migrate_pages)
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001849{
1850 int z;
Mel Gorman599d0c92016-07-28 15:45:31 -07001851
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001852 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
1853 struct zone *zone = pgdat->node_zones + z;
1854
1855 if (!populated_zone(zone))
1856 continue;
1857
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001858 /* Avoid waking kswapd by allocating nr_migrate_pages pages. */
1859 if (!zone_watermark_ok(zone, 0,
1860 high_wmark_pages(zone) +
1861 nr_migrate_pages,
Huang Yingbfe9d002019-11-30 17:57:28 -08001862 ZONE_MOVABLE, 0))
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001863 continue;
1864 return true;
1865 }
1866 return false;
1867}
1868
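/*
 * Allocation for NUMA hint faults is opportunistic: __GFP_THISNODE
 * restricts it to the intended node, and masking out __GFP_RECLAIM
 * (together with __GFP_NORETRY/__GFP_NOMEMALLOC/__GFP_NOWARN) makes the
 * allocation fail quickly instead of reclaiming when that node is short
 * on memory.
 */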
1869static struct page *alloc_misplaced_dst_page(struct page *page,
Michal Hocko666feb22018-04-10 16:30:03 -07001870 unsigned long data)
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001871{
1872 int nid = (int) data;
1873 struct page *newpage;
1874
Vlastimil Babka96db8002015-09-08 15:03:50 -07001875 newpage = __alloc_pages_node(nid,
Johannes Weinere97ca8e52014-03-10 15:49:43 -07001876 (GFP_HIGHUSER_MOVABLE |
1877 __GFP_THISNODE | __GFP_NOMEMALLOC |
1878 __GFP_NORETRY | __GFP_NOWARN) &
Mel Gorman8479eba2016-02-26 15:19:31 -08001879 ~__GFP_RECLAIM, 0);
Hillf Dantonbac03822012-11-27 14:46:24 +00001880
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001881 return newpage;
1882}
1883
Mel Gorman1c30e012014-01-21 15:50:58 -08001884static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
Mel Gormanb32967f2012-11-19 12:35:47 +00001885{
Hugh Dickins340ef392013-02-22 16:34:33 -08001886 int page_lru;
Mel Gormanb32967f2012-11-19 12:35:47 +00001887
Sasha Levin309381fea2014-01-23 15:52:54 -08001888 VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);
Mel Gorman3abef4e2013-02-22 16:34:27 -08001889
Mel Gormanb32967f2012-11-19 12:35:47 +00001890 /* Avoid migrating to a node that is nearly full */
Matthew Wilcox (Oracle)d8c65462019-09-23 15:34:30 -07001891 if (!migrate_balanced_pgdat(pgdat, compound_nr(page)))
Hugh Dickins340ef392013-02-22 16:34:33 -08001892 return 0;
Mel Gormanb32967f2012-11-19 12:35:47 +00001893
Hugh Dickins340ef392013-02-22 16:34:33 -08001894 if (isolate_lru_page(page))
1895 return 0;
Mel Gormanb32967f2012-11-19 12:35:47 +00001896
1897 /*
Hugh Dickins340ef392013-02-22 16:34:33 -08001898 * migrate_misplaced_transhuge_page() skips page migration's usual
1899 * check on page_count(), so we must do it here, now that the page
1900 * has been isolated: a GUP pin, or any other pin, prevents migration.
1901 * The expected page count is 3: 1 for page's mapcount and 1 for the
1902 * caller's pin and 1 for the reference taken by isolate_lru_page().
1903 */
1904 if (PageTransHuge(page) && page_count(page) != 3) {
1905 putback_lru_page(page);
1906 return 0;
1907 }
1908
1909 page_lru = page_is_file_cache(page);
Mel Gorman599d0c92016-07-28 15:45:31 -07001910 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
Hugh Dickins340ef392013-02-22 16:34:33 -08001911 hpage_nr_pages(page));
1912
1913 /*
1914 * Isolating the page has taken another reference, so the
1915 * caller's reference can be safely dropped without the page
1916 * disappearing underneath us during migration.
Mel Gormanb32967f2012-11-19 12:35:47 +00001917 */
1918 put_page(page);
Hugh Dickins340ef392013-02-22 16:34:33 -08001919 return 1;
Mel Gormanb32967f2012-11-19 12:35:47 +00001920}
1921
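/*
 * THP NUMA migration keeps the huge page locked for its whole duration
 * (see migrate_misplaced_transhuge_page() below), so a locked pmd page
 * serves as the "migration in progress" indicator.
 */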
Mel Gormande466bd2013-12-18 17:08:42 -08001922bool pmd_trans_migrating(pmd_t pmd)
1923{
1924 struct page *page = pmd_page(pmd);
1925 return PageLocked(page);
1926}
1927
Mel Gormana8f60772012-11-14 21:41:46 +00001928/*
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001929 * Attempt to migrate a misplaced page to the specified destination
1930 * node. Caller is expected to have an elevated reference count on
1931 * the page that will be dropped by this function before returning.
1932 */
Mel Gorman1bc115d2013-10-07 11:29:05 +01001933int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
1934 int node)
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001935{
Mel Gormana8f60772012-11-14 21:41:46 +00001936 pg_data_t *pgdat = NODE_DATA(node);
Hugh Dickins340ef392013-02-22 16:34:33 -08001937 int isolated;
Mel Gormanb32967f2012-11-19 12:35:47 +00001938 int nr_remaining;
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001939 LIST_HEAD(migratepages);
1940
1941 /*
Mel Gorman1bc115d2013-10-07 11:29:05 +01001942 * Don't migrate file pages that are mapped in multiple processes
1943 * with execute permissions as they are probably shared libraries.
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001944 */
Mel Gorman1bc115d2013-10-07 11:29:05 +01001945 if (page_mapcount(page) != 1 && page_is_file_cache(page) &&
1946 (vma->vm_flags & VM_EXEC))
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001947 goto out;
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001948
Mel Gormana8f60772012-11-14 21:41:46 +00001949 /*
Mel Gorman09a913a2018-04-10 16:29:20 -07001950 * Also do not migrate dirty pages as not all filesystems can move
1951 * dirty pages in MIGRATE_ASYNC mode, which is a waste of cycles.
1952 */
1953 if (page_is_file_cache(page) && PageDirty(page))
1954 goto out;
1955
Mel Gormanb32967f2012-11-19 12:35:47 +00001956 isolated = numamigrate_isolate_page(pgdat, page);
1957 if (!isolated)
1958 goto out;
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001959
Mel Gormanb32967f2012-11-19 12:35:47 +00001960 list_add(&page->lru, &migratepages);
Hugh Dickins9c620e22013-02-22 16:35:14 -08001961 nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
David Rientjes68711a72014-06-04 16:08:25 -07001962 NULL, node, MIGRATE_ASYNC,
1963 MR_NUMA_MISPLACED);
Mel Gormanb32967f2012-11-19 12:35:47 +00001964 if (nr_remaining) {
Joonsoo Kim59c82b72014-01-21 15:51:17 -08001965 if (!list_empty(&migratepages)) {
1966 list_del(&page->lru);
Mel Gorman599d0c92016-07-28 15:45:31 -07001967 dec_node_page_state(page, NR_ISOLATED_ANON +
Joonsoo Kim59c82b72014-01-21 15:51:17 -08001968 page_is_file_cache(page));
1969 putback_lru_page(page);
1970 }
Mel Gormanb32967f2012-11-19 12:35:47 +00001971 isolated = 0;
1972 } else
1973 count_vm_numa_event(NUMA_PAGE_MIGRATE);
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001974 BUG_ON(!list_empty(&migratepages));
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001975 return isolated;
Hugh Dickins340ef392013-02-22 16:34:33 -08001976
1977out:
1978 put_page(page);
1979 return 0;
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001980}
Mel Gorman220018d2012-12-05 09:32:56 +00001981#endif /* CONFIG_NUMA_BALANCING */
Mel Gormanb32967f2012-11-19 12:35:47 +00001982
Mel Gorman220018d2012-12-05 09:32:56 +00001983#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
Hugh Dickins340ef392013-02-22 16:34:33 -08001984/*
1985 * Migrates a THP to a given target node. page must be locked and is unlocked
1986 * before returning.
1987 */
Mel Gormanb32967f2012-11-19 12:35:47 +00001988int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1989 struct vm_area_struct *vma,
1990 pmd_t *pmd, pmd_t entry,
1991 unsigned long address,
1992 struct page *page, int node)
1993{
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08001994 spinlock_t *ptl;
Mel Gormanb32967f2012-11-19 12:35:47 +00001995 pg_data_t *pgdat = NODE_DATA(node);
1996 int isolated = 0;
1997 struct page *new_page = NULL;
Mel Gormanb32967f2012-11-19 12:35:47 +00001998 int page_lru = page_is_file_cache(page);
Andrea Arcangeli7066f0f2018-10-26 15:10:40 -07001999 unsigned long start = address & HPAGE_PMD_MASK;
Mel Gormanb32967f2012-11-19 12:35:47 +00002000
Mel Gormanb32967f2012-11-19 12:35:47 +00002001 new_page = alloc_pages_node(node,
Vlastimil Babka25160352016-07-28 15:49:25 -07002002 (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
Johannes Weinere97ca8e52014-03-10 15:49:43 -07002003 HPAGE_PMD_ORDER);
Hugh Dickins340ef392013-02-22 16:34:33 -08002004 if (!new_page)
2005 goto out_fail;
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002006 prep_transhuge_page(new_page);
Hugh Dickins340ef392013-02-22 16:34:33 -08002007
Mel Gormanb32967f2012-11-19 12:35:47 +00002008 isolated = numamigrate_isolate_page(pgdat, page);
Hugh Dickins340ef392013-02-22 16:34:33 -08002009 if (!isolated) {
Mel Gormanb32967f2012-11-19 12:35:47 +00002010 put_page(new_page);
Hugh Dickins340ef392013-02-22 16:34:33 -08002011 goto out_fail;
Mel Gormanb32967f2012-11-19 12:35:47 +00002012 }
Mel Gormanb0943d62013-12-18 17:08:46 -08002013
Mel Gormanb32967f2012-11-19 12:35:47 +00002014 /* Prepare a page as a migration target */
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08002015 __SetPageLocked(new_page);
Shaohua Lid44d3632017-05-03 14:52:26 -07002016 if (PageSwapBacked(page))
2017 __SetPageSwapBacked(new_page);
Mel Gormanb32967f2012-11-19 12:35:47 +00002018
2019 /* anon mapping, we can simply copy page->mapping to the new page: */
2020 new_page->mapping = page->mapping;
2021 new_page->index = page->index;
Andrea Arcangeli7eef5f92018-10-26 15:10:43 -07002022 /* flush the cache before copying using the kernel virtual address */
2023 flush_cache_range(vma, start, start + HPAGE_PMD_SIZE);
Mel Gormanb32967f2012-11-19 12:35:47 +00002024 migrate_page_copy(new_page, page);
2025 WARN_ON(PageLRU(new_page));
2026
2027 /* Recheck the target PMD */
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08002028 ptl = pmd_lock(mm, pmd);
Will Deaconf4e177d2017-07-10 15:48:31 -07002029 if (unlikely(!pmd_same(*pmd, entry) || !page_ref_freeze(page, 2))) {
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08002030 spin_unlock(ptl);
Mel Gormanb32967f2012-11-19 12:35:47 +00002031
2032 /* Reverse changes made by migrate_page_copy() */
2033 if (TestClearPageActive(new_page))
2034 SetPageActive(page);
2035 if (TestClearPageUnevictable(new_page))
2036 SetPageUnevictable(page);
Mel Gormanb32967f2012-11-19 12:35:47 +00002037
2038 unlock_page(new_page);
2039 put_page(new_page); /* Free it */
2040
Mel Gormana54a4072013-10-07 11:28:46 +01002041 /* Retake the callers reference and putback on LRU */
2042 get_page(page);
Mel Gormanb32967f2012-11-19 12:35:47 +00002043 putback_lru_page(page);
Mel Gorman599d0c92016-07-28 15:45:31 -07002044 mod_node_page_state(page_pgdat(page),
Mel Gormana54a4072013-10-07 11:28:46 +01002045 NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
Mel Gormaneb4489f62013-12-18 17:08:39 -08002046
2047 goto out_unlock;
Mel Gormanb32967f2012-11-19 12:35:47 +00002048 }
2049
Kirill A. Shutemov10102452016-07-26 15:25:29 -07002050 entry = mk_huge_pmd(new_page, vma->vm_page_prot);
Linus Torvaldsf55e1012017-11-29 09:01:01 -08002051 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
Mel Gormanb32967f2012-11-19 12:35:47 +00002052
Mel Gorman2b4847e2013-12-18 17:08:32 -08002053 /*
Andrea Arcangelid7c33932018-10-26 15:10:36 -07002054 * Overwrite the old entry under pagetable lock and establish
2055 * the new PMD. Any parallel GUP will either observe the old
2056 * page blocking on the page lock, block on the page table
2057 * lock or observe the new page. The SetPageUptodate on the
2058 * new page and page_add_anon_rmap() guarantee the copy is
2059 * visible before the pagetable update.
Mel Gorman2b4847e2013-12-18 17:08:32 -08002060 */
Andrea Arcangeli7066f0f2018-10-26 15:10:40 -07002061 page_add_anon_rmap(new_page, vma, start, true);
Andrea Arcangelid7c33932018-10-26 15:10:36 -07002062 /*
2063 * At this point the pmd is numa/protnone (i.e. non present) and the TLB
2064 * has already been flushed globally. So no TLB can be currently
2065 * caching this non present pmd mapping. There's no need to clear the
2066 * pmd before doing set_pmd_at(), nor to flush the TLB after
2067 * set_pmd_at(). Clearing the pmd here would introduce a race
2068 * condition against MADV_DONTNEED, because MADV_DONTNEED only holds the
2069 * mmap_sem for reading. If the pmd is set to NULL at any given time,
2070 * MADV_DONTNEED won't wait on the pmd lock and it'll skip clearing this
2071 * pmd.
2072 */
Andrea Arcangeli7066f0f2018-10-26 15:10:40 -07002073 set_pmd_at(mm, start, pmd, entry);
Stephen Rothwellce4a9cc2012-12-10 19:50:57 +11002074 update_mmu_cache_pmd(vma, address, &entry);
Mel Gorman2b4847e2013-12-18 17:08:32 -08002075
Will Deaconf4e177d2017-07-10 15:48:31 -07002076 page_ref_unfreeze(page, 2);
Hugh Dickins51afb122015-11-05 18:49:37 -08002077 mlock_migrate_page(new_page, page);
Kirill A. Shutemovd281ee62016-01-15 16:52:16 -08002078 page_remove_rmap(page, true);
Vlastimil Babka7cd12b42016-03-15 14:56:18 -07002079 set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);
Mel Gorman2b4847e2013-12-18 17:08:32 -08002080
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08002081 spin_unlock(ptl);
Mel Gormanb32967f2012-11-19 12:35:47 +00002082
Mel Gorman11de9922014-06-04 16:07:41 -07002083 /* Take an "isolate" reference and put new page on the LRU. */
2084 get_page(new_page);
2085 putback_lru_page(new_page);
2086
Mel Gormanb32967f2012-11-19 12:35:47 +00002087 unlock_page(new_page);
2088 unlock_page(page);
2089 put_page(page); /* Drop the rmap reference */
2090 put_page(page); /* Drop the LRU isolation reference */
2091
2092 count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
2093 count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
2094
Mel Gorman599d0c92016-07-28 15:45:31 -07002095 mod_node_page_state(page_pgdat(page),
Mel Gormanb32967f2012-11-19 12:35:47 +00002096 NR_ISOLATED_ANON + page_lru,
2097 -HPAGE_PMD_NR);
2098 return isolated;
2099
Hugh Dickins340ef392013-02-22 16:34:33 -08002100out_fail:
2101 count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
Mel Gorman2b4847e2013-12-18 17:08:32 -08002102 ptl = pmd_lock(mm, pmd);
2103 if (pmd_same(*pmd, entry)) {
Mel Gorman4d942462015-02-12 14:58:28 -08002104 entry = pmd_modify(entry, vma->vm_page_prot);
Andrea Arcangeli7066f0f2018-10-26 15:10:40 -07002105 set_pmd_at(mm, start, pmd, entry);
Mel Gorman2b4847e2013-12-18 17:08:32 -08002106 update_mmu_cache_pmd(vma, address, &entry);
2107 }
2108 spin_unlock(ptl);
Mel Gormana54a4072013-10-07 11:28:46 +01002109
Mel Gormaneb4489f62013-12-18 17:08:39 -08002110out_unlock:
Hugh Dickins340ef392013-02-22 16:34:33 -08002111 unlock_page(page);
Mel Gormanb32967f2012-11-19 12:35:47 +00002112 put_page(page);
Mel Gormanb32967f2012-11-19 12:35:47 +00002113 return 0;
2114}
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02002115#endif /* CONFIG_NUMA_BALANCING */
2116
2117#endif /* CONFIG_NUMA */
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002118
Christoph Hellwig9b2ed9c2019-08-14 09:59:28 +02002119#ifdef CONFIG_DEVICE_PRIVATE
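/*
 * A hole (no page table entry at all) is still reported to the caller:
 * every address in the range gets MIGRATE_PFN_MIGRATE with no source
 * page, so the caller may allocate fresh memory for it.
 */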
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002120static int migrate_vma_collect_hole(unsigned long start,
2121 unsigned long end,
2122 struct mm_walk *walk)
2123{
2124 struct migrate_vma *migrate = walk->private;
2125 unsigned long addr;
2126
2127 for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
Mark Hairgrovee20d1032017-10-13 15:57:30 -07002128 migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002129 migrate->dst[migrate->npages] = 0;
Mark Hairgrovee20d1032017-10-13 15:57:30 -07002130 migrate->npages++;
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002131 migrate->cpages++;
2132 }
2133
2134 return 0;
2135}
2136
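/*
 * Unlike a hole, a range that must be skipped is recorded with a zero
 * src entry, i.e. it is not a candidate for migration at all.
 */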
2137static int migrate_vma_collect_skip(unsigned long start,
2138 unsigned long end,
2139 struct mm_walk *walk)
2140{
2141 struct migrate_vma *migrate = walk->private;
2142 unsigned long addr;
2143
2144 for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002145 migrate->dst[migrate->npages] = 0;
2146 migrate->src[migrate->npages++] = 0;
2147 }
2148
2149 return 0;
2150}
2151
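/*
 * Walk one pmd worth of the range: huge zero pages and THPs are split
 * first, then each pte is inspected. Device private entries and normal
 * pages are recorded in the src array; when a page can be locked right
 * away (the common, singly mapped case) its pte is replaced with a
 * migration entry here already, saving work in migrate_vma_unmap().
 */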
2152static int migrate_vma_collect_pmd(pmd_t *pmdp,
2153 unsigned long start,
2154 unsigned long end,
2155 struct mm_walk *walk)
2156{
2157 struct migrate_vma *migrate = walk->private;
2158 struct vm_area_struct *vma = walk->vma;
2159 struct mm_struct *mm = vma->vm_mm;
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002160 unsigned long addr = start, unmapped = 0;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002161 spinlock_t *ptl;
2162 pte_t *ptep;
2163
2164again:
2165 if (pmd_none(*pmdp))
2166 return migrate_vma_collect_hole(start, end, walk);
2167
2168 if (pmd_trans_huge(*pmdp)) {
2169 struct page *page;
2170
2171 ptl = pmd_lock(mm, pmdp);
2172 if (unlikely(!pmd_trans_huge(*pmdp))) {
2173 spin_unlock(ptl);
2174 goto again;
2175 }
2176
2177 page = pmd_page(*pmdp);
2178 if (is_huge_zero_page(page)) {
2179 spin_unlock(ptl);
2180 split_huge_pmd(vma, pmdp, addr);
2181 if (pmd_trans_unstable(pmdp))
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002182 return migrate_vma_collect_skip(start, end,
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002183 walk);
2184 } else {
2185 int ret;
2186
2187 get_page(page);
2188 spin_unlock(ptl);
2189 if (unlikely(!trylock_page(page)))
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002190 return migrate_vma_collect_skip(start, end,
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002191 walk);
2192 ret = split_huge_page(page);
2193 unlock_page(page);
2194 put_page(page);
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002195 if (ret)
2196 return migrate_vma_collect_skip(start, end,
2197 walk);
2198 if (pmd_none(*pmdp))
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002199 return migrate_vma_collect_hole(start, end,
2200 walk);
2201 }
2202 }
2203
2204 if (unlikely(pmd_bad(*pmdp)))
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002205 return migrate_vma_collect_skip(start, end, walk);
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002206
2207 ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002208 arch_enter_lazy_mmu_mode();
2209
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002210 for (; addr < end; addr += PAGE_SIZE, ptep++) {
2211 unsigned long mpfn, pfn;
2212 struct page *page;
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002213 swp_entry_t entry;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002214 pte_t pte;
2215
2216 pte = *ptep;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002217
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002218 if (pte_none(pte)) {
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002219 mpfn = MIGRATE_PFN_MIGRATE;
2220 migrate->cpages++;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002221 goto next;
2222 }
2223
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002224 if (!pte_present(pte)) {
Pingfan Liu276f7562019-09-23 15:37:38 -07002225 mpfn = 0;
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002226
2227 /*
2228 * We only care about the special page table entry of an
2229 * unaddressable device page. Other special swap entries are
2230 * not migratable, and we ignore regular swapped pages.
2231 */
2232 entry = pte_to_swp_entry(pte);
2233 if (!is_device_private_entry(entry))
2234 goto next;
2235
2236 page = device_private_entry_to_page(entry);
Christoph Hellwig06d462b2019-08-14 09:59:27 +02002237 mpfn = migrate_pfn(page_to_pfn(page)) |
2238 MIGRATE_PFN_MIGRATE;
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002239 if (is_write_device_private_entry(entry))
2240 mpfn |= MIGRATE_PFN_WRITE;
2241 } else {
Pingfan Liu276f7562019-09-23 15:37:38 -07002242 pfn = pte_pfn(pte);
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002243 if (is_zero_pfn(pfn)) {
2244 mpfn = MIGRATE_PFN_MIGRATE;
2245 migrate->cpages++;
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002246 goto next;
2247 }
Christoph Hellwig25b29952019-06-13 22:50:49 +02002248 page = vm_normal_page(migrate->vma, addr, pte);
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002249 mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
2250 mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
2251 }
2252
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002253 /* FIXME support THP */
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002254 if (!page || !page->mapping || PageTransCompound(page)) {
Pingfan Liu276f7562019-09-23 15:37:38 -07002255 mpfn = 0;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002256 goto next;
2257 }
2258
2259 /*
2260 * By getting a reference on the page we pin it and that blocks
2261 * any kind of migration. A side effect is that it "freezes" the
2262 * pte.
2263 *
2264 * We drop this reference after isolating the page from the lru
2265 * for non-device pages (device pages are not on the lru and thus
2266 * can't be dropped from it).
2267 */
2268 get_page(page);
2269 migrate->cpages++;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002270
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002271 /*
2272 * Optimize for the common case where page is only mapped once
2273 * in one process. If we can lock the page, then we can safely
2274 * set up a special migration page table entry now.
2275 */
2276 if (trylock_page(page)) {
2277 pte_t swp_pte;
2278
2279 mpfn |= MIGRATE_PFN_LOCKED;
2280 ptep_get_and_clear(mm, addr, ptep);
2281
2282 /* Setup special migration page table entry */
Ralph Campbell07707122018-04-10 16:29:27 -07002283 entry = make_migration_entry(page, mpfn &
2284 MIGRATE_PFN_WRITE);
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002285 swp_pte = swp_entry_to_pte(entry);
2286 if (pte_soft_dirty(pte))
2287 swp_pte = pte_swp_mksoft_dirty(swp_pte);
2288 set_pte_at(mm, addr, ptep, swp_pte);
2289
2290 /*
2291 * This is like regular unmap: we remove the rmap and
2292 * drop page refcount. Page won't be freed, as we took
2293 * a reference just above.
2294 */
2295 page_remove_rmap(page, false);
2296 put_page(page);
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002297
2298 if (pte_present(pte))
2299 unmapped++;
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002300 }
2301
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002302next:
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002303 migrate->dst[migrate->npages] = 0;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002304 migrate->src[migrate->npages++] = mpfn;
2305 }
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002306 arch_leave_lazy_mmu_mode();
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002307 pte_unmap_unlock(ptep - 1, ptl);
2308
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002309 /* Only flush the TLB if we actually modified any entries */
2310 if (unmapped)
2311 flush_tlb_range(walk->vma, start, end);
2312
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002313 return 0;
2314}
2315
Christoph Hellwig7b86ac32019-08-28 16:19:54 +02002316static const struct mm_walk_ops migrate_vma_walk_ops = {
2317 .pmd_entry = migrate_vma_collect_pmd,
2318 .pte_hole = migrate_vma_collect_hole,
2319};
2320
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002321/*
2322 * migrate_vma_collect() - collect pages over a range of virtual addresses
2323 * @migrate: migrate struct containing all migration information
2324 *
2325 * This will walk the CPU page table. For each virtual address backed by a
2326 * valid page, it updates the src array and takes a reference on the page, in
2327 * order to pin the page until we lock it and unmap it.
2328 */
2329static void migrate_vma_collect(struct migrate_vma *migrate)
2330{
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08002331 struct mmu_notifier_range range;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002332
Christoph Hellwig7b86ac32019-08-28 16:19:54 +02002333 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL,
2334 migrate->vma->vm_mm, migrate->start, migrate->end);
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08002335 mmu_notifier_invalidate_range_start(&range);
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002336
Christoph Hellwig7b86ac32019-08-28 16:19:54 +02002337 walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end,
2338 &migrate_vma_walk_ops, migrate);
2339
2340 mmu_notifier_invalidate_range_end(&range);
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002341 migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
2342}
2343
2344/*
2345 * migrate_vma_check_page() - check if page is pinned or not
2346 * @page: struct page to check
2347 *
2348 * Pinned pages cannot be migrated. This is the same test as in
2349 * migrate_page_move_mapping(), except that here we allow migration of a
2350 * ZONE_DEVICE page.
2351 */
2352static bool migrate_vma_check_page(struct page *page)
2353{
2354 /*
2355 * One extra ref because caller holds an extra reference, either from
2356 * isolate_lru_page() for a regular page, or migrate_vma_collect() for
2357 * a device page.
2358 */
2359 int extra = 1;
2360
2361 /*
2362 * FIXME support THP (transparent huge page), it is a bit more complex to
2363 * check them than regular pages, because they can be mapped with a pmd
2364 * or with a pte (split pte mapping).
2365 */
2366 if (PageCompound(page))
2367 return false;
2368
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002369 /* Pages from ZONE_DEVICE have one extra reference */
2370 if (is_zone_device_page(page)) {
2371 /*
2372 * Private pages can never be pinned as they have no valid pte and
2373 * GUP will fail for them. Yet if there is a pending migration,
2374 * a thread might try to wait on the pte migration entry and
2375 * will bump the page reference count. Sadly there is no way to
2376 * differentiate a regular pin from a migration wait. Hence, to
2377 * avoid two racing threads trying to migrate back to the CPU and
2378 * entering an infinite loop (one stopping the migration because the
2379 * other is waiting on the pte migration entry), we always return true here.
2380 *
2381 * FIXME proper solution is to rework migration_entry_wait() so
2382 * it does not need to take a reference on page.
2383 */
Christoph Hellwig25b29952019-06-13 22:50:49 +02002384 return is_device_private_page(page);
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002385 }
2386
Jérôme Glissedf6ad692017-09-08 16:12:24 -07002387 /* For file-backed pages */
2388 if (page_mapping(page))
2389 extra += 1 + page_has_private(page);
2390
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002391 if ((page_count(page) - extra) > page_mapcount(page))
2392 return false;
2393
2394 return true;
2395}
2396
2397/*
2398 * migrate_vma_prepare() - lock pages and isolate them from the lru
2399 * @migrate: migrate struct containing all migration information
2400 *
2401 * This locks pages that have been collected by migrate_vma_collect(). Once each
2402 * page is locked it is isolated from the lru (for non-device pages). Finally,
2403 * the ref taken by migrate_vma_collect() is dropped, as locked pages cannot be
2404 * migrated by concurrent kernel threads.
2405 */
2406static void migrate_vma_prepare(struct migrate_vma *migrate)
2407{
2408 const unsigned long npages = migrate->npages;
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002409 const unsigned long start = migrate->start;
2410 unsigned long addr, i, restore = 0;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002411 bool allow_drain = true;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002412
2413 lru_add_drain();
2414
2415 for (i = 0; (i < npages) && migrate->cpages; i++) {
2416 struct page *page = migrate_pfn_to_page(migrate->src[i]);
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002417 bool remap = true;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002418
2419 if (!page)
2420 continue;
2421
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002422 if (!(migrate->src[i] & MIGRATE_PFN_LOCKED)) {
2423 /*
2424 * Because we are migrating several pages there can be
2425 * a deadlock between two concurrent migrations where each
2426 * is waiting on the other's page lock.
2427 *
2428 * Make migrate_vma() a best-effort thing and back off
2429 * for any page we cannot lock right away.
2430 */
2431 if (!trylock_page(page)) {
2432 migrate->src[i] = 0;
2433 migrate->cpages--;
2434 put_page(page);
2435 continue;
2436 }
2437 remap = false;
2438 migrate->src[i] |= MIGRATE_PFN_LOCKED;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002439 }
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002440
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002441 /* ZONE_DEVICE pages are not on LRU */
2442 if (!is_zone_device_page(page)) {
2443 if (!PageLRU(page) && allow_drain) {
2444 /* Drain CPU's pagevec */
2445 lru_add_drain_all();
2446 allow_drain = false;
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002447 }
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002448
2449 if (isolate_lru_page(page)) {
2450 if (remap) {
2451 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2452 migrate->cpages--;
2453 restore++;
2454 } else {
2455 migrate->src[i] = 0;
2456 unlock_page(page);
2457 migrate->cpages--;
2458 put_page(page);
2459 }
2460 continue;
2461 }
2462
2463 /* Drop the reference we took in collect */
2464 put_page(page);
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002465 }
2466
2467 if (!migrate_vma_check_page(page)) {
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002468 if (remap) {
2469 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2470 migrate->cpages--;
2471 restore++;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002472
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002473 if (!is_zone_device_page(page)) {
2474 get_page(page);
2475 putback_lru_page(page);
2476 }
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002477 } else {
2478 migrate->src[i] = 0;
2479 unlock_page(page);
2480 migrate->cpages--;
2481
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002482 if (!is_zone_device_page(page))
2483 putback_lru_page(page);
2484 else
2485 put_page(page);
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002486 }
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002487 }
2488 }
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002489
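	/*
	 * Second pass: restore the CPU page table entry for every page that
	 * could not be isolated above, then drop the page lock and the extra
	 * reference taken during collection.
	 */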
2490 for (i = 0, addr = start; i < npages && restore; i++, addr += PAGE_SIZE) {
2491 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2492
2493 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
2494 continue;
2495
2496 remove_migration_pte(page, migrate->vma, addr, page);
2497
2498 migrate->src[i] = 0;
2499 unlock_page(page);
2500 put_page(page);
2501 restore--;
2502 }
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002503}
2504
2505/*
2506 * migrate_vma_unmap() - replace page mapping with special migration pte entry
2507 * @migrate: migrate struct containing all migration information
2508 *
2509 * Replace the page mapping (CPU page table pte) with a special migration pte
2510 * entry and check again whether the page has been pinned. Pinned pages are
2511 * restored because we cannot migrate them.
2512 *
2513 * This is the last step before we call the device driver callback to allocate
2514 * destination memory and copy the contents of the original page to the new page.
2515 */
2516static void migrate_vma_unmap(struct migrate_vma *migrate)
2517{
2518 int flags = TTU_MIGRATION | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
2519 const unsigned long npages = migrate->npages;
2520 const unsigned long start = migrate->start;
2521 unsigned long addr, i, restore = 0;
2522
2523 for (i = 0; i < npages; i++) {
2524 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2525
2526 if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
2527 continue;
2528
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002529 if (page_mapped(page)) {
2530 try_to_unmap(page, flags);
2531 if (page_mapped(page))
2532 goto restore;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002533 }
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002534
2535 if (migrate_vma_check_page(page))
2536 continue;
2537
2538restore:
2539 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2540 migrate->cpages--;
2541 restore++;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002542 }
2543
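	/*
	 * Restore any page that is still mapped or pinned: replace the
	 * migration entries with the original ptes, unlock the page and put
	 * it back on the LRU (or just drop the reference for device pages).
	 */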
2544 for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) {
2545 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2546
2547 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
2548 continue;
2549
2550 remove_migration_ptes(page, page, false);
2551
2552 migrate->src[i] = 0;
2553 unlock_page(page);
2554 restore--;
2555
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002556 if (is_zone_device_page(page))
2557 put_page(page);
2558 else
2559 putback_lru_page(page);
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002560 }
2561}
2562
Christoph Hellwiga7d1f222019-08-14 09:59:19 +02002563/**
2564 * migrate_vma_setup() - prepare to migrate a range of memory
2565 * @args: contains the vma, start, end, and pfns arrays for the migration
2566 *
2567 * Returns: negative errno on failures, 0 when 0 or more pages were migrated
2568 * without an error.
2569 *
2570 * Prepare to migrate a range of virtual addresses by collecting all the pages
2571 * backing each address in the range and saving them inside the src array.
2572 * Those pages are then locked and unmapped. Once the pages are locked and
2573 * unmapped, check whether each page is pinned or not. Pages that aren't
2574 * pinned have the MIGRATE_PFN_MIGRATE flag set (by this function) in the
2575 * corresponding src array entry. Any pages that are pinned are then restored
2576 * by remapping and unlocking them.
2577 *
2578 * The caller should then allocate destination memory and copy source memory to
2579 * it for all those entries (ie with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
2580 * flag set). Once these are allocated and copied, the caller must update each
2581 * corresponding entry in the dst array with the pfn value of the destination
2582 * page and with the MIGRATE_PFN_VALID and MIGRATE_PFN_LOCKED flags set
2583 * (destination pages must have their struct pages locked, via lock_page()).
2584 *
2585 * Note that the caller does not have to migrate all the pages that are marked
2586 * with MIGRATE_PFN_MIGRATE flag in src array unless this is a migration from
2587 * device memory to system memory. If the caller cannot migrate a device page
2588 * back to system memory, then it must return VM_FAULT_SIGBUS, which has severe
2589 * consequences for the userspace process, so it must be avoided if at all
2590 * possible.
2591 *
2592 * For empty entries inside the CPU page table (pte_none() or pmd_none() is
2593 * true) the MIGRATE_PFN_MIGRATE flag is also set in the corresponding source
2594 * array entry, allowing the caller to allocate device memory for those
2595 * unbacked virtual addresses. For this the caller simply has to allocate
2596 * device memory and properly set the destination entry, just like for regular
2597 * migration. Note that this can still fail, so the device driver must check
2598 * whether the migration was successful for those entries after calling
2599 * migrate_vma_pages(), just like for regular migration.
2600 *
2601 * After that, the caller must call migrate_vma_pages() to go over each entry
2602 * in the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flags
2603 * set. If the corresponding entry in the dst array has the MIGRATE_PFN_VALID
2604 * flag set, migrate_vma_pages() migrates the struct page information from the
2605 * source struct page to the destination struct page. If it fails to migrate
2606 * the struct page information, it clears the MIGRATE_PFN_MIGRATE flag in the
2607 * src array.
2608 *
2609 * At this point all successfully migrated pages have an entry in the src
2610 * array with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set and the dst
2611 * array entry with MIGRATE_PFN_VALID flag set.
2612 *
2613 * Once migrate_vma_pages() returns the caller may inspect which pages were
2614 * successfully migrated, and which were not. Successfully migrated pages will
2615 * have the MIGRATE_PFN_MIGRATE flag set for their src array entry.
2616 *
2617 * It is safe to update the device page table after migrate_vma_pages() because
2618 * both the destination and source pages are still locked, and the mmap_sem is
2619 * held in read mode (hence no one can unmap the range being migrated).
2620 *
2621 * Once the caller is done cleaning up and updating its device page table (if it
2622 * chose to do so; this is not an obligation), it finally calls
2623 * migrate_vma_finalize() to update the CPU page table to point to the new pages
2624 * for successfully migrated pages, or otherwise to restore the CPU page table
2625 * to point to the original source pages.
2626 */
2627int migrate_vma_setup(struct migrate_vma *args)
2628{
2629 long nr_pages = (args->end - args->start) >> PAGE_SHIFT;
2630
2631 args->start &= PAGE_MASK;
2632 args->end &= PAGE_MASK;
2633 if (!args->vma || is_vm_hugetlb_page(args->vma) ||
2634 (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma))
2635 return -EINVAL;
2636 if (nr_pages <= 0)
2637 return -EINVAL;
2638 if (args->start < args->vma->vm_start ||
2639 args->start >= args->vma->vm_end)
2640 return -EINVAL;
2641 if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end)
2642 return -EINVAL;
2643 if (!args->src || !args->dst)
2644 return -EINVAL;
2645
2646 memset(args->src, 0, sizeof(*args->src) * nr_pages);
2647 args->cpages = 0;
2648 args->npages = 0;
2649
2650 migrate_vma_collect(args);
2651
2652 if (args->cpages)
2653 migrate_vma_prepare(args);
2654 if (args->cpages)
2655 migrate_vma_unmap(args);
2656
2657 /*
2658 * At this point pages are locked and unmapped, and thus they have
2659 * stable content and can safely be copied to destination memory that
2660 * is allocated by the drivers.
2661 */
2662 return 0;
2663
2664}
2665EXPORT_SYMBOL(migrate_vma_setup);
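
/*
 * Example: a minimal sketch of how a device driver might drive the
 * migrate_vma_*() API documented above, assuming the caller already holds
 * the mmap_sem for read.  The helpers my_alloc_device_page() and
 * my_copy_to_device() are hypothetical placeholders for driver-specific
 * code; everything else is the interface exported by this file.
 *
 *	static int my_migrate_range(struct vm_area_struct *vma,
 *				    unsigned long start, unsigned long end)
 *	{
 *		unsigned long src[32] = { 0 }, dst[32] = { 0 };
 *		struct migrate_vma args = {
 *			.vma	= vma,
 *			.start	= start,
 *			.end	= end,
 *			.src	= src,
 *			.dst	= dst,
 *		};
 *		unsigned long i;
 *		int ret;
 *
 *		if (((end - start) >> PAGE_SHIFT) > ARRAY_SIZE(src))
 *			return -EINVAL;
 *
 *		ret = migrate_vma_setup(&args);
 *		if (ret)
 *			return ret;
 *
 *		for (i = 0; i < args.npages; i++) {
 *			struct page *spage = migrate_pfn_to_page(args.src[i]);
 *			struct page *dpage;
 *
 *			if (!(args.src[i] & MIGRATE_PFN_MIGRATE) || !spage)
 *				continue;
 *			dpage = my_alloc_device_page();
 *			if (!dpage)
 *				continue;
 *			lock_page(dpage);
 *			my_copy_to_device(dpage, spage);
 *			args.dst[i] = migrate_pfn(page_to_pfn(dpage)) |
 *				      MIGRATE_PFN_LOCKED;
 *		}
 *
 *		migrate_vma_pages(&args);
 *		migrate_vma_finalize(&args);
 *		return 0;
 *	}
 *
 * A real driver would also update its own page tables for entries that still
 * have MIGRATE_PFN_MIGRATE set after migrate_vma_pages() and before calling
 * migrate_vma_finalize(), and may choose to back pte_none()/pmd_none() holes
 * (entries without a source page) with freshly allocated device memory.
 */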
2666
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002667static void migrate_vma_insert_page(struct migrate_vma *migrate,
2668 unsigned long addr,
2669 struct page *page,
2670 unsigned long *src,
2671 unsigned long *dst)
2672{
2673 struct vm_area_struct *vma = migrate->vma;
2674 struct mm_struct *mm = vma->vm_mm;
2675 struct mem_cgroup *memcg;
2676 bool flush = false;
2677 spinlock_t *ptl;
2678 pte_t entry;
2679 pgd_t *pgdp;
2680 p4d_t *p4dp;
2681 pud_t *pudp;
2682 pmd_t *pmdp;
2683 pte_t *ptep;
2684
2685 /* Only allow populating anonymous memory */
2686 if (!vma_is_anonymous(vma))
2687 goto abort;
2688
2689 pgdp = pgd_offset(mm, addr);
2690 p4dp = p4d_alloc(mm, pgdp, addr);
2691 if (!p4dp)
2692 goto abort;
2693 pudp = pud_alloc(mm, p4dp, addr);
2694 if (!pudp)
2695 goto abort;
2696 pmdp = pmd_alloc(mm, pudp, addr);
2697 if (!pmdp)
2698 goto abort;
2699
2700 if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp))
2701 goto abort;
2702
2703 /*
2704 * Use pte_alloc() instead of pte_alloc_map(). We can't run
2705 * pte_offset_map() on pmds where a huge pmd might be created
2706 * from a different thread.
2707 *
2708 * pte_alloc_map() is safe to use under down_write(mmap_sem) or when
2709 * parallel threads are excluded by other means.
2710 *
2711 * Here we only have down_read(mmap_sem).
2712 */
Joel Fernandes (Google)4cf58922019-01-03 15:28:34 -08002713 if (pte_alloc(mm, pmdp))
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002714 goto abort;
2715
2716 /* See the comment in pte_alloc_one_map() */
2717 if (unlikely(pmd_trans_unstable(pmdp)))
2718 goto abort;
2719
2720 if (unlikely(anon_vma_prepare(vma)))
2721 goto abort;
2722 if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false))
2723 goto abort;
2724
2725 /*
2726 * The memory barrier inside __SetPageUptodate makes sure that
2727 * preceding stores to the page contents become visible before
2728 * the set_pte_at() write.
2729 */
2730 __SetPageUptodate(page);
2731
Jérôme Glissedf6ad692017-09-08 16:12:24 -07002732 if (is_zone_device_page(page)) {
2733 if (is_device_private_page(page)) {
2734 swp_entry_t swp_entry;
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002735
Jérôme Glissedf6ad692017-09-08 16:12:24 -07002736 swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE);
2737 entry = swp_entry_to_pte(swp_entry);
Jérôme Glissedf6ad692017-09-08 16:12:24 -07002738 }
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002739 } else {
2740 entry = mk_pte(page, vma->vm_page_prot);
2741 if (vma->vm_flags & VM_WRITE)
2742 entry = pte_mkwrite(pte_mkdirty(entry));
2743 }
2744
2745 ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
2746
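	/*
	 * Only an empty (pte_none) entry or a mapping of the shared zero page
	 * may be overwritten here; anything else means the address was
	 * populated behind our back, so back out.
	 */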
2747 if (pte_present(*ptep)) {
2748 unsigned long pfn = pte_pfn(*ptep);
2749
2750 if (!is_zero_pfn(pfn)) {
2751 pte_unmap_unlock(ptep, ptl);
2752 mem_cgroup_cancel_charge(page, memcg, false);
2753 goto abort;
2754 }
2755 flush = true;
2756 } else if (!pte_none(*ptep)) {
2757 pte_unmap_unlock(ptep, ptl);
2758 mem_cgroup_cancel_charge(page, memcg, false);
2759 goto abort;
2760 }
2761
2762 /*
2763 * Check for userfaultfd but do not deliver the fault. Instead,
2764 * just back off.
2765 */
2766 if (userfaultfd_missing(vma)) {
2767 pte_unmap_unlock(ptep, ptl);
2768 mem_cgroup_cancel_charge(page, memcg, false);
2769 goto abort;
2770 }
2771
2772 inc_mm_counter(mm, MM_ANONPAGES);
2773 page_add_new_anon_rmap(page, vma, addr, false);
2774 mem_cgroup_commit_charge(page, memcg, false, false);
2775 if (!is_zone_device_page(page))
2776 lru_cache_add_active_or_unevictable(page, vma);
2777 get_page(page);
2778
2779 if (flush) {
2780 flush_cache_page(vma, addr, pte_pfn(*ptep));
2781 ptep_clear_flush_notify(vma, addr, ptep);
2782 set_pte_at_notify(mm, addr, ptep, entry);
2783 update_mmu_cache(vma, addr, ptep);
2784 } else {
2785 /* No need to invalidate - it was non-present before */
2786 set_pte_at(mm, addr, ptep, entry);
2787 update_mmu_cache(vma, addr, ptep);
2788 }
2789
2790 pte_unmap_unlock(ptep, ptl);
2791 *src = MIGRATE_PFN_MIGRATE;
2792 return;
2793
2794abort:
2795 *src &= ~MIGRATE_PFN_MIGRATE;
2796}
2797
Christoph Hellwiga7d1f222019-08-14 09:59:19 +02002798/**
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002799 * migrate_vma_pages() - migrate meta-data from src page to dst page
2800 * @migrate: migrate struct containing all migration information
2801 *
2802 * This migrates struct page meta-data from the source struct page to the
2803 * destination struct page. This effectively finishes the migration from the
2804 * source page to the destination page.
2805 */
Christoph Hellwiga7d1f222019-08-14 09:59:19 +02002806void migrate_vma_pages(struct migrate_vma *migrate)
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002807{
2808 const unsigned long npages = migrate->npages;
2809 const unsigned long start = migrate->start;
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08002810 struct mmu_notifier_range range;
2811 unsigned long addr, i;
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002812 bool notified = false;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002813
2814 for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) {
2815 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
2816 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2817 struct address_space *mapping;
2818 int r;
2819
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002820 if (!newpage) {
2821 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002822 continue;
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002823 }
2824
2825 if (!page) {
2826 if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE)) {
2827 continue;
2828 }
2829 if (!notified) {
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002830 notified = true;
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08002831
2832 mmu_notifier_range_init(&range,
Jérôme Glisse7269f992019-05-13 17:20:53 -07002833 MMU_NOTIFY_CLEAR, 0,
Jérôme Glisse6f4f13e2019-05-13 17:20:49 -07002834 NULL,
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08002835 migrate->vma->vm_mm,
2836 addr, migrate->end);
2837 mmu_notifier_invalidate_range_start(&range);
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002838 }
2839 migrate_vma_insert_page(migrate, addr, newpage,
2840 &migrate->src[i],
2841 &migrate->dst[i]);
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002842 continue;
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002843 }
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002844
2845 mapping = page_mapping(page);
2846
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002847 if (is_zone_device_page(newpage)) {
2848 if (is_device_private_page(newpage)) {
2849 /*
2850 * For now we only support private anonymous memory
2851 * when migrating to un-addressable device memory.
2852 */
2853 if (mapping) {
2854 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2855 continue;
2856 }
Christoph Hellwig25b29952019-06-13 22:50:49 +02002857 } else {
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002858 /*
2859 * Other types of ZONE_DEVICE page are not
2860 * supported.
2861 */
2862 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2863 continue;
2864 }
2865 }
2866
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002867 r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY);
2868 if (r != MIGRATEPAGE_SUCCESS)
2869 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2870 }
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002871
Jérôme Glisse4645b9f2017-11-15 17:34:11 -08002872 /*
2873 * No need to call the mmu_notifier->invalidate_range() callback again, as
2874 * the ptep_clear_flush_notify() inside migrate_vma_insert_page() above
2875 * already called it.
2876 */
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002877 if (notified)
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08002878 mmu_notifier_invalidate_range_only_end(&range);
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002879}
Christoph Hellwiga7d1f222019-08-14 09:59:19 +02002880EXPORT_SYMBOL(migrate_vma_pages);
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002881
Christoph Hellwiga7d1f222019-08-14 09:59:19 +02002882/**
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002883 * migrate_vma_finalize() - restore CPU page table entry
2884 * @migrate: migrate struct containing all migration information
2885 *
2886 * This replaces the special migration pte entry with either a mapping to the
2887 * new page if migration was successful for that page, or to the original page
2888 * otherwise.
2889 *
2890 * This also unlocks the pages and puts them back on the lru (or, for device
2891 * pages, drops the extra refcount).
2892 */
Christoph Hellwiga7d1f222019-08-14 09:59:19 +02002893void migrate_vma_finalize(struct migrate_vma *migrate)
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002894{
2895 const unsigned long npages = migrate->npages;
2896 unsigned long i;
2897
2898 for (i = 0; i < npages; i++) {
2899 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
2900 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2901
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002902 if (!page) {
2903 if (newpage) {
2904 unlock_page(newpage);
2905 put_page(newpage);
2906 }
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002907 continue;
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002908 }
2909
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002910 if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
2911 if (newpage) {
2912 unlock_page(newpage);
2913 put_page(newpage);
2914 }
2915 newpage = page;
2916 }
2917
2918 remove_migration_ptes(page, newpage, false);
2919 unlock_page(page);
2920 migrate->cpages--;
2921
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002922 if (is_zone_device_page(page))
2923 put_page(page);
2924 else
2925 putback_lru_page(page);
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002926
2927 if (newpage != page) {
2928 unlock_page(newpage);
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002929 if (is_zone_device_page(newpage))
2930 put_page(newpage);
2931 else
2932 putback_lru_page(newpage);
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002933 }
2934 }
2935}
Christoph Hellwiga7d1f222019-08-14 09:59:19 +02002936EXPORT_SYMBOL(migrate_vma_finalize);
Christoph Hellwig9b2ed9c2019-08-14 09:59:28 +02002937#endif /* CONFIG_DEVICE_PRIVATE */