Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3
4#include <linux/mm.h>
5#include <linux/sched.h>
Ingo Molnar6e84f312017-02-08 18:51:29 +01006#include <linux/sched/mm.h>
Ingo Molnarf7ccbae2017-02-08 18:51:30 +01007#include <linux/sched/coredump.h>
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07008#include <linux/mmu_notifier.h>
9#include <linux/rmap.h>
10#include <linux/swap.h>
11#include <linux/mm_inline.h>
12#include <linux/kthread.h>
13#include <linux/khugepaged.h>
14#include <linux/freezer.h>
15#include <linux/mman.h>
16#include <linux/hashtable.h>
17#include <linux/userfaultfd_k.h>
18#include <linux/page_idle.h>
19#include <linux/swapops.h>
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -070020#include <linux/shmem_fs.h>
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -070021
22#include <asm/tlb.h>
23#include <asm/pgalloc.h>
24#include "internal.h"
25
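/* Outcome codes used by the scan and collapse paths and reported via tracepoints */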
26enum scan_result {
27 SCAN_FAIL,
28 SCAN_SUCCEED,
29 SCAN_PMD_NULL,
30 SCAN_EXCEED_NONE_PTE,
Kirill A. Shutemov71a2c112020-06-03 16:00:30 -070031 SCAN_EXCEED_SWAP_PTE,
32 SCAN_EXCEED_SHARED_PTE,
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -070033 SCAN_PTE_NON_PRESENT,
Peter Xue1e267c2020-04-06 20:06:04 -070034 SCAN_PTE_UFFD_WP,
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -070035 SCAN_PAGE_RO,
Ebru Akagunduz0db501f2016-07-26 15:26:46 -070036 SCAN_LACK_REFERENCED_PAGE,
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -070037 SCAN_PAGE_NULL,
38 SCAN_SCAN_ABORT,
39 SCAN_PAGE_COUNT,
40 SCAN_PAGE_LRU,
41 SCAN_PAGE_LOCK,
42 SCAN_PAGE_ANON,
43 SCAN_PAGE_COMPOUND,
44 SCAN_ANY_PROCESS,
45 SCAN_VMA_NULL,
46 SCAN_VMA_CHECK,
47 SCAN_ADDRESS_RANGE,
48 SCAN_SWAP_CACHE_PAGE,
49 SCAN_DEL_PAGE_LRU,
50 SCAN_ALLOC_HUGE_PAGE_FAIL,
51 SCAN_CGROUP_CHARGE_FAIL,
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -070052 SCAN_TRUNCATED,
Song Liu99cb0db2019-09-23 15:38:00 -070053 SCAN_PAGE_HAS_PRIVATE,
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -070054};
55
56#define CREATE_TRACE_POINTS
57#include <trace/events/huge_memory.h>
58
Vijay Balakrishna4aab2be2020-10-10 23:16:40 -070059static struct task_struct *khugepaged_thread __read_mostly;
60static DEFINE_MUTEX(khugepaged_mutex);
61
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -070062/* default scan 8*512 ptes (or vmas) every 10 seconds */
63static unsigned int khugepaged_pages_to_scan __read_mostly;
64static unsigned int khugepaged_pages_collapsed;
65static unsigned int khugepaged_full_scans;
66static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
67/* during fragmentation poll the hugepage allocator once every minute */
68static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
69static unsigned long khugepaged_sleep_expire;
70static DEFINE_SPINLOCK(khugepaged_mm_lock);
71static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
72/*
 73 * By default, collapse hugepages if there is at least one pte mapped
 74 * the way it would have been had the vma been large enough during the
 75 * page fault.
76 */
77static unsigned int khugepaged_max_ptes_none __read_mostly;
78static unsigned int khugepaged_max_ptes_swap __read_mostly;
Kirill A. Shutemov71a2c112020-06-03 16:00:30 -070079static unsigned int khugepaged_max_ptes_shared __read_mostly;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -070080
81#define MM_SLOTS_HASH_BITS 10
82static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
83
84static struct kmem_cache *mm_slot_cache __read_mostly;
85
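/* Maximum number of pte-mapped THP addresses queued per mm for later collapse */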
Song Liu27e1f822019-09-23 15:38:30 -070086#define MAX_PTE_MAPPED_THP 8
87
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -070088/**
89 * struct mm_slot - hash lookup from mm to mm_slot
90 * @hash: hash collision list
91 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
92 * @mm: the mm that this information is valid for
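 * @nr_pte_mapped_thp: number of pte-mapped THP addresses queued in @pte_mapped_thp
 * @pte_mapped_thp: addresses of pte-mapped THPs waiting to be collapsed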
93 */
94struct mm_slot {
95 struct hlist_node hash;
96 struct list_head mm_node;
97 struct mm_struct *mm;
Song Liu27e1f822019-09-23 15:38:30 -070098
99 /* pte-mapped THP in this mm */
100 int nr_pte_mapped_thp;
101 unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700102};
103
104/**
105 * struct khugepaged_scan - cursor for scanning
106 * @mm_head: the head of the mm list to scan
107 * @mm_slot: the current mm_slot we are scanning
108 * @address: the next address inside that to be scanned
109 *
110 * There is only the one khugepaged_scan instance of this cursor structure.
111 */
112struct khugepaged_scan {
113 struct list_head mm_head;
114 struct mm_slot *mm_slot;
115 unsigned long address;
116};
117
118static struct khugepaged_scan khugepaged_scan = {
119 .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
120};
121
Jérémy Lefauree1465d12016-11-30 15:54:02 -0800122#ifdef CONFIG_SYSFS
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700123static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
124 struct kobj_attribute *attr,
125 char *buf)
126{
127 return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
128}
129
130static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
131 struct kobj_attribute *attr,
132 const char *buf, size_t count)
133{
134 unsigned long msecs;
135 int err;
136
137 err = kstrtoul(buf, 10, &msecs);
138 if (err || msecs > UINT_MAX)
139 return -EINVAL;
140
141 khugepaged_scan_sleep_millisecs = msecs;
142 khugepaged_sleep_expire = 0;
143 wake_up_interruptible(&khugepaged_wait);
144
145 return count;
146}
147static struct kobj_attribute scan_sleep_millisecs_attr =
148 __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
149 scan_sleep_millisecs_store);
150
151static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
152 struct kobj_attribute *attr,
153 char *buf)
154{
155 return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
156}
157
158static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
159 struct kobj_attribute *attr,
160 const char *buf, size_t count)
161{
162 unsigned long msecs;
163 int err;
164
165 err = kstrtoul(buf, 10, &msecs);
166 if (err || msecs > UINT_MAX)
167 return -EINVAL;
168
169 khugepaged_alloc_sleep_millisecs = msecs;
170 khugepaged_sleep_expire = 0;
171 wake_up_interruptible(&khugepaged_wait);
172
173 return count;
174}
175static struct kobj_attribute alloc_sleep_millisecs_attr =
176 __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
177 alloc_sleep_millisecs_store);
178
179static ssize_t pages_to_scan_show(struct kobject *kobj,
180 struct kobj_attribute *attr,
181 char *buf)
182{
183 return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
184}
185static ssize_t pages_to_scan_store(struct kobject *kobj,
186 struct kobj_attribute *attr,
187 const char *buf, size_t count)
188{
189 int err;
190 unsigned long pages;
191
192 err = kstrtoul(buf, 10, &pages);
193 if (err || !pages || pages > UINT_MAX)
194 return -EINVAL;
195
196 khugepaged_pages_to_scan = pages;
197
198 return count;
199}
200static struct kobj_attribute pages_to_scan_attr =
201 __ATTR(pages_to_scan, 0644, pages_to_scan_show,
202 pages_to_scan_store);
203
204static ssize_t pages_collapsed_show(struct kobject *kobj,
205 struct kobj_attribute *attr,
206 char *buf)
207{
208 return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
209}
210static struct kobj_attribute pages_collapsed_attr =
211 __ATTR_RO(pages_collapsed);
212
213static ssize_t full_scans_show(struct kobject *kobj,
214 struct kobj_attribute *attr,
215 char *buf)
216{
217 return sprintf(buf, "%u\n", khugepaged_full_scans);
218}
219static struct kobj_attribute full_scans_attr =
220 __ATTR_RO(full_scans);
221
222static ssize_t khugepaged_defrag_show(struct kobject *kobj,
223 struct kobj_attribute *attr, char *buf)
224{
225 return single_hugepage_flag_show(kobj, attr, buf,
226 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
227}
228static ssize_t khugepaged_defrag_store(struct kobject *kobj,
229 struct kobj_attribute *attr,
230 const char *buf, size_t count)
231{
232 return single_hugepage_flag_store(kobj, attr, buf, count,
233 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
234}
235static struct kobj_attribute khugepaged_defrag_attr =
236 __ATTR(defrag, 0644, khugepaged_defrag_show,
237 khugepaged_defrag_store);
238
239/*
 240 * max_ptes_none controls whether khugepaged should collapse hugepages
 241 * over any unmapped ptes, in turn potentially increasing the memory
 242 * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
 243 * reduce the available free memory in the system as it
 244 * runs. Increasing max_ptes_none will instead potentially reduce the
 245 * free memory in the system during the khugepaged scan.
246 */
247static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
248 struct kobj_attribute *attr,
249 char *buf)
250{
251 return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
252}
253static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
254 struct kobj_attribute *attr,
255 const char *buf, size_t count)
256{
257 int err;
258 unsigned long max_ptes_none;
259
260 err = kstrtoul(buf, 10, &max_ptes_none);
261 if (err || max_ptes_none > HPAGE_PMD_NR-1)
262 return -EINVAL;
263
264 khugepaged_max_ptes_none = max_ptes_none;
265
266 return count;
267}
268static struct kobj_attribute khugepaged_max_ptes_none_attr =
269 __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
270 khugepaged_max_ptes_none_store);
271
272static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
273 struct kobj_attribute *attr,
274 char *buf)
275{
276 return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
277}
278
279static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
280 struct kobj_attribute *attr,
281 const char *buf, size_t count)
282{
283 int err;
284 unsigned long max_ptes_swap;
285
286 err = kstrtoul(buf, 10, &max_ptes_swap);
287 if (err || max_ptes_swap > HPAGE_PMD_NR-1)
288 return -EINVAL;
289
290 khugepaged_max_ptes_swap = max_ptes_swap;
291
292 return count;
293}
294
295static struct kobj_attribute khugepaged_max_ptes_swap_attr =
296 __ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
297 khugepaged_max_ptes_swap_store);
298
Kirill A. Shutemov71a2c112020-06-03 16:00:30 -0700299static ssize_t khugepaged_max_ptes_shared_show(struct kobject *kobj,
300 struct kobj_attribute *attr,
301 char *buf)
302{
303 return sprintf(buf, "%u\n", khugepaged_max_ptes_shared);
304}
305
306static ssize_t khugepaged_max_ptes_shared_store(struct kobject *kobj,
307 struct kobj_attribute *attr,
308 const char *buf, size_t count)
309{
310 int err;
311 unsigned long max_ptes_shared;
312
313 err = kstrtoul(buf, 10, &max_ptes_shared);
314 if (err || max_ptes_shared > HPAGE_PMD_NR-1)
315 return -EINVAL;
316
317 khugepaged_max_ptes_shared = max_ptes_shared;
318
319 return count;
320}
321
322static struct kobj_attribute khugepaged_max_ptes_shared_attr =
323 __ATTR(max_ptes_shared, 0644, khugepaged_max_ptes_shared_show,
324 khugepaged_max_ptes_shared_store);
325
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700326static struct attribute *khugepaged_attr[] = {
327 &khugepaged_defrag_attr.attr,
328 &khugepaged_max_ptes_none_attr.attr,
Kirill A. Shutemov71a2c112020-06-03 16:00:30 -0700329 &khugepaged_max_ptes_swap_attr.attr,
330 &khugepaged_max_ptes_shared_attr.attr,
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700331 &pages_to_scan_attr.attr,
332 &pages_collapsed_attr.attr,
333 &full_scans_attr.attr,
334 &scan_sleep_millisecs_attr.attr,
335 &alloc_sleep_millisecs_attr.attr,
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700336 NULL,
337};
338
339struct attribute_group khugepaged_attr_group = {
340 .attrs = khugepaged_attr,
341 .name = "khugepaged",
342};
Jérémy Lefauree1465d12016-11-30 15:54:02 -0800343#endif /* CONFIG_SYSFS */
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700344
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700345int hugepage_madvise(struct vm_area_struct *vma,
346 unsigned long *vm_flags, int advice)
347{
348 switch (advice) {
349 case MADV_HUGEPAGE:
350#ifdef CONFIG_S390
351 /*
352 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
353 * can't handle this properly after s390_enable_sie, so we simply
354 * ignore the madvise to prevent qemu from causing a SIGSEGV.
355 */
356 if (mm_has_pgste(vma->vm_mm))
357 return 0;
358#endif
359 *vm_flags &= ~VM_NOHUGEPAGE;
360 *vm_flags |= VM_HUGEPAGE;
361 /*
 362 * If the vma becomes eligible for khugepaged to scan,
 363 * register it here without waiting for a page fault that
 364 * may not happen any time soon.
365 */
366 if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
367 khugepaged_enter_vma_merge(vma, *vm_flags))
368 return -ENOMEM;
369 break;
370 case MADV_NOHUGEPAGE:
371 *vm_flags &= ~VM_HUGEPAGE;
372 *vm_flags |= VM_NOHUGEPAGE;
373 /*
374 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
375 * this vma even if we leave the mm registered in khugepaged if
376 * it got registered before VM_NOHUGEPAGE was set.
377 */
378 break;
379 }
380
381 return 0;
382}
383
384int __init khugepaged_init(void)
385{
386 mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
387 sizeof(struct mm_slot),
388 __alignof__(struct mm_slot), 0, NULL);
389 if (!mm_slot_cache)
390 return -ENOMEM;
391
392 khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
393 khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
394 khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
Kirill A. Shutemov71a2c112020-06-03 16:00:30 -0700395 khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700396
397 return 0;
398}
399
400void __init khugepaged_destroy(void)
401{
402 kmem_cache_destroy(mm_slot_cache);
403}
404
405static inline struct mm_slot *alloc_mm_slot(void)
406{
407 if (!mm_slot_cache) /* initialization failed */
408 return NULL;
409 return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
410}
411
412static inline void free_mm_slot(struct mm_slot *mm_slot)
413{
414 kmem_cache_free(mm_slot_cache, mm_slot);
415}
416
417static struct mm_slot *get_mm_slot(struct mm_struct *mm)
418{
419 struct mm_slot *mm_slot;
420
421 hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
422 if (mm == mm_slot->mm)
423 return mm_slot;
424
425 return NULL;
426}
427
428static void insert_to_mm_slots_hash(struct mm_struct *mm,
429 struct mm_slot *mm_slot)
430{
431 mm_slot->mm = mm;
432 hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
433}
434
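/* True once mm_users has dropped to zero, i.e. the mm is exiting and its page tables may be going away */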
435static inline int khugepaged_test_exit(struct mm_struct *mm)
436{
Jann Horn4d45e752020-10-15 20:13:00 -0700437 return atomic_read(&mm->mm_users) == 0;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700438}
439
Song Liu50f8b922018-08-17 15:47:00 -0700440static bool hugepage_vma_check(struct vm_area_struct *vma,
441 unsigned long vm_flags)
Yang Shic2231022018-08-17 15:45:26 -0700442{
Miaohe Linb6559732021-06-30 18:47:50 -0700443 if (!transhuge_vma_enabled(vma, vm_flags))
Yang Shic2231022018-08-17 15:45:26 -0700444 return false;
Song Liu99cb0db2019-09-23 15:38:00 -0700445
Rik van Riela7fbcb32021-02-25 17:16:25 -0800446 /* Enabled via shmem mount options or sysfs settings. */
447 if (shmem_file(vma->vm_file) && shmem_huge_enabled(vma)) {
Yang Shic2231022018-08-17 15:45:26 -0700448 return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
449 HPAGE_PMD_NR);
450 }
Rik van Riela7fbcb32021-02-25 17:16:25 -0800451
452 /* THP settings require madvise. */
453 if (!(vm_flags & VM_HUGEPAGE) && !khugepaged_always())
454 return false;
455
456 /* Read-only file mappings need to be aligned for THP to work. */
457 if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && vma->vm_file &&
Collin Fijalkovich28b4b152021-03-23 16:29:26 -0700458 !inode_is_open_for_write(vma->vm_file->f_inode) &&
459 (vm_flags & VM_EXEC)) {
Rik van Riela7fbcb32021-02-25 17:16:25 -0800460 return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
461 HPAGE_PMD_NR);
462 }
463
Yang Shic2231022018-08-17 15:45:26 -0700464 if (!vma->anon_vma || vma->vm_ops)
465 return false;
Anshuman Khandual222100e2020-04-01 21:07:52 -0700466 if (vma_is_temporary_stack(vma))
Yang Shic2231022018-08-17 15:45:26 -0700467 return false;
Song Liu50f8b922018-08-17 15:47:00 -0700468 return !(vm_flags & VM_NO_KHUGEPAGED);
Yang Shic2231022018-08-17 15:45:26 -0700469}
470
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700471int __khugepaged_enter(struct mm_struct *mm)
472{
473 struct mm_slot *mm_slot;
474 int wakeup;
475
476 mm_slot = alloc_mm_slot();
477 if (!mm_slot)
478 return -ENOMEM;
479
480 /* __khugepaged_exit() must not run from under us */
Hugh Dickinsf3f99d62020-08-20 17:42:02 -0700481 VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700482 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
483 free_mm_slot(mm_slot);
484 return 0;
485 }
486
487 spin_lock(&khugepaged_mm_lock);
488 insert_to_mm_slots_hash(mm, mm_slot);
489 /*
490 * Insert just behind the scanning cursor, to let the area settle
491 * down a little.
492 */
493 wakeup = list_empty(&khugepaged_scan.mm_head);
494 list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
495 spin_unlock(&khugepaged_mm_lock);
496
Vegard Nossumf1f10072017-02-27 14:30:07 -0800497 mmgrab(mm);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700498 if (wakeup)
499 wake_up_interruptible(&khugepaged_wait);
500
501 return 0;
502}
503
504int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
505 unsigned long vm_flags)
506{
507 unsigned long hstart, hend;
Yang Shic2231022018-08-17 15:45:26 -0700508
509 /*
Song Liu99cb0db2019-09-23 15:38:00 -0700510 * khugepaged only supports read-only files for non-shmem files.
511 * khugepaged does not yet work on special mappings. And
512 * file-private shmem THP is not supported.
Yang Shic2231022018-08-17 15:45:26 -0700513 */
Song Liu50f8b922018-08-17 15:47:00 -0700514 if (!hugepage_vma_check(vma, vm_flags))
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700515 return 0;
Yang Shic2231022018-08-17 15:45:26 -0700516
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700517 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
518 hend = vma->vm_end & HPAGE_PMD_MASK;
519 if (hstart < hend)
520 return khugepaged_enter(vma, vm_flags);
521 return 0;
522}
523
524void __khugepaged_exit(struct mm_struct *mm)
525{
526 struct mm_slot *mm_slot;
527 int free = 0;
528
529 spin_lock(&khugepaged_mm_lock);
530 mm_slot = get_mm_slot(mm);
531 if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
532 hash_del(&mm_slot->hash);
533 list_del(&mm_slot->mm_node);
534 free = 1;
535 }
536 spin_unlock(&khugepaged_mm_lock);
537
538 if (free) {
539 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
540 free_mm_slot(mm_slot);
541 mmdrop(mm);
542 } else if (mm_slot) {
543 /*
544 * This is required to serialize against
545 * khugepaged_test_exit() (which is guaranteed to run
 546 * under mmap_lock read mode). Stop here (after we
 547 * return, all pagetables will be destroyed) until
548 * khugepaged has finished working on the pagetables
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -0700549 * under the mmap_lock.
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700550 */
Michel Lespinassed8ed45c2020-06-08 21:33:25 -0700551 mmap_write_lock(mm);
552 mmap_write_unlock(mm);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700553 }
554}
555
556static void release_pte_page(struct page *page)
557{
Kirill A. Shutemov5503fbf2020-06-03 16:00:23 -0700558 mod_node_page_state(page_pgdat(page),
559 NR_ISOLATED_ANON + page_is_file_lru(page),
560 -compound_nr(page));
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700561 unlock_page(page);
562 putback_lru_page(page);
563}
564
Kirill A. Shutemov5503fbf2020-06-03 16:00:23 -0700565static void release_pte_pages(pte_t *pte, pte_t *_pte,
566 struct list_head *compound_pagelist)
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700567{
Kirill A. Shutemov5503fbf2020-06-03 16:00:23 -0700568 struct page *page, *tmp;
569
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700570 while (--_pte >= pte) {
571 pte_t pteval = *_pte;
Kirill A. Shutemov5503fbf2020-06-03 16:00:23 -0700572
573 page = pte_page(pteval);
574 if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)) &&
575 !PageCompound(page))
576 release_pte_page(page);
577 }
578
579 list_for_each_entry_safe(page, tmp, compound_pagelist, lru) {
580 list_del(&page->lru);
581 release_pte_page(page);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700582 }
583}
584
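/*
 * A page is only suitable for collapse if its refcount is fully explained by
 * its mappings (total_mapcount) plus, when it sits in the swap cache, one
 * reference per subpage; anything beyond that indicates an unexpected pin
 * such as GUP.
 */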
Kirill A. Shutemov94456892020-06-03 16:00:20 -0700585static bool is_refcount_suitable(struct page *page)
586{
587 int expected_refcount;
588
589 expected_refcount = total_mapcount(page);
590 if (PageSwapCache(page))
591 expected_refcount += compound_nr(page);
592
593 return page_count(page) == expected_refcount;
594}
595
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700596static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
597 unsigned long address,
Kirill A. Shutemov5503fbf2020-06-03 16:00:23 -0700598 pte_t *pte,
599 struct list_head *compound_pagelist)
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700600{
601 struct page *page = NULL;
602 pte_t *_pte;
Kirill A. Shutemov71a2c112020-06-03 16:00:30 -0700603 int none_or_zero = 0, shared = 0, result = 0, referenced = 0;
Ebru Akagunduz0db501f2016-07-26 15:26:46 -0700604 bool writable = false;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700605
606 for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
607 _pte++, address += PAGE_SIZE) {
608 pte_t pteval = *_pte;
609 if (pte_none(pteval) || (pte_present(pteval) &&
610 is_zero_pfn(pte_pfn(pteval)))) {
611 if (!userfaultfd_armed(vma) &&
612 ++none_or_zero <= khugepaged_max_ptes_none) {
613 continue;
614 } else {
615 result = SCAN_EXCEED_NONE_PTE;
616 goto out;
617 }
618 }
619 if (!pte_present(pteval)) {
620 result = SCAN_PTE_NON_PRESENT;
621 goto out;
622 }
623 page = vm_normal_page(vma, address, pteval);
624 if (unlikely(!page)) {
625 result = SCAN_PAGE_NULL;
626 goto out;
627 }
628
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700629 VM_BUG_ON_PAGE(!PageAnon(page), page);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700630
Kirill A. Shutemov71a2c112020-06-03 16:00:30 -0700631 if (page_mapcount(page) > 1 &&
632 ++shared > khugepaged_max_ptes_shared) {
633 result = SCAN_EXCEED_SHARED_PTE;
634 goto out;
635 }
636
Kirill A. Shutemov5503fbf2020-06-03 16:00:23 -0700637 if (PageCompound(page)) {
638 struct page *p;
639 page = compound_head(page);
640
641 /*
642 * Check if we have dealt with the compound page
643 * already
644 */
645 list_for_each_entry(p, compound_pagelist, lru) {
646 if (page == p)
647 goto next;
648 }
649 }
650
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700651 /*
652 * We can do it before isolate_lru_page because the
653 * page can't be freed from under us. NOTE: PG_lock
654 * is needed to serialize against split_huge_page
655 * when invoked from the VM.
656 */
657 if (!trylock_page(page)) {
658 result = SCAN_PAGE_LOCK;
659 goto out;
660 }
661
662 /*
Kirill A. Shutemov94456892020-06-03 16:00:20 -0700663 * Check if the page has any GUP (or other external) pins.
664 *
 665 * The page table that maps the page has already been unlinked
 666 * from the page table tree and this process cannot get
 667 * an additional pin on the page.
668 *
669 * New pins can come later if the page is shared across fork,
670 * but not from this process. The other process cannot write to
671 * the page, only trigger CoW.
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700672 */
Kirill A. Shutemov94456892020-06-03 16:00:20 -0700673 if (!is_refcount_suitable(page)) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700674 unlock_page(page);
675 result = SCAN_PAGE_COUNT;
676 goto out;
677 }
Kirill A. Shutemov5503fbf2020-06-03 16:00:23 -0700678 if (!pte_write(pteval) && PageSwapCache(page) &&
679 !reuse_swap_page(page, NULL)) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700680 /*
Kirill A. Shutemov5503fbf2020-06-03 16:00:23 -0700681 * Page is in the swap cache and cannot be re-used.
682 * It cannot be collapsed into a THP.
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700683 */
Kirill A. Shutemov5503fbf2020-06-03 16:00:23 -0700684 unlock_page(page);
685 result = SCAN_SWAP_CACHE_PAGE;
686 goto out;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700687 }
688
689 /*
 690 * Isolate the page to avoid collapsing a hugepage
691 * currently in use by the VM.
692 */
693 if (isolate_lru_page(page)) {
694 unlock_page(page);
695 result = SCAN_DEL_PAGE_LRU;
696 goto out;
697 }
Kirill A. Shutemov5503fbf2020-06-03 16:00:23 -0700698 mod_node_page_state(page_pgdat(page),
699 NR_ISOLATED_ANON + page_is_file_lru(page),
700 compound_nr(page));
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700701 VM_BUG_ON_PAGE(!PageLocked(page), page);
702 VM_BUG_ON_PAGE(PageLRU(page), page);
703
Kirill A. Shutemov5503fbf2020-06-03 16:00:23 -0700704 if (PageCompound(page))
705 list_add_tail(&page->lru, compound_pagelist);
706next:
Ebru Akagunduz0db501f2016-07-26 15:26:46 -0700707 /* There should be enough young ptes to collapse the page */
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700708 if (pte_young(pteval) ||
709 page_is_young(page) || PageReferenced(page) ||
710 mmu_notifier_test_young(vma->vm_mm, address))
Ebru Akagunduz0db501f2016-07-26 15:26:46 -0700711 referenced++;
Kirill A. Shutemov5503fbf2020-06-03 16:00:23 -0700712
713 if (pte_write(pteval))
714 writable = true;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700715 }
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700716
Miaohe Lin14d45fb2021-05-04 18:33:46 -0700717 if (unlikely(!writable)) {
718 result = SCAN_PAGE_RO;
719 } else if (unlikely(!referenced)) {
720 result = SCAN_LACK_REFERENCED_PAGE;
721 } else {
722 result = SCAN_SUCCEED;
723 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
724 referenced, writable, result);
725 return 1;
726 }
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700727out:
Kirill A. Shutemov5503fbf2020-06-03 16:00:23 -0700728 release_pte_pages(pte, _pte, compound_pagelist);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700729 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
730 referenced, writable, result);
731 return 0;
732}
733
734static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
735 struct vm_area_struct *vma,
736 unsigned long address,
Kirill A. Shutemov5503fbf2020-06-03 16:00:23 -0700737 spinlock_t *ptl,
738 struct list_head *compound_pagelist)
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700739{
Kirill A. Shutemov5503fbf2020-06-03 16:00:23 -0700740 struct page *src_page, *tmp;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700741 pte_t *_pte;
David Rientjes338a16b2017-05-12 15:47:03 -0700742 for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
743 _pte++, page++, address += PAGE_SIZE) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700744 pte_t pteval = *_pte;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700745
746 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
747 clear_user_highpage(page, address);
748 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
749 if (is_zero_pfn(pte_pfn(pteval))) {
750 /*
751 * ptl mostly unnecessary.
752 */
753 spin_lock(ptl);
754 /*
755 * paravirt calls inside pte_clear here are
756 * superfluous.
757 */
758 pte_clear(vma->vm_mm, address, _pte);
759 spin_unlock(ptl);
760 }
761 } else {
762 src_page = pte_page(pteval);
763 copy_user_highpage(page, src_page, address, vma);
Kirill A. Shutemov5503fbf2020-06-03 16:00:23 -0700764 if (!PageCompound(src_page))
765 release_pte_page(src_page);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700766 /*
767 * ptl mostly unnecessary, but preempt has to
768 * be disabled to update the per-cpu stats
769 * inside page_remove_rmap().
770 */
771 spin_lock(ptl);
772 /*
773 * paravirt calls inside pte_clear here are
774 * superfluous.
775 */
776 pte_clear(vma->vm_mm, address, _pte);
777 page_remove_rmap(src_page, false);
778 spin_unlock(ptl);
779 free_page_and_swap_cache(src_page);
780 }
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700781 }
Kirill A. Shutemov5503fbf2020-06-03 16:00:23 -0700782
783 list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
784 list_del(&src_page->lru);
785 release_pte_page(src_page);
786 }
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700787}
788
789static void khugepaged_alloc_sleep(void)
790{
791 DEFINE_WAIT(wait);
792
793 add_wait_queue(&khugepaged_wait, &wait);
794 freezable_schedule_timeout_interruptible(
795 msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
796 remove_wait_queue(&khugepaged_wait, &wait);
797}
798
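/* Per-scan tally of which nodes the candidate pages live on, used to pick the allocation node */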
799static int khugepaged_node_load[MAX_NUMNODES];
800
801static bool khugepaged_scan_abort(int nid)
802{
803 int i;
804
805 /*
Mel Gormana5f5f912016-07-28 15:46:32 -0700806 * If node_reclaim_mode is disabled, then no extra effort is made to
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700807 * allocate memory locally.
808 */
Mel Gormana5f5f912016-07-28 15:46:32 -0700809 if (!node_reclaim_mode)
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700810 return false;
811
812 /* If there is a count for this node already, it must be acceptable */
813 if (khugepaged_node_load[nid])
814 return false;
815
816 for (i = 0; i < MAX_NUMNODES; i++) {
817 if (!khugepaged_node_load[i])
818 continue;
Matt Fleminga55c7452019-08-08 20:53:01 +0100819 if (node_distance(nid, i) > node_reclaim_distance)
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700820 return true;
821 }
822 return false;
823}
824
825/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
826static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
827{
Vlastimil Babka25160352016-07-28 15:49:25 -0700828 return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700829}
830
831#ifdef CONFIG_NUMA
832static int khugepaged_find_target_node(void)
833{
834 static int last_khugepaged_target_node = NUMA_NO_NODE;
835 int nid, target_node = 0, max_value = 0;
836
837 /* find first node with max normal pages hit */
838 for (nid = 0; nid < MAX_NUMNODES; nid++)
839 if (khugepaged_node_load[nid] > max_value) {
840 max_value = khugepaged_node_load[nid];
841 target_node = nid;
842 }
843
 844 /* do some balancing if several nodes have the same hit record */
845 if (target_node <= last_khugepaged_target_node)
846 for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
847 nid++)
848 if (max_value == khugepaged_node_load[nid]) {
849 target_node = nid;
850 break;
851 }
852
853 last_khugepaged_target_node = target_node;
854 return target_node;
855}
856
857static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
858{
859 if (IS_ERR(*hpage)) {
860 if (!*wait)
861 return false;
862
863 *wait = false;
864 *hpage = NULL;
865 khugepaged_alloc_sleep();
866 } else if (*hpage) {
867 put_page(*hpage);
868 *hpage = NULL;
869 }
870
871 return true;
872}
873
874static struct page *
Kirill A. Shutemov988ddb72016-07-26 15:26:26 -0700875khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700876{
877 VM_BUG_ON_PAGE(*hpage, *hpage);
878
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700879 *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
880 if (unlikely(!*hpage)) {
881 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
882 *hpage = ERR_PTR(-ENOMEM);
883 return NULL;
884 }
885
886 prep_transhuge_page(*hpage);
887 count_vm_event(THP_COLLAPSE_ALLOC);
888 return *hpage;
889}
890#else
891static int khugepaged_find_target_node(void)
892{
893 return 0;
894}
895
896static inline struct page *alloc_khugepaged_hugepage(void)
897{
898 struct page *page;
899
900 page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
901 HPAGE_PMD_ORDER);
902 if (page)
903 prep_transhuge_page(page);
904 return page;
905}
906
907static struct page *khugepaged_alloc_hugepage(bool *wait)
908{
909 struct page *hpage;
910
911 do {
912 hpage = alloc_khugepaged_hugepage();
913 if (!hpage) {
914 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
915 if (!*wait)
916 return NULL;
917
918 *wait = false;
919 khugepaged_alloc_sleep();
920 } else
921 count_vm_event(THP_COLLAPSE_ALLOC);
922 } while (unlikely(!hpage) && likely(khugepaged_enabled()));
923
924 return hpage;
925}
926
927static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
928{
Hugh Dickins033b5d72020-10-09 20:07:59 -0700929 /*
930 * If the hpage allocated earlier was briefly exposed in page cache
931 * before collapse_file() failed, it is possible that racing lookups
932 * have not yet completed, and would then be unpleasantly surprised by
933 * finding the hpage reused for the same mapping at a different offset.
934 * Just release the previous allocation if there is any danger of that.
935 */
936 if (*hpage && page_count(*hpage) > 1) {
937 put_page(*hpage);
938 *hpage = NULL;
939 }
940
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700941 if (!*hpage)
942 *hpage = khugepaged_alloc_hugepage(wait);
943
944 if (unlikely(!*hpage))
945 return false;
946
947 return true;
948}
949
950static struct page *
Kirill A. Shutemov988ddb72016-07-26 15:26:26 -0700951khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700952{
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700953 VM_BUG_ON(!*hpage);
954
955 return *hpage;
956}
957#endif
958
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700959/*
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -0700960 * If the mmap_lock was temporarily dropped, revalidate the vma
 961 * before taking the mmap_lock again.
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700962 * Return 0 on success, otherwise return a non-zero
 963 * value (scan code).
964 */
965
Kirill A. Shutemovc131f752016-09-19 14:44:01 -0700966static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
967 struct vm_area_struct **vmap)
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700968{
969 struct vm_area_struct *vma;
970 unsigned long hstart, hend;
971
972 if (unlikely(khugepaged_test_exit(mm)))
973 return SCAN_ANY_PROCESS;
974
Kirill A. Shutemovc131f752016-09-19 14:44:01 -0700975 *vmap = vma = find_vma(mm, address);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700976 if (!vma)
977 return SCAN_VMA_NULL;
978
979 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
980 hend = vma->vm_end & HPAGE_PMD_MASK;
981 if (address < hstart || address + HPAGE_PMD_SIZE > hend)
982 return SCAN_ADDRESS_RANGE;
Song Liu50f8b922018-08-17 15:47:00 -0700983 if (!hugepage_vma_check(vma, vma->vm_flags))
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700984 return SCAN_VMA_CHECK;
Kirill A. Shutemov594cced2020-07-23 21:15:34 -0700985 /* Anon VMA expected */
986 if (!vma->anon_vma || vma->vm_ops)
987 return SCAN_VMA_CHECK;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700988 return 0;
989}
990
991/*
992 * Bring missing pages in from swap, to complete THP collapse.
993 * Only done if khugepaged_scan_pmd believes it is worthwhile.
994 *
995 * Called and returns without pte mapped or spinlocks held,
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -0700996 * but with mmap_lock held to protect against vma changes.
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700997 */
998
999static bool __collapse_huge_page_swapin(struct mm_struct *mm,
1000 struct vm_area_struct *vma,
Will Deacon2d5a1722021-01-14 15:33:49 +00001001 unsigned long haddr, pmd_t *pmd,
Ebru Akagunduz0db501f2016-07-26 15:26:46 -07001002 int referenced)
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001003{
Souptick Joarder2b740302018-08-23 17:01:36 -07001004 int swapped_in = 0;
1005 vm_fault_t ret = 0;
Will Deacon2d5a1722021-01-14 15:33:49 +00001006 unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001007
Will Deacon2d5a1722021-01-14 15:33:49 +00001008 for (address = haddr; address < end; address += PAGE_SIZE) {
1009 struct vm_fault vmf = {
1010 .vma = vma,
1011 .address = address,
1012 .pgoff = linear_page_index(vma, haddr),
1013 .flags = FAULT_FLAG_ALLOW_RETRY,
1014 .pmd = pmd,
1015 .vma_flags = vma->vm_flags,
1016 .vma_page_prot = vma->vm_page_prot,
1017 };
1018
1019 vmf.pte = pte_offset_map(pmd, address);
Jan Kara29943022016-12-14 15:07:16 -08001020 vmf.orig_pte = *vmf.pte;
Will Deacon2d5a1722021-01-14 15:33:49 +00001021 if (!is_swap_pte(vmf.orig_pte)) {
1022 pte_unmap(vmf.pte);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001023 continue;
Will Deacon2d5a1722021-01-14 15:33:49 +00001024 }
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001025 swapped_in++;
Jan Kara29943022016-12-14 15:07:16 -08001026 ret = do_swap_page(&vmf);
Ebru Akagunduz0db501f2016-07-26 15:26:46 -07001027
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07001028 /* do_swap_page returns VM_FAULT_RETRY with released mmap_lock */
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001029 if (ret & VM_FAULT_RETRY) {
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001030 mmap_read_lock(mm);
Will Deacon2d5a1722021-01-14 15:33:49 +00001031 if (hugepage_vma_revalidate(mm, haddr, &vma)) {
Ebru Akagunduz47f863e2016-07-26 15:26:43 -07001032 /* vma is no longer available, don't continue to swapin */
Ebru Akagunduz0db501f2016-07-26 15:26:46 -07001033 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001034 return false;
Ebru Akagunduz47f863e2016-07-26 15:26:43 -07001035 }
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001036 /* check if the pmd is still valid */
Will Deacon2d5a1722021-01-14 15:33:49 +00001037 if (mm_find_pmd(mm, haddr) != pmd) {
SeongJae Park835152a2017-05-12 15:46:38 -07001038 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001039 return false;
SeongJae Park835152a2017-05-12 15:46:38 -07001040 }
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001041 }
1042 if (ret & VM_FAULT_ERROR) {
Ebru Akagunduz0db501f2016-07-26 15:26:46 -07001043 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001044 return false;
1045 }
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001046 }
Kirill A. Shutemovae2c5d82020-06-03 16:00:17 -07001047
1048 /* Drain LRU add pagevec to remove extra pin on the swapped in pages */
1049 if (swapped_in)
1050 lru_add_drain();
1051
Ebru Akagunduz0db501f2016-07-26 15:26:46 -07001052 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001053 return true;
1054}
1055
1056static void collapse_huge_page(struct mm_struct *mm,
1057 unsigned long address,
1058 struct page **hpage,
Kirill A. Shutemovffe945e2020-06-03 16:00:09 -07001059 int node, int referenced, int unmapped)
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001060{
Kirill A. Shutemov5503fbf2020-06-03 16:00:23 -07001061 LIST_HEAD(compound_pagelist);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001062 pmd_t *pmd, _pmd;
1063 pte_t *pte;
1064 pgtable_t pgtable;
1065 struct page *new_page;
1066 spinlock_t *pmd_ptl, *pte_ptl;
1067 int isolated = 0, result = 0;
Kirill A. Shutemovc131f752016-09-19 14:44:01 -07001068 struct vm_area_struct *vma;
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08001069 struct mmu_notifier_range range;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001070 gfp_t gfp;
1071
1072 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1073
1074 /* Only allocate from the target node */
Michal Hocko41b61672017-01-10 16:57:42 -08001075 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001076
Kirill A. Shutemov988ddb72016-07-26 15:26:26 -07001077 /*
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07001078 * Before allocating the hugepage, release the mmap_lock read lock.
Kirill A. Shutemov988ddb72016-07-26 15:26:26 -07001079 * The allocation can take potentially a long time if it involves
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07001080 * sync compaction, and we do not need to hold the mmap_lock during
Kirill A. Shutemov988ddb72016-07-26 15:26:26 -07001081 * that. We will recheck the vma after taking it again in write mode.
1082 */
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001083 mmap_read_unlock(mm);
Kirill A. Shutemov988ddb72016-07-26 15:26:26 -07001084 new_page = khugepaged_alloc_page(hpage, gfp, node);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001085 if (!new_page) {
1086 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1087 goto out_nolock;
1088 }
1089
Johannes Weinerd9eb1ea2020-06-03 16:02:24 -07001090 if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001091 result = SCAN_CGROUP_CHARGE_FAIL;
1092 goto out_nolock;
1093 }
Johannes Weiner9d82c692020-06-03 16:02:04 -07001094 count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001095
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001096 mmap_read_lock(mm);
Kirill A. Shutemovc131f752016-09-19 14:44:01 -07001097 result = hugepage_vma_revalidate(mm, address, &vma);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001098 if (result) {
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001099 mmap_read_unlock(mm);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001100 goto out_nolock;
1101 }
1102
1103 pmd = mm_find_pmd(mm, address);
1104 if (!pmd) {
1105 result = SCAN_PMD_NULL;
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001106 mmap_read_unlock(mm);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001107 goto out_nolock;
1108 }
1109
1110 /*
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07001111 * __collapse_huge_page_swapin always returns with mmap_lock locked.
1112 * If it fails, we release mmap_lock and jump out_nolock.
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001113 * Continuing to collapse causes inconsistency.
1114 */
Kirill A. Shutemovffe945e2020-06-03 16:00:09 -07001115 if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
1116 pmd, referenced)) {
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001117 mmap_read_unlock(mm);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001118 goto out_nolock;
1119 }
1120
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001121 mmap_read_unlock(mm);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001122 /*
 1123 * Prevent all access to the pagetables, with the exception of
 1124 * gup_fast (handled later by the ptep_clear_flush) and the VM
 1125 * (handled by the anon_vma lock + PG_lock).
1126 */
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001127 mmap_write_lock(mm);
Kirill A. Shutemovc131f752016-09-19 14:44:01 -07001128 result = hugepage_vma_revalidate(mm, address, &vma);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001129 if (result)
1130 goto out;
1131 /* check if the pmd is still valid */
1132 if (mm_find_pmd(mm, address) != pmd)
1133 goto out;
1134
Laurent Dufour9cfe1682018-04-17 16:33:15 +02001135 vm_write_begin(vma);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001136 anon_vma_lock_write(vma->anon_vma);
1137
Jérôme Glisse7269f992019-05-13 17:20:53 -07001138 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
Jérôme Glisse6f4f13e2019-05-13 17:20:49 -07001139 address, address + HPAGE_PMD_SIZE);
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08001140 mmu_notifier_invalidate_range_start(&range);
Ville Syrjäläec649c9d2019-11-05 21:16:48 -08001141
1142 pte = pte_offset_map(pmd, address);
1143 pte_ptl = pte_lockptr(mm, pmd);
1144
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001145 pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1146 /*
1147 * After this gup_fast can't run anymore. This also removes
1148 * any huge TLB entry from the CPU so we won't allow
1149 * huge and small TLB entries for the same virtual address
1150 * to avoid the risk of CPU bugs in that area.
1151 */
1152 _pmd = pmdp_collapse_flush(vma, address, pmd);
1153 spin_unlock(pmd_ptl);
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08001154 mmu_notifier_invalidate_range_end(&range);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001155
1156 spin_lock(pte_ptl);
Kirill A. Shutemov5503fbf2020-06-03 16:00:23 -07001157 isolated = __collapse_huge_page_isolate(vma, address, pte,
1158 &compound_pagelist);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001159 spin_unlock(pte_ptl);
1160
1161 if (unlikely(!isolated)) {
1162 pte_unmap(pte);
1163 spin_lock(pmd_ptl);
1164 BUG_ON(!pmd_none(*pmd));
1165 /*
1166 * We can only use set_pmd_at when establishing
1167 * hugepmds and never for establishing regular pmds that
1168 * points to regular pagetables. Use pmd_populate for that
1169 */
1170 pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1171 spin_unlock(pmd_ptl);
1172 anon_vma_unlock_write(vma->anon_vma);
Laurent Dufour9cfe1682018-04-17 16:33:15 +02001173 vm_write_end(vma);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001174 result = SCAN_FAIL;
1175 goto out;
1176 }
1177
1178 /*
1179 * All pages are isolated and locked so anon_vma rmap
1180 * can't run anymore.
1181 */
1182 anon_vma_unlock_write(vma->anon_vma);
1183
Kirill A. Shutemov5503fbf2020-06-03 16:00:23 -07001184 __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl,
1185 &compound_pagelist);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001186 pte_unmap(pte);
1187 __SetPageUptodate(new_page);
1188 pgtable = pmd_pgtable(_pmd);
1189
1190 _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
Linus Torvaldsf55e1012017-11-29 09:01:01 -08001191 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001192
1193 /*
1194 * spin_lock() below is not the equivalent of smp_wmb(), so
 1195 * this is needed to avoid the copy_huge_page writes becoming
1196 * visible after the set_pmd_at() write.
1197 */
1198 smp_wmb();
1199
1200 spin_lock(pmd_ptl);
1201 BUG_ON(!pmd_none(*pmd));
Johannes Weinerbe5d0a72020-06-03 16:01:57 -07001202 page_add_new_anon_rmap(new_page, vma, address, true);
Joonsoo Kimb5181542020-08-11 18:30:40 -07001203 lru_cache_add_inactive_or_unevictable(new_page, vma);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001204 pgtable_trans_huge_deposit(mm, pmd, pgtable);
1205 set_pmd_at(mm, address, pmd, _pmd);
1206 update_mmu_cache_pmd(vma, address, pmd);
1207 spin_unlock(pmd_ptl);
Laurent Dufour9cfe1682018-04-17 16:33:15 +02001208 vm_write_end(vma);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001209
1210 *hpage = NULL;
1211
1212 khugepaged_pages_collapsed++;
1213 result = SCAN_SUCCEED;
1214out_up_write:
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001215 mmap_write_unlock(mm);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001216out_nolock:
Johannes Weiner9d82c692020-06-03 16:02:04 -07001217 if (!IS_ERR_OR_NULL(*hpage))
1218 mem_cgroup_uncharge(*hpage);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001219 trace_mm_collapse_huge_page(mm, isolated, result);
1220 return;
1221out:
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001222 goto out_up_write;
1223}
1224
1225static int khugepaged_scan_pmd(struct mm_struct *mm,
1226 struct vm_area_struct *vma,
1227 unsigned long address,
1228 struct page **hpage)
1229{
1230 pmd_t *pmd;
1231 pte_t *pte, *_pte;
Kirill A. Shutemov71a2c112020-06-03 16:00:30 -07001232 int ret = 0, result = 0, referenced = 0;
1233 int none_or_zero = 0, shared = 0;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001234 struct page *page = NULL;
1235 unsigned long _address;
1236 spinlock_t *ptl;
1237 int node = NUMA_NO_NODE, unmapped = 0;
Ebru Akagunduz0db501f2016-07-26 15:26:46 -07001238 bool writable = false;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001239
1240 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1241
1242 pmd = mm_find_pmd(mm, address);
1243 if (!pmd) {
1244 result = SCAN_PMD_NULL;
1245 goto out;
1246 }
1247
1248 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1249 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1250 for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
1251 _pte++, _address += PAGE_SIZE) {
1252 pte_t pteval = *_pte;
1253 if (is_swap_pte(pteval)) {
1254 if (++unmapped <= khugepaged_max_ptes_swap) {
Peter Xue1e267c2020-04-06 20:06:04 -07001255 /*
1256 * Always be strict with uffd-wp
1257 * enabled swap entries. Please see
1258 * comment below for pte_uffd_wp().
1259 */
1260 if (pte_swp_uffd_wp(pteval)) {
1261 result = SCAN_PTE_UFFD_WP;
1262 goto out_unmap;
1263 }
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001264 continue;
1265 } else {
1266 result = SCAN_EXCEED_SWAP_PTE;
1267 goto out_unmap;
1268 }
1269 }
1270 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1271 if (!userfaultfd_armed(vma) &&
1272 ++none_or_zero <= khugepaged_max_ptes_none) {
1273 continue;
1274 } else {
1275 result = SCAN_EXCEED_NONE_PTE;
1276 goto out_unmap;
1277 }
1278 }
1279 if (!pte_present(pteval)) {
1280 result = SCAN_PTE_NON_PRESENT;
1281 goto out_unmap;
1282 }
Peter Xue1e267c2020-04-06 20:06:04 -07001283 if (pte_uffd_wp(pteval)) {
1284 /*
1285 * Don't collapse the page if any of the small
1286 * PTEs are armed with uffd write protection.
1287 * Here we can also mark the new huge pmd as
1288 * write protected if any of the small ones is
 1289 * marked, but that could bring unknown
 1290 * userfault messages that fall outside of
1291 * the registered range. So, just be simple.
1292 */
1293 result = SCAN_PTE_UFFD_WP;
1294 goto out_unmap;
1295 }
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001296 if (pte_write(pteval))
1297 writable = true;
1298
1299 page = vm_normal_page(vma, _address, pteval);
1300 if (unlikely(!page)) {
1301 result = SCAN_PAGE_NULL;
1302 goto out_unmap;
1303 }
1304
Kirill A. Shutemov71a2c112020-06-03 16:00:30 -07001305 if (page_mapcount(page) > 1 &&
1306 ++shared > khugepaged_max_ptes_shared) {
1307 result = SCAN_EXCEED_SHARED_PTE;
1308 goto out_unmap;
1309 }
1310
Kirill A. Shutemov5503fbf2020-06-03 16:00:23 -07001311 page = compound_head(page);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001312
1313 /*
1314 * Record which node the original page is from and save this
1315 * information to khugepaged_node_load[].
 1316 * Khugepaged will allocate the hugepage from the node that has
 1317 * the max hit record.
1318 */
1319 node = page_to_nid(page);
1320 if (khugepaged_scan_abort(node)) {
1321 result = SCAN_SCAN_ABORT;
1322 goto out_unmap;
1323 }
1324 khugepaged_node_load[node]++;
1325 if (!PageLRU(page)) {
1326 result = SCAN_PAGE_LRU;
1327 goto out_unmap;
1328 }
1329 if (PageLocked(page)) {
1330 result = SCAN_PAGE_LOCK;
1331 goto out_unmap;
1332 }
1333 if (!PageAnon(page)) {
1334 result = SCAN_PAGE_ANON;
1335 goto out_unmap;
1336 }
1337
1338 /*
Kirill A. Shutemov94456892020-06-03 16:00:20 -07001339 * Check if the page has any GUP (or other external) pins.
1340 *
1341 * Here the check is racy; it may see total_mapcount > refcount
 1342 * in some cases.
 1343 * For example, take one process with one forked child process.
 1344 * The parent has the PMD split due to MADV_DONTNEED, then
 1345 * the child is trying to unmap the whole PMD, but khugepaged
 1346 * may be scanning the parent between the child clearing the
 1347 * PageDoubleMap flag and decrementing the mapcount. So
 1348 * khugepaged may see total_mapcount > refcount.
 1349 *
 1350 * But such a case is ephemeral; we could always retry the
 1351 * collapse later. However, it may report a false positive if the
 1352 * page has excessive GUP pins (i.e. 512). Anyway, the same check
 1353 * will be done again later, so the risk seems low.
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001354 */
Kirill A. Shutemov94456892020-06-03 16:00:20 -07001355 if (!is_refcount_suitable(page)) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001356 result = SCAN_PAGE_COUNT;
1357 goto out_unmap;
1358 }
1359 if (pte_young(pteval) ||
1360 page_is_young(page) || PageReferenced(page) ||
1361 mmu_notifier_test_young(vma->vm_mm, address))
Ebru Akagunduz0db501f2016-07-26 15:26:46 -07001362 referenced++;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001363 }
Kirill A. Shutemovffe945e2020-06-03 16:00:09 -07001364 if (!writable) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001365 result = SCAN_PAGE_RO;
Kirill A. Shutemovffe945e2020-06-03 16:00:09 -07001366 } else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR/2)) {
1367 result = SCAN_LACK_REFERENCED_PAGE;
1368 } else {
1369 result = SCAN_SUCCEED;
1370 ret = 1;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001371 }
1372out_unmap:
1373 pte_unmap_unlock(pte, ptl);
1374 if (ret) {
1375 node = khugepaged_find_target_node();
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07001376 /* collapse_huge_page will return with the mmap_lock released */
Kirill A. Shutemovffe945e2020-06-03 16:00:09 -07001377 collapse_huge_page(mm, address, hpage, node,
1378 referenced, unmapped);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001379 }
1380out:
1381 trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1382 none_or_zero, result, unmapped);
1383 return ret;
1384}
1385
1386static void collect_mm_slot(struct mm_slot *mm_slot)
1387{
1388 struct mm_struct *mm = mm_slot->mm;
1389
Lance Roy35f3aa32018-10-04 23:45:47 -07001390 lockdep_assert_held(&khugepaged_mm_lock);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001391
1392 if (khugepaged_test_exit(mm)) {
1393 /* free mm_slot */
1394 hash_del(&mm_slot->hash);
1395 list_del(&mm_slot->mm_node);
1396
1397 /*
1398 * Not strictly needed because the mm exited already.
1399 *
1400 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1401 */
1402
1403 /* khugepaged_mm_lock actually not necessary for the below */
1404 free_mm_slot(mm_slot);
1405 mmdrop(mm);
1406 }
1407}
1408
Matthew Wilcox (Oracle)396bcc52020-04-06 20:04:35 -07001409#ifdef CONFIG_SHMEM
Song Liu27e1f822019-09-23 15:38:30 -07001410/*
1411 * Notify khugepaged that given addr of the mm is pte-mapped THP. Then
1412 * khugepaged should try to collapse the page table.
1413 */
1414static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
1415 unsigned long addr)
1416{
1417 struct mm_slot *mm_slot;
1418
1419 VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
1420
1421 spin_lock(&khugepaged_mm_lock);
1422 mm_slot = get_mm_slot(mm);
1423 if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
1424 mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
1425 spin_unlock(&khugepaged_mm_lock);
1426 return 0;
1427}
1428
1429/**
1430 * Try to collapse a pte-mapped THP for mm at address haddr.
1431 *
1432 * This function checks whether all the PTEs in the PMD are pointing to the
 1433 * right THP. If so, retract the page table so the THP can refault in
 1434 * as pmd-mapped.
1435 */
1436void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
1437{
1438 unsigned long haddr = addr & HPAGE_PMD_MASK;
1439 struct vm_area_struct *vma = find_vma(mm, haddr);
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001440 struct page *hpage;
Song Liu27e1f822019-09-23 15:38:30 -07001441 pte_t *start_pte, *pte;
1442 pmd_t *pmd, _pmd;
1443 spinlock_t *ptl;
1444 int count = 0;
1445 int i;
1446
1447 if (!vma || !vma->vm_file ||
1448 vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE)
1449 return;
1450
1451 /*
1452 * This vm_flags may not have VM_HUGEPAGE if the page was not
1453 * collapsed by this mm. But we can still collapse if the page is
 1454 * a valid THP. Add an extra VM_HUGEPAGE so hugepage_vma_check()
 1455 * will not fail the vma for missing VM_HUGEPAGE.
1456 */
1457 if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
1458 return;
1459
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001460 hpage = find_lock_page(vma->vm_file->f_mapping,
1461 linear_page_index(vma, haddr));
1462 if (!hpage)
1463 return;
1464
1465 if (!PageHead(hpage))
1466 goto drop_hpage;
1467
Song Liu27e1f822019-09-23 15:38:30 -07001468 pmd = mm_find_pmd(mm, haddr);
1469 if (!pmd)
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001470 goto drop_hpage;
Song Liu27e1f822019-09-23 15:38:30 -07001471
1472 start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
1473
1474 /* step 1: check all mapped PTEs are to the right huge page */
1475 for (i = 0, addr = haddr, pte = start_pte;
1476 i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1477 struct page *page;
1478
1479 /* empty pte, skip */
1480 if (pte_none(*pte))
1481 continue;
1482
1483 /* page swapped out, abort */
1484 if (!pte_present(*pte))
1485 goto abort;
1486
1487 page = vm_normal_page(vma, addr, *pte);
1488
Song Liu27e1f822019-09-23 15:38:30 -07001489 /*
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001490 * Note that uprobe, debugger, or MAP_PRIVATE may change the
1491 * page table, but the new page will not be a subpage of hpage.
Song Liu27e1f822019-09-23 15:38:30 -07001492 */
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001493 if (hpage + i != page)
Song Liu27e1f822019-09-23 15:38:30 -07001494 goto abort;
1495 count++;
1496 }
1497
1498 /* step 2: adjust rmap */
1499 for (i = 0, addr = haddr, pte = start_pte;
1500 i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1501 struct page *page;
1502
1503 if (pte_none(*pte))
1504 continue;
1505 page = vm_normal_page(vma, addr, *pte);
1506 page_remove_rmap(page, false);
1507 }
1508
1509 pte_unmap_unlock(start_pte, ptl);
1510
1511 /* step 3: set proper refcount and mm_counters. */
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001512 if (count) {
Song Liu27e1f822019-09-23 15:38:30 -07001513 page_ref_sub(hpage, count);
1514 add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
1515 }
1516
1517 /* step 4: collapse pmd */
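	/*
	 * All remaining mapped entries pointed into hpage, and their rmap and
	 * extra references were dropped above, so the now-redundant PTE table
	 * can be flushed out of the PMD and freed.
	 */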
1518 ptl = pmd_lock(vma->vm_mm, pmd);
Hugh Dickins723a80d2020-08-06 23:26:15 -07001519 _pmd = pmdp_collapse_flush(vma, haddr, pmd);
Song Liu27e1f822019-09-23 15:38:30 -07001520 spin_unlock(ptl);
1521 mm_dec_nr_ptes(mm);
1522 pte_free(mm, pmd_pgtable(_pmd));
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001523
1524drop_hpage:
1525 unlock_page(hpage);
1526 put_page(hpage);
Song Liu27e1f822019-09-23 15:38:30 -07001527 return;
1528
1529abort:
1530 pte_unmap_unlock(start_pte, ptl);
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001531 goto drop_hpage;
Song Liu27e1f822019-09-23 15:38:30 -07001532}
1533
1534static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
1535{
1536 struct mm_struct *mm = mm_slot->mm;
1537 int i;
1538
1539 if (likely(mm_slot->nr_pte_mapped_thp == 0))
1540 return 0;
1541
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001542 if (!mmap_write_trylock(mm))
Song Liu27e1f822019-09-23 15:38:30 -07001543 return -EBUSY;
1544
1545 if (unlikely(khugepaged_test_exit(mm)))
1546 goto out;
1547
1548 for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
1549 collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);
1550
1551out:
1552 mm_slot->nr_pte_mapped_thp = 0;
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001553 mmap_write_unlock(mm);
Song Liu27e1f822019-09-23 15:38:30 -07001554 return 0;
1555}
1556
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001557static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1558{
1559 struct vm_area_struct *vma;
Hugh Dickins18e77602020-08-06 23:26:22 -07001560 struct mm_struct *mm;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001561 unsigned long addr;
1562 pmd_t *pmd, _pmd;
1563
1564 i_mmap_lock_write(mapping);
1565 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
Song Liu27e1f822019-09-23 15:38:30 -07001566 /*
1567 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
1568		 * got written to. These VMAs are likely not worth taking
Michel Lespinasse3e4e28c2020-06-08 21:33:51 -07001569		 * mmap_write_lock(mm) for, as the PMD-mapping is likely to be
Song Liu27e1f822019-09-23 15:38:30 -07001570		 * split later.
1571 *
1572		 * Note that the vma->anon_vma check is racy: it can be set up after
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07001573 * the check but before we took mmap_lock by the fault path.
Song Liu27e1f822019-09-23 15:38:30 -07001574 * But page lock would prevent establishing any new ptes of the
1575 * page, so we are safe.
1576 *
1577 * An alternative would be drop the check, but check that page
1578 * table is clear before calling pmdp_collapse_flush() under
1579 * ptl. It has higher chance to recover THP for the VMA, but
1580 * has higher cost too.
1581 */
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001582 if (vma->anon_vma)
1583 continue;
1584 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1585 if (addr & ~HPAGE_PMD_MASK)
1586 continue;
1587 if (vma->vm_end < addr + HPAGE_PMD_SIZE)
1588 continue;
Hugh Dickins18e77602020-08-06 23:26:22 -07001589 mm = vma->vm_mm;
1590 pmd = mm_find_pmd(mm, addr);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001591 if (!pmd)
1592 continue;
1593 /*
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07001594 * We need exclusive mmap_lock to retract page table.
Song Liu27e1f822019-09-23 15:38:30 -07001595 *
1596 * We use trylock due to lock inversion: we need to acquire
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07001597 * mmap_lock while holding page lock. Fault path does it in
Song Liu27e1f822019-09-23 15:38:30 -07001598 * reverse order. Trylock is a way to avoid deadlock.
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001599 */
Hugh Dickins18e77602020-08-06 23:26:22 -07001600 if (mmap_write_trylock(mm)) {
1601 if (!khugepaged_test_exit(mm)) {
1602 spinlock_t *ptl = pmd_lock(mm, pmd);
1603 /* assume page table is clear */
1604 _pmd = pmdp_collapse_flush(vma, addr, pmd);
1605 spin_unlock(ptl);
1606 mm_dec_nr_ptes(mm);
1607 pte_free(mm, pmd_pgtable(_pmd));
1608 }
1609 mmap_write_unlock(mm);
Song Liu27e1f822019-09-23 15:38:30 -07001610 } else {
1611 /* Try again later */
Hugh Dickins18e77602020-08-06 23:26:22 -07001612 khugepaged_add_pte_mapped_thp(mm, addr);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001613 }
1614 }
1615 i_mmap_unlock_write(mapping);
1616}
1617
1618/**
Song Liu99cb0db2019-09-23 15:38:00 -07001619 * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001620 *
1621 * Basic scheme is simple, details are more complex:
Hugh Dickins87c460a2018-11-30 14:10:43 -08001622 * - allocate and lock a new huge page;
Matthew Wilcox77da9382017-12-04 14:56:08 -05001623 * - scan page cache replacing old pages with the new one
Song Liu99cb0db2019-09-23 15:38:00 -07001624 * + swap/gup in pages if necessary;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001625 * + fill in gaps;
Matthew Wilcox77da9382017-12-04 14:56:08 -05001626 * + keep old pages around in case rollback is required;
1627 * - if replacing succeeds:
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001628 * + copy data over;
1629 * + free old pages;
Hugh Dickins87c460a2018-11-30 14:10:43 -08001630 * + unlock huge page;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001631 * - if replacing failed;
1632 * + put all pages back and unfreeze them;
Matthew Wilcox77da9382017-12-04 14:56:08 -05001633 * + restore gaps in the page cache;
Hugh Dickins87c460a2018-11-30 14:10:43 -08001634 * + unlock and free huge page;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001635 */
Song Liu579c5712019-09-23 15:37:57 -07001636static void collapse_file(struct mm_struct *mm,
1637 struct file *file, pgoff_t start,
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001638 struct page **hpage, int node)
1639{
Song Liu579c5712019-09-23 15:37:57 -07001640 struct address_space *mapping = file->f_mapping;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001641 gfp_t gfp;
Matthew Wilcox77da9382017-12-04 14:56:08 -05001642 struct page *new_page;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001643 pgoff_t index, end = start + HPAGE_PMD_NR;
1644 LIST_HEAD(pagelist);
Matthew Wilcox77da9382017-12-04 14:56:08 -05001645 XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001646 int nr_none = 0, result = SCAN_SUCCEED;
Song Liu99cb0db2019-09-23 15:38:00 -07001647 bool is_shmem = shmem_file(file);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001648
Song Liu99cb0db2019-09-23 15:38:00 -07001649 VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001650 VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1651
1652 /* Only allocate from the target node */
Michal Hocko41b61672017-01-10 16:57:42 -08001653 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001654
1655 new_page = khugepaged_alloc_page(hpage, gfp, node);
1656 if (!new_page) {
1657 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1658 goto out;
1659 }
1660
Johannes Weinerd9eb1ea2020-06-03 16:02:24 -07001661 if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001662 result = SCAN_CGROUP_CHARGE_FAIL;
1663 goto out;
1664 }
Johannes Weiner9d82c692020-06-03 16:02:04 -07001665 count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001666
Hugh Dickins95feeab2018-11-30 14:10:50 -08001667 /* This will be less messy when we use multi-index entries */
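	/*
	 * Pre-create the XArray nodes covering the whole range, allocating
	 * memory outside the lock and retrying on failure, so that later
	 * stores done under xas_lock should not need to allocate.
	 */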
1668 do {
1669 xas_lock_irq(&xas);
1670 xas_create_range(&xas);
1671 if (!xas_error(&xas))
1672 break;
1673 xas_unlock_irq(&xas);
1674 if (!xas_nomem(&xas, GFP_KERNEL)) {
Hugh Dickins95feeab2018-11-30 14:10:50 -08001675 result = SCAN_FAIL;
1676 goto out;
1677 }
1678 } while (1);
1679
Hugh Dickins042a3082018-11-30 14:10:39 -08001680 __SetPageLocked(new_page);
Song Liu99cb0db2019-09-23 15:38:00 -07001681 if (is_shmem)
1682 __SetPageSwapBacked(new_page);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001683 new_page->index = start;
1684 new_page->mapping = mapping;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001685
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001686 /*
Hugh Dickins87c460a2018-11-30 14:10:43 -08001687 * At this point the new_page is locked and not up-to-date.
1688 * It's safe to insert it into the page cache, because nobody would
1689 * be able to map it or use it in another way until we unlock it.
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001690 */
1691
Matthew Wilcox77da9382017-12-04 14:56:08 -05001692 xas_set(&xas, start);
1693 for (index = start; index < end; index++) {
1694 struct page *page = xas_next(&xas);
1695
1696 VM_BUG_ON(index != xas.xa_index);
Song Liu99cb0db2019-09-23 15:38:00 -07001697 if (is_shmem) {
1698 if (!page) {
1699 /*
1700 * Stop if extent has been truncated or
1701 * hole-punched, and is now completely
1702 * empty.
1703 */
1704 if (index == start) {
1705 if (!xas_next_entry(&xas, end - 1)) {
1706 result = SCAN_TRUNCATED;
1707 goto xa_locked;
1708 }
1709 xas_set(&xas, index);
1710 }
1711 if (!shmem_charge(mapping->host, 1)) {
1712 result = SCAN_FAIL;
Hugh Dickins042a3082018-11-30 14:10:39 -08001713 goto xa_locked;
Hugh Dickins701270f2018-11-30 14:10:25 -08001714 }
Song Liu99cb0db2019-09-23 15:38:00 -07001715 xas_store(&xas, new_page);
1716 nr_none++;
1717 continue;
Hugh Dickins701270f2018-11-30 14:10:25 -08001718 }
Song Liu99cb0db2019-09-23 15:38:00 -07001719
1720 if (xa_is_value(page) || !PageUptodate(page)) {
1721 xas_unlock_irq(&xas);
1722 /* swap in or instantiate fallocated page */
1723 if (shmem_getpage(mapping->host, index, &page,
1724 SGP_NOHUGE)) {
1725 result = SCAN_FAIL;
1726 goto xa_unlocked;
1727 }
1728 } else if (trylock_page(page)) {
1729 get_page(page);
1730 xas_unlock_irq(&xas);
1731 } else {
1732 result = SCAN_PAGE_LOCK;
Hugh Dickins042a3082018-11-30 14:10:39 -08001733 goto xa_locked;
Matthew Wilcox77da9382017-12-04 14:56:08 -05001734 }
Song Liu99cb0db2019-09-23 15:38:00 -07001735 } else { /* !is_shmem */
1736 if (!page || xa_is_value(page)) {
1737 xas_unlock_irq(&xas);
1738 page_cache_sync_readahead(mapping, &file->f_ra,
1739 file, index,
David Howellse5a59d32020-09-04 16:36:16 -07001740 end - index);
Song Liu99cb0db2019-09-23 15:38:00 -07001741 /* drain pagevecs to help isolate_lru_page() */
1742 lru_add_drain();
1743 page = find_lock_page(mapping, index);
1744 if (unlikely(page == NULL)) {
1745 result = SCAN_FAIL;
1746 goto xa_unlocked;
1747 }
Song Liu75f36062019-11-30 17:57:19 -08001748 } else if (PageDirty(page)) {
1749 /*
1750 * khugepaged only works on read-only fd,
1751 * so this page is dirty because it hasn't
1752 * been flushed since first write. There
1753 * won't be new dirty pages.
1754 *
1755 * Trigger async flush here and hope the
1756 * writeback is done when khugepaged
1757 * revisits this page.
1758 *
1759 * This is a one-off situation. We are not
1760				 * forcing writeback in a loop.
1761 */
1762 xas_unlock_irq(&xas);
1763 filemap_flush(mapping);
1764 result = SCAN_FAIL;
1765 goto xa_unlocked;
Song Liu99cb0db2019-09-23 15:38:00 -07001766 } else if (trylock_page(page)) {
1767 get_page(page);
1768 xas_unlock_irq(&xas);
1769 } else {
1770 result = SCAN_PAGE_LOCK;
1771 goto xa_locked;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001772 }
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001773 }
1774
1775 /*
Matthew Wilcoxb93b0162018-04-10 16:36:56 -07001776 * The page must be locked, so we can drop the i_pages lock
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001777 * without racing with truncate.
1778 */
1779 VM_BUG_ON_PAGE(!PageLocked(page), page);
Song Liu4655e5e2019-11-15 17:34:53 -08001780
1781 /* make sure the page is up to date */
1782 if (unlikely(!PageUptodate(page))) {
1783 result = SCAN_FAIL;
1784 goto out_unlock;
1785 }
Hugh Dickins06a5e122018-11-30 14:10:47 -08001786
1787 /*
1788 * If file was truncated then extended, or hole-punched, before
1789 * we locked the first page, then a THP might be there already.
1790 */
1791 if (PageTransCompound(page)) {
1792 result = SCAN_PAGE_COMPOUND;
1793 goto out_unlock;
1794 }
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001795
1796 if (page_mapping(page) != mapping) {
1797 result = SCAN_TRUNCATED;
1798 goto out_unlock;
1799 }
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001800
Song Liu4655e5e2019-11-15 17:34:53 -08001801 if (!is_shmem && PageDirty(page)) {
1802 /*
1803 * khugepaged only works on read-only fd, so this
1804 * page is dirty because it hasn't been flushed
1805 * since first write.
1806 */
1807 result = SCAN_FAIL;
1808 goto out_unlock;
1809 }
1810
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001811 if (isolate_lru_page(page)) {
1812 result = SCAN_DEL_PAGE_LRU;
Hugh Dickins042a3082018-11-30 14:10:39 -08001813 goto out_unlock;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001814 }
1815
Song Liu99cb0db2019-09-23 15:38:00 -07001816 if (page_has_private(page) &&
1817 !try_to_release_page(page, GFP_KERNEL)) {
1818 result = SCAN_PAGE_HAS_PRIVATE;
Hugh Dickins2f33a702020-05-27 22:20:43 -07001819 putback_lru_page(page);
Song Liu99cb0db2019-09-23 15:38:00 -07001820 goto out_unlock;
1821 }
1822
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001823 if (page_mapped(page))
Matthew Wilcox977fbdc2018-01-31 16:17:36 -08001824 unmap_mapping_pages(mapping, index, 1, false);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001825
Matthew Wilcox77da9382017-12-04 14:56:08 -05001826 xas_lock_irq(&xas);
1827 xas_set(&xas, index);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001828
Matthew Wilcox77da9382017-12-04 14:56:08 -05001829 VM_BUG_ON_PAGE(page != xas_load(&xas), page);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001830 VM_BUG_ON_PAGE(page_mapped(page), page);
1831
1832 /*
1833 * The page is expected to have page_count() == 3:
1834 * - we hold a pin on it;
Matthew Wilcox77da9382017-12-04 14:56:08 -05001835 * - one reference from page cache;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001836 * - one from isolate_lru_page;
1837 */
1838 if (!page_ref_freeze(page, 3)) {
1839 result = SCAN_PAGE_COUNT;
Hugh Dickins042a3082018-11-30 14:10:39 -08001840 xas_unlock_irq(&xas);
1841 putback_lru_page(page);
1842 goto out_unlock;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001843 }
1844
1845 /*
1846 * Add the page to the list to be able to undo the collapse if
1847		 * something goes wrong.
1848 */
1849 list_add_tail(&page->lru, &pagelist);
1850
1851 /* Finally, replace with the new page. */
Matthew Wilcox (Oracle)41011962019-09-23 15:34:52 -07001852 xas_store(&xas, new_page);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001853 continue;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001854out_unlock:
1855 unlock_page(page);
1856 put_page(page);
Hugh Dickins042a3082018-11-30 14:10:39 -08001857 goto xa_unlocked;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001858 }
1859
Song Liu99cb0db2019-09-23 15:38:00 -07001860 if (is_shmem)
1861 __inc_node_page_state(new_page, NR_SHMEM_THPS);
Song Liu09d91cd2019-09-23 15:38:03 -07001862 else {
Song Liu99cb0db2019-09-23 15:38:00 -07001863 __inc_node_page_state(new_page, NR_FILE_THPS);
Song Liu09d91cd2019-09-23 15:38:03 -07001864 filemap_nr_thps_inc(mapping);
Collin Fijalkovich28b4b152021-03-23 16:29:26 -07001865 /*
1866 * Paired with smp_mb() in do_dentry_open() to ensure
1867 * i_writecount is up to date and the update to nr_thps is
1868 * visible. Ensures the page cache will be truncated if the
1869 * file is opened writable.
1870 */
1871 smp_mb();
1872 if (inode_is_open_for_write(mapping->host)) {
1873 result = SCAN_FAIL;
1874 __dec_node_page_state(new_page, NR_FILE_THPS);
1875 filemap_nr_thps_dec(mapping);
1876 goto xa_locked;
1877 }
Song Liu09d91cd2019-09-23 15:38:03 -07001878 }
Song Liu99cb0db2019-09-23 15:38:00 -07001879
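	/* Account the holes that were filled as newly added file/shmem pages */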
Hugh Dickins042a3082018-11-30 14:10:39 -08001880 if (nr_none) {
Johannes Weiner9d82c692020-06-03 16:02:04 -07001881 __mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none);
Song Liu99cb0db2019-09-23 15:38:00 -07001882 if (is_shmem)
Johannes Weiner9d82c692020-06-03 16:02:04 -07001883 __mod_lruvec_page_state(new_page, NR_SHMEM, nr_none);
Hugh Dickins042a3082018-11-30 14:10:39 -08001884 }
1885
1886xa_locked:
1887 xas_unlock_irq(&xas);
Matthew Wilcox77da9382017-12-04 14:56:08 -05001888xa_unlocked:
Hugh Dickins042a3082018-11-30 14:10:39 -08001889
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001890 if (result == SCAN_SUCCEED) {
Matthew Wilcox77da9382017-12-04 14:56:08 -05001891 struct page *page, *tmp;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001892
1893 /*
Matthew Wilcox77da9382017-12-04 14:56:08 -05001894		 * Replacing old pages with the new one has succeeded; now we
1895 * need to copy the content and free the old pages.
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001896 */
Hugh Dickins2af8ff22018-11-30 14:10:35 -08001897 index = start;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001898 list_for_each_entry_safe(page, tmp, &pagelist, lru) {
Hugh Dickins2af8ff22018-11-30 14:10:35 -08001899 while (index < page->index) {
1900 clear_highpage(new_page + (index % HPAGE_PMD_NR));
1901 index++;
1902 }
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001903 copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
1904 page);
1905 list_del(&page->lru);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001906 page->mapping = NULL;
Hugh Dickins042a3082018-11-30 14:10:39 -08001907 page_ref_unfreeze(page, 1);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001908 ClearPageActive(page);
1909 ClearPageUnevictable(page);
Hugh Dickins042a3082018-11-30 14:10:39 -08001910 unlock_page(page);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001911 put_page(page);
Hugh Dickins2af8ff22018-11-30 14:10:35 -08001912 index++;
1913 }
1914 while (index < end) {
1915 clear_highpage(new_page + (index % HPAGE_PMD_NR));
1916 index++;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001917 }
1918
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001919 SetPageUptodate(new_page);
Hugh Dickins87c460a2018-11-30 14:10:43 -08001920 page_ref_add(new_page, HPAGE_PMD_NR - 1);
Johannes Weiner6058eae2020-06-03 16:02:40 -07001921 if (is_shmem)
Song Liu99cb0db2019-09-23 15:38:00 -07001922 set_page_dirty(new_page);
Johannes Weiner6058eae2020-06-03 16:02:40 -07001923 lru_cache_add(new_page);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001924
Hugh Dickins042a3082018-11-30 14:10:39 -08001925 /*
1926 * Remove pte page tables, so we can re-fault the page as huge.
1927 */
1928 retract_page_tables(mapping, start);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001929 *hpage = NULL;
Yang Shi87aa7522018-08-17 15:45:29 -07001930
1931 khugepaged_pages_collapsed++;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001932 } else {
Matthew Wilcox77da9382017-12-04 14:56:08 -05001933 struct page *page;
Hugh Dickinsaaa52e32018-11-30 14:10:29 -08001934
Matthew Wilcox77da9382017-12-04 14:56:08 -05001935 /* Something went wrong: roll back page cache changes */
Matthew Wilcox77da9382017-12-04 14:56:08 -05001936 xas_lock_irq(&xas);
Hugh Dickinsaaa52e32018-11-30 14:10:29 -08001937 mapping->nrpages -= nr_none;
Song Liu99cb0db2019-09-23 15:38:00 -07001938
1939 if (is_shmem)
1940 shmem_uncharge(mapping->host, nr_none);
Hugh Dickinsaaa52e32018-11-30 14:10:29 -08001941
Matthew Wilcox77da9382017-12-04 14:56:08 -05001942 xas_set(&xas, start);
1943 xas_for_each(&xas, page, end - 1) {
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001944 page = list_first_entry_or_null(&pagelist,
1945 struct page, lru);
Matthew Wilcox77da9382017-12-04 14:56:08 -05001946 if (!page || xas.xa_index < page->index) {
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001947 if (!nr_none)
1948 break;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001949 nr_none--;
Johannes Weiner59749e62016-12-12 16:43:35 -08001950 /* Put holes back where they were */
Matthew Wilcox77da9382017-12-04 14:56:08 -05001951 xas_store(&xas, NULL);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001952 continue;
1953 }
1954
Matthew Wilcox77da9382017-12-04 14:56:08 -05001955 VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001956
1957 /* Unfreeze the page. */
1958 list_del(&page->lru);
1959 page_ref_unfreeze(page, 2);
Matthew Wilcox77da9382017-12-04 14:56:08 -05001960 xas_store(&xas, page);
1961 xas_pause(&xas);
1962 xas_unlock_irq(&xas);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001963 unlock_page(page);
Hugh Dickins042a3082018-11-30 14:10:39 -08001964 putback_lru_page(page);
Matthew Wilcox77da9382017-12-04 14:56:08 -05001965 xas_lock_irq(&xas);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001966 }
1967 VM_BUG_ON(nr_none);
Matthew Wilcox77da9382017-12-04 14:56:08 -05001968 xas_unlock_irq(&xas);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001969
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001970 new_page->mapping = NULL;
1971 }
Hugh Dickins042a3082018-11-30 14:10:39 -08001972
1973 unlock_page(new_page);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001974out:
1975 VM_BUG_ON(!list_empty(&pagelist));
Johannes Weiner9d82c692020-06-03 16:02:04 -07001976 if (!IS_ERR_OR_NULL(*hpage))
1977 mem_cgroup_uncharge(*hpage);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001978 /* TODO: tracepoints */
1979}
1980
Song Liu579c5712019-09-23 15:37:57 -07001981static void khugepaged_scan_file(struct mm_struct *mm,
1982 struct file *file, pgoff_t start, struct page **hpage)
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001983{
1984 struct page *page = NULL;
Song Liu579c5712019-09-23 15:37:57 -07001985 struct address_space *mapping = file->f_mapping;
Matthew Wilcox85b392d2017-12-04 15:06:23 -05001986 XA_STATE(xas, &mapping->i_pages, start);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001987 int present, swap;
1988 int node = NUMA_NO_NODE;
1989 int result = SCAN_SUCCEED;
1990
1991 present = 0;
1992 swap = 0;
1993 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
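	/*
	 * Scan the page cache range under RCU only; pages are not locked or
	 * pinned here, the decisive checks are redone in collapse_file().
	 */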
1994 rcu_read_lock();
Matthew Wilcox85b392d2017-12-04 15:06:23 -05001995 xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
1996 if (xas_retry(&xas, page))
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001997 continue;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001998
Matthew Wilcox85b392d2017-12-04 15:06:23 -05001999 if (xa_is_value(page)) {
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002000 if (++swap > khugepaged_max_ptes_swap) {
2001 result = SCAN_EXCEED_SWAP_PTE;
2002 break;
2003 }
2004 continue;
2005 }
2006
2007 if (PageTransCompound(page)) {
2008 result = SCAN_PAGE_COMPOUND;
2009 break;
2010 }
2011
2012 node = page_to_nid(page);
2013 if (khugepaged_scan_abort(node)) {
2014 result = SCAN_SCAN_ABORT;
2015 break;
2016 }
2017 khugepaged_node_load[node]++;
2018
2019 if (!PageLRU(page)) {
2020 result = SCAN_PAGE_LRU;
2021 break;
2022 }
2023
Song Liu99cb0db2019-09-23 15:38:00 -07002024 if (page_count(page) !=
2025 1 + page_mapcount(page) + page_has_private(page)) {
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002026 result = SCAN_PAGE_COUNT;
2027 break;
2028 }
2029
2030 /*
2031 * We probably should check if the page is referenced here, but
2032 * nobody would transfer pte_young() to PageReferenced() for us.
2033 * And rmap walk here is just too costly...
2034 */
2035
2036 present++;
2037
2038 if (need_resched()) {
Matthew Wilcox85b392d2017-12-04 15:06:23 -05002039 xas_pause(&xas);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002040 cond_resched_rcu();
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002041 }
2042 }
2043 rcu_read_unlock();
2044
2045 if (result == SCAN_SUCCEED) {
2046 if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
2047 result = SCAN_EXCEED_NONE_PTE;
2048 } else {
2049 node = khugepaged_find_target_node();
Song Liu579c5712019-09-23 15:37:57 -07002050 collapse_file(mm, file, start, hpage, node);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002051 }
2052 }
2053
2054 /* TODO: tracepoints */
2055}
2056#else
Song Liu579c5712019-09-23 15:37:57 -07002057static void khugepaged_scan_file(struct mm_struct *mm,
2058 struct file *file, pgoff_t start, struct page **hpage)
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002059{
2060 BUILD_BUG();
2061}
Song Liu27e1f822019-09-23 15:38:30 -07002062
2063static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
2064{
2065 return 0;
2066}
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002067#endif
2068
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002069static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2070 struct page **hpage)
2071 __releases(&khugepaged_mm_lock)
2072 __acquires(&khugepaged_mm_lock)
2073{
2074 struct mm_slot *mm_slot;
2075 struct mm_struct *mm;
2076 struct vm_area_struct *vma;
2077 int progress = 0;
2078
2079 VM_BUG_ON(!pages);
Lance Roy35f3aa32018-10-04 23:45:47 -07002080 lockdep_assert_held(&khugepaged_mm_lock);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002081
2082 if (khugepaged_scan.mm_slot)
2083 mm_slot = khugepaged_scan.mm_slot;
2084 else {
2085 mm_slot = list_entry(khugepaged_scan.mm_head.next,
2086 struct mm_slot, mm_node);
2087 khugepaged_scan.address = 0;
2088 khugepaged_scan.mm_slot = mm_slot;
2089 }
2090 spin_unlock(&khugepaged_mm_lock);
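	/* Collapse pte-mapped THPs queued for this mm before scanning its vmas */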
Song Liu27e1f822019-09-23 15:38:30 -07002091 khugepaged_collapse_pte_mapped_thps(mm_slot);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002092
2093 mm = mm_slot->mm;
Yang Shi3b454ad2018-01-31 16:18:28 -08002094 /*
2095	 * Don't wait for mmap_lock (to avoid long wait times). Just move to
2096 * the next mm on the list.
2097 */
2098 vma = NULL;
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002099 if (unlikely(!mmap_read_trylock(mm)))
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07002100 goto breakouterloop_mmap_lock;
Yang Shi3b454ad2018-01-31 16:18:28 -08002101 if (likely(!khugepaged_test_exit(mm)))
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002102 vma = find_vma(mm, khugepaged_scan.address);
2103
2104 progress++;
2105 for (; vma; vma = vma->vm_next) {
2106 unsigned long hstart, hend;
2107
2108 cond_resched();
2109 if (unlikely(khugepaged_test_exit(mm))) {
2110 progress++;
2111 break;
2112 }
Song Liu50f8b922018-08-17 15:47:00 -07002113 if (!hugepage_vma_check(vma, vma->vm_flags)) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002114skip:
2115 progress++;
2116 continue;
2117 }
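		/* Clamp the scan window to the huge-page-aligned part of the vma */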
2118 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2119 hend = vma->vm_end & HPAGE_PMD_MASK;
2120 if (hstart >= hend)
2121 goto skip;
2122 if (khugepaged_scan.address > hend)
2123 goto skip;
2124 if (khugepaged_scan.address < hstart)
2125 khugepaged_scan.address = hstart;
2126 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
Matthew Wilcox (Oracle)396bcc52020-04-06 20:04:35 -07002127 if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma))
2128 goto skip;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002129
2130 while (khugepaged_scan.address < hend) {
2131 int ret;
2132 cond_resched();
2133 if (unlikely(khugepaged_test_exit(mm)))
2134 goto breakouterloop;
2135
2136 VM_BUG_ON(khugepaged_scan.address < hstart ||
2137 khugepaged_scan.address + HPAGE_PMD_SIZE >
2138 hend);
Song Liu99cb0db2019-09-23 15:38:00 -07002139 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
Matthew Wilcox (Oracle)396bcc52020-04-06 20:04:35 -07002140 struct file *file = get_file(vma->vm_file);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002141 pgoff_t pgoff = linear_page_index(vma,
2142 khugepaged_scan.address);
Song Liu99cb0db2019-09-23 15:38:00 -07002143
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002144 mmap_read_unlock(mm);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002145 ret = 1;
Song Liu579c5712019-09-23 15:37:57 -07002146 khugepaged_scan_file(mm, file, pgoff, hpage);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002147 fput(file);
2148 } else {
2149 ret = khugepaged_scan_pmd(mm, vma,
2150 khugepaged_scan.address,
2151 hpage);
2152 }
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002153 /* move to next address */
2154 khugepaged_scan.address += HPAGE_PMD_SIZE;
2155 progress += HPAGE_PMD_NR;
2156 if (ret)
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07002157 /* we released mmap_lock so break loop */
2158 goto breakouterloop_mmap_lock;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002159 if (progress >= pages)
2160 goto breakouterloop;
2161 }
2162 }
2163breakouterloop:
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002164 mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07002165breakouterloop_mmap_lock:
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002166
2167 spin_lock(&khugepaged_mm_lock);
2168 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2169 /*
2170 * Release the current mm_slot if this mm is about to die, or
2171 * if we scanned all vmas of this mm.
2172 */
2173 if (khugepaged_test_exit(mm) || !vma) {
2174 /*
2175 * Make sure that if mm_users is reaching zero while
2176 * khugepaged runs here, khugepaged_exit will find
2177 * mm_slot not pointing to the exiting mm.
2178 */
2179 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2180 khugepaged_scan.mm_slot = list_entry(
2181 mm_slot->mm_node.next,
2182 struct mm_slot, mm_node);
2183 khugepaged_scan.address = 0;
2184 } else {
2185 khugepaged_scan.mm_slot = NULL;
2186 khugepaged_full_scans++;
2187 }
2188
2189 collect_mm_slot(mm_slot);
2190 }
2191
2192 return progress;
2193}
2194
2195static int khugepaged_has_work(void)
2196{
2197 return !list_empty(&khugepaged_scan.mm_head) &&
2198 khugepaged_enabled();
2199}
2200
2201static int khugepaged_wait_event(void)
2202{
2203 return !list_empty(&khugepaged_scan.mm_head) ||
2204 kthread_should_stop();
2205}
2206
2207static void khugepaged_do_scan(void)
2208{
2209 struct page *hpage = NULL;
2210 unsigned int progress = 0, pass_through_head = 0;
2211 unsigned int pages = khugepaged_pages_to_scan;
2212 bool wait = true;
2213
2214 barrier(); /* write khugepaged_pages_to_scan to local stack */
2215
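	/* Push pages out of the per-CPU LRU caches so they can be isolated later */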
Kirill A. Shutemova980df32020-06-03 16:00:12 -07002216 lru_add_drain_all();
2217
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002218 while (progress < pages) {
2219 if (!khugepaged_prealloc_page(&hpage, &wait))
2220 break;
2221
2222 cond_resched();
2223
2224 if (unlikely(kthread_should_stop() || try_to_freeze()))
2225 break;
2226
2227 spin_lock(&khugepaged_mm_lock);
2228 if (!khugepaged_scan.mm_slot)
2229 pass_through_head++;
2230 if (khugepaged_has_work() &&
2231 pass_through_head < 2)
2232 progress += khugepaged_scan_mm_slot(pages - progress,
2233 &hpage);
2234 else
2235 progress = pages;
2236 spin_unlock(&khugepaged_mm_lock);
2237 }
2238
2239 if (!IS_ERR_OR_NULL(hpage))
2240 put_page(hpage);
2241}
2242
2243static bool khugepaged_should_wakeup(void)
2244{
2245 return kthread_should_stop() ||
2246 time_after_eq(jiffies, khugepaged_sleep_expire);
2247}
2248
2249static void khugepaged_wait_work(void)
2250{
2251 if (khugepaged_has_work()) {
2252 const unsigned long scan_sleep_jiffies =
2253 msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2254
2255 if (!scan_sleep_jiffies)
2256 return;
2257
2258 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2259 wait_event_freezable_timeout(khugepaged_wait,
2260 khugepaged_should_wakeup(),
2261 scan_sleep_jiffies);
2262 return;
2263 }
2264
2265 if (khugepaged_enabled())
2266 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2267}
2268
2269static int khugepaged(void *none)
2270{
2271 struct mm_slot *mm_slot;
2272
2273 set_freezable();
2274 set_user_nice(current, MAX_NICE);
2275
2276 while (!kthread_should_stop()) {
2277 khugepaged_do_scan();
2278 khugepaged_wait_work();
2279 }
2280
2281 spin_lock(&khugepaged_mm_lock);
2282 mm_slot = khugepaged_scan.mm_slot;
2283 khugepaged_scan.mm_slot = NULL;
2284 if (mm_slot)
2285 collect_mm_slot(mm_slot);
2286 spin_unlock(&khugepaged_mm_lock);
2287 return 0;
2288}
2289
2290static void set_recommended_min_free_kbytes(void)
2291{
2292 struct zone *zone;
2293 int nr_zones = 0;
2294 unsigned long recommended_min;
2295
Joonsoo Kimb7d349c2018-04-10 16:30:27 -07002296 for_each_populated_zone(zone) {
2297 /*
2298 * We don't need to worry about fragmentation of
2299 * ZONE_MOVABLE since it only has movable pages.
2300 */
2301 if (zone_idx(zone) > gfp_zone(GFP_USER))
2302 continue;
2303
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002304 nr_zones++;
Joonsoo Kimb7d349c2018-04-10 16:30:27 -07002305 }
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002306
2307 /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2308 recommended_min = pageblock_nr_pages * nr_zones * 2;
2309
2310 /*
2311 * Make sure that on average at least two pageblocks are almost free
2312 * of another type, one for a migratetype to fall back to and a
2313	 * second to avoid subsequent fallbacks of other types. There are 3
2314 * MIGRATE_TYPES we care about.
2315 */
2316 recommended_min += pageblock_nr_pages * nr_zones *
2317 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
2318
2319 /* don't ever allow to reserve more than 5% of the lowmem */
2320 recommended_min = min(recommended_min,
2321 (unsigned long) nr_free_buffer_pages() / 20);
2322 recommended_min <<= (PAGE_SHIFT-10);
2323
2324 if (recommended_min > min_free_kbytes) {
2325 if (user_min_free_kbytes >= 0)
2326 pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2327 min_free_kbytes, recommended_min);
2328
2329 min_free_kbytes = recommended_min;
2330 }
2331 setup_per_zone_wmarks();
2332}
2333
2334int start_stop_khugepaged(void)
2335{
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002336 int err = 0;
2337
2338 mutex_lock(&khugepaged_mutex);
2339 if (khugepaged_enabled()) {
2340 if (!khugepaged_thread)
2341 khugepaged_thread = kthread_run(khugepaged, NULL,
2342 "khugepaged");
2343 if (IS_ERR(khugepaged_thread)) {
2344 pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2345 err = PTR_ERR(khugepaged_thread);
2346 khugepaged_thread = NULL;
2347 goto fail;
2348 }
2349
2350 if (!list_empty(&khugepaged_scan.mm_head))
2351 wake_up_interruptible(&khugepaged_wait);
2352
2353 set_recommended_min_free_kbytes();
2354 } else if (khugepaged_thread) {
2355 kthread_stop(khugepaged_thread);
2356 khugepaged_thread = NULL;
2357 }
2358fail:
2359 mutex_unlock(&khugepaged_mutex);
2360 return err;
2361}
Vijay Balakrishna4aab2be2020-10-10 23:16:40 -07002362
2363void khugepaged_min_free_kbytes_update(void)
2364{
2365 mutex_lock(&khugepaged_mutex);
2366 if (khugepaged_enabled() && khugepaged_thread)
2367 set_recommended_min_free_kbytes();
2368 mutex_unlock(&khugepaged_mutex);
2369}