// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_PMD_NULL,
	SCAN_EXCEED_NONE_PTE,
	SCAN_EXCEED_SWAP_PTE,
	SCAN_EXCEED_SHARED_PTE,
	SCAN_PTE_NON_PRESENT,
	SCAN_PTE_UFFD_WP,
	SCAN_PAGE_RO,
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_PAGE_NULL,
	SCAN_SCAN_ABORT,
	SCAN_PAGE_COUNT,
	SCAN_PAGE_LRU,
	SCAN_PAGE_LOCK,
	SCAN_PAGE_ANON,
	SCAN_PAGE_COMPOUND,
	SCAN_ANY_PROCESS,
	SCAN_VMA_NULL,
	SCAN_VMA_CHECK,
	SCAN_ADDRESS_RANGE,
	SCAN_SWAP_CACHE_PAGE,
	SCAN_DEL_PAGE_LRU,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_TRUNCATED,
	SCAN_PAGE_HAS_PRIVATE,
};

#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>

static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);

/* default scan 8*512 pte (or vmas) every 30 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default, collapse hugepages if there is at least one pte mapped
 * the way it would have been mapped had the vma been large enough
 * during page fault.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
static unsigned int khugepaged_max_ptes_shared __read_mostly;

#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

#define MAX_PTE_MAPPED_THP 8

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 * @nr_pte_mapped_thp: number of pte-mapped THPs
 * @pte_mapped_thp: address array corresponding to the pte-mapped THPs
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;

	/* pte-mapped THP in this mm */
	int nr_pte_mapped_thp;
	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only the one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};

#ifdef CONFIG_SYSFS
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
	       scan_sleep_millisecs_store);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
	       alloc_sleep_millisecs_store);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int pages;
	int err;

	err = kstrtouint(buf, 10, &pages);
	if (err || !pages)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
	       pages_to_scan_store);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t khugepaged_defrag_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
					 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
					  TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR(defrag, 0644, khugepaged_defrag_show,
	       khugepaged_defrag_store);

/*
 * max_ptes_none controls whether khugepaged should collapse hugepages
 * over any unmapped ptes, which potentially increases the memory
 * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
 * reduce the available free memory in the system as it runs.
 * Increasing max_ptes_none will instead potentially reduce the free
 * memory in the system during the khugepaged scan.
 */
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
	       khugepaged_max_ptes_none_store);

static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
	       khugepaged_max_ptes_swap_store);

static ssize_t khugepaged_max_ptes_shared_show(struct kobject *kobj,
					       struct kobj_attribute *attr,
					       char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
}

static ssize_t khugepaged_max_ptes_shared_store(struct kobject *kobj,
						struct kobj_attribute *attr,
						const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_shared;

	err = kstrtoul(buf, 10, &max_ptes_shared);
	if (err || max_ptes_shared > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_shared = max_ptes_shared;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_shared_attr =
	__ATTR(max_ptes_shared, 0644, khugepaged_max_ptes_shared_show,
	       khugepaged_max_ptes_shared_store);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	&khugepaged_max_ptes_shared_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */

int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes good for khugepaged to scan,
		 * register it here without waiting for a page fault
		 * that may not happen any time soon.
		 */
		if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
				khugepaged_enter_vma_merge(vma, *vm_flags))
			return -ENOMEM;
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}

int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct mm_slot),
					  __alignof__(struct mm_slot), 0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;

	return 0;
}

void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;

	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
		if (mm == mm_slot->mm)
			return mm_slot;

	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	mm_slot->mm = mm;
	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
}

static inline int khugepaged_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

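/*
 * Return true if khugepaged is allowed to work on @vma: THP must be enabled
 * for the vma (or system-wide), shmem and (with CONFIG_READ_ONLY_THP_FOR_FS)
 * read-only file mappings must be hugepage-aligned, and anonymous vmas need
 * an anon_vma, no special vm_ops, and must not be a temporary stack or
 * carry VM_NO_KHUGEPAGED.
 */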
static bool hugepage_vma_check(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	if ((!(vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
	    (vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;

	if (shmem_file(vma->vm_file) ||
	    (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
	     vma->vm_file &&
	     (vm_flags & VM_DENYWRITE))) {
		return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				HPAGE_PMD_NR);
	}
	if (!vma->anon_vma || vma->vm_ops)
		return false;
	if (vma_is_temporary_stack(vma))
		return false;
	return !(vm_flags & VM_NO_KHUGEPAGED);
}

int __khugepaged_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return -ENOMEM;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		free_mm_slot(mm_slot);
		return 0;
	}

	spin_lock(&khugepaged_mm_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	mmgrab(mm);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);

	return 0;
}

int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	unsigned long hstart, hend;

	/*
	 * For non-shmem files, khugepaged only supports read-only mappings.
	 * khugepaged does not yet work on special mappings. And
	 * file-private shmem THP is not supported.
	 */
	if (!hugepage_vma_check(vma, vm_flags))
		return 0;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (hstart < hend)
		return khugepaged_enter(vma, vm_flags);
	return 0;
}

void __khugepaged_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		free_mm_slot(mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * khugepaged_test_exit() (which is guaranteed to run
		 * under mmap_lock read mode). Stop here (after we
		 * return all pagetables will be destroyed) until
		 * khugepaged has finished working on the pagetables
		 * under the mmap_lock.
		 */
		mmap_write_lock(mm);
		mmap_write_unlock(mm);
	}
}

static void release_pte_page(struct page *page)
{
	mod_node_page_state(page_pgdat(page),
			NR_ISOLATED_ANON + page_is_file_lru(page),
			-compound_nr(page));
	unlock_page(page);
	putback_lru_page(page);
}

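/*
 * Undo the isolation done by __collapse_huge_page_isolate(): put back every
 * base page already isolated in [pte, _pte) plus any compound pages queued
 * on @compound_pagelist.
 */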
static void release_pte_pages(pte_t *pte, pte_t *_pte,
		struct list_head *compound_pagelist)
{
	struct page *page, *tmp;

	while (--_pte >= pte) {
		pte_t pteval = *_pte;

		page = pte_page(pteval);
		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)) &&
				!PageCompound(page))
			release_pte_page(page);
	}

	list_for_each_entry_safe(page, tmp, compound_pagelist, lru) {
		list_del(&page->lru);
		release_pte_page(page);
	}
}

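/*
 * A page is only safe to collapse if its refcount equals its total mapcount
 * (plus one extra reference per subpage when it sits in the swap cache);
 * anything above that indicates a GUP or other external pin.
 */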
static bool is_refcount_suitable(struct page *page)
{
	int expected_refcount;

	expected_refcount = total_mapcount(page);
	if (PageSwapCache(page))
		expected_refcount += compound_nr(page);

	return page_count(page) == expected_refcount;
}

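/*
 * Walk the ptes under @pte for one prospective hugepage: enforce the
 * max_ptes_none/max_ptes_shared limits, lock and isolate each mapped page
 * from the LRU, and refuse pages with unexpected extra references. Success
 * also requires at least one young and one writable pte. Returns 1 when all
 * pages were isolated, 0 (after releasing them again) on failure.
 */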
static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte,
					struct list_head *compound_pagelist)
{
	struct page *page = NULL;
	pte_t *_pte;
	int none_or_zero = 0, shared = 0, result = 0, referenced = 0;
	bool writable = false;

	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval) || (pte_present(pteval) &&
				is_zero_pfn(pte_pfn(pteval)))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out;
		}

		VM_BUG_ON_PAGE(!PageAnon(page), page);

		if (page_mapcount(page) > 1 &&
				++shared > khugepaged_max_ptes_shared) {
			result = SCAN_EXCEED_SHARED_PTE;
			goto out;
		}

		if (PageCompound(page)) {
			struct page *p;
			page = compound_head(page);

			/*
			 * Check if we have dealt with the compound page
			 * already
			 */
			list_for_each_entry(p, compound_pagelist, lru) {
				if (page == p)
					goto next;
			}
		}

		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			result = SCAN_PAGE_LOCK;
			goto out;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * The page table that maps the page has been already unlinked
		 * from the page table tree and this process cannot get
		 * an additional pin on the page.
		 *
		 * New pins can come later if the page is shared across fork,
		 * but not from this process. The other process cannot write to
		 * the page, only trigger CoW.
		 */
		if (!is_refcount_suitable(page)) {
			unlock_page(page);
			result = SCAN_PAGE_COUNT;
			goto out;
		}
		if (!pte_write(pteval) && PageSwapCache(page) &&
				!reuse_swap_page(page, NULL)) {
			/*
			 * Page is in the swap cache and cannot be re-used.
			 * It cannot be collapsed into a THP.
			 */
			unlock_page(page);
			result = SCAN_SWAP_CACHE_PAGE;
			goto out;
		}

		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (isolate_lru_page(page)) {
			unlock_page(page);
			result = SCAN_DEL_PAGE_LRU;
			goto out;
		}
		mod_node_page_state(page_pgdat(page),
				NR_ISOLATED_ANON + page_is_file_lru(page),
				compound_nr(page));
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageLRU(page), page);

		if (PageCompound(page))
			list_add_tail(&page->lru, compound_pagelist);
next:
		/* There should be enough young ptes to collapse the page */
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;

		if (pte_write(pteval))
			writable = true;
	}
	if (likely(writable)) {
		if (likely(referenced)) {
			result = SCAN_SUCCEED;
			trace_mm_collapse_huge_page_isolate(page, none_or_zero,
							    referenced, writable, result);
			return 1;
		}
	} else {
		result = SCAN_PAGE_RO;
	}

out:
	release_pte_pages(pte, _pte, compound_pagelist);
	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
					    referenced, writable, result);
	return 0;
}

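/*
 * Copy the contents of the isolated source pages into @page (the new
 * hugepage), clearing the old ptes as we go, and finally release the
 * isolated sources. pte_none/zero-pfn entries just get a cleared
 * destination subpage.
 */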
static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl,
				      struct list_head *compound_pagelist)
{
	struct page *src_page, *tmp;
	pte_t *_pte;
	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, page++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;

		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				/*
				 * paravirt calls inside pte_clear here are
				 * superfluous.
				 */
				pte_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
			}
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			if (!PageCompound(src_page))
				release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			/*
			 * paravirt calls inside pte_clear here are
			 * superfluous.
			 */
			pte_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, false);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}
	}

	list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
		list_del(&src_page->lru);
		release_pte_page(src_page);
	}
}

static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	freezable_schedule_timeout_interruptible(
		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}

static int khugepaged_node_load[MAX_NUMNODES];

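/*
 * With node_reclaim_mode enabled, abort the scan if pages already counted
 * for this range live on nodes whose distance to @nid exceeds
 * node_reclaim_distance; collapsing across distant nodes would force
 * remote memory accesses.
 */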
static bool khugepaged_scan_abort(int nid)
{
	int i;

	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_mode)
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (khugepaged_node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!khugepaged_node_load[i])
			continue;
		if (node_distance(nid, i) > node_reclaim_distance)
			return true;
	}
	return false;
}

/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}

#ifdef CONFIG_NUMA
static int khugepaged_find_target_node(void)
{
	static int last_khugepaged_target_node = NUMA_NO_NODE;
	int nid, target_node = 0, max_value = 0;

	/* find first node with max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (khugepaged_node_load[nid] > max_value) {
			max_value = khugepaged_node_load[nid];
			target_node = nid;
		}

	/* do some balancing if several nodes have the same hit record */
	if (target_node <= last_khugepaged_target_node)
		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
				nid++)
			if (max_value == khugepaged_node_load[nid]) {
				target_node = nid;
				break;
			}

	last_khugepaged_target_node = target_node;
	return target_node;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (IS_ERR(*hpage)) {
		if (!*wait)
			return false;

		*wait = false;
		*hpage = NULL;
		khugepaged_alloc_sleep();
	} else if (*hpage) {
		put_page(*hpage);
		*hpage = NULL;
	}

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON_PAGE(*hpage, *hpage);

	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		*hpage = ERR_PTR(-ENOMEM);
		return NULL;
	}

	prep_transhuge_page(*hpage);
	count_vm_event(THP_COLLAPSE_ALLOC);
	return *hpage;
}
#else
static int khugepaged_find_target_node(void)
{
	return 0;
}

static inline struct page *alloc_khugepaged_hugepage(void)
{
	struct page *page;

	page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
			   HPAGE_PMD_ORDER);
	if (page)
		prep_transhuge_page(page);
	return page;
}

static struct page *khugepaged_alloc_hugepage(bool *wait)
{
	struct page *hpage;

	do {
		hpage = alloc_khugepaged_hugepage();
		if (!hpage) {
			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
			if (!*wait)
				return NULL;

			*wait = false;
			khugepaged_alloc_sleep();
		} else
			count_vm_event(THP_COLLAPSE_ALLOC);
	} while (unlikely(!hpage) && likely(khugepaged_enabled()));

	return hpage;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	/*
	 * If the hpage allocated earlier was briefly exposed in page cache
	 * before collapse_file() failed, it is possible that racing lookups
	 * have not yet completed, and would then be unpleasantly surprised by
	 * finding the hpage reused for the same mapping at a different offset.
	 * Just release the previous allocation if there is any danger of that.
	 */
	if (*hpage && page_count(*hpage) > 1) {
		put_page(*hpage);
		*hpage = NULL;
	}

	if (!*hpage)
		*hpage = khugepaged_alloc_hugepage(wait);

	if (unlikely(!*hpage))
		return false;

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON(!*hpage);

	return *hpage;
}
#endif

/*
 * If the mmap_lock was temporarily dropped, revalidate the vma
 * after re-taking it.
 * Return 0 if it succeeds, otherwise return a non-zero
 * value (scan code).
 */

static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
		struct vm_area_struct **vmap)
{
	struct vm_area_struct *vma;
	unsigned long hstart, hend;

	if (unlikely(khugepaged_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
		return SCAN_ADDRESS_RANGE;
	if (!hugepage_vma_check(vma, vma->vm_flags))
		return SCAN_VMA_CHECK;
	/* Anon VMA expected */
	if (!vma->anon_vma || vma->vm_ops)
		return SCAN_VMA_CHECK;
	return 0;
}

/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if khugepaged_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held,
 * but with mmap_lock held to protect against vma changes.
 */

static bool __collapse_huge_page_swapin(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long haddr, pmd_t *pmd,
					int referenced)
{
	int swapped_in = 0;
	vm_fault_t ret = 0;
	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);

	for (address = haddr; address < end; address += PAGE_SIZE) {
		struct vm_fault vmf = {
			.vma = vma,
			.address = address,
			.pgoff = linear_page_index(vma, haddr),
			.flags = FAULT_FLAG_ALLOW_RETRY,
			.pmd = pmd,
		};

		vmf.pte = pte_offset_map(pmd, address);
		vmf.orig_pte = *vmf.pte;
		if (!is_swap_pte(vmf.orig_pte)) {
			pte_unmap(vmf.pte);
			continue;
		}
		swapped_in++;
		ret = do_swap_page(&vmf);

		/* do_swap_page returns VM_FAULT_RETRY with released mmap_lock */
		if (ret & VM_FAULT_RETRY) {
			mmap_read_lock(mm);
			if (hugepage_vma_revalidate(mm, haddr, &vma)) {
				/* vma is no longer available, don't continue to swapin */
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
			/* check if the pmd is still valid */
			if (mm_find_pmd(mm, haddr) != pmd) {
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
		}
		if (ret & VM_FAULT_ERROR) {
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			return false;
		}
	}

	/* Drain LRU add pagevec to remove extra pin on the swapped in pages */
	if (swapped_in)
		lru_add_drain();

	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
	return true;
}

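/*
 * Attempt the actual collapse: allocate the hugepage (with mmap_lock
 * dropped), swap in any missing pages if needed, then re-take mmap_lock for
 * write, clear the pmd, isolate and copy the small pages, and finally
 * install the new huge pmd. If isolation fails, the original pmd is
 * re-established and the range is left untouched.
 */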
static void collapse_huge_page(struct mm_struct *mm,
				   unsigned long address,
				   struct page **hpage,
				   int node, int referenced, int unmapped)
{
	LIST_HEAD(compound_pagelist);
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *new_page;
	spinlock_t *pmd_ptl, *pte_ptl;
	int isolated = 0, result = 0;
	struct vm_area_struct *vma;
	struct mmu_notifier_range range;
	gfp_t gfp;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/* Only allocate from the target node */
	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;

	/*
	 * Before allocating the hugepage, release the mmap_lock read lock.
	 * The allocation can take potentially a long time if it involves
	 * sync compaction, and we do not need to hold the mmap_lock during
	 * that. We will recheck the vma after taking it again in write mode.
	 */
	mmap_read_unlock(mm);
	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out_nolock;
	}

	if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out_nolock;
	}
	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);

	mmap_read_lock(mm);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	/*
	 * __collapse_huge_page_swapin always returns with mmap_lock locked.
	 * If it fails, we release mmap_lock and jump out_nolock.
	 * Continuing to collapse causes inconsistency.
	 */
	if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
						     pmd, referenced)) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	mmap_read_unlock(mm);
	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 */
	mmap_write_lock(mm);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result)
		goto out;
	/* check if the pmd is still valid */
	if (mm_find_pmd(mm, address) != pmd)
		goto out;

	anon_vma_lock_write(vma->anon_vma);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
				address, address + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	pte = pte_offset_map(pmd, address);
	pte_ptl = pte_lockptr(mm, pmd);

	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
	/*
	 * After this gup_fast can't run anymore. This also removes
	 * any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address
	 * to avoid the risk of CPU bugs in that area.
	 */
	_pmd = pmdp_collapse_flush(vma, address, pmd);
	spin_unlock(pmd_ptl);
	mmu_notifier_invalidate_range_end(&range);

	spin_lock(pte_ptl);
	isolated = __collapse_huge_page_isolate(vma, address, pte,
			&compound_pagelist);
	spin_unlock(pte_ptl);

	if (unlikely(!isolated)) {
		pte_unmap(pte);
		spin_lock(pmd_ptl);
		BUG_ON(!pmd_none(*pmd));
		/*
		 * We can only use set_pmd_at when establishing
		 * hugepmds and never for establishing regular pmds that
		 * point to regular pagetables. Use pmd_populate for that
		 */
		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
		spin_unlock(pmd_ptl);
		anon_vma_unlock_write(vma->anon_vma);
		result = SCAN_FAIL;
		goto out;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock_write(vma->anon_vma);

	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl,
			&compound_pagelist);
	pte_unmap(pte);
	__SetPageUptodate(new_page);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);

	/*
	 * spin_lock() below is not the equivalent of smp_wmb(), so
	 * this is needed to avoid the copy_huge_page writes becoming
	 * visible after the set_pmd_at() write.
	 */
	smp_wmb();

	spin_lock(pmd_ptl);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(new_page, vma, address, true);
	lru_cache_add_inactive_or_unevictable(new_page, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	spin_unlock(pmd_ptl);

	*hpage = NULL;

	khugepaged_pages_collapsed++;
	result = SCAN_SUCCEED;
out_up_write:
	mmap_write_unlock(mm);
out_nolock:
	if (!IS_ERR_OR_NULL(*hpage))
		mem_cgroup_uncharge(*hpage);
	trace_mm_collapse_huge_page(mm, isolated, result);
	return;
out:
	goto out_up_write;
}

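/*
 * Scan one pmd-sized range and decide whether it is worth collapsing:
 * count none/zero, swapped and shared ptes against the tunables, check every
 * mapped page (LRU, locked, anon, refcount, node), and if the range
 * qualifies call collapse_huge_page(). Returns 1 if a collapse was attempted
 * (and the mmap_lock was released), 0 otherwise.
 */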
static int khugepaged_scan_pmd(struct mm_struct *mm,
			       struct vm_area_struct *vma,
			       unsigned long address,
			       struct page **hpage)
{
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int ret = 0, result = 0, referenced = 0;
	int none_or_zero = 0, shared = 0;
	struct page *page = NULL;
	unsigned long _address;
	spinlock_t *ptl;
	int node = NUMA_NO_NODE, unmapped = 0;
	bool writable = false;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		goto out;
	}

	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (is_swap_pte(pteval)) {
			if (++unmapped <= khugepaged_max_ptes_swap) {
				/*
				 * Always be strict with uffd-wp
				 * enabled swap entries. Please see
				 * comment below for pte_uffd_wp().
				 */
				if (pte_swp_uffd_wp(pteval)) {
					result = SCAN_PTE_UFFD_WP;
					goto out_unmap;
				}
				continue;
			} else {
				result = SCAN_EXCEED_SWAP_PTE;
				goto out_unmap;
			}
		}
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out_unmap;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out_unmap;
		}
		if (pte_uffd_wp(pteval)) {
			/*
			 * Don't collapse the page if any of the small
			 * PTEs are armed with uffd write protection.
			 * Here we could also mark the new huge pmd as
			 * write protected if any of the small ones is
			 * marked, but that could bring unknown
			 * userfault messages that fall outside of
			 * the registered range. So, just be simple.
			 */
			result = SCAN_PTE_UFFD_WP;
			goto out_unmap;
		}
		if (pte_write(pteval))
			writable = true;

		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out_unmap;
		}

		if (page_mapcount(page) > 1 &&
				++shared > khugepaged_max_ptes_shared) {
			result = SCAN_EXCEED_SHARED_PTE;
			goto out_unmap;
		}

		page = compound_head(page);

		/*
		 * Record which node the original page is from and save this
		 * information to khugepaged_node_load[].
		 * Khugepaged will allocate a hugepage from the node that has
		 * the max hit record.
		 */
		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			goto out_unmap;
		}
		khugepaged_node_load[node]++;
		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			goto out_unmap;
		}
		if (PageLocked(page)) {
			result = SCAN_PAGE_LOCK;
			goto out_unmap;
		}
		if (!PageAnon(page)) {
			result = SCAN_PAGE_ANON;
			goto out_unmap;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * Here the check is racy: it may see total_mapcount > refcount
		 * in some cases.
		 * For example, one process with one forked child process.
		 * The parent has the PMD split due to MADV_DONTNEED, then
		 * the child is trying to unmap the whole PMD, but khugepaged
		 * may be scanning the parent between when the child clears
		 * the PageDoubleMap flag and when it decrements the mapcount.
		 * So khugepaged may see total_mapcount > refcount.
		 *
		 * But such a case is ephemeral; we could always retry collapse
		 * later. However it may report a false positive if the page
		 * has excessive GUP pins (i.e. 512). Anyway the same check
		 * will be done again later, so the risk seems low.
		 */
		if (!is_refcount_suitable(page)) {
			result = SCAN_PAGE_COUNT;
			goto out_unmap;
		}
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;
	}
	if (!writable) {
		result = SCAN_PAGE_RO;
	} else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR/2)) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
		ret = 1;
	}
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (ret) {
		node = khugepaged_find_target_node();
		/* collapse_huge_page will return with the mmap_lock released */
		collapse_huge_page(mm, address, hpage, node,
				referenced, unmapped);
	}
out:
	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
				     none_or_zero, result, unmapped);
	return ret;
}

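/*
 * Drop the mm_slot (and the mm reference pinned by __khugepaged_enter())
 * once the mm has exited. Caller must hold khugepaged_mm_lock.
 */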
static void collect_mm_slot(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;

	lockdep_assert_held(&khugepaged_mm_lock);

	if (khugepaged_test_exit(mm)) {
		/* free mm_slot */
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
		 *
		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		 */

		/* khugepaged_mm_lock actually not necessary for the below */
		free_mm_slot(mm_slot);
		mmdrop(mm);
	}
}

Matthew Wilcox (Oracle)396bcc52020-04-06 20:04:35 -07001397#ifdef CONFIG_SHMEM
Song Liu27e1f822019-09-23 15:38:30 -07001398/*
1399 * Notify khugepaged that given addr of the mm is pte-mapped THP. Then
1400 * khugepaged should try to collapse the page table.
1401 */
1402static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
1403 unsigned long addr)
1404{
1405 struct mm_slot *mm_slot;
1406
1407 VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
1408
1409 spin_lock(&khugepaged_mm_lock);
1410 mm_slot = get_mm_slot(mm);
1411 if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
1412 mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
1413 spin_unlock(&khugepaged_mm_lock);
1414 return 0;
1415}
1416
1417/**
Alex Shi336e6b52020-12-14 19:12:01 -08001418 * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
1419 * address haddr.
1420 *
1421 * @mm: process address space where collapse happens
1422 * @addr: THP collapse address
Song Liu27e1f822019-09-23 15:38:30 -07001423 *
1424 * This function checks whether all the PTEs in the PMD are pointing to the
1425 * right THP. If so, retract the page table so the THP can refault in with
1426 * as pmd-mapped.
1427 */
1428void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
1429{
1430 unsigned long haddr = addr & HPAGE_PMD_MASK;
1431 struct vm_area_struct *vma = find_vma(mm, haddr);
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001432 struct page *hpage;
Song Liu27e1f822019-09-23 15:38:30 -07001433 pte_t *start_pte, *pte;
1434 pmd_t *pmd, _pmd;
1435 spinlock_t *ptl;
1436 int count = 0;
1437 int i;
1438
1439 if (!vma || !vma->vm_file ||
1440 vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE)
1441 return;
1442
1443 /*
1444 * This vm_flags may not have VM_HUGEPAGE if the page was not
1445 * collapsed by this mm. But we can still collapse if the page is
1446 * the valid THP. Add extra VM_HUGEPAGE so hugepage_vma_check()
1447 * will not fail the vma for missing VM_HUGEPAGE
1448 */
1449 if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
1450 return;
1451
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001452 hpage = find_lock_page(vma->vm_file->f_mapping,
1453 linear_page_index(vma, haddr));
1454 if (!hpage)
1455 return;
1456
1457 if (!PageHead(hpage))
1458 goto drop_hpage;
1459
Song Liu27e1f822019-09-23 15:38:30 -07001460 pmd = mm_find_pmd(mm, haddr);
1461 if (!pmd)
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001462 goto drop_hpage;
Song Liu27e1f822019-09-23 15:38:30 -07001463
1464 start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
1465
1466 /* step 1: check all mapped PTEs are to the right huge page */
1467 for (i = 0, addr = haddr, pte = start_pte;
1468 i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1469 struct page *page;
1470
1471 /* empty pte, skip */
1472 if (pte_none(*pte))
1473 continue;
1474
1475 /* page swapped out, abort */
1476 if (!pte_present(*pte))
1477 goto abort;
1478
1479 page = vm_normal_page(vma, addr, *pte);
1480
Song Liu27e1f822019-09-23 15:38:30 -07001481 /*
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001482 * Note that uprobe, debugger, or MAP_PRIVATE may change the
1483 * page table, but the new page will not be a subpage of hpage.
Song Liu27e1f822019-09-23 15:38:30 -07001484 */
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001485 if (hpage + i != page)
Song Liu27e1f822019-09-23 15:38:30 -07001486 goto abort;
1487 count++;
1488 }
1489
1490 /* step 2: adjust rmap */
1491 for (i = 0, addr = haddr, pte = start_pte;
1492 i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1493 struct page *page;
1494
1495 if (pte_none(*pte))
1496 continue;
1497 page = vm_normal_page(vma, addr, *pte);
1498 page_remove_rmap(page, false);
1499 }
1500
1501 pte_unmap_unlock(start_pte, ptl);
1502
1503 /* step 3: set proper refcount and mm_counters. */
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001504 if (count) {
Song Liu27e1f822019-09-23 15:38:30 -07001505 page_ref_sub(hpage, count);
1506 add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
1507 }
1508
1509 /* step 4: collapse pmd */
1510 ptl = pmd_lock(vma->vm_mm, pmd);
Hugh Dickins723a80d2020-08-06 23:26:15 -07001511 _pmd = pmdp_collapse_flush(vma, haddr, pmd);
Song Liu27e1f822019-09-23 15:38:30 -07001512 spin_unlock(ptl);
1513 mm_dec_nr_ptes(mm);
1514 pte_free(mm, pmd_pgtable(_pmd));
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001515
1516drop_hpage:
1517 unlock_page(hpage);
1518 put_page(hpage);
Song Liu27e1f822019-09-23 15:38:30 -07001519 return;
1520
1521abort:
1522 pte_unmap_unlock(start_pte, ptl);
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001523 goto drop_hpage;
Song Liu27e1f822019-09-23 15:38:30 -07001524}
1525
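/*
 * Flush the deferred pte_mapped_thp[] addresses recorded by
 * khugepaged_add_pte_mapped_thp().  Requires mmap_write_lock(); if the
 * trylock fails, return -EBUSY and let the next scan of this mm retry.
 */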
1526static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
1527{
1528 struct mm_struct *mm = mm_slot->mm;
1529 int i;
1530
1531 if (likely(mm_slot->nr_pte_mapped_thp == 0))
1532 return 0;
1533
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001534 if (!mmap_write_trylock(mm))
Song Liu27e1f822019-09-23 15:38:30 -07001535 return -EBUSY;
1536
1537 if (unlikely(khugepaged_test_exit(mm)))
1538 goto out;
1539
1540 for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
1541 collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);
1542
1543out:
1544 mm_slot->nr_pte_mapped_thp = 0;
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001545 mmap_write_unlock(mm);
Song Liu27e1f822019-09-23 15:38:30 -07001546 return 0;
1547}
1548
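/*
 * Walk all VMAs that map @pgoff of @mapping and, for suitably aligned
 * ranges without an anon_vma, withdraw the (now empty) page table so the
 * new huge page can be mapped by a PMD on the next fault.  VMAs whose
 * mmap_lock cannot be taken are deferred via khugepaged_add_pte_mapped_thp().
 */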
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001549static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1550{
1551 struct vm_area_struct *vma;
Hugh Dickins18e77602020-08-06 23:26:22 -07001552 struct mm_struct *mm;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001553 unsigned long addr;
1554 pmd_t *pmd, _pmd;
1555
1556 i_mmap_lock_write(mapping);
1557 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
Song Liu27e1f822019-09-23 15:38:30 -07001558 /*
1559 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
1560		 * got written to. These VMAs are likely not worth taking
Michel Lespinasse3e4e28c2020-06-08 21:33:51 -07001561		 * mmap_write_lock(mm) for, as the PMD-mapping is likely to be split
Song Liu27e1f822019-09-23 15:38:30 -07001562 * later.
1563 *
1564		 * Note that the vma->anon_vma check is racy: it can be set up after
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07001565		 * the check but before we take mmap_lock, by the fault path.
Song Liu27e1f822019-09-23 15:38:30 -07001566 * But page lock would prevent establishing any new ptes of the
1567 * page, so we are safe.
1568 *
1569		 * An alternative would be to drop the check, but check that the page
1570		 * table is clear before calling pmdp_collapse_flush() under
1571		 * ptl. It has a higher chance of recovering the THP for the VMA, but
1572 * has higher cost too.
1573 */
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001574 if (vma->anon_vma)
1575 continue;
1576 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1577 if (addr & ~HPAGE_PMD_MASK)
1578 continue;
1579 if (vma->vm_end < addr + HPAGE_PMD_SIZE)
1580 continue;
Hugh Dickins18e77602020-08-06 23:26:22 -07001581 mm = vma->vm_mm;
1582 pmd = mm_find_pmd(mm, addr);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001583 if (!pmd)
1584 continue;
1585 /*
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07001586 * We need exclusive mmap_lock to retract page table.
Song Liu27e1f822019-09-23 15:38:30 -07001587 *
1588 * We use trylock due to lock inversion: we need to acquire
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07001589 * mmap_lock while holding page lock. Fault path does it in
Song Liu27e1f822019-09-23 15:38:30 -07001590 * reverse order. Trylock is a way to avoid deadlock.
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001591 */
Hugh Dickins18e77602020-08-06 23:26:22 -07001592 if (mmap_write_trylock(mm)) {
1593 if (!khugepaged_test_exit(mm)) {
1594 spinlock_t *ptl = pmd_lock(mm, pmd);
1595 /* assume page table is clear */
1596 _pmd = pmdp_collapse_flush(vma, addr, pmd);
1597 spin_unlock(ptl);
1598 mm_dec_nr_ptes(mm);
1599 pte_free(mm, pmd_pgtable(_pmd));
1600 }
1601 mmap_write_unlock(mm);
Song Liu27e1f822019-09-23 15:38:30 -07001602 } else {
1603 /* Try again later */
Hugh Dickins18e77602020-08-06 23:26:22 -07001604 khugepaged_add_pte_mapped_thp(mm, addr);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001605 }
1606 }
1607 i_mmap_unlock_write(mapping);
1608}
1609
1610/**
Song Liu99cb0db2019-09-23 15:38:00 -07001611 * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001612 *
Alex Shi336e6b52020-12-14 19:12:01 -08001613 * @mm: process address space where collapse happens
1614 * @file: file that the collapse happens on
1615 * @start: collapse start offset in the file (in pages)
1616 * @hpage: newly allocated huge page for collapse
1617 * @node: node the new huge page is allocated from
1618 *
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001619 * Basic scheme is simple, details are more complex:
Hugh Dickins87c460a2018-11-30 14:10:43 -08001620 * - allocate and lock a new huge page;
Matthew Wilcox77da9382017-12-04 14:56:08 -05001621 * - scan page cache replacing old pages with the new one
Song Liu99cb0db2019-09-23 15:38:00 -07001622 * + swap/gup in pages if necessary;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001623 * + fill in gaps;
Matthew Wilcox77da9382017-12-04 14:56:08 -05001624 * + keep old pages around in case rollback is required;
1625 * - if replacing succeeds:
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001626 * + copy data over;
1627 * + free old pages;
Hugh Dickins87c460a2018-11-30 14:10:43 -08001628 * + unlock huge page;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001629 *  - if replacing failed:
1630 * + put all pages back and unfreeze them;
Matthew Wilcox77da9382017-12-04 14:56:08 -05001631 * + restore gaps in the page cache;
Hugh Dickins87c460a2018-11-30 14:10:43 -08001632 * + unlock and free huge page;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001633 */
Song Liu579c5712019-09-23 15:37:57 -07001634static void collapse_file(struct mm_struct *mm,
1635 struct file *file, pgoff_t start,
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001636 struct page **hpage, int node)
1637{
Song Liu579c5712019-09-23 15:37:57 -07001638 struct address_space *mapping = file->f_mapping;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001639 gfp_t gfp;
Matthew Wilcox77da9382017-12-04 14:56:08 -05001640 struct page *new_page;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001641 pgoff_t index, end = start + HPAGE_PMD_NR;
1642 LIST_HEAD(pagelist);
Matthew Wilcox77da9382017-12-04 14:56:08 -05001643 XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001644 int nr_none = 0, result = SCAN_SUCCEED;
Song Liu99cb0db2019-09-23 15:38:00 -07001645 bool is_shmem = shmem_file(file);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001646
Song Liu99cb0db2019-09-23 15:38:00 -07001647 VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001648 VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1649
1650 /* Only allocate from the target node */
Michal Hocko41b61672017-01-10 16:57:42 -08001651 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001652
1653 new_page = khugepaged_alloc_page(hpage, gfp, node);
1654 if (!new_page) {
1655 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1656 goto out;
1657 }
1658
Johannes Weinerd9eb1ea2020-06-03 16:02:24 -07001659 if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001660 result = SCAN_CGROUP_CHARGE_FAIL;
1661 goto out;
1662 }
Johannes Weiner9d82c692020-06-03 16:02:04 -07001663 count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001664
Hugh Dickins95feeab2018-11-30 14:10:50 -08001665 /* This will be less messy when we use multi-index entries */
1666 do {
1667 xas_lock_irq(&xas);
1668 xas_create_range(&xas);
1669 if (!xas_error(&xas))
1670 break;
1671 xas_unlock_irq(&xas);
1672 if (!xas_nomem(&xas, GFP_KERNEL)) {
Hugh Dickins95feeab2018-11-30 14:10:50 -08001673 result = SCAN_FAIL;
1674 goto out;
1675 }
1676 } while (1);
1677
Hugh Dickins042a3082018-11-30 14:10:39 -08001678 __SetPageLocked(new_page);
Song Liu99cb0db2019-09-23 15:38:00 -07001679 if (is_shmem)
1680 __SetPageSwapBacked(new_page);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001681 new_page->index = start;
1682 new_page->mapping = mapping;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001683
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001684 /*
Hugh Dickins87c460a2018-11-30 14:10:43 -08001685 * At this point the new_page is locked and not up-to-date.
1686 * It's safe to insert it into the page cache, because nobody would
1687 * be able to map it or use it in another way until we unlock it.
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001688 */
1689
Matthew Wilcox77da9382017-12-04 14:56:08 -05001690 xas_set(&xas, start);
1691 for (index = start; index < end; index++) {
1692 struct page *page = xas_next(&xas);
1693
1694 VM_BUG_ON(index != xas.xa_index);
Song Liu99cb0db2019-09-23 15:38:00 -07001695 if (is_shmem) {
1696 if (!page) {
1697 /*
1698 * Stop if extent has been truncated or
1699 * hole-punched, and is now completely
1700 * empty.
1701 */
1702 if (index == start) {
1703 if (!xas_next_entry(&xas, end - 1)) {
1704 result = SCAN_TRUNCATED;
1705 goto xa_locked;
1706 }
1707 xas_set(&xas, index);
1708 }
1709 if (!shmem_charge(mapping->host, 1)) {
1710 result = SCAN_FAIL;
Hugh Dickins042a3082018-11-30 14:10:39 -08001711 goto xa_locked;
Hugh Dickins701270f2018-11-30 14:10:25 -08001712 }
Song Liu99cb0db2019-09-23 15:38:00 -07001713 xas_store(&xas, new_page);
1714 nr_none++;
1715 continue;
Hugh Dickins701270f2018-11-30 14:10:25 -08001716 }
Song Liu99cb0db2019-09-23 15:38:00 -07001717
1718 if (xa_is_value(page) || !PageUptodate(page)) {
1719 xas_unlock_irq(&xas);
1720 /* swap in or instantiate fallocated page */
1721 if (shmem_getpage(mapping->host, index, &page,
1722 SGP_NOHUGE)) {
1723 result = SCAN_FAIL;
1724 goto xa_unlocked;
1725 }
1726 } else if (trylock_page(page)) {
1727 get_page(page);
1728 xas_unlock_irq(&xas);
1729 } else {
1730 result = SCAN_PAGE_LOCK;
Hugh Dickins042a3082018-11-30 14:10:39 -08001731 goto xa_locked;
Matthew Wilcox77da9382017-12-04 14:56:08 -05001732 }
Song Liu99cb0db2019-09-23 15:38:00 -07001733 } else { /* !is_shmem */
1734 if (!page || xa_is_value(page)) {
1735 xas_unlock_irq(&xas);
1736 page_cache_sync_readahead(mapping, &file->f_ra,
1737 file, index,
David Howellse5a59d32020-09-04 16:36:16 -07001738 end - index);
Song Liu99cb0db2019-09-23 15:38:00 -07001739 /* drain pagevecs to help isolate_lru_page() */
1740 lru_add_drain();
1741 page = find_lock_page(mapping, index);
1742 if (unlikely(page == NULL)) {
1743 result = SCAN_FAIL;
1744 goto xa_unlocked;
1745 }
Song Liu75f36062019-11-30 17:57:19 -08001746 } else if (PageDirty(page)) {
1747 /*
1748				 * khugepaged only works on a read-only fd,
1749				 * so this page is dirty because it hasn't
1750				 * been flushed since the first write. There
1751 * won't be new dirty pages.
1752 *
1753 * Trigger async flush here and hope the
1754 * writeback is done when khugepaged
1755 * revisits this page.
1756 *
1757 * This is a one-off situation. We are not
1758				 * forcing writeback in a loop.
1759 */
1760 xas_unlock_irq(&xas);
1761 filemap_flush(mapping);
1762 result = SCAN_FAIL;
1763 goto xa_unlocked;
Song Liu99cb0db2019-09-23 15:38:00 -07001764 } else if (trylock_page(page)) {
1765 get_page(page);
1766 xas_unlock_irq(&xas);
1767 } else {
1768 result = SCAN_PAGE_LOCK;
1769 goto xa_locked;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001770 }
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001771 }
1772
1773 /*
Matthew Wilcoxb93b0162018-04-10 16:36:56 -07001774 * The page must be locked, so we can drop the i_pages lock
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001775 * without racing with truncate.
1776 */
1777 VM_BUG_ON_PAGE(!PageLocked(page), page);
Song Liu4655e5e2019-11-15 17:34:53 -08001778
1779 /* make sure the page is up to date */
1780 if (unlikely(!PageUptodate(page))) {
1781 result = SCAN_FAIL;
1782 goto out_unlock;
1783 }
Hugh Dickins06a5e122018-11-30 14:10:47 -08001784
1785 /*
1786 * If file was truncated then extended, or hole-punched, before
1787 * we locked the first page, then a THP might be there already.
1788 */
1789 if (PageTransCompound(page)) {
1790 result = SCAN_PAGE_COMPOUND;
1791 goto out_unlock;
1792 }
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001793
1794 if (page_mapping(page) != mapping) {
1795 result = SCAN_TRUNCATED;
1796 goto out_unlock;
1797 }
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001798
Song Liu4655e5e2019-11-15 17:34:53 -08001799 if (!is_shmem && PageDirty(page)) {
1800 /*
1801			 * khugepaged only works on a read-only fd, so this
1802			 * page is dirty because it hasn't been flushed
1803			 * since the first write.
1804 */
1805 result = SCAN_FAIL;
1806 goto out_unlock;
1807 }
1808
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001809 if (isolate_lru_page(page)) {
1810 result = SCAN_DEL_PAGE_LRU;
Hugh Dickins042a3082018-11-30 14:10:39 -08001811 goto out_unlock;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001812 }
1813
Song Liu99cb0db2019-09-23 15:38:00 -07001814 if (page_has_private(page) &&
1815 !try_to_release_page(page, GFP_KERNEL)) {
1816 result = SCAN_PAGE_HAS_PRIVATE;
Hugh Dickins2f33a702020-05-27 22:20:43 -07001817 putback_lru_page(page);
Song Liu99cb0db2019-09-23 15:38:00 -07001818 goto out_unlock;
1819 }
1820
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001821 if (page_mapped(page))
Matthew Wilcox977fbdc2018-01-31 16:17:36 -08001822 unmap_mapping_pages(mapping, index, 1, false);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001823
Matthew Wilcox77da9382017-12-04 14:56:08 -05001824 xas_lock_irq(&xas);
1825 xas_set(&xas, index);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001826
Matthew Wilcox77da9382017-12-04 14:56:08 -05001827 VM_BUG_ON_PAGE(page != xas_load(&xas), page);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001828 VM_BUG_ON_PAGE(page_mapped(page), page);
1829
1830 /*
1831 * The page is expected to have page_count() == 3:
1832 * - we hold a pin on it;
Matthew Wilcox77da9382017-12-04 14:56:08 -05001833 * - one reference from page cache;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001834 * - one from isolate_lru_page;
1835 */
1836 if (!page_ref_freeze(page, 3)) {
1837 result = SCAN_PAGE_COUNT;
Hugh Dickins042a3082018-11-30 14:10:39 -08001838 xas_unlock_irq(&xas);
1839 putback_lru_page(page);
1840 goto out_unlock;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001841 }
1842
1843 /*
1844 * Add the page to the list to be able to undo the collapse if
1845		 * something goes wrong.
1846 */
1847 list_add_tail(&page->lru, &pagelist);
1848
1849 /* Finally, replace with the new page. */
Matthew Wilcox (Oracle)41011962019-09-23 15:34:52 -07001850 xas_store(&xas, new_page);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001851 continue;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001852out_unlock:
1853 unlock_page(page);
1854 put_page(page);
Hugh Dickins042a3082018-11-30 14:10:39 -08001855 goto xa_unlocked;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001856 }
1857
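	/* Every index in the range now points at new_page; account the THP. */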
Song Liu99cb0db2019-09-23 15:38:00 -07001858 if (is_shmem)
Johannes Weinerb8eddff2020-12-14 19:06:20 -08001859 __inc_lruvec_page_state(new_page, NR_SHMEM_THPS);
Song Liu09d91cd2019-09-23 15:38:03 -07001860 else {
Johannes Weinerb8eddff2020-12-14 19:06:20 -08001861 __inc_lruvec_page_state(new_page, NR_FILE_THPS);
Song Liu09d91cd2019-09-23 15:38:03 -07001862 filemap_nr_thps_inc(mapping);
1863 }
Song Liu99cb0db2019-09-23 15:38:00 -07001864
Hugh Dickins042a3082018-11-30 14:10:39 -08001865 if (nr_none) {
Johannes Weiner9d82c692020-06-03 16:02:04 -07001866 __mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none);
Song Liu99cb0db2019-09-23 15:38:00 -07001867 if (is_shmem)
Johannes Weiner9d82c692020-06-03 16:02:04 -07001868 __mod_lruvec_page_state(new_page, NR_SHMEM, nr_none);
Hugh Dickins042a3082018-11-30 14:10:39 -08001869 }
1870
1871xa_locked:
1872 xas_unlock_irq(&xas);
Matthew Wilcox77da9382017-12-04 14:56:08 -05001873xa_unlocked:
Hugh Dickins042a3082018-11-30 14:10:39 -08001874
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001875 if (result == SCAN_SUCCEED) {
Matthew Wilcox77da9382017-12-04 14:56:08 -05001876 struct page *page, *tmp;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001877
1878 /*
Matthew Wilcox77da9382017-12-04 14:56:08 -05001879		 * Replacing old pages with the new one has succeeded; now we
1880 * need to copy the content and free the old pages.
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001881 */
Hugh Dickins2af8ff22018-11-30 14:10:35 -08001882 index = start;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001883 list_for_each_entry_safe(page, tmp, &pagelist, lru) {
Hugh Dickins2af8ff22018-11-30 14:10:35 -08001884 while (index < page->index) {
1885 clear_highpage(new_page + (index % HPAGE_PMD_NR));
1886 index++;
1887 }
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001888 copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
1889 page);
1890 list_del(&page->lru);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001891 page->mapping = NULL;
Hugh Dickins042a3082018-11-30 14:10:39 -08001892 page_ref_unfreeze(page, 1);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001893 ClearPageActive(page);
1894 ClearPageUnevictable(page);
Hugh Dickins042a3082018-11-30 14:10:39 -08001895 unlock_page(page);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001896 put_page(page);
Hugh Dickins2af8ff22018-11-30 14:10:35 -08001897 index++;
1898 }
1899 while (index < end) {
1900 clear_highpage(new_page + (index % HPAGE_PMD_NR));
1901 index++;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001902 }
1903
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001904 SetPageUptodate(new_page);
Hugh Dickins87c460a2018-11-30 14:10:43 -08001905 page_ref_add(new_page, HPAGE_PMD_NR - 1);
Johannes Weiner6058eae2020-06-03 16:02:40 -07001906 if (is_shmem)
Song Liu99cb0db2019-09-23 15:38:00 -07001907 set_page_dirty(new_page);
Johannes Weiner6058eae2020-06-03 16:02:40 -07001908 lru_cache_add(new_page);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001909
Hugh Dickins042a3082018-11-30 14:10:39 -08001910 /*
1911 * Remove pte page tables, so we can re-fault the page as huge.
1912 */
1913 retract_page_tables(mapping, start);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001914 *hpage = NULL;
Yang Shi87aa7522018-08-17 15:45:29 -07001915
1916 khugepaged_pages_collapsed++;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001917 } else {
Matthew Wilcox77da9382017-12-04 14:56:08 -05001918 struct page *page;
Hugh Dickinsaaa52e32018-11-30 14:10:29 -08001919
Matthew Wilcox77da9382017-12-04 14:56:08 -05001920 /* Something went wrong: roll back page cache changes */
Matthew Wilcox77da9382017-12-04 14:56:08 -05001921 xas_lock_irq(&xas);
Hugh Dickinsaaa52e32018-11-30 14:10:29 -08001922 mapping->nrpages -= nr_none;
Song Liu99cb0db2019-09-23 15:38:00 -07001923
1924 if (is_shmem)
1925 shmem_uncharge(mapping->host, nr_none);
Hugh Dickinsaaa52e32018-11-30 14:10:29 -08001926
Matthew Wilcox77da9382017-12-04 14:56:08 -05001927 xas_set(&xas, start);
1928 xas_for_each(&xas, page, end - 1) {
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001929 page = list_first_entry_or_null(&pagelist,
1930 struct page, lru);
Matthew Wilcox77da9382017-12-04 14:56:08 -05001931 if (!page || xas.xa_index < page->index) {
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001932 if (!nr_none)
1933 break;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001934 nr_none--;
Johannes Weiner59749e62016-12-12 16:43:35 -08001935 /* Put holes back where they were */
Matthew Wilcox77da9382017-12-04 14:56:08 -05001936 xas_store(&xas, NULL);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001937 continue;
1938 }
1939
Matthew Wilcox77da9382017-12-04 14:56:08 -05001940 VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001941
1942 /* Unfreeze the page. */
1943 list_del(&page->lru);
1944 page_ref_unfreeze(page, 2);
Matthew Wilcox77da9382017-12-04 14:56:08 -05001945 xas_store(&xas, page);
1946 xas_pause(&xas);
1947 xas_unlock_irq(&xas);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001948 unlock_page(page);
Hugh Dickins042a3082018-11-30 14:10:39 -08001949 putback_lru_page(page);
Matthew Wilcox77da9382017-12-04 14:56:08 -05001950 xas_lock_irq(&xas);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001951 }
1952 VM_BUG_ON(nr_none);
Matthew Wilcox77da9382017-12-04 14:56:08 -05001953 xas_unlock_irq(&xas);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001954
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001955 new_page->mapping = NULL;
1956 }
Hugh Dickins042a3082018-11-30 14:10:39 -08001957
1958 unlock_page(new_page);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001959out:
1960 VM_BUG_ON(!list_empty(&pagelist));
Johannes Weiner9d82c692020-06-03 16:02:04 -07001961 if (!IS_ERR_OR_NULL(*hpage))
1962 mem_cgroup_uncharge(*hpage);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001963 /* TODO: tracepoints */
1964}
1965
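/*
 * Scan the page cache of @file in [start, start + HPAGE_PMD_NR) under RCU:
 * count present pages and swap entries, abort on compound pages, non-LRU or
 * over-referenced pages and node imbalance, and call collapse_file() when
 * enough of the range is populated.
 */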
Song Liu579c5712019-09-23 15:37:57 -07001966static void khugepaged_scan_file(struct mm_struct *mm,
1967 struct file *file, pgoff_t start, struct page **hpage)
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001968{
1969 struct page *page = NULL;
Song Liu579c5712019-09-23 15:37:57 -07001970 struct address_space *mapping = file->f_mapping;
Matthew Wilcox85b392d2017-12-04 15:06:23 -05001971 XA_STATE(xas, &mapping->i_pages, start);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001972 int present, swap;
1973 int node = NUMA_NO_NODE;
1974 int result = SCAN_SUCCEED;
1975
1976 present = 0;
1977 swap = 0;
1978 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1979 rcu_read_lock();
Matthew Wilcox85b392d2017-12-04 15:06:23 -05001980 xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
1981 if (xas_retry(&xas, page))
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001982 continue;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001983
Matthew Wilcox85b392d2017-12-04 15:06:23 -05001984 if (xa_is_value(page)) {
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001985 if (++swap > khugepaged_max_ptes_swap) {
1986 result = SCAN_EXCEED_SWAP_PTE;
1987 break;
1988 }
1989 continue;
1990 }
1991
1992 if (PageTransCompound(page)) {
1993 result = SCAN_PAGE_COMPOUND;
1994 break;
1995 }
1996
1997 node = page_to_nid(page);
1998 if (khugepaged_scan_abort(node)) {
1999 result = SCAN_SCAN_ABORT;
2000 break;
2001 }
2002 khugepaged_node_load[node]++;
2003
2004 if (!PageLRU(page)) {
2005 result = SCAN_PAGE_LRU;
2006 break;
2007 }
2008
Song Liu99cb0db2019-09-23 15:38:00 -07002009 if (page_count(page) !=
2010 1 + page_mapcount(page) + page_has_private(page)) {
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002011 result = SCAN_PAGE_COUNT;
2012 break;
2013 }
2014
2015 /*
2016 * We probably should check if the page is referenced here, but
2017 * nobody would transfer pte_young() to PageReferenced() for us.
2018 * And rmap walk here is just too costly...
2019 */
2020
2021 present++;
2022
2023 if (need_resched()) {
Matthew Wilcox85b392d2017-12-04 15:06:23 -05002024 xas_pause(&xas);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002025 cond_resched_rcu();
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002026 }
2027 }
2028 rcu_read_unlock();
2029
2030 if (result == SCAN_SUCCEED) {
2031 if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
2032 result = SCAN_EXCEED_NONE_PTE;
2033 } else {
2034 node = khugepaged_find_target_node();
Song Liu579c5712019-09-23 15:37:57 -07002035 collapse_file(mm, file, start, hpage, node);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002036 }
2037 }
2038
2039 /* TODO: tracepoints */
2040}
2041#else
Song Liu579c5712019-09-23 15:37:57 -07002042static void khugepaged_scan_file(struct mm_struct *mm,
2043 struct file *file, pgoff_t start, struct page **hpage)
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002044{
2045 BUILD_BUG();
2046}
Song Liu27e1f822019-09-23 15:38:30 -07002047
2048static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
2049{
2050 return 0;
2051}
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002052#endif
2053
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002054static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2055 struct page **hpage)
2056 __releases(&khugepaged_mm_lock)
2057 __acquires(&khugepaged_mm_lock)
2058{
2059 struct mm_slot *mm_slot;
2060 struct mm_struct *mm;
2061 struct vm_area_struct *vma;
2062 int progress = 0;
2063
2064 VM_BUG_ON(!pages);
Lance Roy35f3aa32018-10-04 23:45:47 -07002065 lockdep_assert_held(&khugepaged_mm_lock);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002066
2067 if (khugepaged_scan.mm_slot)
2068 mm_slot = khugepaged_scan.mm_slot;
2069 else {
2070 mm_slot = list_entry(khugepaged_scan.mm_head.next,
2071 struct mm_slot, mm_node);
2072 khugepaged_scan.address = 0;
2073 khugepaged_scan.mm_slot = mm_slot;
2074 }
2075 spin_unlock(&khugepaged_mm_lock);
Song Liu27e1f822019-09-23 15:38:30 -07002076 khugepaged_collapse_pte_mapped_thps(mm_slot);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002077
2078 mm = mm_slot->mm;
Yang Shi3b454ad2018-01-31 16:18:28 -08002079 /*
2080	 * Don't wait for the mmap_lock (to avoid long wait times). Just move to
2081 * the next mm on the list.
2082 */
2083 vma = NULL;
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002084 if (unlikely(!mmap_read_trylock(mm)))
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07002085 goto breakouterloop_mmap_lock;
Yang Shi3b454ad2018-01-31 16:18:28 -08002086 if (likely(!khugepaged_test_exit(mm)))
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002087 vma = find_vma(mm, khugepaged_scan.address);
2088
2089 progress++;
2090 for (; vma; vma = vma->vm_next) {
2091 unsigned long hstart, hend;
2092
2093 cond_resched();
2094 if (unlikely(khugepaged_test_exit(mm))) {
2095 progress++;
2096 break;
2097 }
Song Liu50f8b922018-08-17 15:47:00 -07002098 if (!hugepage_vma_check(vma, vma->vm_flags)) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002099skip:
2100 progress++;
2101 continue;
2102 }
2103 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2104 hend = vma->vm_end & HPAGE_PMD_MASK;
2105 if (hstart >= hend)
2106 goto skip;
2107 if (khugepaged_scan.address > hend)
2108 goto skip;
2109 if (khugepaged_scan.address < hstart)
2110 khugepaged_scan.address = hstart;
2111 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
Matthew Wilcox (Oracle)396bcc52020-04-06 20:04:35 -07002112 if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma))
2113 goto skip;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002114
2115 while (khugepaged_scan.address < hend) {
2116 int ret;
2117 cond_resched();
2118 if (unlikely(khugepaged_test_exit(mm)))
2119 goto breakouterloop;
2120
2121 VM_BUG_ON(khugepaged_scan.address < hstart ||
2122 khugepaged_scan.address + HPAGE_PMD_SIZE >
2123 hend);
Song Liu99cb0db2019-09-23 15:38:00 -07002124 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
Matthew Wilcox (Oracle)396bcc52020-04-06 20:04:35 -07002125 struct file *file = get_file(vma->vm_file);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002126 pgoff_t pgoff = linear_page_index(vma,
2127 khugepaged_scan.address);
Song Liu99cb0db2019-09-23 15:38:00 -07002128
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002129 mmap_read_unlock(mm);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002130 ret = 1;
Song Liu579c5712019-09-23 15:37:57 -07002131 khugepaged_scan_file(mm, file, pgoff, hpage);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002132 fput(file);
2133 } else {
2134 ret = khugepaged_scan_pmd(mm, vma,
2135 khugepaged_scan.address,
2136 hpage);
2137 }
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002138 /* move to next address */
2139 khugepaged_scan.address += HPAGE_PMD_SIZE;
2140 progress += HPAGE_PMD_NR;
2141 if (ret)
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07002142 /* we released mmap_lock so break loop */
2143 goto breakouterloop_mmap_lock;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002144 if (progress >= pages)
2145 goto breakouterloop;
2146 }
2147 }
2148breakouterloop:
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002149 mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07002150breakouterloop_mmap_lock:
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002151
2152 spin_lock(&khugepaged_mm_lock);
2153 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2154 /*
2155 * Release the current mm_slot if this mm is about to die, or
2156 * if we scanned all vmas of this mm.
2157 */
2158 if (khugepaged_test_exit(mm) || !vma) {
2159 /*
2160 * Make sure that if mm_users is reaching zero while
2161 * khugepaged runs here, khugepaged_exit will find
2162 * mm_slot not pointing to the exiting mm.
2163 */
2164 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2165 khugepaged_scan.mm_slot = list_entry(
2166 mm_slot->mm_node.next,
2167 struct mm_slot, mm_node);
2168 khugepaged_scan.address = 0;
2169 } else {
2170 khugepaged_scan.mm_slot = NULL;
2171 khugepaged_full_scans++;
2172 }
2173
2174 collect_mm_slot(mm_slot);
2175 }
2176
2177 return progress;
2178}
2179
2180static int khugepaged_has_work(void)
2181{
2182 return !list_empty(&khugepaged_scan.mm_head) &&
2183 khugepaged_enabled();
2184}
2185
2186static int khugepaged_wait_event(void)
2187{
2188 return !list_empty(&khugepaged_scan.mm_head) ||
2189 kthread_should_stop();
2190}
2191
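/*
 * One scan round: repeatedly call khugepaged_scan_mm_slot() until
 * khugepaged_pages_to_scan entries have been scanned or the whole mm list
 * has been traversed for this round.
 */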
2192static void khugepaged_do_scan(void)
2193{
2194 struct page *hpage = NULL;
2195 unsigned int progress = 0, pass_through_head = 0;
2196 unsigned int pages = khugepaged_pages_to_scan;
2197 bool wait = true;
2198
2199 barrier(); /* write khugepaged_pages_to_scan to local stack */
2200
Kirill A. Shutemova980df32020-06-03 16:00:12 -07002201 lru_add_drain_all();
2202
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002203 while (progress < pages) {
2204 if (!khugepaged_prealloc_page(&hpage, &wait))
2205 break;
2206
2207 cond_resched();
2208
2209 if (unlikely(kthread_should_stop() || try_to_freeze()))
2210 break;
2211
2212 spin_lock(&khugepaged_mm_lock);
2213 if (!khugepaged_scan.mm_slot)
2214 pass_through_head++;
2215 if (khugepaged_has_work() &&
2216 pass_through_head < 2)
2217 progress += khugepaged_scan_mm_slot(pages - progress,
2218 &hpage);
2219 else
2220 progress = pages;
2221 spin_unlock(&khugepaged_mm_lock);
2222 }
2223
2224 if (!IS_ERR_OR_NULL(hpage))
2225 put_page(hpage);
2226}
2227
2228static bool khugepaged_should_wakeup(void)
2229{
2230 return kthread_should_stop() ||
2231 time_after_eq(jiffies, khugepaged_sleep_expire);
2232}
2233
2234static void khugepaged_wait_work(void)
2235{
2236 if (khugepaged_has_work()) {
2237 const unsigned long scan_sleep_jiffies =
2238 msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2239
2240 if (!scan_sleep_jiffies)
2241 return;
2242
2243 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2244 wait_event_freezable_timeout(khugepaged_wait,
2245 khugepaged_should_wakeup(),
2246 scan_sleep_jiffies);
2247 return;
2248 }
2249
2250 if (khugepaged_enabled())
2251 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2252}
2253
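/* Main loop of the khugepaged kernel thread: scan, then sleep (freezable). */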
2254static int khugepaged(void *none)
2255{
2256 struct mm_slot *mm_slot;
2257
2258 set_freezable();
2259 set_user_nice(current, MAX_NICE);
2260
2261 while (!kthread_should_stop()) {
2262 khugepaged_do_scan();
2263 khugepaged_wait_work();
2264 }
2265
2266 spin_lock(&khugepaged_mm_lock);
2267 mm_slot = khugepaged_scan.mm_slot;
2268 khugepaged_scan.mm_slot = NULL;
2269 if (mm_slot)
2270 collect_mm_slot(mm_slot);
2271 spin_unlock(&khugepaged_mm_lock);
2272 return 0;
2273}
2274
2275static void set_recommended_min_free_kbytes(void)
2276{
2277 struct zone *zone;
2278 int nr_zones = 0;
2279 unsigned long recommended_min;
2280
Joonsoo Kimb7d349c2018-04-10 16:30:27 -07002281 for_each_populated_zone(zone) {
2282 /*
2283 * We don't need to worry about fragmentation of
2284 * ZONE_MOVABLE since it only has movable pages.
2285 */
2286 if (zone_idx(zone) > gfp_zone(GFP_USER))
2287 continue;
2288
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002289 nr_zones++;
Joonsoo Kimb7d349c2018-04-10 16:30:27 -07002290 }
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002291
2292 /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2293 recommended_min = pageblock_nr_pages * nr_zones * 2;
2294
2295 /*
2296 * Make sure that on average at least two pageblocks are almost free
2297 * of another type, one for a migratetype to fall back to and a
2298	 * second to avoid subsequent fallbacks of other types. There are 3
2299 * MIGRATE_TYPES we care about.
2300 */
2301 recommended_min += pageblock_nr_pages * nr_zones *
2302 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
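	/*
	 * Example (exact values depend on the configuration): with 2MB
	 * pageblocks (pageblock_nr_pages == 512), three populated zones and
	 * MIGRATE_PCPTYPES == 3, this gives
	 * 512 * 3 * 2 + 512 * 3 * 3 * 3 = 16896 pages, i.e. ~66MB with 4K
	 * pages, before the 5% of lowmem cap below.
	 */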
2303
2304	/* never allow reserving more than 5% of the lowmem */
2305 recommended_min = min(recommended_min,
2306 (unsigned long) nr_free_buffer_pages() / 20);
2307 recommended_min <<= (PAGE_SHIFT-10);
2308
2309 if (recommended_min > min_free_kbytes) {
2310 if (user_min_free_kbytes >= 0)
2311 pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2312 min_free_kbytes, recommended_min);
2313
2314 min_free_kbytes = recommended_min;
2315 }
2316 setup_per_zone_wmarks();
2317}
2318
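/*
 * Start or stop the khugepaged thread depending on khugepaged_enabled(),
 * e.g. after the transparent_hugepage sysfs settings change.  Serialized
 * by khugepaged_mutex.
 */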
2319int start_stop_khugepaged(void)
2320{
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002321 int err = 0;
2322
2323 mutex_lock(&khugepaged_mutex);
2324 if (khugepaged_enabled()) {
2325 if (!khugepaged_thread)
2326 khugepaged_thread = kthread_run(khugepaged, NULL,
2327 "khugepaged");
2328 if (IS_ERR(khugepaged_thread)) {
2329 pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2330 err = PTR_ERR(khugepaged_thread);
2331 khugepaged_thread = NULL;
2332 goto fail;
2333 }
2334
2335 if (!list_empty(&khugepaged_scan.mm_head))
2336 wake_up_interruptible(&khugepaged_wait);
2337
2338 set_recommended_min_free_kbytes();
2339 } else if (khugepaged_thread) {
2340 kthread_stop(khugepaged_thread);
2341 khugepaged_thread = NULL;
2342 }
2343fail:
2344 mutex_unlock(&khugepaged_mutex);
2345 return err;
2346}
Vijay Balakrishna4aab2be2020-10-10 23:16:40 -07002347
2348void khugepaged_min_free_kbytes_update(void)
2349{
2350 mutex_lock(&khugepaged_mutex);
2351 if (khugepaged_enabled() && khugepaged_thread)
2352 set_recommended_min_free_kbytes();
2353 mutex_unlock(&khugepaged_mutex);
2354}