// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_PMD_NULL,
	SCAN_EXCEED_NONE_PTE,
	SCAN_PTE_NON_PRESENT,
	SCAN_PTE_UFFD_WP,
	SCAN_PAGE_RO,
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_PAGE_NULL,
	SCAN_SCAN_ABORT,
	SCAN_PAGE_COUNT,
	SCAN_PAGE_LRU,
	SCAN_PAGE_LOCK,
	SCAN_PAGE_ANON,
	SCAN_PAGE_COMPOUND,
	SCAN_ANY_PROCESS,
	SCAN_VMA_NULL,
	SCAN_VMA_CHECK,
	SCAN_ADDRESS_RANGE,
	SCAN_SWAP_CACHE_PAGE,
	SCAN_DEL_PAGE_LRU,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_EXCEED_SWAP_PTE,
	SCAN_TRUNCATED,
	SCAN_PAGE_HAS_PRIVATE,
};

#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>

/* default scan 8*512 ptes (or vmas) every 30 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default, collapse a hugepage if at least one pte is mapped the
 * way it would have been mapped had the vma been large enough at
 * page-fault time.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;

#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

#define MAX_PTE_MAPPED_THP 8

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 * @nr_pte_mapped_thp: number of pte-mapped THP addresses recorded below
 * @pte_mapped_thp: addresses of the pte-mapped THPs to collapse later
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;

	/* pte-mapped THP in this mm */
	int nr_pte_mapped_thp;
	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that mm_slot to be scanned
 *
 * There is only one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};

#ifdef CONFIG_SYSFS
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
	       scan_sleep_millisecs_store);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
	       alloc_sleep_millisecs_store);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long pages;

	err = kstrtoul(buf, 10, &pages);
	if (err || !pages || pages > UINT_MAX)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
	       pages_to_scan_store);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t khugepaged_defrag_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR(defrag, 0644, khugepaged_defrag_show,
	       khugepaged_defrag_store);

/*
 * max_ptes_none controls whether khugepaged should collapse hugepages
 * over unmapped ptes, potentially increasing the memory footprint of
 * the vmas. When max_ptes_none is 0, khugepaged will not reduce the
 * amount of free memory in the system as it runs. Increasing
 * max_ptes_none instead potentially reduces the free memory in the
 * system during the khugepaged scan.
 */
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
	       khugepaged_max_ptes_none_store);
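
/*
 * Editor's sketch (not part of the original source): these knobs are
 * exposed under /sys/kernel/mm/transparent_hugepage/khugepaged/ via
 * the "khugepaged" attribute group below. Assuming a 4K-page x86-64
 * build (HPAGE_PMD_NR == 512), a maximally greedy configuration is:
 *
 *	# echo 511 > /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none
 *
 * which lets a single mapped pte justify collapsing a whole 2M range.
 */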

static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
	       khugepaged_max_ptes_swap_store);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	NULL,
};

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */

int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes good for khugepaged to scan,
		 * register it here without waiting for a page fault
		 * that may not happen any time soon.
		 */
		if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
				khugepaged_enter_vma_merge(vma, *vm_flags))
			return -ENOMEM;
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}

int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct mm_slot),
					  __alignof__(struct mm_slot), 0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;

	return 0;
}
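
/*
 * Worked example (editor's note, assuming 4K base pages where
 * HPAGE_PMD_NR == 512): the defaults above come out to
 * pages_to_scan = 512 * 8 = 4096 ptes per scan pass (one pass every
 * 10s per khugepaged_scan_sleep_millisecs), max_ptes_none = 511, and
 * max_ptes_swap = 512 / 8 = 64.
 */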

void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;

	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
		if (mm == mm_slot->mm)
			return mm_slot;

	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	mm_slot->mm = mm;
	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
}

static inline int khugepaged_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

static bool hugepage_vma_check(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	if ((!(vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
	    (vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;

	if (shmem_file(vma->vm_file) ||
	    (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
	     vma->vm_file &&
	     (vm_flags & VM_DENYWRITE))) {
		return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				HPAGE_PMD_NR);
	}
	if (!vma->anon_vma || vma->vm_ops)
		return false;
	if (vma_is_temporary_stack(vma))
		return false;
	return !(vm_flags & VM_NO_KHUGEPAGED);
}
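
/*
 * Illustration (editor's note): for file-backed vmas the check above
 * requires the file offset to be hugepage-aligned with the virtual
 * address. E.g. a shmem mapping with vm_start = 0x200000 (pfn 0x200)
 * and vm_pgoff = 0x200 passes IS_ALIGNED(0x200 - 0x200, 512); the
 * same mapping with vm_pgoff = 1 could never be pmd-mapped and is
 * rejected.
 */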

int __khugepaged_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return -ENOMEM;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		free_mm_slot(mm_slot);
		return 0;
	}

	spin_lock(&khugepaged_mm_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	mmgrab(mm);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);

	return 0;
}

int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	unsigned long hstart, hend;

	/*
	 * For non-shmem files, khugepaged only supports read-only
	 * mappings. It does not yet work on special mappings, and
	 * file-private shmem THP is not supported.
	 */
	if (!hugepage_vma_check(vma, vm_flags))
		return 0;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (hstart < hend)
		return khugepaged_enter(vma, vm_flags);
	return 0;
}

void __khugepaged_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		free_mm_slot(mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * khugepaged_test_exit() (which is guaranteed to run
		 * under mmap_sem read mode). Stop here (after we
		 * return all pagetables will be destroyed) until
		 * khugepaged has finished working on the pagetables
		 * under the mmap_sem.
		 */
		down_write(&mm->mmap_sem);
		up_write(&mm->mmap_sem);
	}
}
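
/*
 * Editor's note on the empty lock/unlock pair above: taking and
 * immediately releasing mmap_sem in write mode is a barrier idiom.
 * down_write() cannot return until every reader that entered before
 * it (here, khugepaged working under down_read) has left, so after
 * up_write() it is safe for the caller to destroy the page tables.
 * A minimal sketch of the idiom:
 *
 *	down_write(&mm->mmap_sem);	// waits out all current readers
 *	up_write(&mm->mmap_sem);	// nothing protected; ordering only
 */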

static void release_pte_page(struct page *page)
{
	dec_node_page_state(page, NR_ISOLATED_ANON + page_is_file_lru(page));
	unlock_page(page);
	putback_lru_page(page);
}

static void release_pte_pages(pte_t *pte, pte_t *_pte)
{
	while (--_pte >= pte) {
		pte_t pteval = *_pte;
		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)))
			release_pte_page(pte_page(pteval));
	}
}

static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte)
{
	struct page *page = NULL;
	pte_t *_pte;
	int none_or_zero = 0, result = 0, referenced = 0;
	bool writable = false;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval) || (pte_present(pteval) &&
				is_zero_pfn(pte_pfn(pteval)))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out;
		}

		/* TODO: teach khugepaged to collapse THP mapped with pte */
		if (PageCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			goto out;
		}

		VM_BUG_ON_PAGE(!PageAnon(page), page);

		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			result = SCAN_PAGE_LOCK;
			goto out;
		}

		/*
		 * Cannot use mapcount: we can't collapse if there's a
		 * gup pin. The page must only be referenced by the
		 * scanned process and the page swap cache.
		 */
		if (page_count(page) != 1 + PageSwapCache(page)) {
			unlock_page(page);
			result = SCAN_PAGE_COUNT;
			goto out;
		}
		if (pte_write(pteval)) {
			writable = true;
		} else {
			if (PageSwapCache(page) &&
			    !reuse_swap_page(page, NULL)) {
				unlock_page(page);
				result = SCAN_SWAP_CACHE_PAGE;
				goto out;
			}
			/*
			 * Page is not in the swap cache. It can be collapsed
			 * into a THP.
			 */
		}

		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (isolate_lru_page(page)) {
			unlock_page(page);
			result = SCAN_DEL_PAGE_LRU;
			goto out;
		}
		inc_node_page_state(page,
				NR_ISOLATED_ANON + page_is_file_lru(page));
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageLRU(page), page);

		/* There should be enough young ptes to collapse the page */
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;
	}
	if (likely(writable)) {
		if (likely(referenced)) {
			result = SCAN_SUCCEED;
			trace_mm_collapse_huge_page_isolate(page, none_or_zero,
							    referenced, writable, result);
			return 1;
		}
	} else {
		result = SCAN_PAGE_RO;
	}

out:
	release_pte_pages(pte, _pte);
	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
					    referenced, writable, result);
	return 0;
}
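
/*
 * Worked example for the page_count() check above (editor's note): an
 * anonymous page mapped by exactly one process and absent from the
 * swap cache is expected at page_count() == 1; if it is also in the
 * swap cache, PageSwapCache(page) == 1 raises the expectation to 2.
 * Any extra reference, e.g. a transient gup pin, makes the count
 * higher and the collapse is conservatively aborted with
 * SCAN_PAGE_COUNT.
 */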

static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl)
{
	pte_t *_pte;
	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, page++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		struct page *src_page;

		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				/*
				 * paravirt calls inside pte_clear here are
				 * superfluous.
				 */
				pte_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
			}
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
			release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			/*
			 * paravirt calls inside pte_clear here are
			 * superfluous.
			 */
			pte_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, false);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}
	}
}

static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	freezable_schedule_timeout_interruptible(
		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}

static int khugepaged_node_load[MAX_NUMNODES];

static bool khugepaged_scan_abort(int nid)
{
	int i;

	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_mode)
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (khugepaged_node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!khugepaged_node_load[i])
			continue;
		if (node_distance(nid, i) > node_reclaim_distance)
			return true;
	}
	return false;
}

/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}

#ifdef CONFIG_NUMA
static int khugepaged_find_target_node(void)
{
	static int last_khugepaged_target_node = NUMA_NO_NODE;
	int nid, target_node = 0, max_value = 0;

	/* find the first node with the max number of hits */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (khugepaged_node_load[nid] > max_value) {
			max_value = khugepaged_node_load[nid];
			target_node = nid;
		}

	/* do some balancing if several nodes have the same hit count */
	if (target_node <= last_khugepaged_target_node)
		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
		     nid++)
			if (max_value == khugepaged_node_load[nid]) {
				target_node = nid;
				break;
			}

	last_khugepaged_target_node = target_node;
	return target_node;
}
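
/*
 * Illustration (editor's note): if nodes 0 and 2 tie for the most
 * hits and node 2 was chosen on the previous pass
 * (last_khugepaged_target_node == 2), the first loop picks node 0 and
 * the second loop finds no later node matching max_value, so node 0
 * is kept. Had node 0 been chosen last time, the second loop would
 * advance to node 2, round-robining among equally loaded nodes.
 */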

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (IS_ERR(*hpage)) {
		if (!*wait)
			return false;

		*wait = false;
		*hpage = NULL;
		khugepaged_alloc_sleep();
	} else if (*hpage) {
		put_page(*hpage);
		*hpage = NULL;
	}

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON_PAGE(*hpage, *hpage);

	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		*hpage = ERR_PTR(-ENOMEM);
		return NULL;
	}

	prep_transhuge_page(*hpage);
	count_vm_event(THP_COLLAPSE_ALLOC);
	return *hpage;
}
#else
static int khugepaged_find_target_node(void)
{
	return 0;
}

static inline struct page *alloc_khugepaged_hugepage(void)
{
	struct page *page;

	page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
			   HPAGE_PMD_ORDER);
	if (page)
		prep_transhuge_page(page);
	return page;
}

static struct page *khugepaged_alloc_hugepage(bool *wait)
{
	struct page *hpage;

	do {
		hpage = alloc_khugepaged_hugepage();
		if (!hpage) {
			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
			if (!*wait)
				return NULL;

			*wait = false;
			khugepaged_alloc_sleep();
		} else
			count_vm_event(THP_COLLAPSE_ALLOC);
	} while (unlikely(!hpage) && likely(khugepaged_enabled()));

	return hpage;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (!*hpage)
		*hpage = khugepaged_alloc_hugepage(wait);

	if (unlikely(!*hpage))
		return false;

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON(!*hpage);

	return *hpage;
}
#endif

/*
 * If the mmap_sem was temporarily dropped, revalidate the vma after
 * re-taking it. Returns 0 on success, otherwise a non-zero scan code.
 */
static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
		struct vm_area_struct **vmap)
{
	struct vm_area_struct *vma;
	unsigned long hstart, hend;

	if (unlikely(khugepaged_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
		return SCAN_ADDRESS_RANGE;
	if (!hugepage_vma_check(vma, vma->vm_flags))
		return SCAN_VMA_CHECK;
	return 0;
}

/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if khugepaged_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held,
 * but with mmap_sem held to protect against vma changes.
 */
static bool __collapse_huge_page_swapin(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmd,
					int referenced)
{
	int swapped_in = 0;
	vm_fault_t ret = 0;
	struct vm_fault vmf = {
		.vma = vma,
		.address = address,
		.flags = FAULT_FLAG_ALLOW_RETRY,
		.pmd = pmd,
		.pgoff = linear_page_index(vma, address),
	};

	/* we only decide to swap in if there are enough young ptes */
	if (referenced < HPAGE_PMD_NR/2) {
		trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
		return false;
	}
	vmf.pte = pte_offset_map(pmd, address);
	for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
			vmf.pte++, vmf.address += PAGE_SIZE) {
		vmf.orig_pte = *vmf.pte;
		if (!is_swap_pte(vmf.orig_pte))
			continue;
		swapped_in++;
		ret = do_swap_page(&vmf);

		/* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
		if (ret & VM_FAULT_RETRY) {
			down_read(&mm->mmap_sem);
			if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
				/* vma is no longer available, don't continue to swapin */
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
			/* check if the pmd is still valid */
			if (mm_find_pmd(mm, address) != pmd) {
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
		}
		if (ret & VM_FAULT_ERROR) {
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			return false;
		}
		/* pte is unmapped now, we need to map it */
		vmf.pte = pte_offset_map(pmd, vmf.address);
	}
	vmf.pte--;
	pte_unmap(vmf.pte);
	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
	return true;
}
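
/*
 * Worked example (editor's note, HPAGE_PMD_NR == 512 assumed): the
 * referenced < HPAGE_PMD_NR/2 test above means swap-in is only
 * attempted when at least 256 of the 512 ptes in the range were seen
 * young by khugepaged_scan_pmd(); a sparsely referenced range is not
 * considered worth the I/O of pulling its swapped pages back in.
 */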

static void collapse_huge_page(struct mm_struct *mm,
				   unsigned long address,
				   struct page **hpage,
				   int node, int referenced)
{
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *new_page;
	spinlock_t *pmd_ptl, *pte_ptl;
	int isolated = 0, result = 0;
	struct mem_cgroup *memcg;
	struct vm_area_struct *vma;
	struct mmu_notifier_range range;
	gfp_t gfp;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/* Only allocate from the target node */
	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;

	/*
	 * Before allocating the hugepage, release the mmap_sem read lock.
	 * The allocation can take potentially a long time if it involves
	 * sync compaction, and we do not need to hold the mmap_sem during
	 * that. We will recheck the vma after taking it again in write mode.
	 */
	up_read(&mm->mmap_sem);
	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out_nolock;
	}

	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out_nolock;
	}

	down_read(&mm->mmap_sem);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result) {
		mem_cgroup_cancel_charge(new_page, memcg, true);
		up_read(&mm->mmap_sem);
		goto out_nolock;
	}

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		mem_cgroup_cancel_charge(new_page, memcg, true);
		up_read(&mm->mmap_sem);
		goto out_nolock;
	}

	/*
	 * __collapse_huge_page_swapin always returns with mmap_sem locked.
	 * If it fails, we release mmap_sem and jump out_nolock.
	 * Continuing to collapse causes inconsistency.
	 */
	if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {
		mem_cgroup_cancel_charge(new_page, memcg, true);
		up_read(&mm->mmap_sem);
		goto out_nolock;
	}

	up_read(&mm->mmap_sem);
	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 */
	down_write(&mm->mmap_sem);
	result = SCAN_ANY_PROCESS;
	if (!mmget_still_valid(mm))
		goto out;
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result)
		goto out;
	/* check if the pmd is still valid */
	if (mm_find_pmd(mm, address) != pmd)
		goto out;

	anon_vma_lock_write(vma->anon_vma);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
				address, address + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	pte = pte_offset_map(pmd, address);
	pte_ptl = pte_lockptr(mm, pmd);

	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
	/*
	 * After this gup_fast can't run anymore. This also removes
	 * any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address
	 * to avoid the risk of CPU bugs in that area.
	 */
	_pmd = pmdp_collapse_flush(vma, address, pmd);
	spin_unlock(pmd_ptl);
	mmu_notifier_invalidate_range_end(&range);

	spin_lock(pte_ptl);
	isolated = __collapse_huge_page_isolate(vma, address, pte);
	spin_unlock(pte_ptl);

	if (unlikely(!isolated)) {
		pte_unmap(pte);
		spin_lock(pmd_ptl);
		BUG_ON(!pmd_none(*pmd));
		/*
		 * We can only use set_pmd_at when establishing
		 * hugepmds and never for establishing regular pmds that
		 * point to regular pagetables. Use pmd_populate for that.
		 */
		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
		spin_unlock(pmd_ptl);
		anon_vma_unlock_write(vma->anon_vma);
		result = SCAN_FAIL;
		goto out;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock_write(vma->anon_vma);

	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
	pte_unmap(pte);
	__SetPageUptodate(new_page);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);

	/*
	 * spin_lock() below is not the equivalent of smp_wmb(), so
	 * this is needed to avoid the copy_huge_page writes to become
	 * visible after the set_pmd_at() write.
	 */
	smp_wmb();

	spin_lock(pmd_ptl);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(new_page, vma, address, true);
	mem_cgroup_commit_charge(new_page, memcg, false, true);
	count_memcg_events(memcg, THP_COLLAPSE_ALLOC, 1);
	lru_cache_add_active_or_unevictable(new_page, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	spin_unlock(pmd_ptl);

	*hpage = NULL;

	khugepaged_pages_collapsed++;
	result = SCAN_SUCCEED;
out_up_write:
	up_write(&mm->mmap_sem);
out_nolock:
	trace_mm_collapse_huge_page(mm, isolated, result);
	return;
out:
	mem_cgroup_cancel_charge(new_page, memcg, true);
	goto out_up_write;
}

static int khugepaged_scan_pmd(struct mm_struct *mm,
			       struct vm_area_struct *vma,
			       unsigned long address,
			       struct page **hpage)
{
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int ret = 0, none_or_zero = 0, result = 0, referenced = 0;
	struct page *page = NULL;
	unsigned long _address;
	spinlock_t *ptl;
	int node = NUMA_NO_NODE, unmapped = 0;
	bool writable = false;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		goto out;
	}

	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (is_swap_pte(pteval)) {
			if (++unmapped <= khugepaged_max_ptes_swap) {
				/*
				 * Always be strict with uffd-wp
				 * enabled swap entries. Please see
				 * comment below for pte_uffd_wp().
				 */
				if (pte_swp_uffd_wp(pteval)) {
					result = SCAN_PTE_UFFD_WP;
					goto out_unmap;
				}
				continue;
			} else {
				result = SCAN_EXCEED_SWAP_PTE;
				goto out_unmap;
			}
		}
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out_unmap;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out_unmap;
		}
		if (pte_uffd_wp(pteval)) {
			/*
			 * Don't collapse the page if any of the small
			 * PTEs are armed with uffd write protection.
			 * Here we could also mark the new huge pmd as
			 * write protected if any of the small ones is
			 * marked, but that could bring unknown
			 * userfault messages that fall outside of
			 * the registered range. So, just be simple.
			 */
			result = SCAN_PTE_UFFD_WP;
			goto out_unmap;
		}
		if (pte_write(pteval))
			writable = true;

		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out_unmap;
		}

		/* TODO: teach khugepaged to collapse THP mapped with pte */
		if (PageCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			goto out_unmap;
		}

		/*
		 * Record which node the original page is from and save this
		 * information to khugepaged_node_load[].
		 * Khugepaged will allocate the hugepage from the node with
		 * the most hits recorded.
		 */
		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			goto out_unmap;
		}
		khugepaged_node_load[node]++;
		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			goto out_unmap;
		}
		if (PageLocked(page)) {
			result = SCAN_PAGE_LOCK;
			goto out_unmap;
		}
		if (!PageAnon(page)) {
			result = SCAN_PAGE_ANON;
			goto out_unmap;
		}

		/*
		 * Cannot use mapcount: we can't collapse if there's a
		 * gup pin. The page must only be referenced by the
		 * scanned process and the page swap cache.
		 */
		if (page_count(page) != 1 + PageSwapCache(page)) {
			result = SCAN_PAGE_COUNT;
			goto out_unmap;
		}
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;
	}
	if (writable) {
		if (referenced) {
			result = SCAN_SUCCEED;
			ret = 1;
		} else {
			result = SCAN_LACK_REFERENCED_PAGE;
		}
	} else {
		result = SCAN_PAGE_RO;
	}
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (ret) {
		node = khugepaged_find_target_node();
		/* collapse_huge_page will return with the mmap_sem released */
		collapse_huge_page(mm, address, hpage, node, referenced);
	}
out:
	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
				     none_or_zero, result, unmapped);
	return ret;
}

static void collect_mm_slot(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;

	lockdep_assert_held(&khugepaged_mm_lock);

	if (khugepaged_test_exit(mm)) {
		/* free mm_slot */
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
		 *
		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		 */

		/* khugepaged_mm_lock actually not necessary for the below */
		free_mm_slot(mm_slot);
		mmdrop(mm);
	}
}
1281
Matthew Wilcox (Oracle)396bcc52020-04-06 20:04:35 -07001282#ifdef CONFIG_SHMEM
Song Liu27e1f822019-09-23 15:38:30 -07001283/*
1284 * Notify khugepaged that given addr of the mm is pte-mapped THP. Then
1285 * khugepaged should try to collapse the page table.
1286 */
1287static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
1288 unsigned long addr)
1289{
1290 struct mm_slot *mm_slot;
1291
1292 VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
1293
1294 spin_lock(&khugepaged_mm_lock);
1295 mm_slot = get_mm_slot(mm);
1296 if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
1297 mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
1298 spin_unlock(&khugepaged_mm_lock);
1299 return 0;
1300}

/**
 * Try to collapse a pte-mapped THP for mm at address haddr.
 *
 * This function checks whether all the PTEs in the PMD are pointing to the
 * right THP. If so, retract the page table so the THP can refault in as
 * pmd-mapped.
 */
void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
{
	unsigned long haddr = addr & HPAGE_PMD_MASK;
	struct vm_area_struct *vma = find_vma(mm, haddr);
	struct page *hpage = NULL;
	pte_t *start_pte, *pte;
	pmd_t *pmd, _pmd;
	spinlock_t *ptl;
	int count = 0;
	int i;

	if (!vma || !vma->vm_file ||
	    vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE)
		return;

	/*
	 * This vm_flags may not have VM_HUGEPAGE if the page was not
	 * collapsed by this mm. But we can still collapse if the page is
	 * a valid THP. Add extra VM_HUGEPAGE so hugepage_vma_check()
	 * will not fail the vma for missing VM_HUGEPAGE.
	 */
	if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
		return;

	pmd = mm_find_pmd(mm, haddr);
	if (!pmd)
		return;

	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);

	/* step 1: check all mapped PTEs are to the right huge page */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		struct page *page;

		/* empty pte, skip */
		if (pte_none(*pte))
			continue;

		/* page swapped out, abort */
		if (!pte_present(*pte))
			goto abort;

		page = vm_normal_page(vma, addr, *pte);

		if (!page || !PageCompound(page))
			goto abort;

		if (!hpage) {
			hpage = compound_head(page);
			/*
			 * The mapping of the THP should not change.
			 *
			 * Note that uprobe, debugger, or MAP_PRIVATE may
			 * change the page table, but the new page will
			 * not pass PageCompound() check.
			 */
			if (WARN_ON(hpage->mapping != vma->vm_file->f_mapping))
				goto abort;
		}

		/*
		 * Confirm the page maps to the correct subpage.
		 *
		 * Note that uprobe, debugger, or MAP_PRIVATE may change
		 * the page table, but the new page will not pass
		 * PageCompound() check.
		 */
		if (WARN_ON(hpage + i != page))
			goto abort;
		count++;
	}

	/* step 2: adjust rmap */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		struct page *page;

		if (pte_none(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		page_remove_rmap(page, false);
	}

	pte_unmap_unlock(start_pte, ptl);

	/* step 3: set proper refcount and mm_counters. */
	if (hpage) {
		page_ref_sub(hpage, count);
		add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
	}

	/* step 4: collapse pmd */
	ptl = pmd_lock(vma->vm_mm, pmd);
	_pmd = pmdp_collapse_flush(vma, addr, pmd);
	spin_unlock(ptl);
	mm_dec_nr_ptes(mm);
	pte_free(mm, pmd_pgtable(_pmd));
	return;

abort:
	pte_unmap_unlock(start_pte, ptl);
}

static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;
	int i;

	if (likely(mm_slot->nr_pte_mapped_thp == 0))
		return 0;

	if (!down_write_trylock(&mm->mmap_sem))
		return -EBUSY;

	if (unlikely(khugepaged_test_exit(mm)))
		goto out;

	for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
		collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);

out:
	mm_slot->nr_pte_mapped_thp = 0;
	up_write(&mm->mmap_sem);
	return 0;
}
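
/*
 * Editor's note: an mm_slot can remember at most MAX_PTE_MAPPED_THP
 * (8) addresses between scans; khugepaged_add_pte_mapped_thp()
 * silently drops further addresses, and a failed down_write_trylock()
 * above returns -EBUSY without clearing nr_pte_mapped_thp, so the
 * batch is retried on a later pass.
 */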

static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
{
	struct vm_area_struct *vma;
	unsigned long addr;
	pmd_t *pmd, _pmd;

	i_mmap_lock_write(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
		 * got written to. These VMAs are likely not worth taking
		 * down_write(mmap_sem) for, as the PMD-mapping is likely
		 * to be split later anyway.
		 *
		 * Note that the vma->anon_vma check is racy: it can be set
		 * up after the check but before we take mmap_sem by the
		 * fault path. But the page lock would prevent establishing
		 * any new ptes of the page, so we are safe.
		 *
		 * An alternative would be to drop the check, but to check
		 * that the page table is clear before calling
		 * pmdp_collapse_flush() under ptl. That has a higher chance
		 * of recovering a THP for the VMA, but also a higher cost.
		 */
		if (vma->anon_vma)
			continue;
		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		if (addr & ~HPAGE_PMD_MASK)
			continue;
		if (vma->vm_end < addr + HPAGE_PMD_SIZE)
			continue;
		pmd = mm_find_pmd(vma->vm_mm, addr);
		if (!pmd)
			continue;
		/*
		 * We need exclusive mmap_sem to retract the page table.
		 *
		 * We use trylock due to lock inversion: we need to acquire
		 * mmap_sem while holding the page lock. The fault path does
		 * it in the reverse order. Trylock is a way to avoid
		 * deadlock.
		 */
		if (down_write_trylock(&vma->vm_mm->mmap_sem)) {
			spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
			/* assume page table is clear */
			_pmd = pmdp_collapse_flush(vma, addr, pmd);
			spin_unlock(ptl);
			up_write(&vma->vm_mm->mmap_sem);
			mm_dec_nr_ptes(vma->vm_mm);
			pte_free(vma->vm_mm, pmd_pgtable(_pmd));
		} else {
			/* Try again later */
			khugepaged_add_pte_mapped_thp(vma->vm_mm, addr);
		}
	}
	i_mmap_unlock_write(mapping);
}
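
/*
 * Editor's sketch of the lock-inversion note above: the fault path
 * takes mmap_sem (read) and then the page lock, while this path holds
 * the page lock and wants mmap_sem (write). Taken unconditionally,
 * that is a classic ABBA deadlock:
 *
 *	fault path:			retract_page_tables():
 *	down_read(mmap_sem)		lock_page(page)
 *	lock_page(page)			down_write(mmap_sem)	<- deadlock
 *
 * down_write_trylock() just backs off instead, deferring the work via
 * khugepaged_add_pte_mapped_thp().
 */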

/**
 * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
 *
 * Basic scheme is simple, details are more complex:
 *  - allocate and lock a new huge page;
 *  - scan page cache replacing old pages with the new one
 *    + swap/gup in pages if necessary;
 *    + fill in gaps;
 *    + keep old pages around in case rollback is required;
 *  - if replacing succeeds:
 *    + copy data over;
 *    + free old pages;
 *    + unlock huge page;
 *  - if replacing failed:
 *    + put all pages back and unfreeze them;
 *    + restore gaps in the page cache;
 *    + unlock and free huge page;
 */
Song Liu579c5712019-09-23 15:37:57 -07001511static void collapse_file(struct mm_struct *mm,
1512 struct file *file, pgoff_t start,
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001513 struct page **hpage, int node)
1514{
Song Liu579c5712019-09-23 15:37:57 -07001515 struct address_space *mapping = file->f_mapping;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001516 gfp_t gfp;
Matthew Wilcox77da9382017-12-04 14:56:08 -05001517 struct page *new_page;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001518 struct mem_cgroup *memcg;
1519 pgoff_t index, end = start + HPAGE_PMD_NR;
1520 LIST_HEAD(pagelist);
Matthew Wilcox77da9382017-12-04 14:56:08 -05001521 XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001522 int nr_none = 0, result = SCAN_SUCCEED;
Song Liu99cb0db2019-09-23 15:38:00 -07001523 bool is_shmem = shmem_file(file);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001524
Song Liu99cb0db2019-09-23 15:38:00 -07001525 VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001526 VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1527
1528 /* Only allocate from the target node */
Michal Hocko41b61672017-01-10 16:57:42 -08001529 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001530
1531 new_page = khugepaged_alloc_page(hpage, gfp, node);
1532 if (!new_page) {
1533 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1534 goto out;
1535 }
1536
Michal Hocko2a70f6a2018-04-10 16:29:30 -07001537 if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001538 result = SCAN_CGROUP_CHARGE_FAIL;
1539 goto out;
1540 }
1541
Hugh Dickins95feeab2018-11-30 14:10:50 -08001542 /* This will be less messy when we use multi-index entries */
1543 do {
1544 xas_lock_irq(&xas);
1545 xas_create_range(&xas);
1546 if (!xas_error(&xas))
1547 break;
1548 xas_unlock_irq(&xas);
1549 if (!xas_nomem(&xas, GFP_KERNEL)) {
1550 mem_cgroup_cancel_charge(new_page, memcg, true);
1551 result = SCAN_FAIL;
1552 goto out;
1553 }
1554 } while (1);
1555
Hugh Dickins042a3082018-11-30 14:10:39 -08001556 __SetPageLocked(new_page);
Song Liu99cb0db2019-09-23 15:38:00 -07001557 if (is_shmem)
1558 __SetPageSwapBacked(new_page);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001559 new_page->index = start;
1560 new_page->mapping = mapping;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001561
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001562 /*
Hugh Dickins87c460a2018-11-30 14:10:43 -08001563 * At this point the new_page is locked and not up-to-date.
1564 * It's safe to insert it into the page cache, because nobody would
1565 * be able to map it or use it in another way until we unlock it.
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001566 */
1567
Matthew Wilcox77da9382017-12-04 14:56:08 -05001568 xas_set(&xas, start);
1569 for (index = start; index < end; index++) {
1570 struct page *page = xas_next(&xas);
1571
1572 VM_BUG_ON(index != xas.xa_index);
Song Liu99cb0db2019-09-23 15:38:00 -07001573 if (is_shmem) {
1574 if (!page) {
1575 /*
1576 * Stop if extent has been truncated or
1577 * hole-punched, and is now completely
1578 * empty.
1579 */
1580 if (index == start) {
1581 if (!xas_next_entry(&xas, end - 1)) {
1582 result = SCAN_TRUNCATED;
1583 goto xa_locked;
1584 }
1585 xas_set(&xas, index);
1586 }
1587 if (!shmem_charge(mapping->host, 1)) {
1588 result = SCAN_FAIL;
Hugh Dickins042a3082018-11-30 14:10:39 -08001589 goto xa_locked;
Hugh Dickins701270f2018-11-30 14:10:25 -08001590 }
Song Liu99cb0db2019-09-23 15:38:00 -07001591 xas_store(&xas, new_page);
1592 nr_none++;
1593 continue;
Hugh Dickins701270f2018-11-30 14:10:25 -08001594 }
Song Liu99cb0db2019-09-23 15:38:00 -07001595
1596 if (xa_is_value(page) || !PageUptodate(page)) {
1597 xas_unlock_irq(&xas);
1598 /* swap in or instantiate fallocated page */
1599 if (shmem_getpage(mapping->host, index, &page,
1600 SGP_NOHUGE)) {
1601 result = SCAN_FAIL;
1602 goto xa_unlocked;
1603 }
1604 } else if (trylock_page(page)) {
1605 get_page(page);
1606 xas_unlock_irq(&xas);
1607 } else {
1608 result = SCAN_PAGE_LOCK;
Hugh Dickins042a3082018-11-30 14:10:39 -08001609 goto xa_locked;
Matthew Wilcox77da9382017-12-04 14:56:08 -05001610 }
Song Liu99cb0db2019-09-23 15:38:00 -07001611 } else { /* !is_shmem */
1612 if (!page || xa_is_value(page)) {
1613 xas_unlock_irq(&xas);
1614 page_cache_sync_readahead(mapping, &file->f_ra,
1615 file, index,
1616 PAGE_SIZE);
1617 /* drain pagevecs to help isolate_lru_page() */
1618 lru_add_drain();
1619 page = find_lock_page(mapping, index);
1620 if (unlikely(page == NULL)) {
1621 result = SCAN_FAIL;
1622 goto xa_unlocked;
1623 }
			} else if (PageDirty(page)) {
				/*
				 * khugepaged only works on read-only fds,
				 * so this page is dirty because it hasn't
				 * been flushed since the first write.
				 * There won't be new dirty pages.
				 *
				 * Trigger an async flush here and hope the
				 * writeback is done when khugepaged
				 * revisits this page.
				 *
				 * This is a one-off situation. We are not
				 * forcing writeback in a loop.
				 */
				xas_unlock_irq(&xas);
				filemap_flush(mapping);
				result = SCAN_FAIL;
				goto xa_unlocked;
			} else if (trylock_page(page)) {
				get_page(page);
				xas_unlock_irq(&xas);
			} else {
				result = SCAN_PAGE_LOCK;
				goto xa_locked;
			}
		}

		/*
		 * The page must be locked, so we can drop the i_pages lock
		 * without racing with truncate.
		 */
		VM_BUG_ON_PAGE(!PageLocked(page), page);

		/* make sure the page is up to date */
		if (unlikely(!PageUptodate(page))) {
			result = SCAN_FAIL;
			goto out_unlock;
		}

		/*
		 * If file was truncated then extended, or hole-punched, before
		 * we locked the first page, then a THP might be there already.
		 */
		if (PageTransCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			goto out_unlock;
		}

		if (page_mapping(page) != mapping) {
			result = SCAN_TRUNCATED;
			goto out_unlock;
		}

		if (!is_shmem && PageDirty(page)) {
			/*
			 * khugepaged only works on read-only fds, so this
			 * page is dirty because it hasn't been flushed
			 * since the first write.
			 */
			result = SCAN_FAIL;
			goto out_unlock;
		}

		if (isolate_lru_page(page)) {
			result = SCAN_DEL_PAGE_LRU;
			goto out_unlock;
		}

		if (page_has_private(page) &&
		    !try_to_release_page(page, GFP_KERNEL)) {
			result = SCAN_PAGE_HAS_PRIVATE;
			goto out_unlock;
		}

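		/*
		 * Drop any remaining pte mappings, so that the only
		 * references left on the page are the three we freeze
		 * below.
		 */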
		if (page_mapped(page))
			unmap_mapping_pages(mapping, index, 1, false);

		xas_lock_irq(&xas);
		xas_set(&xas, index);

		VM_BUG_ON_PAGE(page != xas_load(&xas), page);
		VM_BUG_ON_PAGE(page_mapped(page), page);

		/*
		 * The page is expected to have page_count() == 3:
		 *  - we hold a pin on it;
		 *  - one reference from page cache;
		 *  - one from isolate_lru_page;
		 */
		if (!page_ref_freeze(page, 3)) {
			result = SCAN_PAGE_COUNT;
			xas_unlock_irq(&xas);
			putback_lru_page(page);
			goto out_unlock;
		}

		/*
		 * Add the page to the list to be able to undo the collapse
		 * if something goes wrong.
		 */
		list_add_tail(&page->lru, &pagelist);

		/* Finally, replace with the new page. */
		xas_store(&xas, new_page);
		continue;
out_unlock:
		unlock_page(page);
		put_page(page);
		goto xa_unlocked;
	}

	if (is_shmem)
		__inc_node_page_state(new_page, NR_SHMEM_THPS);
	else {
		__inc_node_page_state(new_page, NR_FILE_THPS);
		filemap_nr_thps_inc(mapping);
	}

	if (nr_none) {
		struct zone *zone = page_zone(new_page);

		__mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
		if (is_shmem)
			__mod_node_page_state(zone->zone_pgdat,
					      NR_SHMEM, nr_none);
	}

xa_locked:
	xas_unlock_irq(&xas);
xa_unlocked:

	if (result == SCAN_SUCCEED) {
		struct page *page, *tmp;

		/*
		 * Replacing the old pages with the new one has succeeded;
		 * now we need to copy the content and free the old pages.
		 */
		index = start;
		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
			while (index < page->index) {
				clear_highpage(new_page + (index % HPAGE_PMD_NR));
				index++;
			}
			copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
				      page);
			list_del(&page->lru);
			page->mapping = NULL;
			page_ref_unfreeze(page, 1);
			ClearPageActive(page);
			ClearPageUnevictable(page);
			unlock_page(page);
			put_page(page);
			index++;
		}
		while (index < end) {
			clear_highpage(new_page + (index % HPAGE_PMD_NR));
			index++;
		}

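		/*
		 * All content now lives in the huge page and the holes
		 * have been cleared; publish it and take the references
		 * that the remaining page cache slots represent.
		 */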
		SetPageUptodate(new_page);
		page_ref_add(new_page, HPAGE_PMD_NR - 1);
		mem_cgroup_commit_charge(new_page, memcg, false, true);

		if (is_shmem) {
			set_page_dirty(new_page);
			lru_cache_add_anon(new_page);
		} else {
			lru_cache_add_file(new_page);
		}
		count_memcg_events(memcg, THP_COLLAPSE_ALLOC, 1);

		/*
		 * Remove pte page tables, so we can re-fault the page as huge.
		 */
		retract_page_tables(mapping, start);
		*hpage = NULL;

		khugepaged_pages_collapsed++;
	} else {
		struct page *page;

		/* Something went wrong: roll back page cache changes */
		xas_lock_irq(&xas);
		mapping->nrpages -= nr_none;

		if (is_shmem)
			shmem_uncharge(mapping->host, nr_none);

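		/*
		 * Walk the range again, putting the old pages back in
		 * place of new_page and restoring the holes where no
		 * page was ever present.
		 */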
		xas_set(&xas, start);
		xas_for_each(&xas, page, end - 1) {
			page = list_first_entry_or_null(&pagelist,
					struct page, lru);
			if (!page || xas.xa_index < page->index) {
				if (!nr_none)
					break;
				nr_none--;
				/* Put holes back where they were */
				xas_store(&xas, NULL);
				continue;
			}

			VM_BUG_ON_PAGE(page->index != xas.xa_index, page);

			/*
			 * Unfreeze the page: one reference goes back to
			 * the page cache, the other is dropped again by
			 * putback_lru_page() below.
			 */
			list_del(&page->lru);
			page_ref_unfreeze(page, 2);
			xas_store(&xas, page);
			xas_pause(&xas);
			xas_unlock_irq(&xas);
			unlock_page(page);
			putback_lru_page(page);
			xas_lock_irq(&xas);
		}
		VM_BUG_ON(nr_none);
		xas_unlock_irq(&xas);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001840
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001841 mem_cgroup_cancel_charge(new_page, memcg, true);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001842 new_page->mapping = NULL;
1843 }
Hugh Dickins042a3082018-11-30 14:10:39 -08001844
1845 unlock_page(new_page);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001846out:
1847 VM_BUG_ON(!list_empty(&pagelist));
1848 /* TODO: tracepoints */
1849}
1850
Song Liu579c5712019-09-23 15:37:57 -07001851static void khugepaged_scan_file(struct mm_struct *mm,
1852 struct file *file, pgoff_t start, struct page **hpage)
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001853{
1854 struct page *page = NULL;
Song Liu579c5712019-09-23 15:37:57 -07001855 struct address_space *mapping = file->f_mapping;
Matthew Wilcox85b392d2017-12-04 15:06:23 -05001856 XA_STATE(xas, &mapping->i_pages, start);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001857 int present, swap;
1858 int node = NUMA_NO_NODE;
1859 int result = SCAN_SUCCEED;
1860
1861 present = 0;
1862 swap = 0;
1863 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
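	/* The scan itself is read-only; walk the page cache under RCU. */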
	rcu_read_lock();
	xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
		if (xas_retry(&xas, page))
			continue;

		if (xa_is_value(page)) {
			if (++swap > khugepaged_max_ptes_swap) {
				result = SCAN_EXCEED_SWAP_PTE;
				break;
			}
			continue;
		}

		if (PageTransCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			break;
		}

		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			break;
		}
		khugepaged_node_load[node]++;

		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			break;
		}

		if (page_count(page) !=
		    1 + page_mapcount(page) + page_has_private(page)) {
			result = SCAN_PAGE_COUNT;
			break;
		}

		/*
		 * We probably should check if the page is referenced here,
		 * but nobody would transfer pte_young() to PageReferenced()
		 * for us. And an rmap walk here is just too costly...
		 */

		present++;

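		/*
		 * Periodically drop the RCU read lock to give other tasks
		 * a chance to run; xas_pause() keeps the walk restartable
		 * afterwards.
		 */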
		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}
	rcu_read_unlock();

	if (result == SCAN_SUCCEED) {
		if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
			result = SCAN_EXCEED_NONE_PTE;
		} else {
			node = khugepaged_find_target_node();
			collapse_file(mm, file, start, hpage, node);
		}
	}

	/* TODO: tracepoints */
}
#else
static void khugepaged_scan_file(struct mm_struct *mm,
		struct file *file, pgoff_t start, struct page **hpage)
{
	BUILD_BUG();
}

static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
{
	return 0;
}
#endif

static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
					    struct page **hpage)
	__releases(&khugepaged_mm_lock)
	__acquires(&khugepaged_mm_lock)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int progress = 0;

	VM_BUG_ON(!pages);
	lockdep_assert_held(&khugepaged_mm_lock);

	if (khugepaged_scan.mm_slot)
		mm_slot = khugepaged_scan.mm_slot;
	else {
		mm_slot = list_entry(khugepaged_scan.mm_head.next,
				     struct mm_slot, mm_node);
		khugepaged_scan.address = 0;
		khugepaged_scan.mm_slot = mm_slot;
	}
	spin_unlock(&khugepaged_mm_lock);
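	/* First collapse any pte-mapped THPs queued up for this mm. */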
	khugepaged_collapse_pte_mapped_thps(mm_slot);

	mm = mm_slot->mm;
	/*
	 * Don't wait for the semaphore (to avoid long wait times). Just
	 * move to the next mm on the list.
	 */
	vma = NULL;
	if (unlikely(!down_read_trylock(&mm->mmap_sem)))
		goto breakouterloop_mmap_sem;
	if (likely(!khugepaged_test_exit(mm)))
		vma = find_vma(mm, khugepaged_scan.address);

	progress++;
	for (; vma; vma = vma->vm_next) {
		unsigned long hstart, hend;

		cond_resched();
		if (unlikely(khugepaged_test_exit(mm))) {
			progress++;
			break;
		}
		if (!hugepage_vma_check(vma, vma->vm_flags)) {
skip:
			progress++;
			continue;
		}
		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
		hend = vma->vm_end & HPAGE_PMD_MASK;
		if (hstart >= hend)
			goto skip;
		if (khugepaged_scan.address > hend)
			goto skip;
		if (khugepaged_scan.address < hstart)
			khugepaged_scan.address = hstart;
		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
		if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma))
			goto skip;

		while (khugepaged_scan.address < hend) {
			int ret;
			cond_resched();
			if (unlikely(khugepaged_test_exit(mm)))
				goto breakouterloop;

			VM_BUG_ON(khugepaged_scan.address < hstart ||
				  khugepaged_scan.address + HPAGE_PMD_SIZE >
				  hend);
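			/*
			 * khugepaged_scan_file() runs without mmap_sem, so
			 * pin the file and drop the semaphore before
			 * calling it; ret = 1 records that the semaphore
			 * was dropped.
			 */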
			if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
				struct file *file = get_file(vma->vm_file);
				pgoff_t pgoff = linear_page_index(vma,
						khugepaged_scan.address);

				up_read(&mm->mmap_sem);
				ret = 1;
				khugepaged_scan_file(mm, file, pgoff, hpage);
				fput(file);
			} else {
				ret = khugepaged_scan_pmd(mm, vma,
						khugepaged_scan.address,
						hpage);
			}
			/* move to next address */
			khugepaged_scan.address += HPAGE_PMD_SIZE;
			progress += HPAGE_PMD_NR;
			if (ret)
				/* we released mmap_sem so break loop */
				goto breakouterloop_mmap_sem;
			if (progress >= pages)
				goto breakouterloop;
		}
	}
breakouterloop:
	up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
breakouterloop_mmap_sem:

	spin_lock(&khugepaged_mm_lock);
	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
	/*
	 * Release the current mm_slot if this mm is about to die, or
	 * if we scanned all vmas of this mm.
	 */
	if (khugepaged_test_exit(mm) || !vma) {
		/*
		 * Make sure that if mm_users is reaching zero while
		 * khugepaged runs here, khugepaged_exit will find
		 * mm_slot not pointing to the exiting mm.
		 */
		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
			khugepaged_scan.mm_slot = list_entry(
				mm_slot->mm_node.next,
				struct mm_slot, mm_node);
			khugepaged_scan.address = 0;
		} else {
			khugepaged_scan.mm_slot = NULL;
			khugepaged_full_scans++;
		}

		collect_mm_slot(mm_slot);
	}

	return progress;
}

static int khugepaged_has_work(void)
{
	return !list_empty(&khugepaged_scan.mm_head) &&
		khugepaged_enabled();
}

static int khugepaged_wait_event(void)
{
	return !list_empty(&khugepaged_scan.mm_head) ||
		kthread_should_stop();
}

static void khugepaged_do_scan(void)
{
	struct page *hpage = NULL;
	unsigned int progress = 0, pass_through_head = 0;
	unsigned int pages = khugepaged_pages_to_scan;
	bool wait = true;

	barrier(); /* write khugepaged_pages_to_scan to local stack */

	while (progress < pages) {
		if (!khugepaged_prealloc_page(&hpage, &wait))
			break;

		cond_resched();

		if (unlikely(kthread_should_stop() || try_to_freeze()))
			break;

		spin_lock(&khugepaged_mm_lock);
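		/*
		 * pass_through_head counts how often the scan has wrapped
		 * to the head of the mm list; stop after one full pass
		 * rather than rescanning the same mms in this invocation.
		 */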
		if (!khugepaged_scan.mm_slot)
			pass_through_head++;
		if (khugepaged_has_work() &&
		    pass_through_head < 2)
			progress += khugepaged_scan_mm_slot(pages - progress,
							    &hpage);
		else
			progress = pages;
		spin_unlock(&khugepaged_mm_lock);
	}

	if (!IS_ERR_OR_NULL(hpage))
		put_page(hpage);
}

static bool khugepaged_should_wakeup(void)
{
	return kthread_should_stop() ||
	       time_after_eq(jiffies, khugepaged_sleep_expire);
}

static void khugepaged_wait_work(void)
{
	if (khugepaged_has_work()) {
		const unsigned long scan_sleep_jiffies =
			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);

		if (!scan_sleep_jiffies)
			return;

		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
		wait_event_freezable_timeout(khugepaged_wait,
					     khugepaged_should_wakeup(),
					     scan_sleep_jiffies);
		return;
	}

	if (khugepaged_enabled())
		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
}

static int khugepaged(void *none)
{
	struct mm_slot *mm_slot;

	set_freezable();
	set_user_nice(current, MAX_NICE);

	while (!kthread_should_stop()) {
		khugepaged_do_scan();
		khugepaged_wait_work();
	}

	spin_lock(&khugepaged_mm_lock);
	mm_slot = khugepaged_scan.mm_slot;
	khugepaged_scan.mm_slot = NULL;
	if (mm_slot)
		collect_mm_slot(mm_slot);
	spin_unlock(&khugepaged_mm_lock);
	return 0;
}

static void set_recommended_min_free_kbytes(void)
{
	struct zone *zone;
	int nr_zones = 0;
	unsigned long recommended_min;

	for_each_populated_zone(zone) {
		/*
		 * We don't need to worry about fragmentation of
		 * ZONE_MOVABLE since it only has movable pages.
		 */
		if (zone_idx(zone) > gfp_zone(GFP_USER))
			continue;

		nr_zones++;
	}

	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
	recommended_min = pageblock_nr_pages * nr_zones * 2;

	/*
	 * Make sure that on average at least two pageblocks are almost free
	 * of another type: one for a migratetype to fall back to, and a
	 * second to avoid subsequent fallbacks of other types. There are 3
	 * MIGRATE_TYPES we care about.
	 */
	recommended_min += pageblock_nr_pages * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
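	/*
	 * Illustrative numbers only: with 2MB pageblocks
	 * (pageblock_nr_pages == 512), two eligible zones and
	 * MIGRATE_PCPTYPES == 3, this asks for 512*2*2 + 512*2*3*3 =
	 * 11264 pages, i.e. 44MB with 4KB pages, before the 5% clamp
	 * below.
	 */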

	/* never allow reserving more than 5% of lowmem */
	recommended_min = min(recommended_min,
			      (unsigned long) nr_free_buffer_pages() / 20);
	recommended_min <<= (PAGE_SHIFT-10);

	if (recommended_min > min_free_kbytes) {
		if (user_min_free_kbytes >= 0)
			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
				min_free_kbytes, recommended_min);

		min_free_kbytes = recommended_min;
	}
	setup_per_zone_wmarks();
}

int start_stop_khugepaged(void)
{
	static struct task_struct *khugepaged_thread __read_mostly;
	static DEFINE_MUTEX(khugepaged_mutex);
	int err = 0;

	mutex_lock(&khugepaged_mutex);
	if (khugepaged_enabled()) {
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (IS_ERR(khugepaged_thread)) {
			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
			goto fail;
		}

		if (!list_empty(&khugepaged_scan.mm_head))
			wake_up_interruptible(&khugepaged_wait);

		set_recommended_min_free_kbytes();
	} else if (khugepaged_thread) {
		kthread_stop(khugepaged_thread);
		khugepaged_thread = NULL;
	}
fail:
	mutex_unlock(&khugepaged_mutex);
	return err;
}