#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
22
Jann Horn588be4a2022-11-25 22:37:13 +010023/* gross hack for <=4.19 stable */
24#if defined(CONFIG_S390) || defined(CONFIG_ARM)
25static void tlb_remove_table_smp_sync(void *arg)
26{
27 /* Simply deliver the interrupt */
28}
29
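/*
 * Force an IPI to every other CPU and wait for it to be handled: any
 * lockless (interrupts-disabled) page table walker such as gup_fast
 * that raced with a pmd collapse is guaranteed to have finished by the
 * time this returns.
 */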
30static void tlb_remove_table_sync_one(void)
31{
32 smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
33}
34#endif
35
enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_PMD_NULL,
	SCAN_EXCEED_NONE_PTE,
	SCAN_PTE_NON_PRESENT,
	SCAN_PAGE_RO,
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_PAGE_NULL,
	SCAN_SCAN_ABORT,
	SCAN_PAGE_COUNT,
	SCAN_PAGE_LRU,
	SCAN_PAGE_LOCK,
	SCAN_PAGE_ANON,
	SCAN_PAGE_COMPOUND,
	SCAN_ANY_PROCESS,
	SCAN_VMA_NULL,
	SCAN_VMA_CHECK,
	SCAN_ADDRESS_RANGE,
	SCAN_SWAP_CACHE_PAGE,
	SCAN_DEL_PAGE_LRU,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_EXCEED_SWAP_PTE,
	SCAN_TRUNCATED,
};

#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>
65
Vijay Balakrishna189394c2020-10-10 23:16:40 -070066static struct task_struct *khugepaged_thread __read_mostly;
67static DEFINE_MUTEX(khugepaged_mutex);
68
/* by default scan 8*512 ptes (or vmas) every 30 seconds */
70static unsigned int khugepaged_pages_to_scan __read_mostly;
71static unsigned int khugepaged_pages_collapsed;
72static unsigned int khugepaged_full_scans;
73static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
74/* during fragmentation poll the hugepage allocator once every minute */
75static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
76static unsigned long khugepaged_sleep_expire;
77static DEFINE_SPINLOCK(khugepaged_mm_lock);
78static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default, collapse hugepages if there is at least one pte mapped,
 * as would have happened if the vma had been large enough during the
 * page fault.
 */
84static unsigned int khugepaged_max_ptes_none __read_mostly;
85static unsigned int khugepaged_max_ptes_swap __read_mostly;
86
87#define MM_SLOTS_HASH_BITS 10
88static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
89
90static struct kmem_cache *mm_slot_cache __read_mostly;
91
92/**
93 * struct mm_slot - hash lookup from mm to mm_slot
94 * @hash: hash collision list
95 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
96 * @mm: the mm that this information is valid for
97 */
98struct mm_slot {
99 struct hlist_node hash;
100 struct list_head mm_node;
101 struct mm_struct *mm;
102};
103
104/**
105 * struct khugepaged_scan - cursor for scanning
106 * @mm_head: the head of the mm list to scan
107 * @mm_slot: the current mm_slot we are scanning
108 * @address: the next address inside that to be scanned
109 *
110 * There is only the one khugepaged_scan instance of this cursor structure.
111 */
112struct khugepaged_scan {
113 struct list_head mm_head;
114 struct mm_slot *mm_slot;
115 unsigned long address;
116};
117
118static struct khugepaged_scan khugepaged_scan = {
119 .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
120};
121
Jérémy Lefauree1465d12016-11-30 15:54:02 -0800122#ifdef CONFIG_SYSFS
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700123static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
124 struct kobj_attribute *attr,
125 char *buf)
126{
127 return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
128}
129
130static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
131 struct kobj_attribute *attr,
132 const char *buf, size_t count)
133{
134 unsigned long msecs;
135 int err;
136
137 err = kstrtoul(buf, 10, &msecs);
138 if (err || msecs > UINT_MAX)
139 return -EINVAL;
140
141 khugepaged_scan_sleep_millisecs = msecs;
142 khugepaged_sleep_expire = 0;
143 wake_up_interruptible(&khugepaged_wait);
144
145 return count;
146}
147static struct kobj_attribute scan_sleep_millisecs_attr =
148 __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
149 scan_sleep_millisecs_store);
150
151static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
152 struct kobj_attribute *attr,
153 char *buf)
154{
155 return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
156}
157
158static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
159 struct kobj_attribute *attr,
160 const char *buf, size_t count)
161{
162 unsigned long msecs;
163 int err;
164
165 err = kstrtoul(buf, 10, &msecs);
166 if (err || msecs > UINT_MAX)
167 return -EINVAL;
168
169 khugepaged_alloc_sleep_millisecs = msecs;
170 khugepaged_sleep_expire = 0;
171 wake_up_interruptible(&khugepaged_wait);
172
173 return count;
174}
175static struct kobj_attribute alloc_sleep_millisecs_attr =
176 __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
177 alloc_sleep_millisecs_store);
178
179static ssize_t pages_to_scan_show(struct kobject *kobj,
180 struct kobj_attribute *attr,
181 char *buf)
182{
183 return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
184}
185static ssize_t pages_to_scan_store(struct kobject *kobj,
186 struct kobj_attribute *attr,
187 const char *buf, size_t count)
188{
189 int err;
190 unsigned long pages;
191
192 err = kstrtoul(buf, 10, &pages);
193 if (err || !pages || pages > UINT_MAX)
194 return -EINVAL;
195
196 khugepaged_pages_to_scan = pages;
197
198 return count;
199}
200static struct kobj_attribute pages_to_scan_attr =
201 __ATTR(pages_to_scan, 0644, pages_to_scan_show,
202 pages_to_scan_store);
203
204static ssize_t pages_collapsed_show(struct kobject *kobj,
205 struct kobj_attribute *attr,
206 char *buf)
207{
208 return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
209}
210static struct kobj_attribute pages_collapsed_attr =
211 __ATTR_RO(pages_collapsed);
212
213static ssize_t full_scans_show(struct kobject *kobj,
214 struct kobj_attribute *attr,
215 char *buf)
216{
217 return sprintf(buf, "%u\n", khugepaged_full_scans);
218}
219static struct kobj_attribute full_scans_attr =
220 __ATTR_RO(full_scans);
221
222static ssize_t khugepaged_defrag_show(struct kobject *kobj,
223 struct kobj_attribute *attr, char *buf)
224{
225 return single_hugepage_flag_show(kobj, attr, buf,
226 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
227}
228static ssize_t khugepaged_defrag_store(struct kobject *kobj,
229 struct kobj_attribute *attr,
230 const char *buf, size_t count)
231{
232 return single_hugepage_flag_store(kobj, attr, buf, count,
233 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
234}
235static struct kobj_attribute khugepaged_defrag_attr =
236 __ATTR(defrag, 0644, khugepaged_defrag_show,
237 khugepaged_defrag_store);
238
/*
 * max_ptes_none controls whether khugepaged should collapse hugepages
 * over unmapped ptes, which potentially increases the memory footprint
 * of the vmas. When max_ptes_none is 0, khugepaged will not reduce the
 * available free memory in the system as it runs. Increasing
 * max_ptes_none will instead potentially reduce the free memory in the
 * system during the khugepaged scan.
 */
247static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
248 struct kobj_attribute *attr,
249 char *buf)
250{
251 return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
252}
253static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
254 struct kobj_attribute *attr,
255 const char *buf, size_t count)
256{
257 int err;
258 unsigned long max_ptes_none;
259
260 err = kstrtoul(buf, 10, &max_ptes_none);
261 if (err || max_ptes_none > HPAGE_PMD_NR-1)
262 return -EINVAL;
263
264 khugepaged_max_ptes_none = max_ptes_none;
265
266 return count;
267}
268static struct kobj_attribute khugepaged_max_ptes_none_attr =
269 __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
270 khugepaged_max_ptes_none_store);
271
272static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
273 struct kobj_attribute *attr,
274 char *buf)
275{
276 return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
277}
278
279static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
280 struct kobj_attribute *attr,
281 const char *buf, size_t count)
282{
283 int err;
284 unsigned long max_ptes_swap;
285
286 err = kstrtoul(buf, 10, &max_ptes_swap);
287 if (err || max_ptes_swap > HPAGE_PMD_NR-1)
288 return -EINVAL;
289
290 khugepaged_max_ptes_swap = max_ptes_swap;
291
292 return count;
293}
294
295static struct kobj_attribute khugepaged_max_ptes_swap_attr =
296 __ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
297 khugepaged_max_ptes_swap_store);
298
299static struct attribute *khugepaged_attr[] = {
300 &khugepaged_defrag_attr.attr,
301 &khugepaged_max_ptes_none_attr.attr,
302 &pages_to_scan_attr.attr,
303 &pages_collapsed_attr.attr,
304 &full_scans_attr.attr,
305 &scan_sleep_millisecs_attr.attr,
306 &alloc_sleep_millisecs_attr.attr,
307 &khugepaged_max_ptes_swap_attr.attr,
308 NULL,
309};
310
311struct attribute_group khugepaged_attr_group = {
312 .attrs = khugepaged_attr,
313 .name = "khugepaged",
314};
Jérémy Lefauree1465d12016-11-30 15:54:02 -0800315#endif /* CONFIG_SYSFS */
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700316
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -0700317#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700318
319int hugepage_madvise(struct vm_area_struct *vma,
320 unsigned long *vm_flags, int advice)
321{
322 switch (advice) {
323 case MADV_HUGEPAGE:
324#ifdef CONFIG_S390
325 /*
326 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
327 * can't handle this properly after s390_enable_sie, so we simply
328 * ignore the madvise to prevent qemu from causing a SIGSEGV.
329 */
330 if (mm_has_pgste(vma->vm_mm))
331 return 0;
332#endif
333 *vm_flags &= ~VM_NOHUGEPAGE;
334 *vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes good for khugepaged to scan,
		 * register it here without waiting for a page fault
		 * that may not happen any time soon.
		 */
340 if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
341 khugepaged_enter_vma_merge(vma, *vm_flags))
342 return -ENOMEM;
343 break;
344 case MADV_NOHUGEPAGE:
345 *vm_flags &= ~VM_HUGEPAGE;
346 *vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma, even if the mm stays registered in khugepaged
		 * because it was registered before VM_NOHUGEPAGE was set.
		 */
352 break;
353 }
354
355 return 0;
356}
357
358int __init khugepaged_init(void)
359{
360 mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
361 sizeof(struct mm_slot),
362 __alignof__(struct mm_slot), 0, NULL);
363 if (!mm_slot_cache)
364 return -ENOMEM;
365
366 khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
367 khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
368 khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
369
370 return 0;
371}
372
373void __init khugepaged_destroy(void)
374{
375 kmem_cache_destroy(mm_slot_cache);
376}
377
378static inline struct mm_slot *alloc_mm_slot(void)
379{
380 if (!mm_slot_cache) /* initialization failed */
381 return NULL;
382 return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
383}
384
385static inline void free_mm_slot(struct mm_slot *mm_slot)
386{
387 kmem_cache_free(mm_slot_cache, mm_slot);
388}
389
390static struct mm_slot *get_mm_slot(struct mm_struct *mm)
391{
392 struct mm_slot *mm_slot;
393
394 hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
395 if (mm == mm_slot->mm)
396 return mm_slot;
397
398 return NULL;
399}
400
401static void insert_to_mm_slots_hash(struct mm_struct *mm,
402 struct mm_slot *mm_slot)
403{
404 mm_slot->mm = mm;
405 hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
406}
407
408static inline int khugepaged_test_exit(struct mm_struct *mm)
409{
Hugh Dickinsdb63d182020-08-06 23:26:25 -0700410 return atomic_read(&mm->mm_users) == 0 || !mmget_still_valid(mm);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700411}
412
413int __khugepaged_enter(struct mm_struct *mm)
414{
415 struct mm_slot *mm_slot;
416 int wakeup;
417
418 mm_slot = alloc_mm_slot();
419 if (!mm_slot)
420 return -ENOMEM;
421
422 /* __khugepaged_exit() must not run from under us */
Hugh Dickinscdb3f8b2020-08-20 17:42:02 -0700423 VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700424 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
425 free_mm_slot(mm_slot);
426 return 0;
427 }
428
429 spin_lock(&khugepaged_mm_lock);
430 insert_to_mm_slots_hash(mm, mm_slot);
431 /*
432 * Insert just behind the scanning cursor, to let the area settle
433 * down a little.
434 */
435 wakeup = list_empty(&khugepaged_scan.mm_head);
436 list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
437 spin_unlock(&khugepaged_mm_lock);
438
439 atomic_inc(&mm->mm_count);
440 if (wakeup)
441 wake_up_interruptible(&khugepaged_wait);
442
443 return 0;
444}
445
446int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
447 unsigned long vm_flags)
448{
449 unsigned long hstart, hend;
450 if (!vma->anon_vma)
451 /*
452 * Not yet faulted in so we will register later in the
453 * page fault if needed.
454 */
455 return 0;
456 if (vma->vm_ops || (vm_flags & VM_NO_KHUGEPAGED))
457 /* khugepaged not yet working on file or special mappings */
458 return 0;
459 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
460 hend = vma->vm_end & HPAGE_PMD_MASK;
461 if (hstart < hend)
462 return khugepaged_enter(vma, vm_flags);
463 return 0;
464}
465
466void __khugepaged_exit(struct mm_struct *mm)
467{
468 struct mm_slot *mm_slot;
469 int free = 0;
470
471 spin_lock(&khugepaged_mm_lock);
472 mm_slot = get_mm_slot(mm);
473 if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
474 hash_del(&mm_slot->hash);
475 list_del(&mm_slot->mm_node);
476 free = 1;
477 }
478 spin_unlock(&khugepaged_mm_lock);
479
480 if (free) {
481 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
482 free_mm_slot(mm_slot);
483 mmdrop(mm);
484 } else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * khugepaged_test_exit() (which is guaranteed to run
		 * under mmap_sem read mode). Stop here (after we return,
		 * all pagetables will be destroyed) until khugepaged has
		 * finished working on the pagetables under the mmap_sem.
		 */
493 down_write(&mm->mmap_sem);
494 up_write(&mm->mmap_sem);
495 }
496}
497
498static void release_pte_page(struct page *page)
499{
500 /* 0 stands for page_is_file_cache(page) == false */
Mel Gorman599d0c92016-07-28 15:45:31 -0700501 dec_node_page_state(page, NR_ISOLATED_ANON + 0);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700502 unlock_page(page);
503 putback_lru_page(page);
504}
505
506static void release_pte_pages(pte_t *pte, pte_t *_pte)
507{
508 while (--_pte >= pte) {
509 pte_t pteval = *_pte;
510 if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)))
511 release_pte_page(pte_page(pteval));
512 }
513}
514
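/*
 * Verify and isolate the pages backing one pmd range before collapse:
 * every present pte must point to an anonymous, non-compound page with
 * no extra references (no gup pin); each such page is locked and taken
 * off the LRU.  Returns 1 on success with all pages locked and
 * isolated, or 0 after putting them back.
 */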
515static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
516 unsigned long address,
517 pte_t *pte)
518{
519 struct page *page = NULL;
520 pte_t *_pte;
Ebru Akagunduz0db501f2016-07-26 15:26:46 -0700521 int none_or_zero = 0, result = 0, referenced = 0;
522 bool writable = false;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700523
524 for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
525 _pte++, address += PAGE_SIZE) {
526 pte_t pteval = *_pte;
527 if (pte_none(pteval) || (pte_present(pteval) &&
528 is_zero_pfn(pte_pfn(pteval)))) {
529 if (!userfaultfd_armed(vma) &&
530 ++none_or_zero <= khugepaged_max_ptes_none) {
531 continue;
532 } else {
533 result = SCAN_EXCEED_NONE_PTE;
534 goto out;
535 }
536 }
537 if (!pte_present(pteval)) {
538 result = SCAN_PTE_NON_PRESENT;
539 goto out;
540 }
541 page = vm_normal_page(vma, address, pteval);
542 if (unlikely(!page)) {
543 result = SCAN_PAGE_NULL;
544 goto out;
545 }
546
Kirill A. Shutemov24284d52018-03-22 16:17:28 -0700547 /* TODO: teach khugepaged to collapse THP mapped with pte */
548 if (PageCompound(page)) {
549 result = SCAN_PAGE_COMPOUND;
550 goto out;
551 }
552
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700553 VM_BUG_ON_PAGE(!PageAnon(page), page);
554 VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
555
556 /*
557 * We can do it before isolate_lru_page because the
558 * page can't be freed from under us. NOTE: PG_lock
559 * is needed to serialize against split_huge_page
560 * when invoked from the VM.
561 */
562 if (!trylock_page(page)) {
563 result = SCAN_PAGE_LOCK;
564 goto out;
565 }
566
567 /*
568 * cannot use mapcount: can't collapse if there's a gup pin.
569 * The page must only be referenced by the scanned process
570 * and page swap cache.
571 */
572 if (page_count(page) != 1 + !!PageSwapCache(page)) {
573 unlock_page(page);
574 result = SCAN_PAGE_COUNT;
575 goto out;
576 }
577 if (pte_write(pteval)) {
578 writable = true;
579 } else {
580 if (PageSwapCache(page) &&
581 !reuse_swap_page(page, NULL)) {
582 unlock_page(page);
583 result = SCAN_SWAP_CACHE_PAGE;
584 goto out;
585 }
586 /*
587 * Page is not in the swap cache. It can be collapsed
588 * into a THP.
589 */
590 }
591
		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
596 if (isolate_lru_page(page)) {
597 unlock_page(page);
598 result = SCAN_DEL_PAGE_LRU;
599 goto out;
600 }
601 /* 0 stands for page_is_file_cache(page) == false */
Mel Gorman599d0c92016-07-28 15:45:31 -0700602 inc_node_page_state(page, NR_ISOLATED_ANON + 0);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700603 VM_BUG_ON_PAGE(!PageLocked(page), page);
604 VM_BUG_ON_PAGE(PageLRU(page), page);
605
		/* There should be enough young ptes to collapse the page */
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700607 if (pte_young(pteval) ||
608 page_is_young(page) || PageReferenced(page) ||
609 mmu_notifier_test_young(vma->vm_mm, address))
Ebru Akagunduz0db501f2016-07-26 15:26:46 -0700610 referenced++;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700611 }
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700612
Miaohe Lind5e95af2021-05-04 18:33:46 -0700613 if (unlikely(!writable)) {
614 result = SCAN_PAGE_RO;
615 } else if (unlikely(!referenced)) {
616 result = SCAN_LACK_REFERENCED_PAGE;
617 } else {
618 result = SCAN_SUCCEED;
619 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
620 referenced, writable, result);
621 return 1;
622 }
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700623out:
624 release_pte_pages(pte, _pte);
625 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
626 referenced, writable, result);
627 return 0;
628}
629
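/*
 * Copy the contents of the isolated small pages into the new huge page,
 * clearing the old ptes as we go: none/zero ptes become cleared
 * subpages, real pages are copied, removed from the rmap and released.
 */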
630static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
631 struct vm_area_struct *vma,
632 unsigned long address,
633 spinlock_t *ptl)
634{
635 pte_t *_pte;
636 for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
637 pte_t pteval = *_pte;
638 struct page *src_page;
639
640 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
641 clear_user_highpage(page, address);
642 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
643 if (is_zero_pfn(pte_pfn(pteval))) {
644 /*
645 * ptl mostly unnecessary.
646 */
647 spin_lock(ptl);
648 /*
649 * paravirt calls inside pte_clear here are
650 * superfluous.
651 */
652 pte_clear(vma->vm_mm, address, _pte);
653 spin_unlock(ptl);
654 }
655 } else {
656 src_page = pte_page(pteval);
657 copy_user_highpage(page, src_page, address, vma);
658 VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
659 release_pte_page(src_page);
660 /*
661 * ptl mostly unnecessary, but preempt has to
662 * be disabled to update the per-cpu stats
663 * inside page_remove_rmap().
664 */
665 spin_lock(ptl);
666 /*
667 * paravirt calls inside pte_clear here are
668 * superfluous.
669 */
670 pte_clear(vma->vm_mm, address, _pte);
671 page_remove_rmap(src_page, false);
672 spin_unlock(ptl);
673 free_page_and_swap_cache(src_page);
674 }
675
676 address += PAGE_SIZE;
677 page++;
678 }
679}
680
681static void khugepaged_alloc_sleep(void)
682{
683 DEFINE_WAIT(wait);
684
685 add_wait_queue(&khugepaged_wait, &wait);
686 freezable_schedule_timeout_interruptible(
687 msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
688 remove_wait_queue(&khugepaged_wait, &wait);
689}
690
691static int khugepaged_node_load[MAX_NUMNODES];
692
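/*
 * When node_reclaim_mode is enabled, abort the scan if the pages seen
 * so far come from nodes further than RECLAIM_DISTANCE away from @nid,
 * so that distant nodes are not mixed into a single huge page.
 */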
693static bool khugepaged_scan_abort(int nid)
694{
695 int i;
696
697 /*
Mel Gormana5f5f912016-07-28 15:46:32 -0700698 * If node_reclaim_mode is disabled, then no extra effort is made to
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700699 * allocate memory locally.
700 */
Mel Gormana5f5f912016-07-28 15:46:32 -0700701 if (!node_reclaim_mode)
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700702 return false;
703
704 /* If there is a count for this node already, it must be acceptable */
705 if (khugepaged_node_load[nid])
706 return false;
707
708 for (i = 0; i < MAX_NUMNODES; i++) {
709 if (!khugepaged_node_load[i])
710 continue;
711 if (node_distance(nid, i) > RECLAIM_DISTANCE)
712 return true;
713 }
714 return false;
715}
716
717/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
718static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
719{
Vlastimil Babka25160352016-07-28 15:49:25 -0700720 return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700721}
722
723#ifdef CONFIG_NUMA
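/*
 * Pick the node to allocate the huge page from: the node that backed
 * the most small pages in the scanned range, round-robining between
 * nodes that tie with the previous choice.
 */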
724static int khugepaged_find_target_node(void)
725{
726 static int last_khugepaged_target_node = NUMA_NO_NODE;
727 int nid, target_node = 0, max_value = 0;
728
	/* find the first node with the most normal-page hits */
730 for (nid = 0; nid < MAX_NUMNODES; nid++)
731 if (khugepaged_node_load[nid] > max_value) {
732 max_value = khugepaged_node_load[nid];
733 target_node = nid;
734 }
735
	/* do some balancing if several nodes have the same hit count */
737 if (target_node <= last_khugepaged_target_node)
738 for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
739 nid++)
740 if (max_value == khugepaged_node_load[nid]) {
741 target_node = nid;
742 break;
743 }
744
745 last_khugepaged_target_node = target_node;
746 return target_node;
747}
748
749static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
750{
751 if (IS_ERR(*hpage)) {
752 if (!*wait)
753 return false;
754
755 *wait = false;
756 *hpage = NULL;
757 khugepaged_alloc_sleep();
758 } else if (*hpage) {
759 put_page(*hpage);
760 *hpage = NULL;
761 }
762
763 return true;
764}
765
766static struct page *
Kirill A. Shutemov988ddb72016-07-26 15:26:26 -0700767khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700768{
769 VM_BUG_ON_PAGE(*hpage, *hpage);
770
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700771 *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
772 if (unlikely(!*hpage)) {
773 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
774 *hpage = ERR_PTR(-ENOMEM);
775 return NULL;
776 }
777
778 prep_transhuge_page(*hpage);
779 count_vm_event(THP_COLLAPSE_ALLOC);
780 return *hpage;
781}
782#else
783static int khugepaged_find_target_node(void)
784{
785 return 0;
786}
787
788static inline struct page *alloc_khugepaged_hugepage(void)
789{
790 struct page *page;
791
792 page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
793 HPAGE_PMD_ORDER);
794 if (page)
795 prep_transhuge_page(page);
796 return page;
797}
798
799static struct page *khugepaged_alloc_hugepage(bool *wait)
800{
801 struct page *hpage;
802
803 do {
804 hpage = alloc_khugepaged_hugepage();
805 if (!hpage) {
806 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
807 if (!*wait)
808 return NULL;
809
810 *wait = false;
811 khugepaged_alloc_sleep();
812 } else
813 count_vm_event(THP_COLLAPSE_ALLOC);
814 } while (unlikely(!hpage) && likely(khugepaged_enabled()));
815
816 return hpage;
817}
818
819static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
820{
Hugh Dickins5c5aaf92020-10-09 20:07:59 -0700821 /*
822 * If the hpage allocated earlier was briefly exposed in page cache
823 * before collapse_file() failed, it is possible that racing lookups
824 * have not yet completed, and would then be unpleasantly surprised by
825 * finding the hpage reused for the same mapping at a different offset.
826 * Just release the previous allocation if there is any danger of that.
827 */
828 if (*hpage && page_count(*hpage) > 1) {
829 put_page(*hpage);
830 *hpage = NULL;
831 }
832
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700833 if (!*hpage)
834 *hpage = khugepaged_alloc_hugepage(wait);
835
836 if (unlikely(!*hpage))
837 return false;
838
839 return true;
840}
841
842static struct page *
Kirill A. Shutemov988ddb72016-07-26 15:26:26 -0700843khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700844{
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700845 VM_BUG_ON(!*hpage);
846
847 return *hpage;
848}
849#endif
850
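/*
 * Check whether khugepaged is allowed to work on this vma at all:
 * honour VM_HUGEPAGE/VM_NOHUGEPAGE and the global "always" mode,
 * require suitably aligned shmem mappings when huge pagecache is
 * enabled, and skip special mappings, other file mappings and
 * temporary stacks.
 */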
851static bool hugepage_vma_check(struct vm_area_struct *vma)
852{
853 if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
854 (vma->vm_flags & VM_NOHUGEPAGE))
855 return false;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -0700856 if (shmem_file(vma->vm_file)) {
Kirill A. Shutemove496cf32016-07-26 15:26:35 -0700857 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
858 return false;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -0700859 return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
860 HPAGE_PMD_NR);
861 }
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700862 if (!vma->anon_vma || vma->vm_ops)
863 return false;
864 if (is_vma_temporary_stack(vma))
865 return false;
866 return !(vma->vm_flags & VM_NO_KHUGEPAGED);
867}
868
/*
 * If mmap_sem was temporarily dropped, revalidate the vma before
 * continuing with it. Return 0 on success, otherwise a non-zero
 * scan_result code.
 */
875
Kirill A. Shutemovc131f752016-09-19 14:44:01 -0700876static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
877 struct vm_area_struct **vmap)
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700878{
879 struct vm_area_struct *vma;
880 unsigned long hstart, hend;
881
882 if (unlikely(khugepaged_test_exit(mm)))
883 return SCAN_ANY_PROCESS;
884
Kirill A. Shutemovc131f752016-09-19 14:44:01 -0700885 *vmap = vma = find_vma(mm, address);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700886 if (!vma)
887 return SCAN_VMA_NULL;
888
889 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
890 hend = vma->vm_end & HPAGE_PMD_MASK;
891 if (address < hstart || address + HPAGE_PMD_SIZE > hend)
892 return SCAN_ADDRESS_RANGE;
893 if (!hugepage_vma_check(vma))
894 return SCAN_VMA_CHECK;
895 return 0;
896}
897
898/*
899 * Bring missing pages in from swap, to complete THP collapse.
900 * Only done if khugepaged_scan_pmd believes it is worthwhile.
901 *
902 * Called and returns without pte mapped or spinlocks held,
903 * but with mmap_sem held to protect against vma changes.
904 */
905
906static bool __collapse_huge_page_swapin(struct mm_struct *mm,
907 struct vm_area_struct *vma,
Ebru Akagunduz0db501f2016-07-26 15:26:46 -0700908 unsigned long address, pmd_t *pmd,
909 int referenced)
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700910{
911 pte_t pteval;
912 int swapped_in = 0, ret = 0;
913 struct fault_env fe = {
914 .vma = vma,
915 .address = address,
916 .flags = FAULT_FLAG_ALLOW_RETRY,
917 .pmd = pmd,
Laurent Dufourff04da72018-04-17 16:33:18 +0200918 .vma_flags = vma->vm_flags,
919 .vma_page_prot = vma->vm_page_prot,
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700920 };
921
	/* we only decide to swap in if there are enough young ptes */
923 if (referenced < HPAGE_PMD_NR/2) {
924 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
925 return false;
926 }
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700927 fe.pte = pte_offset_map(pmd, address);
928 for (; fe.address < address + HPAGE_PMD_NR*PAGE_SIZE;
929 fe.pte++, fe.address += PAGE_SIZE) {
930 pteval = *fe.pte;
931 if (!is_swap_pte(pteval))
932 continue;
933 swapped_in++;
934 ret = do_swap_page(&fe, pteval);
Ebru Akagunduz0db501f2016-07-26 15:26:46 -0700935
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700936 /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
937 if (ret & VM_FAULT_RETRY) {
938 down_read(&mm->mmap_sem);
Kirill A. Shutemovc131f752016-09-19 14:44:01 -0700939 if (hugepage_vma_revalidate(mm, address, &fe.vma)) {
Ebru Akagunduz47f863e2016-07-26 15:26:43 -0700940 /* vma is no longer available, don't continue to swapin */
Ebru Akagunduz0db501f2016-07-26 15:26:46 -0700941 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700942 return false;
Ebru Akagunduz47f863e2016-07-26 15:26:43 -0700943 }
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700944 /* check if the pmd is still valid */
945 if (mm_find_pmd(mm, address) != pmd)
946 return false;
947 }
948 if (ret & VM_FAULT_ERROR) {
Ebru Akagunduz0db501f2016-07-26 15:26:46 -0700949 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700950 return false;
951 }
952 /* pte is unmapped now, we need to map it */
953 fe.pte = pte_offset_map(pmd, fe.address);
954 }
955 fe.pte--;
956 pte_unmap(fe.pte);
Ebru Akagunduz0db501f2016-07-26 15:26:46 -0700957 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700958 return true;
959}
960
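/*
 * Collapse one pmd-sized anonymous range into a huge page: allocate and
 * charge the new page with mmap_sem dropped, swap in any missing ptes
 * under mmap_sem read, then retake mmap_sem for write, flush the pmd,
 * isolate and copy the small pages, and finally install the huge pmd.
 * Returns with mmap_sem released.
 */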
961static void collapse_huge_page(struct mm_struct *mm,
962 unsigned long address,
963 struct page **hpage,
Ebru Akagunduz0db501f2016-07-26 15:26:46 -0700964 int node, int referenced)
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700965{
966 pmd_t *pmd, _pmd;
967 pte_t *pte;
968 pgtable_t pgtable;
969 struct page *new_page;
970 spinlock_t *pmd_ptl, *pte_ptl;
971 int isolated = 0, result = 0;
972 struct mem_cgroup *memcg;
Kirill A. Shutemovc131f752016-09-19 14:44:01 -0700973 struct vm_area_struct *vma;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700974 unsigned long mmun_start; /* For mmu_notifiers */
975 unsigned long mmun_end; /* For mmu_notifiers */
976 gfp_t gfp;
977
978 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
979
980 /* Only allocate from the target node */
981 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_OTHER_NODE | __GFP_THISNODE;
982
Kirill A. Shutemov988ddb72016-07-26 15:26:26 -0700983 /*
984 * Before allocating the hugepage, release the mmap_sem read lock.
985 * The allocation can take potentially a long time if it involves
986 * sync compaction, and we do not need to hold the mmap_sem during
987 * that. We will recheck the vma after taking it again in write mode.
988 */
989 up_read(&mm->mmap_sem);
990 new_page = khugepaged_alloc_page(hpage, gfp, node);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700991 if (!new_page) {
992 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
993 goto out_nolock;
994 }
995
David Rientjes6b7ff8e2018-03-22 16:17:45 -0700996 /* Do not oom kill for khugepaged charges */
997 if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp | __GFP_NORETRY,
998 &memcg, true))) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700999 result = SCAN_CGROUP_CHARGE_FAIL;
1000 goto out_nolock;
1001 }
1002
1003 down_read(&mm->mmap_sem);
Kirill A. Shutemovc131f752016-09-19 14:44:01 -07001004 result = hugepage_vma_revalidate(mm, address, &vma);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001005 if (result) {
1006 mem_cgroup_cancel_charge(new_page, memcg, true);
1007 up_read(&mm->mmap_sem);
1008 goto out_nolock;
1009 }
1010
1011 pmd = mm_find_pmd(mm, address);
1012 if (!pmd) {
1013 result = SCAN_PMD_NULL;
1014 mem_cgroup_cancel_charge(new_page, memcg, true);
1015 up_read(&mm->mmap_sem);
1016 goto out_nolock;
1017 }
1018
	/*
	 * __collapse_huge_page_swapin always returns with mmap_sem locked.
	 * If it fails, we release mmap_sem and jump to out_nolock;
	 * continuing to collapse would cause inconsistency.
	 */
Ebru Akagunduz0db501f2016-07-26 15:26:46 -07001024 if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001025 mem_cgroup_cancel_charge(new_page, memcg, true);
1026 up_read(&mm->mmap_sem);
1027 goto out_nolock;
1028 }
1029
1030 up_read(&mm->mmap_sem);
	/*
	 * Prevent all access to the pagetables, with the exception of
	 * gup_fast (handled later by the ptep_clear_flush) and the VM
	 * (handled by the anon_vma lock + PG_lock).
	 */
1036 down_write(&mm->mmap_sem);
Kirill A. Shutemovc131f752016-09-19 14:44:01 -07001037 result = hugepage_vma_revalidate(mm, address, &vma);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001038 if (result)
1039 goto out;
1040 /* check if the pmd is still valid */
1041 if (mm_find_pmd(mm, address) != pmd)
1042 goto out;
1043
Laurent Dufourdd2b4652018-04-17 16:33:15 +02001044 vm_write_begin(vma);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001045 anon_vma_lock_write(vma->anon_vma);
1046
1047 pte = pte_offset_map(pmd, address);
1048 pte_ptl = pte_lockptr(mm, pmd);
1049
1050 mmun_start = address;
1051 mmun_end = address + HPAGE_PMD_SIZE;
1052 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1053 pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1054 /*
1055 * After this gup_fast can't run anymore. This also removes
1056 * any huge TLB entry from the CPU so we won't allow
1057 * huge and small TLB entries for the same virtual address
1058 * to avoid the risk of CPU bugs in that area.
1059 */
1060 _pmd = pmdp_collapse_flush(vma, address, pmd);
1061 spin_unlock(pmd_ptl);
1062 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
Jann Horn588be4a2022-11-25 22:37:13 +01001063 tlb_remove_table_sync_one();
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001064
1065 spin_lock(pte_ptl);
1066 isolated = __collapse_huge_page_isolate(vma, address, pte);
1067 spin_unlock(pte_ptl);
1068
1069 if (unlikely(!isolated)) {
1070 pte_unmap(pte);
1071 spin_lock(pmd_ptl);
1072 BUG_ON(!pmd_none(*pmd));
		/*
		 * We can only use set_pmd_at when establishing
		 * hugepmds and never for establishing regular pmds that
		 * point to regular pagetables. Use pmd_populate for that.
		 */
1078 pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1079 spin_unlock(pmd_ptl);
1080 anon_vma_unlock_write(vma->anon_vma);
Laurent Dufourdd2b4652018-04-17 16:33:15 +02001081 vm_write_end(vma);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001082 result = SCAN_FAIL;
1083 goto out;
1084 }
1085
1086 /*
1087 * All pages are isolated and locked so anon_vma rmap
1088 * can't run anymore.
1089 */
1090 anon_vma_unlock_write(vma->anon_vma);
1091
1092 __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
1093 pte_unmap(pte);
1094 __SetPageUptodate(new_page);
1095 pgtable = pmd_pgtable(_pmd);
1096
1097 _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
1098 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1099
	/*
	 * spin_lock() below is not the equivalent of smp_wmb(), so
	 * this is needed to prevent the copy_huge_page writes from
	 * becoming visible after the set_pmd_at() write.
	 */
1105 smp_wmb();
1106
1107 spin_lock(pmd_ptl);
1108 BUG_ON(!pmd_none(*pmd));
1109 page_add_new_anon_rmap(new_page, vma, address, true);
1110 mem_cgroup_commit_charge(new_page, memcg, false, true);
1111 lru_cache_add_active_or_unevictable(new_page, vma);
1112 pgtable_trans_huge_deposit(mm, pmd, pgtable);
1113 set_pmd_at(mm, address, pmd, _pmd);
1114 update_mmu_cache_pmd(vma, address, pmd);
1115 spin_unlock(pmd_ptl);
Laurent Dufourdd2b4652018-04-17 16:33:15 +02001116 vm_write_end(vma);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001117
1118 *hpage = NULL;
1119
1120 khugepaged_pages_collapsed++;
1121 result = SCAN_SUCCEED;
1122out_up_write:
1123 up_write(&mm->mmap_sem);
1124out_nolock:
1125 trace_mm_collapse_huge_page(mm, isolated, result);
1126 return;
1127out:
1128 mem_cgroup_cancel_charge(new_page, memcg, true);
1129 goto out_up_write;
1130}
1131
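/*
 * Scan one pmd-sized range and decide whether it is worth collapsing:
 * count none/zero, swapped-out and young ptes against the configured
 * limits and record which nodes back the pages.  On success, calls
 * collapse_huge_page() (which releases mmap_sem) and returns 1.
 */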
1132static int khugepaged_scan_pmd(struct mm_struct *mm,
1133 struct vm_area_struct *vma,
1134 unsigned long address,
1135 struct page **hpage)
1136{
1137 pmd_t *pmd;
1138 pte_t *pte, *_pte;
Ebru Akagunduz0db501f2016-07-26 15:26:46 -07001139 int ret = 0, none_or_zero = 0, result = 0, referenced = 0;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001140 struct page *page = NULL;
1141 unsigned long _address;
1142 spinlock_t *ptl;
1143 int node = NUMA_NO_NODE, unmapped = 0;
Ebru Akagunduz0db501f2016-07-26 15:26:46 -07001144 bool writable = false;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001145
1146 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1147
1148 pmd = mm_find_pmd(mm, address);
1149 if (!pmd) {
1150 result = SCAN_PMD_NULL;
1151 goto out;
1152 }
1153
1154 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1155 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1156 for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
1157 _pte++, _address += PAGE_SIZE) {
1158 pte_t pteval = *_pte;
1159 if (is_swap_pte(pteval)) {
1160 if (++unmapped <= khugepaged_max_ptes_swap) {
1161 continue;
1162 } else {
1163 result = SCAN_EXCEED_SWAP_PTE;
1164 goto out_unmap;
1165 }
1166 }
1167 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1168 if (!userfaultfd_armed(vma) &&
1169 ++none_or_zero <= khugepaged_max_ptes_none) {
1170 continue;
1171 } else {
1172 result = SCAN_EXCEED_NONE_PTE;
1173 goto out_unmap;
1174 }
1175 }
1176 if (!pte_present(pteval)) {
1177 result = SCAN_PTE_NON_PRESENT;
1178 goto out_unmap;
1179 }
1180 if (pte_write(pteval))
1181 writable = true;
1182
1183 page = vm_normal_page(vma, _address, pteval);
1184 if (unlikely(!page)) {
1185 result = SCAN_PAGE_NULL;
1186 goto out_unmap;
1187 }
1188
1189 /* TODO: teach khugepaged to collapse THP mapped with pte */
1190 if (PageCompound(page)) {
1191 result = SCAN_PAGE_COMPOUND;
1192 goto out_unmap;
1193 }
1194
		/*
		 * Record which node the original page is from and save this
		 * information to khugepaged_node_load[].
		 * Khugepaged will allocate a hugepage from the node with the
		 * highest hit count.
		 */
1201 node = page_to_nid(page);
1202 if (khugepaged_scan_abort(node)) {
1203 result = SCAN_SCAN_ABORT;
1204 goto out_unmap;
1205 }
1206 khugepaged_node_load[node]++;
1207 if (!PageLRU(page)) {
1208 result = SCAN_PAGE_LRU;
1209 goto out_unmap;
1210 }
1211 if (PageLocked(page)) {
1212 result = SCAN_PAGE_LOCK;
1213 goto out_unmap;
1214 }
1215 if (!PageAnon(page)) {
1216 result = SCAN_PAGE_ANON;
1217 goto out_unmap;
1218 }
1219
1220 /*
1221 * cannot use mapcount: can't collapse if there's a gup pin.
1222 * The page must only be referenced by the scanned process
1223 * and page swap cache.
1224 */
1225 if (page_count(page) != 1 + !!PageSwapCache(page)) {
1226 result = SCAN_PAGE_COUNT;
1227 goto out_unmap;
1228 }
1229 if (pte_young(pteval) ||
1230 page_is_young(page) || PageReferenced(page) ||
1231 mmu_notifier_test_young(vma->vm_mm, address))
Ebru Akagunduz0db501f2016-07-26 15:26:46 -07001232 referenced++;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001233 }
1234 if (writable) {
1235 if (referenced) {
1236 result = SCAN_SUCCEED;
1237 ret = 1;
1238 } else {
Ebru Akagunduz0db501f2016-07-26 15:26:46 -07001239 result = SCAN_LACK_REFERENCED_PAGE;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001240 }
1241 } else {
1242 result = SCAN_PAGE_RO;
1243 }
1244out_unmap:
1245 pte_unmap_unlock(pte, ptl);
1246 if (ret) {
1247 node = khugepaged_find_target_node();
1248 /* collapse_huge_page will return with the mmap_sem released */
Kirill A. Shutemovc131f752016-09-19 14:44:01 -07001249 collapse_huge_page(mm, address, hpage, node, referenced);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001250 }
1251out:
1252 trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1253 none_or_zero, result, unmapped);
1254 return ret;
1255}
1256
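/*
 * If the mm behind this mm_slot has exited, drop the slot from the hash
 * and scan list, free it and drop the mm reference taken in
 * __khugepaged_enter().
 */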
1257static void collect_mm_slot(struct mm_slot *mm_slot)
1258{
1259 struct mm_struct *mm = mm_slot->mm;
1260
1261 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
1262
1263 if (khugepaged_test_exit(mm)) {
1264 /* free mm_slot */
1265 hash_del(&mm_slot->hash);
1266 list_del(&mm_slot->mm_node);
1267
1268 /*
1269 * Not strictly needed because the mm exited already.
1270 *
1271 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1272 */
1273
1274 /* khugepaged_mm_lock actually not necessary for the below */
1275 free_mm_slot(mm_slot);
1276 mmdrop(mm);
1277 }
1278}
1279
Kirill A. Shutemove496cf32016-07-26 15:26:35 -07001280#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
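/*
 * After a shmem extent has been collapsed, walk every VMA that maps the
 * collapsed offset and retract its pte page table: flush the pmd and
 * free the now-unneeded page table, so that the next fault can install
 * a huge pmd instead.
 */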
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001281static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1282{
1283 struct vm_area_struct *vma;
Hugh Dickinsdc3ff4f2020-08-06 23:26:22 -07001284 struct mm_struct *mm;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001285 unsigned long addr;
1286 pmd_t *pmd, _pmd;
1287
1288 i_mmap_lock_write(mapping);
1289 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1290 /* probably overkill */
1291 if (vma->anon_vma)
1292 continue;
1293 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1294 if (addr & ~HPAGE_PMD_MASK)
1295 continue;
1296 if (vma->vm_end < addr + HPAGE_PMD_SIZE)
1297 continue;
Hugh Dickinsdc3ff4f2020-08-06 23:26:22 -07001298 mm = vma->vm_mm;
1299 pmd = mm_find_pmd(mm, addr);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001300 if (!pmd)
1301 continue;
1302 /*
1303 * We need exclusive mmap_sem to retract page table.
1304 * If trylock fails we would end up with pte-mapped THP after
1305 * re-fault. Not ideal, but it's more important to not disturb
1306 * the system too much.
1307 */
Hugh Dickinsdc3ff4f2020-08-06 23:26:22 -07001308 if (down_write_trylock(&mm->mmap_sem)) {
1309 if (!khugepaged_test_exit(mm)) {
Jann Horn275c6262022-11-25 22:37:14 +01001310 spinlock_t *ptl;
1311 unsigned long end = addr + HPAGE_PMD_SIZE;
1312
1313 mmu_notifier_invalidate_range_start(mm, addr,
1314 end);
1315 ptl = pmd_lock(mm, pmd);
Hugh Dickinsdc3ff4f2020-08-06 23:26:22 -07001316 /* assume page table is clear */
1317 _pmd = pmdp_collapse_flush(vma, addr, pmd);
1318 spin_unlock(ptl);
1319 atomic_long_dec(&mm->nr_ptes);
Jann Horn588be4a2022-11-25 22:37:13 +01001320 tlb_remove_table_sync_one();
Hugh Dickinsdc3ff4f2020-08-06 23:26:22 -07001321 pte_free(mm, pmd_pgtable(_pmd));
Jann Horn275c6262022-11-25 22:37:14 +01001322 mmu_notifier_invalidate_range_end(mm, addr,
1323 end);
Hugh Dickinsdc3ff4f2020-08-06 23:26:22 -07001324 }
1325 up_write(&mm->mmap_sem);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001326 }
1327 }
1328 i_mmap_unlock_write(mapping);
1329}
1330
1331/**
1332 * collapse_shmem - collapse small tmpfs/shmem pages into huge one.
1333 *
1334 * Basic scheme is simple, details are more complex:
Hugh Dickins8dcbb5f2018-11-30 14:10:43 -08001335 * - allocate and lock a new huge page;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001336 * - scan over radix tree replacing old pages the new one
1337 * + swap in pages if necessary;
1338 * + fill in gaps;
1339 * + keep old pages around in case if rollback is required;
1340 * - if replacing succeed:
1341 * + copy data over;
1342 * + free old pages;
Hugh Dickins8dcbb5f2018-11-30 14:10:43 -08001343 * + unlock huge page;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001344 * - if replacing failed;
1345 * + put all pages back and unfreeze them;
1346 * + restore gaps in the radix-tree;
Hugh Dickins8dcbb5f2018-11-30 14:10:43 -08001347 * + unlock and free huge page;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001348 */
1349static void collapse_shmem(struct mm_struct *mm,
1350 struct address_space *mapping, pgoff_t start,
1351 struct page **hpage, int node)
1352{
1353 gfp_t gfp;
1354 struct page *page, *new_page, *tmp;
1355 struct mem_cgroup *memcg;
1356 pgoff_t index, end = start + HPAGE_PMD_NR;
1357 LIST_HEAD(pagelist);
1358 struct radix_tree_iter iter;
1359 void **slot;
1360 int nr_none = 0, result = SCAN_SUCCEED;
1361
1362 VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1363
1364 /* Only allocate from the target node */
1365 gfp = alloc_hugepage_khugepaged_gfpmask() |
1366 __GFP_OTHER_NODE | __GFP_THISNODE;
1367
1368 new_page = khugepaged_alloc_page(hpage, gfp, node);
1369 if (!new_page) {
1370 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1371 goto out;
1372 }
1373
David Rientjes6b7ff8e2018-03-22 16:17:45 -07001374 /* Do not oom kill for khugepaged charges */
1375 if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp | __GFP_NORETRY,
1376 &memcg, true))) {
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001377 result = SCAN_CGROUP_CHARGE_FAIL;
1378 goto out;
1379 }
1380
Hugh Dickinsc2ca73b2018-11-30 14:10:39 -08001381 __SetPageLocked(new_page);
1382 __SetPageSwapBacked(new_page);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001383 new_page->index = start;
1384 new_page->mapping = mapping;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001385
1386 /*
Hugh Dickins8dcbb5f2018-11-30 14:10:43 -08001387 * At this point the new_page is locked and not up-to-date.
1388 * It's safe to insert it into the page cache, because nobody would
1389 * be able to map it or use it in another way until we unlock it.
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001390 */
1391
1392 index = start;
1393 spin_lock_irq(&mapping->tree_lock);
1394 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
1395 int n = min(iter.index, end) - index;
1396
1397 /*
Hugh Dickins10e458e2018-11-30 14:10:25 -08001398 * Stop if extent has been hole-punched, and is now completely
1399 * empty (the more obvious i_size_read() check would take an
1400 * irq-unsafe seqlock on 32-bit).
1401 */
1402 if (n >= HPAGE_PMD_NR) {
1403 result = SCAN_TRUNCATED;
1404 goto tree_locked;
1405 }
1406
		/*
		 * Handle holes in the radix tree: charge them to shmem and
		 * insert the relevant subpages of new_page into the radix tree.
		 */
1411 if (n && !shmem_charge(mapping->host, n)) {
1412 result = SCAN_FAIL;
Hugh Dickinsc2ca73b2018-11-30 14:10:39 -08001413 goto tree_locked;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001414 }
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001415 for (; index < min(iter.index, end); index++) {
1416 radix_tree_insert(&mapping->page_tree, index,
1417 new_page + (index % HPAGE_PMD_NR));
1418 }
Hugh Dickinsc2ca73b2018-11-30 14:10:39 -08001419 nr_none += n;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001420
1421 /* We are done. */
1422 if (index >= end)
1423 break;
1424
1425 page = radix_tree_deref_slot_protected(slot,
1426 &mapping->tree_lock);
1427 if (radix_tree_exceptional_entry(page) || !PageUptodate(page)) {
1428 spin_unlock_irq(&mapping->tree_lock);
1429 /* swap in or instantiate fallocated page */
1430 if (shmem_getpage(mapping->host, index, &page,
1431 SGP_NOHUGE)) {
1432 result = SCAN_FAIL;
1433 goto tree_unlocked;
1434 }
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001435 } else if (trylock_page(page)) {
1436 get_page(page);
Hugh Dickinsc2ca73b2018-11-30 14:10:39 -08001437 spin_unlock_irq(&mapping->tree_lock);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001438 } else {
1439 result = SCAN_PAGE_LOCK;
Hugh Dickinsc2ca73b2018-11-30 14:10:39 -08001440 goto tree_locked;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001441 }
1442
1443 /*
1444 * The page must be locked, so we can drop the tree_lock
1445 * without racing with truncate.
1446 */
1447 VM_BUG_ON_PAGE(!PageLocked(page), page);
1448 VM_BUG_ON_PAGE(!PageUptodate(page), page);
Hugh Dickinsdc628032018-11-30 14:10:47 -08001449
1450 /*
1451 * If file was truncated then extended, or hole-punched, before
1452 * we locked the first page, then a THP might be there already.
1453 */
1454 if (PageTransCompound(page)) {
1455 result = SCAN_PAGE_COMPOUND;
1456 goto out_unlock;
1457 }
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001458
1459 if (page_mapping(page) != mapping) {
1460 result = SCAN_TRUNCATED;
1461 goto out_unlock;
1462 }
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001463
1464 if (isolate_lru_page(page)) {
1465 result = SCAN_DEL_PAGE_LRU;
Hugh Dickinsc2ca73b2018-11-30 14:10:39 -08001466 goto out_unlock;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001467 }
1468
1469 if (page_mapped(page))
1470 unmap_mapping_range(mapping, index << PAGE_SHIFT,
1471 PAGE_SIZE, 0);
1472
1473 spin_lock_irq(&mapping->tree_lock);
1474
Johannes Weiner058a4a52016-12-12 16:43:32 -08001475 slot = radix_tree_lookup_slot(&mapping->page_tree, index);
1476 VM_BUG_ON_PAGE(page != radix_tree_deref_slot_protected(slot,
1477 &mapping->tree_lock), page);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001478 VM_BUG_ON_PAGE(page_mapped(page), page);
1479
1480 /*
1481 * The page is expected to have page_count() == 3:
1482 * - we hold a pin on it;
1483 * - one reference from radix tree;
1484 * - one from isolate_lru_page;
1485 */
1486 if (!page_ref_freeze(page, 3)) {
1487 result = SCAN_PAGE_COUNT;
Hugh Dickinsc2ca73b2018-11-30 14:10:39 -08001488 spin_unlock_irq(&mapping->tree_lock);
1489 putback_lru_page(page);
1490 goto out_unlock;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001491 }
1492
		/*
		 * Add the page to the list so we are able to undo the collapse
		 * if something goes wrong.
		 */
1497 list_add_tail(&page->lru, &pagelist);
1498
1499 /* Finally, replace with the new page. */
1500 radix_tree_replace_slot(slot,
1501 new_page + (index % HPAGE_PMD_NR));
1502
Johannes Weiner058a4a52016-12-12 16:43:32 -08001503 slot = radix_tree_iter_next(&iter);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001504 index++;
1505 continue;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001506out_unlock:
1507 unlock_page(page);
1508 put_page(page);
Hugh Dickinsc2ca73b2018-11-30 14:10:39 -08001509 goto tree_unlocked;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001510 }
1511
1512 /*
1513 * Handle hole in radix tree at the end of the range.
1514 * This code only triggers if there's nothing in radix tree
1515 * beyond 'end'.
1516 */
Hugh Dickinsc2ca73b2018-11-30 14:10:39 -08001517 if (index < end) {
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001518 int n = end - index;
1519
Hugh Dickins10e458e2018-11-30 14:10:25 -08001520 /* Stop if extent has been truncated, and is now empty */
1521 if (n >= HPAGE_PMD_NR) {
1522 result = SCAN_TRUNCATED;
1523 goto tree_locked;
1524 }
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001525 if (!shmem_charge(mapping->host, n)) {
1526 result = SCAN_FAIL;
1527 goto tree_locked;
1528 }
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001529 for (; index < end; index++) {
1530 radix_tree_insert(&mapping->page_tree, index,
1531 new_page + (index % HPAGE_PMD_NR));
1532 }
1533 nr_none += n;
1534 }
1535
Hugh Dickinsc2ca73b2018-11-30 14:10:39 -08001536 __inc_node_page_state(new_page, NR_SHMEM_THPS);
1537 if (nr_none) {
1538 struct zone *zone = page_zone(new_page);
1539
1540 __mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
1541 __mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
1542 }
1543
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001544tree_locked:
1545 spin_unlock_irq(&mapping->tree_lock);
1546tree_unlocked:
1547
1548 if (result == SCAN_SUCCEED) {
		/*
		 * Replacing old pages with the new one has succeeded, now we
		 * need to copy the content and free the old pages.
		 */
Hugh Dickins5c0ecc22018-11-30 14:10:35 -08001553 index = start;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001554 list_for_each_entry_safe(page, tmp, &pagelist, lru) {
Hugh Dickins5c0ecc22018-11-30 14:10:35 -08001555 while (index < page->index) {
1556 clear_highpage(new_page + (index % HPAGE_PMD_NR));
1557 index++;
1558 }
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001559 copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
1560 page);
1561 list_del(&page->lru);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001562 page->mapping = NULL;
Hugh Dickinsc2ca73b2018-11-30 14:10:39 -08001563 page_ref_unfreeze(page, 1);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001564 ClearPageActive(page);
1565 ClearPageUnevictable(page);
Hugh Dickinsc2ca73b2018-11-30 14:10:39 -08001566 unlock_page(page);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001567 put_page(page);
Hugh Dickins5c0ecc22018-11-30 14:10:35 -08001568 index++;
1569 }
1570 while (index < end) {
1571 clear_highpage(new_page + (index % HPAGE_PMD_NR));
1572 index++;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001573 }
1574
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001575 SetPageUptodate(new_page);
Hugh Dickins8dcbb5f2018-11-30 14:10:43 -08001576 page_ref_add(new_page, HPAGE_PMD_NR - 1);
Hugh Dickinsc2ca73b2018-11-30 14:10:39 -08001577 set_page_dirty(new_page);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001578 mem_cgroup_commit_charge(new_page, memcg, false, true);
1579 lru_cache_add_anon(new_page);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001580
Hugh Dickinsc2ca73b2018-11-30 14:10:39 -08001581 /*
1582 * Remove pte page tables, so we can re-fault the page as huge.
1583 */
1584 retract_page_tables(mapping, start);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001585 *hpage = NULL;
1586 } else {
		/* Something went wrong: roll back changes to the radix tree */
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001588 spin_lock_irq(&mapping->tree_lock);
Hugh Dickins0dba3e52018-11-30 14:10:29 -08001589 mapping->nrpages -= nr_none;
1590 shmem_uncharge(mapping->host, nr_none);
1591
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001592 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter,
1593 start) {
1594 if (iter.index >= end)
1595 break;
1596 page = list_first_entry_or_null(&pagelist,
1597 struct page, lru);
1598 if (!page || iter.index < page->index) {
1599 if (!nr_none)
1600 break;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001601 nr_none--;
Johannes Weinerdc1b6d02016-12-12 16:43:35 -08001602 /* Put holes back where they were */
1603 radix_tree_delete(&mapping->page_tree,
1604 iter.index);
1605 slot = radix_tree_iter_next(&iter);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001606 continue;
1607 }
1608
1609 VM_BUG_ON_PAGE(page->index != iter.index, page);
1610
1611 /* Unfreeze the page. */
1612 list_del(&page->lru);
1613 page_ref_unfreeze(page, 2);
1614 radix_tree_replace_slot(slot, page);
1615 spin_unlock_irq(&mapping->tree_lock);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001616 unlock_page(page);
Hugh Dickinsc2ca73b2018-11-30 14:10:39 -08001617 putback_lru_page(page);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001618 spin_lock_irq(&mapping->tree_lock);
Johannes Weiner058a4a52016-12-12 16:43:32 -08001619 slot = radix_tree_iter_next(&iter);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001620 }
1621 VM_BUG_ON(nr_none);
1622 spin_unlock_irq(&mapping->tree_lock);
1623
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001624 mem_cgroup_cancel_charge(new_page, memcg, true);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001625 new_page->mapping = NULL;
1626 }
Hugh Dickinsc2ca73b2018-11-30 14:10:39 -08001627
1628 unlock_page(new_page);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001629out:
1630 VM_BUG_ON(!list_empty(&pagelist));
1631 /* TODO: tracepoints */
1632}
1633
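/*
 * Scan one HPAGE_PMD_NR-sized extent of a shmem mapping: walk the radix
 * tree counting present pages and swap entries against the configured
 * limits, then call collapse_shmem() if the extent looks worth
 * collapsing.
 */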
1634static void khugepaged_scan_shmem(struct mm_struct *mm,
1635 struct address_space *mapping,
1636 pgoff_t start, struct page **hpage)
1637{
1638 struct page *page = NULL;
1639 struct radix_tree_iter iter;
1640 void **slot;
1641 int present, swap;
1642 int node = NUMA_NO_NODE;
1643 int result = SCAN_SUCCEED;
1644
1645 present = 0;
1646 swap = 0;
1647 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1648 rcu_read_lock();
1649 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
1650 if (iter.index >= start + HPAGE_PMD_NR)
1651 break;
1652
1653 page = radix_tree_deref_slot(slot);
1654 if (radix_tree_deref_retry(page)) {
1655 slot = radix_tree_iter_retry(&iter);
1656 continue;
1657 }
1658
1659 if (radix_tree_exception(page)) {
1660 if (++swap > khugepaged_max_ptes_swap) {
1661 result = SCAN_EXCEED_SWAP_PTE;
1662 break;
1663 }
1664 continue;
1665 }
1666
1667 if (PageTransCompound(page)) {
1668 result = SCAN_PAGE_COMPOUND;
1669 break;
1670 }
1671
1672 node = page_to_nid(page);
1673 if (khugepaged_scan_abort(node)) {
1674 result = SCAN_SCAN_ABORT;
1675 break;
1676 }
1677 khugepaged_node_load[node]++;
1678
1679 if (!PageLRU(page)) {
1680 result = SCAN_PAGE_LRU;
1681 break;
1682 }
1683
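/*
 * A page here should be referenced only by the radix tree plus one
 * reference per PTE mapping; anything more means it is pinned elsewhere
 * and collapsing would be unsafe.
 */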
1684 if (page_count(page) != 1 + page_mapcount(page)) {
1685 result = SCAN_PAGE_COUNT;
1686 break;
1687 }
1688
1689 /*
1690 * We probably should check if the page is referenced here, but
1691 * nobody would transfer pte_young() to PageReferenced() for us.
 1692 * And an rmap walk here is just too costly...
1693 */
1694
1695 present++;
1696
1697 if (need_resched()) {
1698 cond_resched_rcu();
1699 slot = radix_tree_iter_next(&iter);
1700 }
1701 }
1702 rcu_read_unlock();
1703
1704 if (result == SCAN_SUCCEED) {
1705 if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
1706 result = SCAN_EXCEED_NONE_PTE;
1707 } else {
1708 node = khugepaged_find_target_node();
1709 collapse_shmem(mm, mapping, start, hpage, node);
1710 }
1711 }
1712
1713 /* TODO: tracepoints */
1714}
1715#else
1716static void khugepaged_scan_shmem(struct mm_struct *mm,
1717 struct address_space *mapping,
1718 pgoff_t start, struct page **hpage)
1719{
1720 BUILD_BUG();
1721}
1722#endif
1723
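/*
 * Scan up to 'pages' ptes, resuming from the cursor kept in
 * khugepaged_scan (current mm_slot and address), so successive calls
 * continue where the previous one stopped.  mmap_sem is only try-locked
 * for read; a contended mm is skipped and revisited on a later pass.
 * Returns how much scanning progress was made.
 */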
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001724static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
1725 struct page **hpage)
1726 __releases(&khugepaged_mm_lock)
1727 __acquires(&khugepaged_mm_lock)
1728{
1729 struct mm_slot *mm_slot;
1730 struct mm_struct *mm;
1731 struct vm_area_struct *vma;
1732 int progress = 0;
1733
1734 VM_BUG_ON(!pages);
1735 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
1736
1737 if (khugepaged_scan.mm_slot)
1738 mm_slot = khugepaged_scan.mm_slot;
1739 else {
1740 mm_slot = list_entry(khugepaged_scan.mm_head.next,
1741 struct mm_slot, mm_node);
1742 khugepaged_scan.address = 0;
1743 khugepaged_scan.mm_slot = mm_slot;
1744 }
1745 spin_unlock(&khugepaged_mm_lock);
1746
1747 mm = mm_slot->mm;
Yang Shie56d3702018-01-31 16:18:28 -08001748 /*
 1749 * Don't wait for the semaphore (to avoid long wait times). Just move to
1750 * the next mm on the list.
1751 */
1752 vma = NULL;
1753 if (unlikely(!down_read_trylock(&mm->mmap_sem)))
1754 goto breakouterloop_mmap_sem;
1755 if (likely(!khugepaged_test_exit(mm)))
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001756 vma = find_vma(mm, khugepaged_scan.address);
1757
1758 progress++;
1759 for (; vma; vma = vma->vm_next) {
1760 unsigned long hstart, hend;
1761
1762 cond_resched();
1763 if (unlikely(khugepaged_test_exit(mm))) {
1764 progress++;
1765 break;
1766 }
1767 if (!hugepage_vma_check(vma)) {
1768skip:
1769 progress++;
1770 continue;
1771 }
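/*
 * Round the VMA inward to PMD boundaries; e.g. with a 2MB PMD size a
 * VMA spanning 0x201000-0x7ff000 yields hstart == 0x400000 and
 * hend == 0x600000, so only ranges fully covered by the VMA are
 * considered.
 */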
1772 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1773 hend = vma->vm_end & HPAGE_PMD_MASK;
1774 if (hstart >= hend)
1775 goto skip;
1776 if (khugepaged_scan.address > hend)
1777 goto skip;
1778 if (khugepaged_scan.address < hstart)
1779 khugepaged_scan.address = hstart;
1780 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
1781
1782 while (khugepaged_scan.address < hend) {
1783 int ret;
1784 cond_resched();
1785 if (unlikely(khugepaged_test_exit(mm)))
1786 goto breakouterloop;
1787
1788 VM_BUG_ON(khugepaged_scan.address < hstart ||
1789 khugepaged_scan.address + HPAGE_PMD_SIZE >
1790 hend);
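/*
 * shmem-backed VMAs are scanned through the page cache: take a file
 * reference and drop mmap_sem, since the radix tree walk does not need
 * it.  Either branch sets ret when mmap_sem was released so the outer
 * loop can bail out and retake the locks.
 */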
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001791 if (shmem_file(vma->vm_file)) {
Kirill A. Shutemove496cf32016-07-26 15:26:35 -07001792 struct file *file;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001793 pgoff_t pgoff = linear_page_index(vma,
1794 khugepaged_scan.address);
Kirill A. Shutemove496cf32016-07-26 15:26:35 -07001795 if (!shmem_huge_enabled(vma))
1796 goto skip;
1797 file = get_file(vma->vm_file);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001798 up_read(&mm->mmap_sem);
1799 ret = 1;
1800 khugepaged_scan_shmem(mm, file->f_mapping,
1801 pgoff, hpage);
1802 fput(file);
1803 } else {
1804 ret = khugepaged_scan_pmd(mm, vma,
1805 khugepaged_scan.address,
1806 hpage);
1807 }
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001808 /* move to next address */
1809 khugepaged_scan.address += HPAGE_PMD_SIZE;
1810 progress += HPAGE_PMD_NR;
1811 if (ret)
1812 /* we released mmap_sem so break loop */
1813 goto breakouterloop_mmap_sem;
1814 if (progress >= pages)
1815 goto breakouterloop;
1816 }
1817 }
1818breakouterloop:
1819 up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
1820breakouterloop_mmap_sem:
1821
1822 spin_lock(&khugepaged_mm_lock);
1823 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
1824 /*
1825 * Release the current mm_slot if this mm is about to die, or
1826 * if we scanned all vmas of this mm.
1827 */
1828 if (khugepaged_test_exit(mm) || !vma) {
1829 /*
1830 * Make sure that if mm_users is reaching zero while
1831 * khugepaged runs here, khugepaged_exit will find
1832 * mm_slot not pointing to the exiting mm.
1833 */
1834 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
1835 khugepaged_scan.mm_slot = list_entry(
1836 mm_slot->mm_node.next,
1837 struct mm_slot, mm_node);
1838 khugepaged_scan.address = 0;
1839 } else {
1840 khugepaged_scan.mm_slot = NULL;
1841 khugepaged_full_scans++;
1842 }
1843
1844 collect_mm_slot(mm_slot);
1845 }
1846
1847 return progress;
1848}
1849
1850static int khugepaged_has_work(void)
1851{
1852 return !list_empty(&khugepaged_scan.mm_head) &&
1853 khugepaged_enabled();
1854}
1855
1856static int khugepaged_wait_event(void)
1857{
1858 return !list_empty(&khugepaged_scan.mm_head) ||
1859 kthread_should_stop();
1860}
1861
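/*
 * One batch of work for the khugepaged thread: keep scanning mm slots
 * until khugepaged_pages_to_scan ptes have been covered, stopping early
 * if a huge page cannot be set up by khugepaged_prealloc_page() or the
 * thread is asked to stop.  At most two passes through the head of the
 * mm list are allowed per batch, so an empty or exhausted list cannot
 * make this loop spin.
 */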
1862static void khugepaged_do_scan(void)
1863{
1864 struct page *hpage = NULL;
1865 unsigned int progress = 0, pass_through_head = 0;
1866 unsigned int pages = khugepaged_pages_to_scan;
1867 bool wait = true;
1868
1869 barrier(); /* write khugepaged_pages_to_scan to local stack */
1870
1871 while (progress < pages) {
1872 if (!khugepaged_prealloc_page(&hpage, &wait))
1873 break;
1874
1875 cond_resched();
1876
1877 if (unlikely(kthread_should_stop() || try_to_freeze()))
1878 break;
1879
1880 spin_lock(&khugepaged_mm_lock);
1881 if (!khugepaged_scan.mm_slot)
1882 pass_through_head++;
1883 if (khugepaged_has_work() &&
1884 pass_through_head < 2)
1885 progress += khugepaged_scan_mm_slot(pages - progress,
1886 &hpage);
1887 else
1888 progress = pages;
1889 spin_unlock(&khugepaged_mm_lock);
1890 }
1891
1892 if (!IS_ERR_OR_NULL(hpage))
1893 put_page(hpage);
1894}
1895
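/*
 * Between batches the thread naps: with work pending it sleeps for
 * khugepaged_scan_sleep_millisecs, waking early when asked to stop; with
 * nothing queued it blocks until an mm is registered for scanning or the
 * kthread is stopped.
 */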
1896static bool khugepaged_should_wakeup(void)
1897{
1898 return kthread_should_stop() ||
1899 time_after_eq(jiffies, khugepaged_sleep_expire);
1900}
1901
1902static void khugepaged_wait_work(void)
1903{
1904 if (khugepaged_has_work()) {
1905 const unsigned long scan_sleep_jiffies =
1906 msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
1907
1908 if (!scan_sleep_jiffies)
1909 return;
1910
1911 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
1912 wait_event_freezable_timeout(khugepaged_wait,
1913 khugepaged_should_wakeup(),
1914 scan_sleep_jiffies);
1915 return;
1916 }
1917
1918 if (khugepaged_enabled())
1919 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
1920}
1921
1922static int khugepaged(void *none)
1923{
1924 struct mm_slot *mm_slot;
1925
1926 set_freezable();
1927 set_user_nice(current, MAX_NICE);
1928
1929 while (!kthread_should_stop()) {
1930 khugepaged_do_scan();
1931 khugepaged_wait_work();
1932 }
1933
1934 spin_lock(&khugepaged_mm_lock);
1935 mm_slot = khugepaged_scan.mm_slot;
1936 khugepaged_scan.mm_slot = NULL;
1937 if (mm_slot)
1938 collect_mm_slot(mm_slot);
1939 spin_unlock(&khugepaged_mm_lock);
1940 return 0;
1941}
1942
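/*
 * Worked example for the calculation below, assuming 4K pages and 2MB
 * pageblocks (pageblock_nr_pages == 512, MIGRATE_PCPTYPES == 3) with
 * four populated zones:
 *   512 * 4 * 2  +  512 * 4 * 3 * 3  =  22528 pages,
 * capped at 5% of nr_free_buffer_pages() and then converted to kB:
 *   22528 << (PAGE_SHIFT - 10)  =  90112 kB (~88 MB).
 */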
1943static void set_recommended_min_free_kbytes(void)
1944{
1945 struct zone *zone;
1946 int nr_zones = 0;
1947 unsigned long recommended_min;
1948
1949 for_each_populated_zone(zone)
1950 nr_zones++;
1951
1952 /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
1953 recommended_min = pageblock_nr_pages * nr_zones * 2;
1954
1955 /*
1956 * Make sure that on average at least two pageblocks are almost free
1957 * of another type, one for a migratetype to fall back to and a
 1958 * second to avoid subsequent fallbacks of other types. There are 3
1959 * MIGRATE_TYPES we care about.
1960 */
1961 recommended_min += pageblock_nr_pages * nr_zones *
1962 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
1963
 1964 /* don't ever allow reserving more than 5% of the lowmem */
1965 recommended_min = min(recommended_min,
1966 (unsigned long) nr_free_buffer_pages() / 20);
1967 recommended_min <<= (PAGE_SHIFT-10);
1968
1969 if (recommended_min > min_free_kbytes) {
1970 if (user_min_free_kbytes >= 0)
1971 pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
1972 min_free_kbytes, recommended_min);
1973
1974 min_free_kbytes = recommended_min;
1975 }
1976 setup_per_zone_wmarks();
1977}
1978
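/*
 * Start or stop the khugepaged kthread to match the current THP
 * settings, serialized by khugepaged_mutex.  When enabling, any mm
 * already registered for scanning is picked up immediately and
 * min_free_kbytes is raised to keep huge page allocations viable under
 * fragmentation.
 */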
1979int start_stop_khugepaged(void)
1980{
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001981 int err = 0;
1982
1983 mutex_lock(&khugepaged_mutex);
1984 if (khugepaged_enabled()) {
1985 if (!khugepaged_thread)
1986 khugepaged_thread = kthread_run(khugepaged, NULL,
1987 "khugepaged");
1988 if (IS_ERR(khugepaged_thread)) {
1989 pr_err("khugepaged: kthread_run(khugepaged) failed\n");
1990 err = PTR_ERR(khugepaged_thread);
1991 khugepaged_thread = NULL;
1992 goto fail;
1993 }
1994
1995 if (!list_empty(&khugepaged_scan.mm_head))
1996 wake_up_interruptible(&khugepaged_wait);
1997
1998 set_recommended_min_free_kbytes();
1999 } else if (khugepaged_thread) {
2000 kthread_stop(khugepaged_thread);
2001 khugepaged_thread = NULL;
2002 }
2003fail:
2004 mutex_unlock(&khugepaged_mutex);
2005 return err;
2006}
Vijay Balakrishna189394c2020-10-10 23:16:40 -07002007
2008void khugepaged_min_free_kbytes_update(void)
2009{
2010 mutex_lock(&khugepaged_mutex);
2011 if (khugepaged_enabled() && khugepaged_thread)
2012 set_recommended_min_free_kbytes();
2013 mutex_unlock(&khugepaged_mutex);
2014}