#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_PMD_NULL,
	SCAN_EXCEED_NONE_PTE,
	SCAN_PTE_NON_PRESENT,
	SCAN_PAGE_RO,
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_PAGE_NULL,
	SCAN_SCAN_ABORT,
	SCAN_PAGE_COUNT,
	SCAN_PAGE_LRU,
	SCAN_PAGE_LOCK,
	SCAN_PAGE_ANON,
	SCAN_PAGE_COMPOUND,
	SCAN_ANY_PROCESS,
	SCAN_VMA_NULL,
	SCAN_VMA_CHECK,
	SCAN_ADDRESS_RANGE,
	SCAN_SWAP_CACHE_PAGE,
	SCAN_DEL_PAGE_LRU,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_EXCEED_SWAP_PTE,
	SCAN_TRUNCATED,
};

#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>

static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);

/* default: scan 8*512 ptes (or vmas) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default, collapse a hugepage if at least one pte is mapped, just
 * as would have happened if the vma had been large enough at page
 * fault time.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;

#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only the one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};

#ifdef CONFIG_SYSFS
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
	       scan_sleep_millisecs_store);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
	       alloc_sleep_millisecs_store);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long pages;

	err = kstrtoul(buf, 10, &pages);
	if (err || !pages || pages > UINT_MAX)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
	       pages_to_scan_store);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t khugepaged_defrag_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR(defrag, 0644, khugepaged_defrag_show,
	       khugepaged_defrag_store);

/*
 * max_ptes_none controls whether khugepaged should collapse hugepages
 * over unmapped ptes, potentially increasing the memory footprint of
 * the vmas. When max_ptes_none is 0, khugepaged will not reduce the
 * available free memory in the system as it runs. Increasing
 * max_ptes_none will instead potentially reduce the free memory in the
 * system during the khugepaged scan.
 */
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
	       khugepaged_max_ptes_none_store);

static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
	       khugepaged_max_ptes_swap_store);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	NULL,
};

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */

#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)

int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes eligible for khugepaged to scan,
		 * register it here without waiting for a page fault that
		 * may not happen any time soon.
		 */
		if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
				khugepaged_enter_vma_merge(vma, *vm_flags))
			return -ENOMEM;
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}
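
/*
 * Usage note (illustrative, not part of the kernel API): userspace opts
 * a range in via madvise(2), e.g.
 *
 *	madvise(addr, len, MADV_HUGEPAGE);
 *
 * which reaches hugepage_madvise() above and registers the mm with
 * khugepaged, so collapse can happen even when THP is in system-wide
 * "madvise" mode.
 */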

int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct mm_slot),
					  __alignof__(struct mm_slot), 0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;

	return 0;
}

void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;

	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
		if (mm == mm_slot->mm)
			return mm_slot;

	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	mm_slot->mm = mm;
	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
}

static inline int khugepaged_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0 || !mmget_still_valid(mm);
}

int __khugepaged_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return -ENOMEM;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		free_mm_slot(mm_slot);
		return 0;
	}

	spin_lock(&khugepaged_mm_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	atomic_inc(&mm->mm_count);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);

	return 0;
}

int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	unsigned long hstart, hend;
	if (!vma->anon_vma)
		/*
		 * Not yet faulted in so we will register later in the
		 * page fault if needed.
		 */
		return 0;
	if (vma->vm_ops || (vm_flags & VM_NO_KHUGEPAGED))
		/* khugepaged not yet working on file or special mappings */
		return 0;
	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (hstart < hend)
		return khugepaged_enter(vma, vm_flags);
	return 0;
}

void __khugepaged_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		free_mm_slot(mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * khugepaged_test_exit() (which is guaranteed to run
		 * under mmap_sem read mode). Stop here (after we return,
		 * all pagetables will be destroyed) until khugepaged has
		 * finished working on the pagetables under the mmap_sem.
		 */
		down_write(&mm->mmap_sem);
		up_write(&mm->mmap_sem);
	}
}

static void release_pte_page(struct page *page)
{
	/* 0 stands for page_is_file_cache(page) == false */
	dec_node_page_state(page, NR_ISOLATED_ANON + 0);
	unlock_page(page);
	putback_lru_page(page);
}

static void release_pte_pages(pte_t *pte, pte_t *_pte)
{
	while (--_pte >= pte) {
		pte_t pteval = *_pte;
		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)))
			release_pte_page(pte_page(pteval));
	}
}

static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte)
{
	struct page *page = NULL;
	pte_t *_pte;
	int none_or_zero = 0, result = 0, referenced = 0;
	bool writable = false;

	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval) || (pte_present(pteval) &&
				is_zero_pfn(pte_pfn(pteval)))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out;
		}

		/* TODO: teach khugepaged to collapse THP mapped with pte */
		if (PageCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			goto out;
		}

		VM_BUG_ON_PAGE(!PageAnon(page), page);
		VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			result = SCAN_PAGE_LOCK;
			goto out;
		}

		/*
		 * cannot use mapcount: can't collapse if there's a gup pin.
		 * The page must only be referenced by the scanned process
		 * and page swap cache.
		 */
		if (page_count(page) != 1 + !!PageSwapCache(page)) {
			unlock_page(page);
			result = SCAN_PAGE_COUNT;
			goto out;
		}
		if (pte_write(pteval)) {
			writable = true;
		} else {
			if (PageSwapCache(page) &&
			    !reuse_swap_page(page, NULL)) {
				unlock_page(page);
				result = SCAN_SWAP_CACHE_PAGE;
				goto out;
			}
			/*
			 * Page is not in the swap cache. It can be collapsed
			 * into a THP.
			 */
		}

		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (isolate_lru_page(page)) {
			unlock_page(page);
			result = SCAN_DEL_PAGE_LRU;
			goto out;
		}
		/* 0 stands for page_is_file_cache(page) == false */
		inc_node_page_state(page, NR_ISOLATED_ANON + 0);
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageLRU(page), page);

		/* There should be enough young ptes to collapse the page */
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;
	}

	if (unlikely(!writable)) {
		result = SCAN_PAGE_RO;
	} else if (unlikely(!referenced)) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
		trace_mm_collapse_huge_page_isolate(page, none_or_zero,
						    referenced, writable, result);
		return 1;
	}
out:
	release_pte_pages(pte, _pte);
	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
					    referenced, writable, result);
	return 0;
}

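/*
 * Copy the contents of the isolated ptes' pages into the new huge page
 * and release them. pte_none/zero-pfn entries become cleared subpages;
 * for the rest, the pte is cleared, the rmap is torn down, and the old
 * page (plus any swap cache) is freed.
 */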
static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl)
{
	pte_t *_pte;
	for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
		pte_t pteval = *_pte;
		struct page *src_page;

		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				/*
				 * paravirt calls inside pte_clear here are
				 * superfluous.
				 */
				pte_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
			}
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
			release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			/*
			 * paravirt calls inside pte_clear here are
			 * superfluous.
			 */
			pte_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, false);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}

		address += PAGE_SIZE;
		page++;
	}
}

static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	freezable_schedule_timeout_interruptible(
		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}

static int khugepaged_node_load[MAX_NUMNODES];

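/*
 * Abort the scan if collapsing would pull pages from a node further
 * than RECLAIM_DISTANCE from any node already counted in
 * khugepaged_node_load[]. For example (illustrative numbers): with
 * RECLAIM_DISTANCE == 30 and node_distance(0, 1) == 40, a range with
 * pages on both node 0 and node 1 is not collapsed, since the huge
 * page would impose remote accesses on whichever node loses.
 */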
static bool khugepaged_scan_abort(int nid)
{
	int i;

	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_mode)
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (khugepaged_node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!khugepaged_node_load[i])
			continue;
		if (node_distance(nid, i) > RECLAIM_DISTANCE)
			return true;
	}
	return false;
}

/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}

#ifdef CONFIG_NUMA
static int khugepaged_find_target_node(void)
{
	static int last_khugepaged_target_node = NUMA_NO_NODE;
	int nid, target_node = 0, max_value = 0;

	/* find first node with max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (khugepaged_node_load[nid] > max_value) {
			max_value = khugepaged_node_load[nid];
			target_node = nid;
		}

	/* do some balance if several nodes have the same hit record */
	if (target_node <= last_khugepaged_target_node)
		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
				nid++)
			if (max_value == khugepaged_node_load[nid]) {
				target_node = nid;
				break;
			}

	last_khugepaged_target_node = target_node;
	return target_node;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (IS_ERR(*hpage)) {
		if (!*wait)
			return false;

		*wait = false;
		*hpage = NULL;
		khugepaged_alloc_sleep();
	} else if (*hpage) {
		put_page(*hpage);
		*hpage = NULL;
	}

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON_PAGE(*hpage, *hpage);

	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		*hpage = ERR_PTR(-ENOMEM);
		return NULL;
	}

	prep_transhuge_page(*hpage);
	count_vm_event(THP_COLLAPSE_ALLOC);
	return *hpage;
}
#else
static int khugepaged_find_target_node(void)
{
	return 0;
}

static inline struct page *alloc_khugepaged_hugepage(void)
{
	struct page *page;

	page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
			   HPAGE_PMD_ORDER);
	if (page)
		prep_transhuge_page(page);
	return page;
}

static struct page *khugepaged_alloc_hugepage(bool *wait)
{
	struct page *hpage;

	do {
		hpage = alloc_khugepaged_hugepage();
		if (!hpage) {
			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
			if (!*wait)
				return NULL;

			*wait = false;
			khugepaged_alloc_sleep();
		} else
			count_vm_event(THP_COLLAPSE_ALLOC);
	} while (unlikely(!hpage) && likely(khugepaged_enabled()));

	return hpage;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	/*
	 * If the hpage allocated earlier was briefly exposed in page cache
	 * before collapse_file() failed, it is possible that racing lookups
	 * have not yet completed, and would then be unpleasantly surprised by
	 * finding the hpage reused for the same mapping at a different offset.
	 * Just release the previous allocation if there is any danger of that.
	 */
	if (*hpage && page_count(*hpage) > 1) {
		put_page(*hpage);
		*hpage = NULL;
	}

	if (!*hpage)
		*hpage = khugepaged_alloc_hugepage(wait);

	if (unlikely(!*hpage))
		return false;

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON(!*hpage);

	return *hpage;
}
#endif

static bool hugepage_vma_check(struct vm_area_struct *vma)
{
	if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
	    (vma->vm_flags & VM_NOHUGEPAGE))
		return false;
	if (shmem_file(vma->vm_file)) {
		if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
			return false;
		return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				HPAGE_PMD_NR);
	}
	if (!vma->anon_vma || vma->vm_ops)
		return false;
	if (is_vma_temporary_stack(vma))
		return false;
	return !(vma->vm_flags & VM_NO_KHUGEPAGED);
}

/*
 * If mmap_sem was temporarily dropped, revalidate the vma after
 * re-taking mmap_sem.
 * Returns 0 on success; otherwise returns a non-zero scan_result code.
 */

static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
		struct vm_area_struct **vmap)
{
	struct vm_area_struct *vma;
	unsigned long hstart, hend;

	if (unlikely(khugepaged_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
		return SCAN_ADDRESS_RANGE;
	if (!hugepage_vma_check(vma))
		return SCAN_VMA_CHECK;
	return 0;
}

/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if khugepaged_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held,
 * but with mmap_sem held to protect against vma changes.
 */

static bool __collapse_huge_page_swapin(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmd,
					int referenced)
{
	pte_t pteval;
	int swapped_in = 0, ret = 0;
	struct fault_env fe = {
		.vma = vma,
		.address = address,
		.flags = FAULT_FLAG_ALLOW_RETRY,
		.pmd = pmd,
		.vma_flags = vma->vm_flags,
		.vma_page_prot = vma->vm_page_prot,
	};

	/* we only try to swap in if there are enough young ptes */
	if (referenced < HPAGE_PMD_NR/2) {
		trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
		return false;
	}
	fe.pte = pte_offset_map(pmd, address);
	for (; fe.address < address + HPAGE_PMD_NR*PAGE_SIZE;
			fe.pte++, fe.address += PAGE_SIZE) {
		pteval = *fe.pte;
		if (!is_swap_pte(pteval))
			continue;
		swapped_in++;
		ret = do_swap_page(&fe, pteval);

		/* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
		if (ret & VM_FAULT_RETRY) {
			down_read(&mm->mmap_sem);
			if (hugepage_vma_revalidate(mm, address, &fe.vma)) {
				/* vma is no longer available, don't continue to swapin */
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
			/* check if the pmd is still valid */
			if (mm_find_pmd(mm, address) != pmd)
				return false;
		}
		if (ret & VM_FAULT_ERROR) {
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			return false;
		}
		/* pte is unmapped now, we need to map it */
		fe.pte = pte_offset_map(pmd, fe.address);
	}
	fe.pte--;
	pte_unmap(fe.pte);
	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
	return true;
}

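/*
 * Attempt the actual collapse of one PMD-sized anonymous range:
 * allocate the huge page (mmap_sem is dropped for the allocation),
 * charge it to memcg, revalidate the vma, swap in any missing pages,
 * then take mmap_sem for write, flush the pmd, isolate and copy the
 * small pages, and finally install the huge pmd. On failure after the
 * pmd flush, the original page table is restored via pmd_populate().
 */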
static void collapse_huge_page(struct mm_struct *mm,
				   unsigned long address,
				   struct page **hpage,
				   int node, int referenced)
{
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *new_page;
	spinlock_t *pmd_ptl, *pte_ptl;
	int isolated = 0, result = 0;
	struct mem_cgroup *memcg;
	struct vm_area_struct *vma;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */
	gfp_t gfp;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/* Only allocate from the target node */
	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_OTHER_NODE | __GFP_THISNODE;

	/*
	 * Before allocating the hugepage, release the mmap_sem read lock.
	 * The allocation can take potentially a long time if it involves
	 * sync compaction, and we do not need to hold the mmap_sem during
	 * that. We will recheck the vma after taking it again in write mode.
	 */
	up_read(&mm->mmap_sem);
	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out_nolock;
	}

	/* Do not oom kill for khugepaged charges */
	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp | __GFP_NORETRY,
					   &memcg, true))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out_nolock;
	}

	down_read(&mm->mmap_sem);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result) {
		mem_cgroup_cancel_charge(new_page, memcg, true);
		up_read(&mm->mmap_sem);
		goto out_nolock;
	}

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		mem_cgroup_cancel_charge(new_page, memcg, true);
		up_read(&mm->mmap_sem);
		goto out_nolock;
	}

	/*
	 * __collapse_huge_page_swapin always returns with mmap_sem locked.
	 * If it fails, we release mmap_sem and jump out_nolock.
	 * Continuing to collapse causes inconsistency.
	 */
	if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {
		mem_cgroup_cancel_charge(new_page, memcg, true);
		up_read(&mm->mmap_sem);
		goto out_nolock;
	}

	up_read(&mm->mmap_sem);
	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 */
	down_write(&mm->mmap_sem);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result)
		goto out;
	/* check if the pmd is still valid */
	if (mm_find_pmd(mm, address) != pmd)
		goto out;

	vm_write_begin(vma);
	anon_vma_lock_write(vma->anon_vma);

	pte = pte_offset_map(pmd, address);
	pte_ptl = pte_lockptr(mm, pmd);

	mmun_start = address;
	mmun_end   = address + HPAGE_PMD_SIZE;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
	/*
	 * After this gup_fast can't run anymore. This also removes
	 * any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address
	 * to avoid the risk of CPU bugs in that area.
	 */
	_pmd = pmdp_collapse_flush(vma, address, pmd);
	spin_unlock(pmd_ptl);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

	spin_lock(pte_ptl);
	isolated = __collapse_huge_page_isolate(vma, address, pte);
	spin_unlock(pte_ptl);

	if (unlikely(!isolated)) {
		pte_unmap(pte);
		spin_lock(pmd_ptl);
		BUG_ON(!pmd_none(*pmd));
		/*
		 * We can only use set_pmd_at when establishing
		 * hugepmds and never for establishing regular pmds that
		 * points to regular pagetables. Use pmd_populate for that
		 */
		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
		spin_unlock(pmd_ptl);
		anon_vma_unlock_write(vma->anon_vma);
		vm_write_end(vma);
		result = SCAN_FAIL;
		goto out;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock_write(vma->anon_vma);

	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
	pte_unmap(pte);
	__SetPageUptodate(new_page);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);

	/*
	 * spin_lock() below is not the equivalent of smp_wmb(), so
	 * this is needed to avoid the copy_huge_page writes to become
	 * visible after the set_pmd_at() write.
	 */
	smp_wmb();

	spin_lock(pmd_ptl);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(new_page, vma, address, true);
	mem_cgroup_commit_charge(new_page, memcg, false, true);
	lru_cache_add_active_or_unevictable(new_page, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	spin_unlock(pmd_ptl);
	vm_write_end(vma);

	*hpage = NULL;

	khugepaged_pages_collapsed++;
	result = SCAN_SUCCEED;
out_up_write:
	up_write(&mm->mmap_sem);
out_nolock:
	trace_mm_collapse_huge_page(mm, isolated, result);
	return;
out:
	mem_cgroup_cancel_charge(new_page, memcg, true);
	goto out_up_write;
}

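/*
 * Scan one pmd-sized range and decide whether it is worth collapsing:
 * count none/zero, swap, and young ptes, record the NUMA node of each
 * mapped page, and bail out with a scan_result code on the first
 * disqualifying condition. Returns 1 (and calls collapse_huge_page()
 * with mmap_sem released) if the range qualifies.
 */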
static int khugepaged_scan_pmd(struct mm_struct *mm,
			       struct vm_area_struct *vma,
			       unsigned long address,
			       struct page **hpage)
{
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int ret = 0, none_or_zero = 0, result = 0, referenced = 0;
	struct page *page = NULL;
	unsigned long _address;
	spinlock_t *ptl;
	int node = NUMA_NO_NODE, unmapped = 0;
	bool writable = false;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		goto out;
	}

	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (is_swap_pte(pteval)) {
			if (++unmapped <= khugepaged_max_ptes_swap) {
				continue;
			} else {
				result = SCAN_EXCEED_SWAP_PTE;
				goto out_unmap;
			}
		}
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out_unmap;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out_unmap;
		}
		if (pte_write(pteval))
			writable = true;

		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out_unmap;
		}

		/* TODO: teach khugepaged to collapse THP mapped with pte */
		if (PageCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			goto out_unmap;
		}

		/*
		 * Record which node the original page is from and save this
		 * information to khugepaged_node_load[].
		 * Khugepaged will allocate a hugepage from the node with the
		 * max hit record.
		 */
		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			goto out_unmap;
		}
		khugepaged_node_load[node]++;
		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			goto out_unmap;
		}
		if (PageLocked(page)) {
			result = SCAN_PAGE_LOCK;
			goto out_unmap;
		}
		if (!PageAnon(page)) {
			result = SCAN_PAGE_ANON;
			goto out_unmap;
		}

		/*
		 * cannot use mapcount: can't collapse if there's a gup pin.
		 * The page must only be referenced by the scanned process
		 * and page swap cache.
		 */
		if (page_count(page) != 1 + !!PageSwapCache(page)) {
			result = SCAN_PAGE_COUNT;
			goto out_unmap;
		}
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;
	}
	if (writable) {
		if (referenced) {
			result = SCAN_SUCCEED;
			ret = 1;
		} else {
			result = SCAN_LACK_REFERENCED_PAGE;
		}
	} else {
		result = SCAN_PAGE_RO;
	}
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (ret) {
		node = khugepaged_find_target_node();
		/* collapse_huge_page will return with the mmap_sem released */
		collapse_huge_page(mm, address, hpage, node, referenced);
	}
out:
	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
				     none_or_zero, result, unmapped);
	return ret;
}

static void collect_mm_slot(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;

	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));

	if (khugepaged_test_exit(mm)) {
		/* free mm_slot */
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
		 *
		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		 */

		/* khugepaged_mm_lock actually not necessary for the below */
		free_mm_slot(mm_slot);
		mmdrop(mm);
	}
}

#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
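/*
 * Remove empty pte-level page tables that still map the collapsed range
 * in other processes, so that a later fault can install a huge pmd.
 * Only vmas without an anon_vma are handled, and only when the write
 * mmap_sem can be taken with a trylock.
 */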
static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
	pmd_t *pmd, _pmd;

	i_mmap_lock_write(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		/* probably overkill */
		if (vma->anon_vma)
			continue;
		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		if (addr & ~HPAGE_PMD_MASK)
			continue;
		if (vma->vm_end < addr + HPAGE_PMD_SIZE)
			continue;
		mm = vma->vm_mm;
		pmd = mm_find_pmd(mm, addr);
		if (!pmd)
			continue;
		/*
		 * We need exclusive mmap_sem to retract page table.
		 * If trylock fails we would end up with pte-mapped THP after
		 * re-fault. Not ideal, but it's more important to not disturb
		 * the system too much.
		 */
		if (down_write_trylock(&mm->mmap_sem)) {
			if (!khugepaged_test_exit(mm)) {
				spinlock_t *ptl = pmd_lock(mm, pmd);
				/* assume page table is clear */
				_pmd = pmdp_collapse_flush(vma, addr, pmd);
				spin_unlock(ptl);
				atomic_long_dec(&mm->nr_ptes);
				pte_free(mm, pmd_pgtable(_pmd));
			}
			up_write(&mm->mmap_sem);
		}
	}
	i_mmap_unlock_write(mapping);
}

/**
 * collapse_shmem - collapse small tmpfs/shmem pages into a huge one.
 *
 * Basic scheme is simple, details are more complex:
 *  - allocate and lock a new huge page;
 *  - scan over the radix tree, replacing old pages with the new one
 *    + swap in pages if necessary;
 *    + fill in gaps;
 *    + keep old pages around in case rollback is required;
 *  - if replacing succeeds:
 *    + copy data over;
 *    + free old pages;
 *    + unlock huge page;
 *  - if replacing fails:
 *    + put all pages back and unfreeze them;
 *    + restore gaps in the radix-tree;
 *    + unlock and free huge page;
 */
static void collapse_shmem(struct mm_struct *mm,
		struct address_space *mapping, pgoff_t start,
		struct page **hpage, int node)
{
	gfp_t gfp;
	struct page *page, *new_page, *tmp;
	struct mem_cgroup *memcg;
	pgoff_t index, end = start + HPAGE_PMD_NR;
	LIST_HEAD(pagelist);
	struct radix_tree_iter iter;
	void **slot;
	int nr_none = 0, result = SCAN_SUCCEED;

	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));

	/* Only allocate from the target node */
	gfp = alloc_hugepage_khugepaged_gfpmask() |
		__GFP_OTHER_NODE | __GFP_THISNODE;

	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out;
	}

	/* Do not oom kill for khugepaged charges */
	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp | __GFP_NORETRY,
					   &memcg, true))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out;
	}

	__SetPageLocked(new_page);
	__SetPageSwapBacked(new_page);
	new_page->index = start;
	new_page->mapping = mapping;

	/*
	 * At this point the new_page is locked and not up-to-date.
	 * It's safe to insert it into the page cache, because nobody would
	 * be able to map it or use it in another way until we unlock it.
	 */

	index = start;
	spin_lock_irq(&mapping->tree_lock);
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		int n = min(iter.index, end) - index;

		/*
		 * Stop if extent has been hole-punched, and is now completely
		 * empty (the more obvious i_size_read() check would take an
		 * irq-unsafe seqlock on 32-bit).
		 */
		if (n >= HPAGE_PMD_NR) {
			result = SCAN_TRUNCATED;
			goto tree_locked;
		}

		/*
		 * Handle holes in the radix tree: charge it from shmem and
		 * insert relevant subpage of new_page into the radix-tree.
		 */
		if (n && !shmem_charge(mapping->host, n)) {
			result = SCAN_FAIL;
			goto tree_locked;
		}
		for (; index < min(iter.index, end); index++) {
			radix_tree_insert(&mapping->page_tree, index,
					new_page + (index % HPAGE_PMD_NR));
		}
		nr_none += n;

		/* We are done. */
		if (index >= end)
			break;

		page = radix_tree_deref_slot_protected(slot,
				&mapping->tree_lock);
		if (radix_tree_exceptional_entry(page) || !PageUptodate(page)) {
			spin_unlock_irq(&mapping->tree_lock);
			/* swap in or instantiate fallocated page */
			if (shmem_getpage(mapping->host, index, &page,
						SGP_NOHUGE)) {
				result = SCAN_FAIL;
				goto tree_unlocked;
			}
		} else if (trylock_page(page)) {
			get_page(page);
			spin_unlock_irq(&mapping->tree_lock);
		} else {
			result = SCAN_PAGE_LOCK;
			goto tree_locked;
		}

		/*
		 * The page must be locked, so we can drop the tree_lock
		 * without racing with truncate.
		 */
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(!PageUptodate(page), page);

		/*
		 * If file was truncated then extended, or hole-punched, before
		 * we locked the first page, then a THP might be there already.
		 */
		if (PageTransCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			goto out_unlock;
		}

		if (page_mapping(page) != mapping) {
			result = SCAN_TRUNCATED;
			goto out_unlock;
		}

		if (isolate_lru_page(page)) {
			result = SCAN_DEL_PAGE_LRU;
			goto out_unlock;
		}

		if (page_mapped(page))
			unmap_mapping_range(mapping, index << PAGE_SHIFT,
					PAGE_SIZE, 0);

		spin_lock_irq(&mapping->tree_lock);

		slot = radix_tree_lookup_slot(&mapping->page_tree, index);
		VM_BUG_ON_PAGE(page != radix_tree_deref_slot_protected(slot,
					&mapping->tree_lock), page);
		VM_BUG_ON_PAGE(page_mapped(page), page);

		/*
		 * The page is expected to have page_count() == 3:
		 *  - we hold a pin on it;
		 *  - one reference from radix tree;
		 *  - one from isolate_lru_page;
		 */
		if (!page_ref_freeze(page, 3)) {
			result = SCAN_PAGE_COUNT;
			spin_unlock_irq(&mapping->tree_lock);
			putback_lru_page(page);
			goto out_unlock;
		}

		/*
		 * Add the page to the list to be able to undo the collapse if
		 * something goes wrong.
		 */
		list_add_tail(&page->lru, &pagelist);

		/* Finally, replace with the new page. */
		radix_tree_replace_slot(slot,
				new_page + (index % HPAGE_PMD_NR));

		slot = radix_tree_iter_next(&iter);
		index++;
		continue;
out_unlock:
		unlock_page(page);
		put_page(page);
		goto tree_unlocked;
	}

	/*
	 * Handle hole in radix tree at the end of the range.
	 * This code only triggers if there's nothing in radix tree
	 * beyond 'end'.
	 */
	if (index < end) {
		int n = end - index;

		/* Stop if extent has been truncated, and is now empty */
		if (n >= HPAGE_PMD_NR) {
			result = SCAN_TRUNCATED;
			goto tree_locked;
		}
		if (!shmem_charge(mapping->host, n)) {
			result = SCAN_FAIL;
			goto tree_locked;
		}
		for (; index < end; index++) {
			radix_tree_insert(&mapping->page_tree, index,
					new_page + (index % HPAGE_PMD_NR));
		}
		nr_none += n;
	}

	__inc_node_page_state(new_page, NR_SHMEM_THPS);
	if (nr_none) {
		struct zone *zone = page_zone(new_page);

		__mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
		__mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
	}

tree_locked:
	spin_unlock_irq(&mapping->tree_lock);
tree_unlocked:

	if (result == SCAN_SUCCEED) {
		/*
		 * Replacing old pages with the new one has succeeded; now we
		 * need to copy the content and free the old pages.
		 */
		index = start;
		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
			while (index < page->index) {
				clear_highpage(new_page + (index % HPAGE_PMD_NR));
				index++;
			}
			copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
					page);
			list_del(&page->lru);
			page->mapping = NULL;
			page_ref_unfreeze(page, 1);
			ClearPageActive(page);
			ClearPageUnevictable(page);
			unlock_page(page);
			put_page(page);
			index++;
		}
		while (index < end) {
			clear_highpage(new_page + (index % HPAGE_PMD_NR));
			index++;
		}

		SetPageUptodate(new_page);
		page_ref_add(new_page, HPAGE_PMD_NR - 1);
		set_page_dirty(new_page);
		mem_cgroup_commit_charge(new_page, memcg, false, true);
		lru_cache_add_anon(new_page);

		/*
		 * Remove pte page tables, so we can re-fault the page as huge.
		 */
		retract_page_tables(mapping, start);
		*hpage = NULL;
	} else {
		/* Something went wrong: rollback changes to the radix-tree */
		spin_lock_irq(&mapping->tree_lock);
		mapping->nrpages -= nr_none;
		shmem_uncharge(mapping->host, nr_none);

		radix_tree_for_each_slot(slot, &mapping->page_tree, &iter,
				start) {
			if (iter.index >= end)
				break;
			page = list_first_entry_or_null(&pagelist,
					struct page, lru);
			if (!page || iter.index < page->index) {
				if (!nr_none)
					break;
				nr_none--;
				/* Put holes back where they were */
				radix_tree_delete(&mapping->page_tree,
						  iter.index);
				slot = radix_tree_iter_next(&iter);
				continue;
			}

			VM_BUG_ON_PAGE(page->index != iter.index, page);

			/* Unfreeze the page. */
			list_del(&page->lru);
			page_ref_unfreeze(page, 2);
			radix_tree_replace_slot(slot, page);
			spin_unlock_irq(&mapping->tree_lock);
			unlock_page(page);
			putback_lru_page(page);
			spin_lock_irq(&mapping->tree_lock);
			slot = radix_tree_iter_next(&iter);
		}
		VM_BUG_ON(nr_none);
		spin_unlock_irq(&mapping->tree_lock);

		mem_cgroup_cancel_charge(new_page, memcg, true);
		new_page->mapping = NULL;
	}

	unlock_page(new_page);
out:
	VM_BUG_ON(!list_empty(&pagelist));
	/* TODO: tracepoints */
}

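/*
 * Scan the radix tree of a shmem mapping over one HPAGE_PMD_NR-sized
 * extent: count present and swapped-out entries, track NUMA placement,
 * and call collapse_shmem() if enough pages are present (the hole
 * budget is shared with max_ptes_none).
 */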
1612static void khugepaged_scan_shmem(struct mm_struct *mm,
1613 struct address_space *mapping,
1614 pgoff_t start, struct page **hpage)
1615{
1616 struct page *page = NULL;
1617 struct radix_tree_iter iter;
1618 void **slot;
1619 int present, swap;
1620 int node = NUMA_NO_NODE;
1621 int result = SCAN_SUCCEED;
1622
1623 present = 0;
1624 swap = 0;
1625 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1626 rcu_read_lock();
1627 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
1628 if (iter.index >= start + HPAGE_PMD_NR)
1629 break;
1630
1631 page = radix_tree_deref_slot(slot);
1632 if (radix_tree_deref_retry(page)) {
1633 slot = radix_tree_iter_retry(&iter);
1634 continue;
1635 }
1636
1637 if (radix_tree_exception(page)) {
1638 if (++swap > khugepaged_max_ptes_swap) {
1639 result = SCAN_EXCEED_SWAP_PTE;
1640 break;
1641 }
1642 continue;
1643 }
1644
1645 if (PageTransCompound(page)) {
1646 result = SCAN_PAGE_COMPOUND;
1647 break;
1648 }
1649
1650 node = page_to_nid(page);
1651 if (khugepaged_scan_abort(node)) {
1652 result = SCAN_SCAN_ABORT;
1653 break;
1654 }
1655 khugepaged_node_load[node]++;
1656
1657 if (!PageLRU(page)) {
1658 result = SCAN_PAGE_LRU;
1659 break;
1660 }
1661
1662 if (page_count(page) != 1 + page_mapcount(page)) {
1663 result = SCAN_PAGE_COUNT;
1664 break;
1665 }
1666
1667 /*
1668 * We probably should check if the page is referenced here, but
1669 * nobody would transfer pte_young() to PageReferenced() for us.
1670 * And rmap walk here is just too costly...
1671 */
1672
1673 present++;
1674
1675 if (need_resched()) {
1676 cond_resched_rcu();
1677 slot = radix_tree_iter_next(&iter);
1678 }
1679 }
1680 rcu_read_unlock();
1681
1682 if (result == SCAN_SUCCEED) {
1683 if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
1684 result = SCAN_EXCEED_NONE_PTE;
1685 } else {
1686 node = khugepaged_find_target_node();
1687 collapse_shmem(mm, mapping, start, hpage, node);
1688 }
1689 }
1690
1691 /* TODO: tracepoints */
1692}
1693#else
1694static void khugepaged_scan_shmem(struct mm_struct *mm,
1695 struct address_space *mapping,
1696 pgoff_t start, struct page **hpage)
1697{
1698 BUILD_BUG();
1699}
1700#endif
1701
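/*
 * khugepaged_scan_mm_slot - make up to @pages worth of scanning progress
 *
 * Resume from the cursor saved in khugepaged_scan, walking each mm's
 * vmas and invoking khugepaged_scan_pmd() (or khugepaged_scan_shmem()
 * for shmem-backed vmas) on every huge-page-aligned range.  The cursor
 * is advanced so the next call picks up where this one stopped.  Called
 * and returns with khugepaged_mm_lock held, though the lock is dropped
 * for the scan itself.  Returns the progress made.
 */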
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001702static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
1703 struct page **hpage)
1704 __releases(&khugepaged_mm_lock)
1705 __acquires(&khugepaged_mm_lock)
1706{
1707 struct mm_slot *mm_slot;
1708 struct mm_struct *mm;
1709 struct vm_area_struct *vma;
1710 int progress = 0;
1711
1712 VM_BUG_ON(!pages);
1713 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
1714
1715 if (khugepaged_scan.mm_slot)
1716 mm_slot = khugepaged_scan.mm_slot;
1717 else {
1718 mm_slot = list_entry(khugepaged_scan.mm_head.next,
1719 struct mm_slot, mm_node);
1720 khugepaged_scan.address = 0;
1721 khugepaged_scan.mm_slot = mm_slot;
1722 }
1723 spin_unlock(&khugepaged_mm_lock);
1724
1725 mm = mm_slot->mm;
Yang Shie56d3702018-01-31 16:18:28 -08001726 /*
1727 * Don't wait for semaphore (to avoid long wait times). Just move to
1728 * the next mm on the list.
1729 */
1730 vma = NULL;
1731 if (unlikely(!down_read_trylock(&mm->mmap_sem)))
1732 goto breakouterloop_mmap_sem;
1733 if (likely(!khugepaged_test_exit(mm)))
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001734 vma = find_vma(mm, khugepaged_scan.address);
1735
1736 progress++;
1737 for (; vma; vma = vma->vm_next) {
1738 unsigned long hstart, hend;
1739
1740 cond_resched();
1741 if (unlikely(khugepaged_test_exit(mm))) {
1742 progress++;
1743 break;
1744 }
1745 if (!hugepage_vma_check(vma)) {
1746skip:
1747 progress++;
1748 continue;
1749 }
1750 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1751 hend = vma->vm_end & HPAGE_PMD_MASK;
1752 if (hstart >= hend)
1753 goto skip;
1754 if (khugepaged_scan.address > hend)
1755 goto skip;
1756 if (khugepaged_scan.address < hstart)
1757 khugepaged_scan.address = hstart;
1758 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
1759
1760 while (khugepaged_scan.address < hend) {
1761 int ret;
1762 cond_resched();
1763 if (unlikely(khugepaged_test_exit(mm)))
1764 goto breakouterloop;
1765
1766 VM_BUG_ON(khugepaged_scan.address < hstart ||
1767 khugepaged_scan.address + HPAGE_PMD_SIZE >
1768 hend);
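			/* shmem has no ptes to scan: walk the page cache instead */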
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001769 if (shmem_file(vma->vm_file)) {
Kirill A. Shutemove496cf32016-07-26 15:26:35 -07001770 struct file *file;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001771 pgoff_t pgoff = linear_page_index(vma,
1772 khugepaged_scan.address);
Kirill A. Shutemove496cf32016-07-26 15:26:35 -07001773 if (!shmem_huge_enabled(vma))
1774 goto skip;
1775 file = get_file(vma->vm_file);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001776 up_read(&mm->mmap_sem);
1777 ret = 1;
1778 khugepaged_scan_shmem(mm, file->f_mapping,
1779 pgoff, hpage);
1780 fput(file);
1781 } else {
1782 ret = khugepaged_scan_pmd(mm, vma,
1783 khugepaged_scan.address,
1784 hpage);
1785 }
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001786 /* move to next address */
1787 khugepaged_scan.address += HPAGE_PMD_SIZE;
1788 progress += HPAGE_PMD_NR;
1789 if (ret)
1790 /* we released mmap_sem so break loop */
1791 goto breakouterloop_mmap_sem;
1792 if (progress >= pages)
1793 goto breakouterloop;
1794 }
1795 }
1796breakouterloop:
1797 up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
1798breakouterloop_mmap_sem:
1799
1800 spin_lock(&khugepaged_mm_lock);
1801 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
1802 /*
1803 * Release the current mm_slot if this mm is about to die, or
1804 * if we scanned all vmas of this mm.
1805 */
1806 if (khugepaged_test_exit(mm) || !vma) {
1807 /*
1808 * Make sure that if mm_users is reaching zero while
1809 * khugepaged runs here, khugepaged_exit will find
1810 * mm_slot not pointing to the exiting mm.
1811 */
1812 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
1813 khugepaged_scan.mm_slot = list_entry(
1814 mm_slot->mm_node.next,
1815 struct mm_slot, mm_node);
1816 khugepaged_scan.address = 0;
1817 } else {
1818 khugepaged_scan.mm_slot = NULL;
1819 khugepaged_full_scans++;
1820 }
1821
1822 collect_mm_slot(mm_slot);
1823 }
1824
1825 return progress;
1826}
1827
1828static int khugepaged_has_work(void)
1829{
1830 return !list_empty(&khugepaged_scan.mm_head) &&
1831 khugepaged_enabled();
1832}
1833
1834static int khugepaged_wait_event(void)
1835{
1836 return !list_empty(&khugepaged_scan.mm_head) ||
1837 kthread_should_stop();
1838}
1839
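/*
 * One scan pass: keep scanning mm slots until the khugepaged_pages_to_scan
 * quota is used up, the list has been walked from its head twice, hugepage
 * preallocation fails, or the thread is told to stop or freeze.
 */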
1840static void khugepaged_do_scan(void)
1841{
1842 struct page *hpage = NULL;
1843 unsigned int progress = 0, pass_through_head = 0;
1844 unsigned int pages = khugepaged_pages_to_scan;
1845 bool wait = true;
1846
1847	barrier(); /* ensure khugepaged_pages_to_scan is read once into the local 'pages' */
1848
1849 while (progress < pages) {
1850 if (!khugepaged_prealloc_page(&hpage, &wait))
1851 break;
1852
1853 cond_resched();
1854
1855 if (unlikely(kthread_should_stop() || try_to_freeze()))
1856 break;
1857
1858 spin_lock(&khugepaged_mm_lock);
1859 if (!khugepaged_scan.mm_slot)
1860 pass_through_head++;
1861 if (khugepaged_has_work() &&
1862 pass_through_head < 2)
1863 progress += khugepaged_scan_mm_slot(pages - progress,
1864 &hpage);
1865 else
1866 progress = pages;
1867 spin_unlock(&khugepaged_mm_lock);
1868 }
1869
1870 if (!IS_ERR_OR_NULL(hpage))
1871 put_page(hpage);
1872}
1873
1874static bool khugepaged_should_wakeup(void)
1875{
1876 return kthread_should_stop() ||
1877 time_after_eq(jiffies, khugepaged_sleep_expire);
1878}
1879
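/*
 * Sleep between scan passes: a freezable timed wait of
 * khugepaged_scan_sleep_millisecs while work is still queued, otherwise
 * an indefinite freezable wait until new work arrives or the thread is
 * stopped.
 */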
1880static void khugepaged_wait_work(void)
1881{
1882 if (khugepaged_has_work()) {
1883 const unsigned long scan_sleep_jiffies =
1884 msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
1885
1886 if (!scan_sleep_jiffies)
1887 return;
1888
1889 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
1890 wait_event_freezable_timeout(khugepaged_wait,
1891 khugepaged_should_wakeup(),
1892 scan_sleep_jiffies);
1893 return;
1894 }
1895
1896 if (khugepaged_enabled())
1897 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
1898}
1899
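/*
 * khugepaged - main loop of the khugepaged kernel thread
 *
 * Alternate between khugepaged_do_scan() and khugepaged_wait_work()
 * until kthread_stop(), then release any mm_slot the scan cursor still
 * points at.
 */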
1900static int khugepaged(void *none)
1901{
1902 struct mm_slot *mm_slot;
1903
1904 set_freezable();
1905 set_user_nice(current, MAX_NICE);
1906
1907 while (!kthread_should_stop()) {
1908 khugepaged_do_scan();
1909 khugepaged_wait_work();
1910 }
1911
1912 spin_lock(&khugepaged_mm_lock);
1913 mm_slot = khugepaged_scan.mm_slot;
1914 khugepaged_scan.mm_slot = NULL;
1915 if (mm_slot)
1916 collect_mm_slot(mm_slot);
1917 spin_unlock(&khugepaged_mm_lock);
1918 return 0;
1919}
1920
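/*
 * Raise min_free_kbytes so the page allocator keeps enough pageblocks
 * free for anti-fragmentation: two per zone, plus MIGRATE_PCPTYPES *
 * MIGRATE_PCPTYPES per zone, capped at 5% of lowmem.  Illustrative
 * sketch only (assumed config, not authoritative): on x86-64 with 4K
 * pages a pageblock is 512 pages (2M); with MIGRATE_PCPTYPES == 3 and,
 * say, 4 populated zones this requests 512*4*2 + 512*4*3*3 = 22528
 * pages, i.e. 22528 << (PAGE_SHIFT - 10) = 90112 KB.
 */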
1921static void set_recommended_min_free_kbytes(void)
1922{
1923 struct zone *zone;
1924 int nr_zones = 0;
1925 unsigned long recommended_min;
1926
1927 for_each_populated_zone(zone)
1928 nr_zones++;
1929
1930 /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
1931 recommended_min = pageblock_nr_pages * nr_zones * 2;
1932
1933 /*
1934 * Make sure that on average at least two pageblocks are almost free
1935 * of another type, one for a migratetype to fall back to and a
1936 * second to avoid subsequent fallbacks of other types. There are 3
1937 * MIGRATE_TYPES we care about.
1938 */
1939 recommended_min += pageblock_nr_pages * nr_zones *
1940 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
1941
1942	/* never allow reserving more than 5% of the lowmem */
1943 recommended_min = min(recommended_min,
1944 (unsigned long) nr_free_buffer_pages() / 20);
1945 recommended_min <<= (PAGE_SHIFT-10);
1946
1947 if (recommended_min > min_free_kbytes) {
1948 if (user_min_free_kbytes >= 0)
1949 pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
1950 min_free_kbytes, recommended_min);
1951
1952 min_free_kbytes = recommended_min;
1953 }
1954 setup_per_zone_wmarks();
1955}
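/*
 * start_stop_khugepaged - reconcile the kthread with the enabled state
 *
 * Under khugepaged_mutex: spawn the khugepaged thread when hugepages
 * are enabled (waking it if work is already queued and raising the
 * watermarks), or stop it when they are disabled.  Returns 0 on
 * success or the kthread_run() error.
 */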
1956
1957int start_stop_khugepaged(void)
1958{
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001959 int err = 0;
1960
1961 mutex_lock(&khugepaged_mutex);
1962 if (khugepaged_enabled()) {
1963 if (!khugepaged_thread)
1964 khugepaged_thread = kthread_run(khugepaged, NULL,
1965 "khugepaged");
1966 if (IS_ERR(khugepaged_thread)) {
1967 pr_err("khugepaged: kthread_run(khugepaged) failed\n");
1968 err = PTR_ERR(khugepaged_thread);
1969 khugepaged_thread = NULL;
1970 goto fail;
1971 }
1972
1973 if (!list_empty(&khugepaged_scan.mm_head))
1974 wake_up_interruptible(&khugepaged_wait);
1975
1976 set_recommended_min_free_kbytes();
1977 } else if (khugepaged_thread) {
1978 kthread_stop(khugepaged_thread);
1979 khugepaged_thread = NULL;
1980 }
1981fail:
1982 mutex_unlock(&khugepaged_mutex);
1983 return err;
1984}
Vijay Balakrishna189394c2020-10-10 23:16:40 -07001985
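/*
 * Re-apply the raised watermarks if khugepaged is running; called when
 * min_free_kbytes is recomputed (e.g. after memory hotplug), which
 * would otherwise undo the reservation made above.
 */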
1986void khugepaged_min_free_kbytes_update(void)
1987{
1988 mutex_lock(&khugepaged_mutex);
1989 if (khugepaged_enabled() && khugepaged_thread)
1990 set_recommended_min_free_kbytes();
1991 mutex_unlock(&khugepaged_mutex);
1992}