// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_PMD_NULL,
	SCAN_EXCEED_NONE_PTE,
	SCAN_EXCEED_SWAP_PTE,
	SCAN_EXCEED_SHARED_PTE,
	SCAN_PTE_NON_PRESENT,
	SCAN_PTE_UFFD_WP,
	SCAN_PAGE_RO,
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_PAGE_NULL,
	SCAN_SCAN_ABORT,
	SCAN_PAGE_COUNT,
	SCAN_PAGE_LRU,
	SCAN_PAGE_LOCK,
	SCAN_PAGE_ANON,
	SCAN_PAGE_COMPOUND,
	SCAN_ANY_PROCESS,
	SCAN_VMA_NULL,
	SCAN_VMA_CHECK,
	SCAN_ADDRESS_RANGE,
	SCAN_SWAP_CACHE_PAGE,
	SCAN_DEL_PAGE_LRU,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_TRUNCATED,
	SCAN_PAGE_HAS_PRIVATE,
};

#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>

static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);

/* default: scan 8*512 PTEs (or VMAs) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation, poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default, collapse a hugepage if at least one pte is mapped, just
 * as would have happened on a page fault if the vma had been large
 * enough.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
static unsigned int khugepaged_max_ptes_shared __read_mostly;

#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

#define MAX_PTE_MAPPED_THP 8

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 * @nr_pte_mapped_thp: number of pte-mapped THPs
 * @pte_mapped_thp: array of addresses of the pte-mapped THPs
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;

	/* pte-mapped THPs in this mm */
	int nr_pte_mapped_thp;
	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that mm to be scanned
 *
 * There is only one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};

#ifdef CONFIG_SYSFS
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
	       scan_sleep_millisecs_store);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
	       alloc_sleep_millisecs_store);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int pages;
	int err;

	err = kstrtouint(buf, 10, &pages);
	if (err || !pages)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
	       pages_to_scan_store);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t khugepaged_defrag_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
					 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
					  TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR(defrag, 0644, khugepaged_defrag_show,
	       khugepaged_defrag_store);

/*
 * max_ptes_none controls whether khugepaged should collapse hugepages
 * over any unmapped ptes, in turn potentially increasing the memory
 * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
 * reduce the available free memory in the system as it runs.
 * Increasing max_ptes_none will instead potentially reduce the free
 * memory in the system during the khugepaged scan.
 */
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
	       khugepaged_max_ptes_none_store);

static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
	       khugepaged_max_ptes_swap_store);

static ssize_t khugepaged_max_ptes_shared_show(struct kobject *kobj,
					       struct kobj_attribute *attr,
					       char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
}

static ssize_t khugepaged_max_ptes_shared_store(struct kobject *kobj,
						struct kobj_attribute *attr,
						const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_shared;

	err = kstrtoul(buf, 10, &max_ptes_shared);
	if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_shared = max_ptes_shared;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_shared_attr =
	__ATTR(max_ptes_shared, 0644, khugepaged_max_ptes_shared_show,
	       khugepaged_max_ptes_shared_store);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	&khugepaged_max_ptes_shared_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */

int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes suitable for khugepaged to scan,
		 * register it here without waiting for a page fault that
		 * may not happen any time soon.
		 */
		if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
				khugepaged_enter_vma_merge(vma, *vm_flags))
			return -ENOMEM;
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}

int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct mm_slot),
					  __alignof__(struct mm_slot), 0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;

	return 0;
}

void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;

	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
		if (mm == mm_slot->mm)
			return mm_slot;

	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	mm_slot->mm = mm;
	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
}

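/*
 * mm_users reaching zero means the mm is being torn down, so khugepaged
 * must stop operating on its page tables.
 */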
static inline int khugepaged_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

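/*
 * Check whether a vma is eligible for khugepaged: hugepages enabled for
 * it, suitably aligned, and of a supported type (anonymous, shmem, or a
 * read-only regular file with CONFIG_READ_ONLY_THP_FOR_FS).
 */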
static bool hugepage_vma_check(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	if (!transhuge_vma_enabled(vma, vm_flags))
		return false;

	if (vma->vm_file && !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) -
				vma->vm_pgoff, HPAGE_PMD_NR))
		return false;

	/* Enabled via shmem mount options or sysfs settings. */
	if (shmem_file(vma->vm_file))
		return shmem_huge_enabled(vma);

	/* THP settings require madvise. */
	if (!(vm_flags & VM_HUGEPAGE) && !khugepaged_always())
		return false;

	/* Only regular files are valid. */
	if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && vma->vm_file &&
	    (vm_flags & VM_EXEC)) {
		struct inode *inode = vma->vm_file->f_inode;

		return !inode_is_open_for_write(inode) &&
			S_ISREG(inode->i_mode);
	}

	if (!vma->anon_vma || vma->vm_ops)
		return false;
	if (vma_is_temporary_stack(vma))
		return false;
	return !(vm_flags & VM_NO_KHUGEPAGED);
}

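/*
 * Register an mm with khugepaged: allocate an mm_slot, hash it, queue it
 * on the scan list, and take a reference (mmgrab) on the mm.
 */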
int __khugepaged_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return -ENOMEM;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		free_mm_slot(mm_slot);
		return 0;
	}

	spin_lock(&khugepaged_mm_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	mmgrab(mm);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);

	return 0;
}

int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	unsigned long hstart, hend;

	/*
	 * For non-shmem files, khugepaged only supports read-only ones.
	 * khugepaged does not yet work on special mappings. And
	 * file-private shmem THP is not supported.
	 */
	if (!hugepage_vma_check(vma, vm_flags))
		return 0;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (hstart < hend)
		return khugepaged_enter(vma, vm_flags);
	return 0;
}

void __khugepaged_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		free_mm_slot(mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * khugepaged_test_exit() (which is guaranteed to run
		 * under mmap sem read mode). Stop here (after we return,
		 * all pagetables will be destroyed) until khugepaged has
		 * finished working on the pagetables under the mmap_lock.
		 */
		mmap_write_lock(mm);
		mmap_write_unlock(mm);
	}
}

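/*
 * Undo the isolation of a single page: adjust the NR_ISOLATED counter,
 * unlock the page, and put it back on the LRU.
 */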
static void release_pte_page(struct page *page)
{
	mod_node_page_state(page_pgdat(page),
			NR_ISOLATED_ANON + page_is_file_lru(page),
			-compound_nr(page));
	unlock_page(page);
	putback_lru_page(page);
}

static void release_pte_pages(pte_t *pte, pte_t *_pte,
		struct list_head *compound_pagelist)
{
	struct page *page, *tmp;

	while (--_pte >= pte) {
		pte_t pteval = *_pte;

		page = pte_page(pteval);
		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)) &&
				!PageCompound(page))
			release_pte_page(page);
	}

	list_for_each_entry_safe(page, tmp, compound_pagelist, lru) {
		list_del(&page->lru);
		release_pte_page(page);
	}
}

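/*
 * A page is only safe to collapse if its refcount equals what we can
 * account for: its mappings, plus one swapcache pin per subpage when it
 * sits in the swap cache. Anything beyond that indicates a GUP or other
 * external pin.
 */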
static bool is_refcount_suitable(struct page *page)
{
	int expected_refcount;

	expected_refcount = total_mapcount(page);
	if (PageSwapCache(page))
		expected_refcount += compound_nr(page);

	return page_count(page) == expected_refcount;
}

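/*
 * Walk the HPAGE_PMD_NR ptes under one pmd, validate each mapped page
 * (limits on none/zero and shared ptes, refcount, LRU state) and isolate
 * the pages from the LRU so they cannot be reclaimed while we copy.
 * Returns 1 on success; on failure, releases any pages already isolated
 * and returns 0.
 */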
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700601static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
602 unsigned long address,
Kirill A. Shutemov5503fbf2020-06-03 16:00:23 -0700603 pte_t *pte,
604 struct list_head *compound_pagelist)
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700605{
606 struct page *page = NULL;
607 pte_t *_pte;
Kirill A. Shutemov71a2c112020-06-03 16:00:30 -0700608 int none_or_zero = 0, shared = 0, result = 0, referenced = 0;
Ebru Akagunduz0db501f2016-07-26 15:26:46 -0700609 bool writable = false;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700610
611 for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
612 _pte++, address += PAGE_SIZE) {
613 pte_t pteval = *_pte;
614 if (pte_none(pteval) || (pte_present(pteval) &&
615 is_zero_pfn(pte_pfn(pteval)))) {
616 if (!userfaultfd_armed(vma) &&
617 ++none_or_zero <= khugepaged_max_ptes_none) {
618 continue;
619 } else {
620 result = SCAN_EXCEED_NONE_PTE;
621 goto out;
622 }
623 }
624 if (!pte_present(pteval)) {
625 result = SCAN_PTE_NON_PRESENT;
626 goto out;
627 }
628 page = vm_normal_page(vma, address, pteval);
629 if (unlikely(!page)) {
630 result = SCAN_PAGE_NULL;
631 goto out;
632 }
633
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700634 VM_BUG_ON_PAGE(!PageAnon(page), page);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700635
Kirill A. Shutemov71a2c112020-06-03 16:00:30 -0700636 if (page_mapcount(page) > 1 &&
637 ++shared > khugepaged_max_ptes_shared) {
638 result = SCAN_EXCEED_SHARED_PTE;
639 goto out;
640 }
641
Kirill A. Shutemov5503fbf2020-06-03 16:00:23 -0700642 if (PageCompound(page)) {
643 struct page *p;
644 page = compound_head(page);
645
646 /*
647 * Check if we have dealt with the compound page
648 * already
649 */
650 list_for_each_entry(p, compound_pagelist, lru) {
651 if (page == p)
652 goto next;
653 }
654 }
655
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700656 /*
657 * We can do it before isolate_lru_page because the
658 * page can't be freed from under us. NOTE: PG_lock
659 * is needed to serialize against split_huge_page
660 * when invoked from the VM.
661 */
662 if (!trylock_page(page)) {
663 result = SCAN_PAGE_LOCK;
664 goto out;
665 }
666
667 /*
Kirill A. Shutemov94456892020-06-03 16:00:20 -0700668 * Check if the page has any GUP (or other external) pins.
669 *
670 * The page table that maps the page has been already unlinked
671 * from the page table tree and this process cannot get
Ingo Molnarf0953a12021-05-06 18:06:47 -0700672 * an additional pin on the page.
Kirill A. Shutemov94456892020-06-03 16:00:20 -0700673 *
674 * New pins can come later if the page is shared across fork,
675 * but not from this process. The other process cannot write to
676 * the page, only trigger CoW.
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700677 */
Kirill A. Shutemov94456892020-06-03 16:00:20 -0700678 if (!is_refcount_suitable(page)) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700679 unlock_page(page);
680 result = SCAN_PAGE_COUNT;
681 goto out;
682 }
Kirill A. Shutemov5503fbf2020-06-03 16:00:23 -0700683 if (!pte_write(pteval) && PageSwapCache(page) &&
684 !reuse_swap_page(page, NULL)) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700685 /*
Kirill A. Shutemov5503fbf2020-06-03 16:00:23 -0700686 * Page is in the swap cache and cannot be re-used.
687 * It cannot be collapsed into a THP.
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700688 */
Kirill A. Shutemov5503fbf2020-06-03 16:00:23 -0700689 unlock_page(page);
690 result = SCAN_SWAP_CACHE_PAGE;
691 goto out;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700692 }
693
694 /*
695 * Isolate the page to avoid collapsing an hugepage
696 * currently in use by the VM.
697 */
698 if (isolate_lru_page(page)) {
699 unlock_page(page);
700 result = SCAN_DEL_PAGE_LRU;
701 goto out;
702 }
Kirill A. Shutemov5503fbf2020-06-03 16:00:23 -0700703 mod_node_page_state(page_pgdat(page),
704 NR_ISOLATED_ANON + page_is_file_lru(page),
705 compound_nr(page));
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700706 VM_BUG_ON_PAGE(!PageLocked(page), page);
707 VM_BUG_ON_PAGE(PageLRU(page), page);
708
Kirill A. Shutemov5503fbf2020-06-03 16:00:23 -0700709 if (PageCompound(page))
710 list_add_tail(&page->lru, compound_pagelist);
711next:
Ebru Akagunduz0db501f2016-07-26 15:26:46 -0700712 /* There should be enough young pte to collapse the page */
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700713 if (pte_young(pteval) ||
714 page_is_young(page) || PageReferenced(page) ||
715 mmu_notifier_test_young(vma->vm_mm, address))
Ebru Akagunduz0db501f2016-07-26 15:26:46 -0700716 referenced++;
Kirill A. Shutemov5503fbf2020-06-03 16:00:23 -0700717
718 if (pte_write(pteval))
719 writable = true;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700720 }
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700721
Miaohe Lin74e579b2021-05-04 18:33:46 -0700722 if (unlikely(!writable)) {
723 result = SCAN_PAGE_RO;
724 } else if (unlikely(!referenced)) {
725 result = SCAN_LACK_REFERENCED_PAGE;
726 } else {
727 result = SCAN_SUCCEED;
728 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
729 referenced, writable, result);
730 return 1;
731 }
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700732out:
Kirill A. Shutemov5503fbf2020-06-03 16:00:23 -0700733 release_pte_pages(pte, _pte, compound_pagelist);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700734 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
735 referenced, writable, result);
736 return 0;
737}
738
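/*
 * Copy the contents of the isolated ptes into the new hugepage, clearing
 * the old ptes and dropping each source page's rmap and LRU references
 * as we go. pte_none/zero-pfn entries become zero-filled subpages.
 */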
static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl,
				      struct list_head *compound_pagelist)
{
	struct page *src_page, *tmp;
	pte_t *_pte;
	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, page++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;

		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				/*
				 * paravirt calls inside pte_clear here are
				 * superfluous.
				 */
				pte_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
			}
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			if (!PageCompound(src_page))
				release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			/*
			 * paravirt calls inside pte_clear here are
			 * superfluous.
			 */
			pte_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, false);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}
	}

	list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
		list_del(&src_page->lru);
		release_pte_page(src_page);
	}
}

static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	freezable_schedule_timeout_interruptible(
		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}

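/*
 * Per-scan histogram: how many of the scanned pages live on each NUMA
 * node. Used to pick the allocation node and to abort cross-node scans.
 */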
static int khugepaged_node_load[MAX_NUMNODES];

static bool khugepaged_scan_abort(int nid)
{
	int i;

	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_enabled())
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (khugepaged_node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!khugepaged_node_load[i])
			continue;
		if (node_distance(nid, i) > node_reclaim_distance)
			return true;
	}
	return false;
}

/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}

#ifdef CONFIG_NUMA
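/*
 * Pick the node that accumulated the most hits during the scan; when the
 * best node ties with (or precedes) the previous choice, advance to the
 * next tied node so allocations rotate across equally-loaded nodes.
 */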
static int khugepaged_find_target_node(void)
{
	static int last_khugepaged_target_node = NUMA_NO_NODE;
	int nid, target_node = 0, max_value = 0;

	/* find first node with max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (khugepaged_node_load[nid] > max_value) {
			max_value = khugepaged_node_load[nid];
			target_node = nid;
		}

	/* do some balance if several nodes have the same hit record */
	if (target_node <= last_khugepaged_target_node)
		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
				nid++)
			if (max_value == khugepaged_node_load[nid]) {
				target_node = nid;
				break;
			}

	last_khugepaged_target_node = target_node;
	return target_node;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (IS_ERR(*hpage)) {
		if (!*wait)
			return false;

		*wait = false;
		*hpage = NULL;
		khugepaged_alloc_sleep();
	} else if (*hpage) {
		put_page(*hpage);
		*hpage = NULL;
	}

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON_PAGE(*hpage, *hpage);

	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		*hpage = ERR_PTR(-ENOMEM);
		return NULL;
	}

	prep_transhuge_page(*hpage);
	count_vm_event(THP_COLLAPSE_ALLOC);
	return *hpage;
}
#else
static int khugepaged_find_target_node(void)
{
	return 0;
}

static inline struct page *alloc_khugepaged_hugepage(void)
{
	struct page *page;

	page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
			   HPAGE_PMD_ORDER);
	if (page)
		prep_transhuge_page(page);
	return page;
}

static struct page *khugepaged_alloc_hugepage(bool *wait)
{
	struct page *hpage;

	do {
		hpage = alloc_khugepaged_hugepage();
		if (!hpage) {
			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
			if (!*wait)
				return NULL;

			*wait = false;
			khugepaged_alloc_sleep();
		} else
			count_vm_event(THP_COLLAPSE_ALLOC);
	} while (unlikely(!hpage) && likely(khugepaged_enabled()));

	return hpage;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	/*
	 * If the hpage allocated earlier was briefly exposed in page cache
	 * before collapse_file() failed, it is possible that racing lookups
	 * have not yet completed, and would then be unpleasantly surprised by
	 * finding the hpage reused for the same mapping at a different offset.
	 * Just release the previous allocation if there is any danger of that.
	 */
	if (*hpage && page_count(*hpage) > 1) {
		put_page(*hpage);
		*hpage = NULL;
	}

	if (!*hpage)
		*hpage = khugepaged_alloc_hugepage(wait);

	if (unlikely(!*hpage))
		return false;

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON(!*hpage);

	return *hpage;
}
#endif

/*
 * If the mmap_lock was temporarily dropped, revalidate the vma after
 * re-taking it. Returns 0 on success, otherwise a non-zero scan result
 * code.
 */
static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
		struct vm_area_struct **vmap)
{
	struct vm_area_struct *vma;
	unsigned long hstart, hend;

	if (unlikely(khugepaged_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
		return SCAN_ADDRESS_RANGE;
	if (!hugepage_vma_check(vma, vma->vm_flags))
		return SCAN_VMA_CHECK;
	/* Anon VMA expected */
	if (!vma->anon_vma || vma->vm_ops)
		return SCAN_VMA_CHECK;
	return 0;
}

/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if khugepaged_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held,
 * but with mmap_lock held to protect against vma changes.
 */
static bool __collapse_huge_page_swapin(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long haddr, pmd_t *pmd,
					int referenced)
{
	int swapped_in = 0;
	vm_fault_t ret = 0;
	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);

	for (address = haddr; address < end; address += PAGE_SIZE) {
		struct vm_fault vmf = {
			.vma = vma,
			.address = address,
			.pgoff = linear_page_index(vma, haddr),
			.flags = FAULT_FLAG_ALLOW_RETRY,
			.pmd = pmd,
		};

		vmf.pte = pte_offset_map(pmd, address);
		vmf.orig_pte = *vmf.pte;
		if (!is_swap_pte(vmf.orig_pte)) {
			pte_unmap(vmf.pte);
			continue;
		}
		swapped_in++;
		ret = do_swap_page(&vmf);

		/* do_swap_page returns VM_FAULT_RETRY with released mmap_lock */
		if (ret & VM_FAULT_RETRY) {
			mmap_read_lock(mm);
			if (hugepage_vma_revalidate(mm, haddr, &vma)) {
				/* vma is no longer available, don't continue to swapin */
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
			/* check if the pmd is still valid */
			if (mm_find_pmd(mm, haddr) != pmd) {
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
		}
		if (ret & VM_FAULT_ERROR) {
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			return false;
		}
	}

	/* Drain LRU add pagevec to remove extra pin on the swapped in pages */
	if (swapped_in)
		lru_add_drain();

	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
	return true;
}

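/*
 * Attempt the actual collapse of one pmd range: allocate the hugepage,
 * swap in missing pages, isolate and copy the small pages, then install
 * the huge pmd. Entered with mmap_lock held for read; returns with the
 * lock released.
 */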
static void collapse_huge_page(struct mm_struct *mm,
			       unsigned long address,
			       struct page **hpage,
			       int node, int referenced, int unmapped)
{
	LIST_HEAD(compound_pagelist);
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *new_page;
	spinlock_t *pmd_ptl, *pte_ptl;
	int isolated = 0, result = 0;
	struct vm_area_struct *vma;
	struct mmu_notifier_range range;
	gfp_t gfp;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/* Only allocate from the target node */
	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;

	/*
	 * Before allocating the hugepage, release the mmap_lock read lock.
	 * The allocation can take potentially a long time if it involves
	 * sync compaction, and we do not need to hold the mmap_lock during
	 * that. We will recheck the vma after taking it again in write mode.
	 */
	mmap_read_unlock(mm);
	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out_nolock;
	}

	if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out_nolock;
	}
	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);

	mmap_read_lock(mm);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	/*
	 * __collapse_huge_page_swapin always returns with mmap_lock locked.
	 * If it fails, we release mmap_lock and jump out_nolock.
	 * Continuing to collapse causes inconsistency.
	 */
	if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
						     pmd, referenced)) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	mmap_read_unlock(mm);
	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 */
	mmap_write_lock(mm);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result)
		goto out_up_write;
	/* check if the pmd is still valid */
	if (mm_find_pmd(mm, address) != pmd)
		goto out_up_write;

	anon_vma_lock_write(vma->anon_vma);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
				address, address + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	pte = pte_offset_map(pmd, address);
	pte_ptl = pte_lockptr(mm, pmd);

	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
	/*
	 * After this gup_fast can't run anymore. This also removes
	 * any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address
	 * to avoid the risk of CPU bugs in that area.
	 */
	_pmd = pmdp_collapse_flush(vma, address, pmd);
	spin_unlock(pmd_ptl);
	mmu_notifier_invalidate_range_end(&range);

	spin_lock(pte_ptl);
	isolated = __collapse_huge_page_isolate(vma, address, pte,
			&compound_pagelist);
	spin_unlock(pte_ptl);

	if (unlikely(!isolated)) {
		pte_unmap(pte);
		spin_lock(pmd_ptl);
		BUG_ON(!pmd_none(*pmd));
		/*
		 * We can only use set_pmd_at when establishing
		 * hugepmds and never for establishing regular pmds that
		 * point to regular pagetables. Use pmd_populate for that
		 */
		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
		spin_unlock(pmd_ptl);
		anon_vma_unlock_write(vma->anon_vma);
		result = SCAN_FAIL;
		goto out_up_write;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock_write(vma->anon_vma);

	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl,
			&compound_pagelist);
	pte_unmap(pte);
	/*
	 * spin_lock() below is not the equivalent of smp_wmb(), but
	 * the smp_wmb() inside __SetPageUptodate() can be reused to
	 * avoid the copy_huge_page writes to become visible after
	 * the set_pmd_at() write.
	 */
	__SetPageUptodate(new_page);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);

	spin_lock(pmd_ptl);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(new_page, vma, address, true);
	lru_cache_add_inactive_or_unevictable(new_page, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	spin_unlock(pmd_ptl);

	*hpage = NULL;

	khugepaged_pages_collapsed++;
	result = SCAN_SUCCEED;
out_up_write:
	mmap_write_unlock(mm);
out_nolock:
	if (!IS_ERR_OR_NULL(*hpage))
		mem_cgroup_uncharge(page_folio(*hpage));
	trace_mm_collapse_huge_page(mm, isolated, result);
	return;
}

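/*
 * Scan one pmd range and decide whether it is worth collapsing. Returns
 * 1 (after calling collapse_huge_page, which releases the mmap_lock)
 * when the range qualifies, 0 otherwise.
 */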
static int khugepaged_scan_pmd(struct mm_struct *mm,
			       struct vm_area_struct *vma,
			       unsigned long address,
			       struct page **hpage)
{
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int ret = 0, result = 0, referenced = 0;
	int none_or_zero = 0, shared = 0;
	struct page *page = NULL;
	unsigned long _address;
	spinlock_t *ptl;
	int node = NUMA_NO_NODE, unmapped = 0;
	bool writable = false;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		goto out;
	}

	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (is_swap_pte(pteval)) {
			if (++unmapped <= khugepaged_max_ptes_swap) {
				/*
				 * Always be strict with uffd-wp
				 * enabled swap entries. Please see
				 * comment below for pte_uffd_wp().
				 */
				if (pte_swp_uffd_wp(pteval)) {
					result = SCAN_PTE_UFFD_WP;
					goto out_unmap;
				}
				continue;
			} else {
				result = SCAN_EXCEED_SWAP_PTE;
				goto out_unmap;
			}
		}
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out_unmap;
			}
		}
		if (pte_uffd_wp(pteval)) {
			/*
			 * Don't collapse the page if any of the small
			 * PTEs are armed with uffd write protection.
			 * Here we can also mark the new huge pmd as
			 * write protected if any of the small ones is
			 * marked but that could bring unknown
			 * userfault messages that fall outside of
			 * the registered range. So, just be simple.
			 */
			result = SCAN_PTE_UFFD_WP;
			goto out_unmap;
		}
		if (pte_write(pteval))
			writable = true;

		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out_unmap;
		}

		if (page_mapcount(page) > 1 &&
				++shared > khugepaged_max_ptes_shared) {
			result = SCAN_EXCEED_SHARED_PTE;
			goto out_unmap;
		}

		page = compound_head(page);

		/*
		 * Record which node the original page is from and save this
		 * information to khugepaged_node_load[].
		 * Khugepaged will allocate the hugepage from the node that
		 * has the max hit record.
		 */
		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			goto out_unmap;
		}
		khugepaged_node_load[node]++;
		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			goto out_unmap;
		}
		if (PageLocked(page)) {
			result = SCAN_PAGE_LOCK;
			goto out_unmap;
		}
		if (!PageAnon(page)) {
			result = SCAN_PAGE_ANON;
			goto out_unmap;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * Here the check is racy: it may see total_mapcount > refcount
		 * in some cases.
		 * For example, one process with one forked child process.
		 * The parent has the PMD split due to MADV_DONTNEED, then
		 * the child is trying to unmap the whole PMD, but khugepaged
		 * may be scanning the parent between the child clearing the
		 * PageDoubleMap flag and decrementing the mapcount. So
		 * khugepaged may see total_mapcount > refcount.
		 *
		 * But such a case is ephemeral; we could always retry the
		 * collapse later. However it may report a false positive if
		 * the page has excessive GUP pins (i.e. 512). Anyway the
		 * same check will be done again later, so the risk seems low.
		 */
		if (!is_refcount_suitable(page)) {
			result = SCAN_PAGE_COUNT;
			goto out_unmap;
		}
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;
	}
	if (!writable) {
		result = SCAN_PAGE_RO;
	} else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR / 2)) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
		ret = 1;
	}
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (ret) {
		node = khugepaged_find_target_node();
		/* collapse_huge_page will return with the mmap_lock released */
		collapse_huge_page(mm, address, hpage, node,
				   referenced, unmapped);
	}
out:
	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
				     none_or_zero, result, unmapped);
	return ret;
}

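/*
 * Drop an mm_slot whose mm has exited: unhash it, unlink it from the
 * scan list, free the slot, and drop the reference taken at
 * registration.
 */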
static void collect_mm_slot(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;

	lockdep_assert_held(&khugepaged_mm_lock);

	if (khugepaged_test_exit(mm)) {
		/* free mm_slot */
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
		 *
		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		 */

		/* khugepaged_mm_lock actually not necessary for the below */
		free_mm_slot(mm_slot);
		mmdrop(mm);
	}
}

Matthew Wilcox (Oracle)396bcc52020-04-06 20:04:35 -07001402#ifdef CONFIG_SHMEM
Song Liu27e1f822019-09-23 15:38:30 -07001403/*
1404 * Notify khugepaged that given addr of the mm is pte-mapped THP. Then
1405 * khugepaged should try to collapse the page table.
1406 */
1407static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
1408 unsigned long addr)
1409{
1410 struct mm_slot *mm_slot;
1411
1412 VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
1413
1414 spin_lock(&khugepaged_mm_lock);
1415 mm_slot = get_mm_slot(mm);
1416 if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
1417 mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
1418 spin_unlock(&khugepaged_mm_lock);
1419 return 0;
1420}
1421
1422/**
Alex Shi336e6b52020-12-14 19:12:01 -08001423 * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
1424 * address haddr.
1425 *
1426 * @mm: process address space where collapse happens
1427 * @addr: THP collapse address
Song Liu27e1f822019-09-23 15:38:30 -07001428 *
1429 * This function checks whether all the PTEs in the PMD are pointing to the
1430 * right THP. If so, retract the page table so the THP can refault in with
1431 * as pmd-mapped.
1432 */
void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
{
	unsigned long haddr = addr & HPAGE_PMD_MASK;
	struct vm_area_struct *vma = find_vma(mm, haddr);
	struct page *hpage;
	pte_t *start_pte, *pte;
	pmd_t *pmd, _pmd;
	spinlock_t *ptl;
	int count = 0;
	int i;

	if (!vma || !vma->vm_file ||
	    !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
		return;

	/*
	 * This vm_flags may not have VM_HUGEPAGE if the page was not
	 * collapsed by this mm. But we can still collapse if the page is
	 * a valid THP. Add extra VM_HUGEPAGE so hugepage_vma_check()
	 * will not fail the vma for missing VM_HUGEPAGE.
	 */
	if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
		return;

	hpage = find_lock_page(vma->vm_file->f_mapping,
			       linear_page_index(vma, haddr));
	if (!hpage)
		return;

	if (!PageHead(hpage))
		goto drop_hpage;

	pmd = mm_find_pmd(mm, haddr);
	if (!pmd)
		goto drop_hpage;

	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);

	/* step 1: check all mapped PTEs are to the right huge page */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		struct page *page;

		/* empty pte, skip */
		if (pte_none(*pte))
			continue;

		/* page swapped out, abort */
		if (!pte_present(*pte))
			goto abort;

		page = vm_normal_page(vma, addr, *pte);

		/*
		 * Note that uprobe, debugger, or MAP_PRIVATE may change the
		 * page table, but the new page will not be a subpage of hpage.
		 */
		if (hpage + i != page)
			goto abort;
		count++;
	}

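	/*
	 * count now holds the number of present PTEs verified to map
	 * subpages of hpage; pte_none() holes are tolerated, anything else
	 * (swapped out, or a different page after COW/uprobe) has aborted
	 * above.
	 */
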
	/* step 2: adjust rmap */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		struct page *page;

		if (pte_none(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		page_remove_rmap(page, false);
	}

	pte_unmap_unlock(start_pte, ptl);

	/* step 3: set proper refcount and mm_counters. */
	if (count) {
		page_ref_sub(hpage, count);
		add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
	}

	/* step 4: collapse pmd */
	ptl = pmd_lock(vma->vm_mm, pmd);
	_pmd = pmdp_collapse_flush(vma, haddr, pmd);
	spin_unlock(ptl);
	mm_dec_nr_ptes(mm);
	pte_free(mm, pmd_pgtable(_pmd));
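
	/*
	 * The PMD has been cleared and flushed, and the page table page
	 * that used to map this range is freed; the next fault on the
	 * range can install hpage as a PMD mapping.
	 */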

drop_hpage:
	unlock_page(hpage);
	put_page(hpage);
	return;

abort:
	pte_unmap_unlock(start_pte, ptl);
	goto drop_hpage;
}

static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;
	int i;

	if (likely(mm_slot->nr_pte_mapped_thp == 0))
		return;

	if (!mmap_write_trylock(mm))
		return;

	if (unlikely(khugepaged_test_exit(mm)))
		goto out;

	for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
		collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);

out:
	mm_slot->nr_pte_mapped_thp = 0;
	mmap_write_unlock(mm);
}

static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
	pmd_t *pmd, _pmd;

	i_mmap_lock_write(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
		 * got written to. These VMAs are likely not worth the cost
		 * of taking mmap_write_lock(mm), as their PMD mapping is
		 * likely to be split later anyway.
		 *
		 * Note that the vma->anon_vma check is racy: it can be set
		 * up by the fault path after the check but before we take
		 * mmap_lock. But the page lock prevents establishing any
		 * new ptes of the page, so we are safe.
		 *
		 * An alternative would be to drop the check and instead
		 * check that the page table is clear before calling
		 * pmdp_collapse_flush() under ptl. That has a higher chance
		 * of recovering a THP for the VMA, but also higher cost.
		 */
		if (vma->anon_vma)
			continue;
		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		if (addr & ~HPAGE_PMD_MASK)
			continue;
		if (vma->vm_end < addr + HPAGE_PMD_SIZE)
			continue;
		mm = vma->vm_mm;
		pmd = mm_find_pmd(mm, addr);
		if (!pmd)
			continue;
		/*
		 * We need exclusive mmap_lock to retract the page table.
		 *
		 * We use trylock due to lock inversion: we need to acquire
		 * mmap_lock while holding the page lock. The fault path does
		 * it in reverse order. Trylock is a way to avoid deadlock.
		 */
		if (mmap_write_trylock(mm)) {
			if (!khugepaged_test_exit(mm)) {
				spinlock_t *ptl = pmd_lock(mm, pmd);
				/* assume page table is clear */
				_pmd = pmdp_collapse_flush(vma, addr, pmd);
				spin_unlock(ptl);
				mm_dec_nr_ptes(mm);
				pte_free(mm, pmd_pgtable(_pmd));
			}
			mmap_write_unlock(mm);
		} else {
			/* Try again later */
			khugepaged_add_pte_mapped_thp(mm, addr);
		}
	}
	i_mmap_unlock_write(mapping);
}

/**
 * collapse_file - collapse filemap/tmpfs/shmem pages into huge one.
 *
 * @mm: process address space where collapse happens
 * @file: file that the collapse operates on
 * @start: collapse start address
 * @hpage: newly allocated huge page for collapse
 * @node: appointed node the new huge page is allocated from
 *
 * Basic scheme is simple, details are more complex:
 *  - allocate and lock a new huge page;
 *  - scan page cache replacing old pages with the new one
 *    + swap/gup in pages if necessary;
 *    + fill in gaps;
 *    + keep old pages around in case rollback is required;
 *  - if replacing succeeds:
 *    + copy data over;
 *    + free old pages;
 *    + unlock huge page;
 *  - if replacing failed:
 *    + put all pages back and unfreeze them;
 *    + restore gaps in the page cache;
 *    + unlock and free huge page;
 */
static void collapse_file(struct mm_struct *mm,
		struct file *file, pgoff_t start,
		struct page **hpage, int node)
{
	struct address_space *mapping = file->f_mapping;
	gfp_t gfp;
	struct page *new_page;
	pgoff_t index, end = start + HPAGE_PMD_NR;
	LIST_HEAD(pagelist);
	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
	int nr_none = 0, result = SCAN_SUCCEED;
	bool is_shmem = shmem_file(file);
	int nr;

	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));

	/* Only allocate from the target node */
	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;

	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out;
	}

	if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out;
	}
	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);

	/* This will be less messy when we use multi-index entries */
	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (!xas_error(&xas))
			break;
		xas_unlock_irq(&xas);
		if (!xas_nomem(&xas, GFP_KERNEL)) {
			result = SCAN_FAIL;
			goto out;
		}
	} while (1);

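	/*
	 * At this point all xarray slots for [start, start + HPAGE_PMD_NR)
	 * have been created up front: xas_create_range() runs under the
	 * lock and, on -ENOMEM, xas_nomem() allocates outside the lock and
	 * the loop retries, so the replacement loop below cannot fail on
	 * slot allocation.
	 */
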
	__SetPageLocked(new_page);
	if (is_shmem)
		__SetPageSwapBacked(new_page);
	new_page->index = start;
	new_page->mapping = mapping;

	/*
	 * At this point the new_page is locked and not up-to-date.
	 * It's safe to insert it into the page cache, because nobody would
	 * be able to map it or use it in another way until we unlock it.
	 */

	xas_set(&xas, start);
	for (index = start; index < end; index++) {
		struct page *page = xas_next(&xas);

		VM_BUG_ON(index != xas.xa_index);
		if (is_shmem) {
			if (!page) {
				/*
				 * Stop if extent has been truncated or
				 * hole-punched, and is now completely
				 * empty.
				 */
				if (index == start) {
					if (!xas_next_entry(&xas, end - 1)) {
						result = SCAN_TRUNCATED;
						goto xa_locked;
					}
					xas_set(&xas, index);
				}
				if (!shmem_charge(mapping->host, 1)) {
					result = SCAN_FAIL;
					goto xa_locked;
				}
				xas_store(&xas, new_page);
				nr_none++;
				continue;
			}

			if (xa_is_value(page) || !PageUptodate(page)) {
				xas_unlock_irq(&xas);
				/* swap in or instantiate fallocated page */
				if (shmem_getpage(mapping->host, index, &page,
						  SGP_NOALLOC)) {
					result = SCAN_FAIL;
					goto xa_unlocked;
				}
			} else if (trylock_page(page)) {
				get_page(page);
				xas_unlock_irq(&xas);
			} else {
				result = SCAN_PAGE_LOCK;
				goto xa_locked;
			}
		} else {	/* !is_shmem */
			if (!page || xa_is_value(page)) {
				xas_unlock_irq(&xas);
				page_cache_sync_readahead(mapping, &file->f_ra,
							  file, index,
							  end - index);
				/* drain pagevecs to help isolate_lru_page() */
				lru_add_drain();
				page = find_lock_page(mapping, index);
				if (unlikely(page == NULL)) {
					result = SCAN_FAIL;
					goto xa_unlocked;
				}
			} else if (PageDirty(page)) {
				/*
				 * khugepaged only works on read-only fd,
				 * so this page is dirty because it hasn't
				 * been flushed since first write. There
				 * won't be new dirty pages.
				 *
				 * Trigger async flush here and hope the
				 * writeback is done when khugepaged
				 * revisits this page.
				 *
				 * This is a one-off situation. We are not
				 * forcing writeback in loop.
				 */
				xas_unlock_irq(&xas);
				filemap_flush(mapping);
				result = SCAN_FAIL;
				goto xa_unlocked;
			} else if (PageWriteback(page)) {
				xas_unlock_irq(&xas);
				result = SCAN_FAIL;
				goto xa_unlocked;
			} else if (trylock_page(page)) {
				get_page(page);
				xas_unlock_irq(&xas);
			} else {
				result = SCAN_PAGE_LOCK;
				goto xa_locked;
			}
		}

		/*
		 * The page must be locked, so we can drop the i_pages lock
		 * without racing with truncate.
		 */
		VM_BUG_ON_PAGE(!PageLocked(page), page);

		/* make sure the page is up to date */
		if (unlikely(!PageUptodate(page))) {
			result = SCAN_FAIL;
			goto out_unlock;
		}

		/*
		 * If file was truncated then extended, or hole-punched, before
		 * we locked the first page, then a THP might be there already.
		 */
		if (PageTransCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			goto out_unlock;
		}

		if (page_mapping(page) != mapping) {
			result = SCAN_TRUNCATED;
			goto out_unlock;
		}

		if (!is_shmem && (PageDirty(page) ||
				  PageWriteback(page))) {
			/*
			 * khugepaged only works on read-only fd, so this
			 * page is dirty because it hasn't been flushed
			 * since first write.
			 */
			result = SCAN_FAIL;
			goto out_unlock;
		}

		if (isolate_lru_page(page)) {
			result = SCAN_DEL_PAGE_LRU;
			goto out_unlock;
		}

		if (page_has_private(page) &&
		    !try_to_release_page(page, GFP_KERNEL)) {
			result = SCAN_PAGE_HAS_PRIVATE;
			putback_lru_page(page);
			goto out_unlock;
		}

		if (page_mapped(page))
			unmap_mapping_pages(mapping, index, 1, false);

		xas_lock_irq(&xas);
		xas_set(&xas, index);

		VM_BUG_ON_PAGE(page != xas_load(&xas), page);
		VM_BUG_ON_PAGE(page_mapped(page), page);

		/*
		 * The page is expected to have page_count() == 3:
		 *  - we hold a pin on it;
		 *  - one reference from page cache;
		 *  - one from isolate_lru_page;
		 */
		if (!page_ref_freeze(page, 3)) {
			result = SCAN_PAGE_COUNT;
			xas_unlock_irq(&xas);
			putback_lru_page(page);
			goto out_unlock;
		}

		/*
		 * Add the page to the list to be able to undo the collapse
		 * if something goes wrong.
		 */
		list_add_tail(&page->lru, &pagelist);

		/* Finally, replace with the new page. */
		xas_store(&xas, new_page);
		continue;
out_unlock:
		unlock_page(page);
		put_page(page);
		goto xa_unlocked;
	}
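
	/*
	 * On the success path every slot in [start, end) now points at
	 * new_page, nr_none counts the holes that were filled, and every
	 * replaced page sits frozen (refcount 0) on pagelist with the
	 * xarray lock still held.
	 */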
	nr = thp_nr_pages(new_page);

	if (is_shmem)
		__mod_lruvec_page_state(new_page, NR_SHMEM_THPS, nr);
	else {
		__mod_lruvec_page_state(new_page, NR_FILE_THPS, nr);
		filemap_nr_thps_inc(mapping);
		/*
		 * Paired with smp_mb() in do_dentry_open() to ensure
		 * i_writecount is up to date and the update to nr_thps is
		 * visible. Ensures the page cache will be truncated if the
		 * file is opened writable.
		 */
		smp_mb();
		if (inode_is_open_for_write(mapping->host)) {
			result = SCAN_FAIL;
			__mod_lruvec_page_state(new_page, NR_FILE_THPS, -nr);
			filemap_nr_thps_dec(mapping);
			goto xa_locked;
		}
	}

	if (nr_none) {
		__mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none);
		if (is_shmem)
			__mod_lruvec_page_state(new_page, NR_SHMEM, nr_none);
	}

xa_locked:
	xas_unlock_irq(&xas);
xa_unlocked:

	if (result == SCAN_SUCCEED) {
		struct page *page, *tmp;

		/*
		 * Replacing old pages with the new one has succeeded, now
		 * we need to copy the content and free the old pages.
		 */
		index = start;
		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
			while (index < page->index) {
				clear_highpage(new_page + (index % HPAGE_PMD_NR));
				index++;
			}
			copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
				      page);
			list_del(&page->lru);
			page->mapping = NULL;
			page_ref_unfreeze(page, 1);
			ClearPageActive(page);
			ClearPageUnevictable(page);
			unlock_page(page);
			put_page(page);
			index++;
		}
		while (index < end) {
			clear_highpage(new_page + (index % HPAGE_PMD_NR));
			index++;
		}

		SetPageUptodate(new_page);
		page_ref_add(new_page, HPAGE_PMD_NR - 1);
		if (is_shmem)
			set_page_dirty(new_page);
		lru_cache_add(new_page);

		/*
		 * Remove pte page tables, so we can re-fault the page as huge.
		 */
		retract_page_tables(mapping, start);
		*hpage = NULL;

		khugepaged_pages_collapsed++;
	} else {
		struct page *page;

		/* Something went wrong: roll back page cache changes */
		xas_lock_irq(&xas);
		mapping->nrpages -= nr_none;

		if (is_shmem)
			shmem_uncharge(mapping->host, nr_none);

		xas_set(&xas, start);
		xas_for_each(&xas, page, end - 1) {
			page = list_first_entry_or_null(&pagelist,
					struct page, lru);
			if (!page || xas.xa_index < page->index) {
				if (!nr_none)
					break;
				nr_none--;
				/* Put holes back where they were */
				xas_store(&xas, NULL);
				continue;
			}

			VM_BUG_ON_PAGE(page->index != xas.xa_index, page);

			/* Unfreeze the page. */
			list_del(&page->lru);
			page_ref_unfreeze(page, 2);
			xas_store(&xas, page);
			xas_pause(&xas);
			xas_unlock_irq(&xas);
			unlock_page(page);
			putback_lru_page(page);
			xas_lock_irq(&xas);
		}
		VM_BUG_ON(nr_none);
		xas_unlock_irq(&xas);

		new_page->mapping = NULL;
	}

	unlock_page(new_page);
out:
	VM_BUG_ON(!list_empty(&pagelist));
	if (!IS_ERR_OR_NULL(*hpage))
		mem_cgroup_uncharge(page_folio(*hpage));
	/* TODO: tracepoints */
}

static void khugepaged_scan_file(struct mm_struct *mm,
		struct file *file, pgoff_t start, struct page **hpage)
{
	struct page *page = NULL;
	struct address_space *mapping = file->f_mapping;
	XA_STATE(xas, &mapping->i_pages, start);
	int present, swap;
	int node = NUMA_NO_NODE;
	int result = SCAN_SUCCEED;

	present = 0;
	swap = 0;
	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	rcu_read_lock();
	xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
		if (xas_retry(&xas, page))
			continue;

		if (xa_is_value(page)) {
			if (++swap > khugepaged_max_ptes_swap) {
				result = SCAN_EXCEED_SWAP_PTE;
				break;
			}
			continue;
		}

		if (PageTransCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			break;
		}

		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			break;
		}
		khugepaged_node_load[node]++;

		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			break;
		}

		if (page_count(page) !=
		    1 + page_mapcount(page) + page_has_private(page)) {
			result = SCAN_PAGE_COUNT;
			break;
		}

		/*
		 * We probably should check if the page is referenced here, but
		 * nobody would transfer pte_young() to PageReferenced() for us.
		 * And rmap walk here is just too costly...
		 */

		present++;

		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}
	rcu_read_unlock();

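	/*
	 * The file scan reuses khugepaged_max_ptes_none as a bound on
	 * holes: if fewer than HPAGE_PMD_NR - khugepaged_max_ptes_none
	 * pages are present, the range is treated as exceeding the
	 * none-PTE limit and no collapse is attempted.
	 */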
	if (result == SCAN_SUCCEED) {
		if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
			result = SCAN_EXCEED_NONE_PTE;
		} else {
			node = khugepaged_find_target_node();
			collapse_file(mm, file, start, hpage, node);
		}
	}

	/* TODO: tracepoints */
}
#else
static void khugepaged_scan_file(struct mm_struct *mm,
		struct file *file, pgoff_t start, struct page **hpage)
{
	BUILD_BUG();
}

static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
{
}
#endif

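/*
 * Scan up to @pages worth of PTEs across the VMAs of the current mm_slot,
 * collapsing ranges where possible. Called and returns with
 * khugepaged_mm_lock held (see the __releases/__acquires annotations);
 * the lock is dropped while an individual mm is scanned.
 */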
static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
					    struct page **hpage)
	__releases(&khugepaged_mm_lock)
	__acquires(&khugepaged_mm_lock)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int progress = 0;

	VM_BUG_ON(!pages);
	lockdep_assert_held(&khugepaged_mm_lock);

	if (khugepaged_scan.mm_slot)
		mm_slot = khugepaged_scan.mm_slot;
	else {
		mm_slot = list_entry(khugepaged_scan.mm_head.next,
				     struct mm_slot, mm_node);
		khugepaged_scan.address = 0;
		khugepaged_scan.mm_slot = mm_slot;
	}
	spin_unlock(&khugepaged_mm_lock);
	khugepaged_collapse_pte_mapped_thps(mm_slot);

	mm = mm_slot->mm;
	/*
	 * Don't wait for semaphore (to avoid long wait times). Just move to
	 * the next mm on the list.
	 */
	vma = NULL;
	if (unlikely(!mmap_read_trylock(mm)))
		goto breakouterloop_mmap_lock;
	if (likely(!khugepaged_test_exit(mm)))
		vma = find_vma(mm, khugepaged_scan.address);

	progress++;
	for (; vma; vma = vma->vm_next) {
		unsigned long hstart, hend;

		cond_resched();
		if (unlikely(khugepaged_test_exit(mm))) {
			progress++;
			break;
		}
		if (!hugepage_vma_check(vma, vma->vm_flags)) {
skip:
			progress++;
			continue;
		}
		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
		hend = vma->vm_end & HPAGE_PMD_MASK;
		if (hstart >= hend)
			goto skip;
		if (khugepaged_scan.address > hend)
			goto skip;
		if (khugepaged_scan.address < hstart)
			khugepaged_scan.address = hstart;
		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
		if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma))
			goto skip;

		while (khugepaged_scan.address < hend) {
			int ret;
			cond_resched();
			if (unlikely(khugepaged_test_exit(mm)))
				goto breakouterloop;

			VM_BUG_ON(khugepaged_scan.address < hstart ||
				  khugepaged_scan.address + HPAGE_PMD_SIZE >
				  hend);
			if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
				struct file *file = get_file(vma->vm_file);
				pgoff_t pgoff = linear_page_index(vma,
						khugepaged_scan.address);

				mmap_read_unlock(mm);
				ret = 1;
				khugepaged_scan_file(mm, file, pgoff, hpage);
				fput(file);
			} else {
				ret = khugepaged_scan_pmd(mm, vma,
						khugepaged_scan.address,
						hpage);
			}
			/* move to next address */
			khugepaged_scan.address += HPAGE_PMD_SIZE;
			progress += HPAGE_PMD_NR;
			if (ret)
				/* we released mmap_lock so break loop */
				goto breakouterloop_mmap_lock;
			if (progress >= pages)
				goto breakouterloop;
		}
	}
breakouterloop:
	mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
breakouterloop_mmap_lock:

	spin_lock(&khugepaged_mm_lock);
	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
	/*
	 * Release the current mm_slot if this mm is about to die, or
	 * if we scanned all vmas of this mm.
	 */
	if (khugepaged_test_exit(mm) || !vma) {
		/*
		 * Make sure that if mm_users is reaching zero while
		 * khugepaged runs here, khugepaged_exit will find
		 * mm_slot not pointing to the exiting mm.
		 */
		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
			khugepaged_scan.mm_slot = list_entry(
				mm_slot->mm_node.next,
				struct mm_slot, mm_node);
			khugepaged_scan.address = 0;
		} else {
			khugepaged_scan.mm_slot = NULL;
			khugepaged_full_scans++;
		}

		collect_mm_slot(mm_slot);
	}

	return progress;
}

static int khugepaged_has_work(void)
{
	return !list_empty(&khugepaged_scan.mm_head) &&
		khugepaged_enabled();
}

static int khugepaged_wait_event(void)
{
	return !list_empty(&khugepaged_scan.mm_head) ||
		kthread_should_stop();
}

static void khugepaged_do_scan(void)
{
	struct page *hpage = NULL;
	unsigned int progress = 0, pass_through_head = 0;
	unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
	bool wait = true;

	lru_add_drain_all();

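	/*
	 * pass_through_head counts how often the scan position is back at
	 * the head of the mm list; stopping once it reaches 2 lets one
	 * invocation finish a partially scanned mm and wrap around at most
	 * once before bailing out.
	 */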
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002224 while (progress < pages) {
2225 if (!khugepaged_prealloc_page(&hpage, &wait))
2226 break;
2227
2228 cond_resched();
2229
2230 if (unlikely(kthread_should_stop() || try_to_freeze()))
2231 break;
2232
2233 spin_lock(&khugepaged_mm_lock);
2234 if (!khugepaged_scan.mm_slot)
2235 pass_through_head++;
2236 if (khugepaged_has_work() &&
2237 pass_through_head < 2)
2238 progress += khugepaged_scan_mm_slot(pages - progress,
2239 &hpage);
2240 else
2241 progress = pages;
2242 spin_unlock(&khugepaged_mm_lock);
2243 }
2244
2245 if (!IS_ERR_OR_NULL(hpage))
2246 put_page(hpage);
2247}
2248
2249static bool khugepaged_should_wakeup(void)
2250{
2251 return kthread_should_stop() ||
2252 time_after_eq(jiffies, khugepaged_sleep_expire);
2253}
2254
2255static void khugepaged_wait_work(void)
2256{
2257 if (khugepaged_has_work()) {
2258 const unsigned long scan_sleep_jiffies =
2259 msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2260
2261 if (!scan_sleep_jiffies)
2262 return;
2263
2264 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2265 wait_event_freezable_timeout(khugepaged_wait,
2266 khugepaged_should_wakeup(),
2267 scan_sleep_jiffies);
2268 return;
2269 }
2270
2271 if (khugepaged_enabled())
2272 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2273}
2274
2275static int khugepaged(void *none)
2276{
2277 struct mm_slot *mm_slot;
2278
2279 set_freezable();
2280 set_user_nice(current, MAX_NICE);
2281
2282 while (!kthread_should_stop()) {
2283 khugepaged_do_scan();
2284 khugepaged_wait_work();
2285 }
2286
2287 spin_lock(&khugepaged_mm_lock);
2288 mm_slot = khugepaged_scan.mm_slot;
2289 khugepaged_scan.mm_slot = NULL;
2290 if (mm_slot)
2291 collect_mm_slot(mm_slot);
2292 spin_unlock(&khugepaged_mm_lock);
2293 return 0;
2294}

static void set_recommended_min_free_kbytes(void)
{
	struct zone *zone;
	int nr_zones = 0;
	unsigned long recommended_min;

	if (!khugepaged_enabled()) {
		calculate_min_free_kbytes();
		goto update_wmarks;
	}

	for_each_populated_zone(zone) {
		/*
		 * We don't need to worry about fragmentation of
		 * ZONE_MOVABLE since it only has movable pages.
		 */
		if (zone_idx(zone) > gfp_zone(GFP_USER))
			continue;

		nr_zones++;
	}

	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
	recommended_min = pageblock_nr_pages * nr_zones * 2;

	/*
	 * Make sure that on average at least two pageblocks are almost free
	 * of another type, one for a migratetype to fall back to and a
	 * second to avoid subsequent fallbacks of other types. There are 3
	 * MIGRATE_TYPES we care about.
	 */
	recommended_min += pageblock_nr_pages * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;

	/* never allow reserving more than 5% of the lowmem */
	recommended_min = min(recommended_min,
			      (unsigned long) nr_free_buffer_pages() / 20);
	recommended_min <<= (PAGE_SHIFT-10);

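	/*
	 * Worked example (an illustration only, assuming x86-64 with 4KiB
	 * pages, 2MiB pageblocks so pageblock_nr_pages == 512, a single
	 * zone, and MIGRATE_PCPTYPES == 3): 512 * 1 * 2 = 1024 pages, plus
	 * 512 * 1 * 3 * 3 = 4608 pages, gives 5632 pages, clamped to at
	 * most 5% of lowmem; the shift by PAGE_SHIFT - 10 = 2 converts
	 * pages to kbytes, i.e. 22528 kbytes (~22MiB).
	 */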
	if (recommended_min > min_free_kbytes) {
		if (user_min_free_kbytes >= 0)
			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
				min_free_kbytes, recommended_min);

		min_free_kbytes = recommended_min;
	}

update_wmarks:
	setup_per_zone_wmarks();
}

int start_stop_khugepaged(void)
{
	int err = 0;

	mutex_lock(&khugepaged_mutex);
	if (khugepaged_enabled()) {
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (IS_ERR(khugepaged_thread)) {
			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
			goto fail;
		}

		if (!list_empty(&khugepaged_scan.mm_head))
			wake_up_interruptible(&khugepaged_wait);
	} else if (khugepaged_thread) {
		kthread_stop(khugepaged_thread);
		khugepaged_thread = NULL;
	}
	set_recommended_min_free_kbytes();
fail:
	mutex_unlock(&khugepaged_mutex);
	return err;
}

void khugepaged_min_free_kbytes_update(void)
{
	mutex_lock(&khugepaged_mutex);
	if (khugepaged_enabled() && khugepaged_thread)
		set_recommended_min_free_kbytes();
	mutex_unlock(&khugepaged_mutex);
}