// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/page_table_check.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_PMD_NULL,
	SCAN_EXCEED_NONE_PTE,
	SCAN_EXCEED_SWAP_PTE,
	SCAN_EXCEED_SHARED_PTE,
	SCAN_PTE_NON_PRESENT,
	SCAN_PTE_UFFD_WP,
	SCAN_PAGE_RO,
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_PAGE_NULL,
	SCAN_SCAN_ABORT,
	SCAN_PAGE_COUNT,
	SCAN_PAGE_LRU,
	SCAN_PAGE_LOCK,
	SCAN_PAGE_ANON,
	SCAN_PAGE_COMPOUND,
	SCAN_ANY_PROCESS,
	SCAN_VMA_NULL,
	SCAN_VMA_CHECK,
	SCAN_ADDRESS_RANGE,
	SCAN_SWAP_CACHE_PAGE,
	SCAN_DEL_PAGE_LRU,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_TRUNCATED,
	SCAN_PAGE_HAS_PRIVATE,
};

#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>

static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);

/* default: scan 8*512 ptes (or vmas) every 30 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default, collapse hugepages if there is at least one pte mapped the
 * way it would have been mapped had the vma been large enough during
 * page fault.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
static unsigned int khugepaged_max_ptes_shared __read_mostly;

#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

#define MAX_PTE_MAPPED_THP 8

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 * @nr_pte_mapped_thp: number of pte-mapped THPs
 * @pte_mapped_thp: addresses of the pte-mapped THPs
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;

	/* pte-mapped THP in this mm */
	int nr_pte_mapped_thp;
	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only the one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};

#ifdef CONFIG_SYSFS
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
	       scan_sleep_millisecs_store);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
	       alloc_sleep_millisecs_store);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int pages;
	int err;

	err = kstrtouint(buf, 10, &pages);
	if (err || !pages)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
	       pages_to_scan_store);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t khugepaged_defrag_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
					 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
					  TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR(defrag, 0644, khugepaged_defrag_show,
	       khugepaged_defrag_store);

/*
 * max_ptes_none controls if khugepaged should collapse hugepages over
 * any unmapped ptes, in turn potentially increasing the memory
 * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
 * reduce the available free memory in the system as it
 * runs. Increasing max_ptes_none will instead potentially reduce the
 * free memory in the system during the khugepaged scan.
 */
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
	       khugepaged_max_ptes_none_store);

static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
	       khugepaged_max_ptes_swap_store);

static ssize_t khugepaged_max_ptes_shared_show(struct kobject *kobj,
					       struct kobj_attribute *attr,
					       char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
}

static ssize_t khugepaged_max_ptes_shared_store(struct kobject *kobj,
						struct kobj_attribute *attr,
						const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_shared;

	err = kstrtoul(buf, 10, &max_ptes_shared);
	if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_shared = max_ptes_shared;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_shared_attr =
	__ATTR(max_ptes_shared, 0644, khugepaged_max_ptes_shared_show,
	       khugepaged_max_ptes_shared_store);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	&khugepaged_max_ptes_shared_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */

int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes good for khugepaged to scan,
		 * register it here without waiting for a page fault that
		 * may not happen any time soon.
		 */
		if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
				khugepaged_enter_vma_merge(vma, *vm_flags))
			return -ENOMEM;
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}

int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct mm_slot),
					  __alignof__(struct mm_slot), 0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;

	return 0;
}

void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;

	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
		if (mm == mm_slot->mm)
			return mm_slot;

	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	mm_slot->mm = mm;
	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
}

static inline int khugepaged_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

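/*
 * Decide whether khugepaged may consider this vma at all: honour the
 * per-vma and global THP settings, require file-backed vmas to be
 * suitably aligned for a huge page, defer shmem to its mount/sysfs
 * policy, and accept executable read-only regular files only when
 * CONFIG_READ_ONLY_THP_FOR_FS is enabled.
 */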
static bool hugepage_vma_check(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	if (!transhuge_vma_enabled(vma, vm_flags))
		return false;

	if (vma->vm_file && !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) -
				vma->vm_pgoff, HPAGE_PMD_NR))
		return false;

	/* Enabled via shmem mount options or sysfs settings. */
	if (shmem_file(vma->vm_file))
		return shmem_huge_enabled(vma);

	/* THP settings require madvise. */
	if (!(vm_flags & VM_HUGEPAGE) && !khugepaged_always())
		return false;

	/* Only regular files are valid. */
	if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && vma->vm_file &&
	    (vm_flags & VM_EXEC)) {
		struct inode *inode = vma->vm_file->f_inode;

		return !inode_is_open_for_write(inode) &&
			S_ISREG(inode->i_mode);
	}

	if (!vma->anon_vma || vma->vm_ops)
		return false;
	if (vma_is_temporary_stack(vma))
		return false;
	return !(vm_flags & VM_NO_KHUGEPAGED);
}

int __khugepaged_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return -ENOMEM;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		free_mm_slot(mm_slot);
		return 0;
	}

	spin_lock(&khugepaged_mm_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	mmgrab(mm);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);

	return 0;
}

int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	unsigned long hstart, hend;

	/*
	 * khugepaged only supports read-only files for non-shmem files.
	 * khugepaged does not yet work on special mappings. And
	 * file-private shmem THP is not supported.
	 */
	if (!hugepage_vma_check(vma, vm_flags))
		return 0;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (hstart < hend)
		return khugepaged_enter(vma, vm_flags);
	return 0;
}

void __khugepaged_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		free_mm_slot(mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * khugepaged_test_exit() (which is guaranteed to run
		 * under mmap_lock read mode). Stop here (after we
		 * return all pagetables will be destroyed) until
		 * khugepaged has finished working on the pagetables
		 * under the mmap_lock.
		 */
		mmap_write_lock(mm);
		mmap_write_unlock(mm);
	}
}

static void release_pte_page(struct page *page)
{
	mod_node_page_state(page_pgdat(page),
			NR_ISOLATED_ANON + page_is_file_lru(page),
			-compound_nr(page));
	unlock_page(page);
	putback_lru_page(page);
}

static void release_pte_pages(pte_t *pte, pte_t *_pte,
		struct list_head *compound_pagelist)
{
	struct page *page, *tmp;

	while (--_pte >= pte) {
		pte_t pteval = *_pte;

		page = pte_page(pteval);
		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)) &&
				!PageCompound(page))
			release_pte_page(page);
	}

	list_for_each_entry_safe(page, tmp, compound_pagelist, lru) {
		list_del(&page->lru);
		release_pte_page(page);
	}
}

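/*
 * A page is only safe to collapse if its refcount equals its total
 * mapcount plus, for pages in the swap cache, one extra reference per
 * subpage held by the swap cache itself; any surplus reference
 * indicates a GUP or other external pin.
 */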
static bool is_refcount_suitable(struct page *page)
{
	int expected_refcount;

	expected_refcount = total_mapcount(page);
	if (PageSwapCache(page))
		expected_refcount += compound_nr(page);

	return page_count(page) == expected_refcount;
}

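/*
 * Walk the HPAGE_PMD_NR ptes under the pte lock, enforce the
 * max_ptes_none/max_ptes_shared limits, and lock + isolate each mapped
 * page from the LRU so it cannot be freed or split while the collapse
 * proceeds. Returns 1 on success; on failure everything already taken
 * is unwound via release_pte_pages().
 */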
static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte,
					struct list_head *compound_pagelist)
{
	struct page *page = NULL;
	pte_t *_pte;
	int none_or_zero = 0, shared = 0, result = 0, referenced = 0;
	bool writable = false;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval) || (pte_present(pteval) &&
				is_zero_pfn(pte_pfn(pteval)))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
				goto out;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out;
		}

		VM_BUG_ON_PAGE(!PageAnon(page), page);

		if (page_mapcount(page) > 1 &&
				++shared > khugepaged_max_ptes_shared) {
			result = SCAN_EXCEED_SHARED_PTE;
			count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
			goto out;
		}

		if (PageCompound(page)) {
			struct page *p;
			page = compound_head(page);

			/*
			 * Check if we have dealt with the compound page
			 * already
			 */
			list_for_each_entry(p, compound_pagelist, lru) {
				if (page == p)
					goto next;
			}
		}

		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			result = SCAN_PAGE_LOCK;
			goto out;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * The page table that maps the page has been already unlinked
		 * from the page table tree and this process cannot get
		 * an additional pin on the page.
		 *
		 * New pins can come later if the page is shared across fork,
		 * but not from this process. The other process cannot write to
		 * the page, only trigger CoW.
		 */
		if (!is_refcount_suitable(page)) {
			unlock_page(page);
			result = SCAN_PAGE_COUNT;
			goto out;
		}
		if (!pte_write(pteval) && PageSwapCache(page) &&
				!reuse_swap_page(page)) {
			/*
			 * Page is in the swap cache and cannot be re-used.
			 * It cannot be collapsed into a THP.
			 */
			unlock_page(page);
			result = SCAN_SWAP_CACHE_PAGE;
			goto out;
		}

		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (isolate_lru_page(page)) {
			unlock_page(page);
			result = SCAN_DEL_PAGE_LRU;
			goto out;
		}
		mod_node_page_state(page_pgdat(page),
				NR_ISOLATED_ANON + page_is_file_lru(page),
				compound_nr(page));
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageLRU(page), page);

		if (PageCompound(page))
			list_add_tail(&page->lru, compound_pagelist);
next:
		/* There should be enough young ptes to collapse the page */
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;

		if (pte_write(pteval))
			writable = true;
	}

	if (unlikely(!writable)) {
		result = SCAN_PAGE_RO;
	} else if (unlikely(!referenced)) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
		trace_mm_collapse_huge_page_isolate(page, none_or_zero,
						    referenced, writable, result);
		return 1;
	}
out:
	release_pte_pages(pte, _pte, compound_pagelist);
	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
					    referenced, writable, result);
	return 0;
}

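/*
 * Copy the isolated small pages into the new huge page, clearing the
 * old ptes and dropping the per-page rmap and references as we go;
 * pte_none and zero-pfn entries simply become zero-filled subpages.
 */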
static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl,
				      struct list_head *compound_pagelist)
{
	struct page *src_page, *tmp;
	pte_t *_pte;
	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, page++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;

		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				ptep_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
			}
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			if (!PageCompound(src_page))
				release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			ptep_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, false);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}
	}

	list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
		list_del(&src_page->lru);
		release_pte_page(src_page);
	}
}

static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	freezable_schedule_timeout_interruptible(
		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}

static int khugepaged_node_load[MAX_NUMNODES];

static bool khugepaged_scan_abort(int nid)
{
	int i;

	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_enabled())
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (khugepaged_node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!khugepaged_node_load[i])
			continue;
		if (node_distance(nid, i) > node_reclaim_distance)
			return true;
	}
	return false;
}

/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}

#ifdef CONFIG_NUMA
static int khugepaged_find_target_node(void)
{
	static int last_khugepaged_target_node = NUMA_NO_NODE;
	int nid, target_node = 0, max_value = 0;

	/* find first node with max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (khugepaged_node_load[nid] > max_value) {
			max_value = khugepaged_node_load[nid];
			target_node = nid;
		}

	/* do some balance if several nodes have the same hit record */
	if (target_node <= last_khugepaged_target_node)
		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
		     nid++)
			if (max_value == khugepaged_node_load[nid]) {
				target_node = nid;
				break;
			}

	last_khugepaged_target_node = target_node;
	return target_node;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (IS_ERR(*hpage)) {
		if (!*wait)
			return false;

		*wait = false;
		*hpage = NULL;
		khugepaged_alloc_sleep();
	} else if (*hpage) {
		put_page(*hpage);
		*hpage = NULL;
	}

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON_PAGE(*hpage, *hpage);

	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		*hpage = ERR_PTR(-ENOMEM);
		return NULL;
	}

	prep_transhuge_page(*hpage);
	count_vm_event(THP_COLLAPSE_ALLOC);
	return *hpage;
}
#else
static int khugepaged_find_target_node(void)
{
	return 0;
}

static inline struct page *alloc_khugepaged_hugepage(void)
{
	struct page *page;

	page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
			   HPAGE_PMD_ORDER);
	if (page)
		prep_transhuge_page(page);
	return page;
}

static struct page *khugepaged_alloc_hugepage(bool *wait)
{
	struct page *hpage;

	do {
		hpage = alloc_khugepaged_hugepage();
		if (!hpage) {
			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
			if (!*wait)
				return NULL;

			*wait = false;
			khugepaged_alloc_sleep();
		} else
			count_vm_event(THP_COLLAPSE_ALLOC);
	} while (unlikely(!hpage) && likely(khugepaged_enabled()));

	return hpage;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	/*
	 * If the hpage allocated earlier was briefly exposed in page cache
	 * before collapse_file() failed, it is possible that racing lookups
	 * have not yet completed, and would then be unpleasantly surprised by
	 * finding the hpage reused for the same mapping at a different offset.
	 * Just release the previous allocation if there is any danger of that.
	 */
	if (*hpage && page_count(*hpage) > 1) {
		put_page(*hpage);
		*hpage = NULL;
	}

	if (!*hpage)
		*hpage = khugepaged_alloc_hugepage(wait);

	if (unlikely(!*hpage))
		return false;

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON(!*hpage);

	return *hpage;
}
#endif

/*
 * If mmap_lock was temporarily dropped, revalidate the vma
 * before taking mmap_lock again.
 * Return 0 if it succeeds, otherwise return a non-zero
 * value (scan code).
 */

static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
		struct vm_area_struct **vmap)
{
	struct vm_area_struct *vma;
	unsigned long hstart, hend;

	if (unlikely(khugepaged_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
		return SCAN_ADDRESS_RANGE;
	if (!hugepage_vma_check(vma, vma->vm_flags))
		return SCAN_VMA_CHECK;
	/* Anon VMA expected */
	if (!vma->anon_vma || vma->vm_ops)
		return SCAN_VMA_CHECK;
	return 0;
}

/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if khugepaged_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held,
 * but with mmap_lock held to protect against vma changes.
 */

static bool __collapse_huge_page_swapin(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long haddr, pmd_t *pmd,
					int referenced)
{
	int swapped_in = 0;
	vm_fault_t ret = 0;
	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);

	for (address = haddr; address < end; address += PAGE_SIZE) {
		struct vm_fault vmf = {
			.vma = vma,
			.address = address,
			.pgoff = linear_page_index(vma, haddr),
			.flags = FAULT_FLAG_ALLOW_RETRY,
			.pmd = pmd,
		};

		vmf.pte = pte_offset_map(pmd, address);
		vmf.orig_pte = *vmf.pte;
		if (!is_swap_pte(vmf.orig_pte)) {
			pte_unmap(vmf.pte);
			continue;
		}
		swapped_in++;
		ret = do_swap_page(&vmf);

		/* do_swap_page returns VM_FAULT_RETRY with released mmap_lock */
		if (ret & VM_FAULT_RETRY) {
			mmap_read_lock(mm);
			if (hugepage_vma_revalidate(mm, haddr, &vma)) {
				/* vma is no longer available, don't continue to swapin */
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
			/* check if the pmd is still valid */
			if (mm_find_pmd(mm, haddr) != pmd) {
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
		}
		if (ret & VM_FAULT_ERROR) {
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			return false;
		}
	}

	/* Drain LRU add pagevec to remove extra pin on the swapped in pages */
	if (swapped_in)
		lru_add_drain();

	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
	return true;
}

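/*
 * Attempt the actual collapse of one pmd-sized anonymous range: allocate
 * the huge page, swap in any missing ptes, revalidate the vma, isolate
 * and copy the small pages, then install the new huge pmd.
 * Called with mmap_lock held for read; always returns with it released.
 */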
static void collapse_huge_page(struct mm_struct *mm,
				      unsigned long address,
				      struct page **hpage,
				      int node, int referenced, int unmapped)
{
	LIST_HEAD(compound_pagelist);
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *new_page;
	spinlock_t *pmd_ptl, *pte_ptl;
	int isolated = 0, result = 0;
	struct vm_area_struct *vma;
	struct mmu_notifier_range range;
	gfp_t gfp;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/* Only allocate from the target node */
	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;

	/*
	 * Before allocating the hugepage, release the mmap_lock read lock.
	 * The allocation can take potentially a long time if it involves
	 * sync compaction, and we do not need to hold the mmap_lock during
	 * that. We will recheck the vma after taking it again in write mode.
	 */
	mmap_read_unlock(mm);
	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out_nolock;
	}

	if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out_nolock;
	}
	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);

	mmap_read_lock(mm);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	/*
	 * __collapse_huge_page_swapin always returns with mmap_lock locked.
	 * If it fails, we release mmap_lock and jump to out_nolock.
	 * Continuing to collapse causes inconsistency.
	 */
	if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
						     pmd, referenced)) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	mmap_read_unlock(mm);
	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 */
	mmap_write_lock(mm);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result)
		goto out_up_write;
	/* check if the pmd is still valid */
	if (mm_find_pmd(mm, address) != pmd)
		goto out_up_write;

	anon_vma_lock_write(vma->anon_vma);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
				address, address + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	pte = pte_offset_map(pmd, address);
	pte_ptl = pte_lockptr(mm, pmd);

	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
	/*
	 * After this gup_fast can't run anymore. This also removes
	 * any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address
	 * to avoid the risk of CPU bugs in that area.
	 */
	_pmd = pmdp_collapse_flush(vma, address, pmd);
	spin_unlock(pmd_ptl);
	mmu_notifier_invalidate_range_end(&range);

	spin_lock(pte_ptl);
	isolated = __collapse_huge_page_isolate(vma, address, pte,
			&compound_pagelist);
	spin_unlock(pte_ptl);

	if (unlikely(!isolated)) {
		pte_unmap(pte);
		spin_lock(pmd_ptl);
		BUG_ON(!pmd_none(*pmd));
		/*
		 * We can only use set_pmd_at when establishing
		 * hugepmds and never for establishing regular pmds that
		 * point to regular pagetables. Use pmd_populate for that
		 */
		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
		spin_unlock(pmd_ptl);
		anon_vma_unlock_write(vma->anon_vma);
		result = SCAN_FAIL;
		goto out_up_write;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock_write(vma->anon_vma);

	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl,
			&compound_pagelist);
	pte_unmap(pte);
	/*
	 * spin_lock() below is not the equivalent of smp_wmb(), but
	 * the smp_wmb() inside __SetPageUptodate() can be reused to
	 * keep the copy_huge_page writes from becoming visible after
	 * the set_pmd_at() write.
	 */
	__SetPageUptodate(new_page);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);

	spin_lock(pmd_ptl);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(new_page, vma, address, true);
	lru_cache_add_inactive_or_unevictable(new_page, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	spin_unlock(pmd_ptl);

	*hpage = NULL;

	khugepaged_pages_collapsed++;
	result = SCAN_SUCCEED;
out_up_write:
	mmap_write_unlock(mm);
out_nolock:
	if (!IS_ERR_OR_NULL(*hpage))
		mem_cgroup_uncharge(page_folio(*hpage));
	trace_mm_collapse_huge_page(mm, isolated, result);
	return;
}

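/*
 * Scan one pmd-sized anonymous range and decide whether it is a good
 * collapse candidate: enough referenced ptes, not too many none/swap/
 * shared ptes, no uffd-wp protection, no external pins. Returns 1 after
 * invoking collapse_huge_page(), which releases mmap_lock; 0 otherwise.
 */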
static int khugepaged_scan_pmd(struct mm_struct *mm,
			       struct vm_area_struct *vma,
			       unsigned long address,
			       struct page **hpage)
{
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int ret = 0, result = 0, referenced = 0;
	int none_or_zero = 0, shared = 0;
	struct page *page = NULL;
	unsigned long _address;
	spinlock_t *ptl;
	int node = NUMA_NO_NODE, unmapped = 0;
	bool writable = false;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		goto out;
	}

	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (is_swap_pte(pteval)) {
			if (++unmapped <= khugepaged_max_ptes_swap) {
				/*
				 * Always be strict with uffd-wp
				 * enabled swap entries. Please see
				 * comment below for pte_uffd_wp().
				 */
				if (pte_swp_uffd_wp(pteval)) {
					result = SCAN_PTE_UFFD_WP;
					goto out_unmap;
				}
				continue;
			} else {
				result = SCAN_EXCEED_SWAP_PTE;
				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
				goto out_unmap;
			}
		}
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
				goto out_unmap;
			}
		}
		if (pte_uffd_wp(pteval)) {
			/*
			 * Don't collapse the page if any of the small
			 * PTEs are armed with uffd write protection.
			 * Here we can also mark the new huge pmd as
			 * write protected if any of the small ones is
			 * marked but that could bring unknown
			 * userfault messages that fall outside of
			 * the registered range. So, just be simple.
			 */
			result = SCAN_PTE_UFFD_WP;
			goto out_unmap;
		}
		if (pte_write(pteval))
			writable = true;

		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out_unmap;
		}

		if (page_mapcount(page) > 1 &&
				++shared > khugepaged_max_ptes_shared) {
			result = SCAN_EXCEED_SHARED_PTE;
			count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
			goto out_unmap;
		}

		page = compound_head(page);

		/*
		 * Record which node the original page is from and save this
		 * information to khugepaged_node_load[].
		 * Khugepaged will allocate a hugepage from the node that has
		 * the max hit record.
		 */
		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			goto out_unmap;
		}
		khugepaged_node_load[node]++;
		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			goto out_unmap;
		}
		if (PageLocked(page)) {
			result = SCAN_PAGE_LOCK;
			goto out_unmap;
		}
		if (!PageAnon(page)) {
			result = SCAN_PAGE_ANON;
			goto out_unmap;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * Here the check is racy: it may see total_mapcount > refcount
		 * in some cases.
		 * For example, one process with one forked child process.
		 * The parent has the PMD split due to MADV_DONTNEED, then
		 * the child is trying to unmap the whole PMD, but khugepaged
		 * may be scanning the parent between the child clearing the
		 * PageDoubleMap flag and decrementing the mapcount. So
		 * khugepaged may see total_mapcount > refcount.
		 *
		 * But such a case is ephemeral; we could always retry the
		 * collapse later. However it may report a false positive if
		 * the page has excessive GUP pins (i.e. 512). Anyway the
		 * same check will be done again later, so the risk seems low.
		 */
		if (!is_refcount_suitable(page)) {
			result = SCAN_PAGE_COUNT;
			goto out_unmap;
		}
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;
	}
	if (!writable) {
		result = SCAN_PAGE_RO;
	} else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR/2)) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
		ret = 1;
	}
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (ret) {
		node = khugepaged_find_target_node();
		/* collapse_huge_page will return with the mmap_lock released */
		collapse_huge_page(mm, address, hpage, node,
				referenced, unmapped);
	}
out:
	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
				     none_or_zero, result, unmapped);
	return ret;
}

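/*
 * Drop an mm_slot once its process has exited: unhash it, unlink it from
 * the scan list and release the mm reference khugepaged was holding.
 * Called with khugepaged_mm_lock held.
 */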
static void collect_mm_slot(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;

	lockdep_assert_held(&khugepaged_mm_lock);

	if (khugepaged_test_exit(mm)) {
		/* free mm_slot */
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
		 *
		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		 */

		/* khugepaged_mm_lock actually not necessary for the below */
		free_mm_slot(mm_slot);
		mmdrop(mm);
	}
}

Matthew Wilcox (Oracle)396bcc52020-04-06 20:04:35 -07001400#ifdef CONFIG_SHMEM
Song Liu27e1f822019-09-23 15:38:30 -07001401/*
1402 * Notify khugepaged that given addr of the mm is pte-mapped THP. Then
1403 * khugepaged should try to collapse the page table.
1404 */
1405static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
1406 unsigned long addr)
1407{
1408 struct mm_slot *mm_slot;
1409
1410 VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
1411
1412 spin_lock(&khugepaged_mm_lock);
1413 mm_slot = get_mm_slot(mm);
1414 if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
1415 mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
1416 spin_unlock(&khugepaged_mm_lock);
1417 return 0;
1418}
1419
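/*
 * Clear and flush the PMD entry under the PMD lock, then drop the mm's
 * page-table accounting and free the now-unused PTE table. The caller must
 * hold mmap_lock for write (asserted below).
 */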
Pasha Tatashine59a47b82022-02-03 20:49:20 -08001420static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
1421 unsigned long addr, pmd_t *pmdp)
1422{
1423 spinlock_t *ptl;
1424 pmd_t pmd;
1425
Pasha Tatashin80110bb2022-02-03 20:49:24 -08001426 mmap_assert_write_locked(mm);
Pasha Tatashine59a47b82022-02-03 20:49:20 -08001427 ptl = pmd_lock(vma->vm_mm, pmdp);
1428 pmd = pmdp_collapse_flush(vma, addr, pmdp);
1429 spin_unlock(ptl);
1430 mm_dec_nr_ptes(mm);
Pasha Tatashin80110bb2022-02-03 20:49:24 -08001431 page_table_check_pte_clear_range(mm, addr, pmd);
Pasha Tatashine59a47b82022-02-03 20:49:20 -08001432 pte_free(mm, pmd_pgtable(pmd));
1433}
1434
Song Liu27e1f822019-09-23 15:38:30 -07001435/**
Alex Shi336e6b52020-12-14 19:12:01 -08001436 * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
1437 * address haddr.
1438 *
1439 * @mm: process address space where collapse happens
1440 * @addr: THP collapse address
Song Liu27e1f822019-09-23 15:38:30 -07001441 *
 * This function checks whether all the PTEs in the PMD are pointing to the
 * right THP. If so, retract the page table so the THP can later be
 * refaulted in as PMD-mapped.
1445 */
1446void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
1447{
1448 unsigned long haddr = addr & HPAGE_PMD_MASK;
1449 struct vm_area_struct *vma = find_vma(mm, haddr);
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001450 struct page *hpage;
Song Liu27e1f822019-09-23 15:38:30 -07001451 pte_t *start_pte, *pte;
Pasha Tatashine59a47b82022-02-03 20:49:20 -08001452 pmd_t *pmd;
Song Liu27e1f822019-09-23 15:38:30 -07001453 spinlock_t *ptl;
1454 int count = 0;
1455 int i;
1456
1457 if (!vma || !vma->vm_file ||
Miaohe Linfef792a2021-05-04 18:34:15 -07001458 !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
Song Liu27e1f822019-09-23 15:38:30 -07001459 return;
1460
1461 /*
	 * The vm_flags may not have VM_HUGEPAGE if the page was not
	 * collapsed by this mm. But we can still collapse if the page is
	 * a valid THP. Add an extra VM_HUGEPAGE so hugepage_vma_check()
	 * will not fail the vma for missing VM_HUGEPAGE.
1466 */
1467 if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
1468 return;
1469
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001470 hpage = find_lock_page(vma->vm_file->f_mapping,
1471 linear_page_index(vma, haddr));
1472 if (!hpage)
1473 return;
1474
1475 if (!PageHead(hpage))
1476 goto drop_hpage;
1477
Song Liu27e1f822019-09-23 15:38:30 -07001478 pmd = mm_find_pmd(mm, haddr);
1479 if (!pmd)
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001480 goto drop_hpage;
Song Liu27e1f822019-09-23 15:38:30 -07001481
1482 start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
1483
1484 /* step 1: check all mapped PTEs are to the right huge page */
1485 for (i = 0, addr = haddr, pte = start_pte;
1486 i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1487 struct page *page;
1488
1489 /* empty pte, skip */
1490 if (pte_none(*pte))
1491 continue;
1492
1493 /* page swapped out, abort */
1494 if (!pte_present(*pte))
1495 goto abort;
1496
1497 page = vm_normal_page(vma, addr, *pte);
1498
Song Liu27e1f822019-09-23 15:38:30 -07001499 /*
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001500 * Note that uprobe, debugger, or MAP_PRIVATE may change the
1501 * page table, but the new page will not be a subpage of hpage.
Song Liu27e1f822019-09-23 15:38:30 -07001502 */
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001503 if (hpage + i != page)
Song Liu27e1f822019-09-23 15:38:30 -07001504 goto abort;
1505 count++;
1506 }
1507
1508 /* step 2: adjust rmap */
1509 for (i = 0, addr = haddr, pte = start_pte;
1510 i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1511 struct page *page;
1512
1513 if (pte_none(*pte))
1514 continue;
1515 page = vm_normal_page(vma, addr, *pte);
1516 page_remove_rmap(page, false);
1517 }
1518
1519 pte_unmap_unlock(start_pte, ptl);
1520
1521 /* step 3: set proper refcount and mm_counters. */
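	/*
	 * Step 2 removed only the rmap entries; each of those small mappings
	 * also held a page reference and was charged to the mm's file-page
	 * counter, so drop both in bulk here.
	 */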
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001522 if (count) {
Song Liu27e1f822019-09-23 15:38:30 -07001523 page_ref_sub(hpage, count);
1524 add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
1525 }
1526
1527 /* step 4: collapse pmd */
Pasha Tatashine59a47b82022-02-03 20:49:20 -08001528 collapse_and_free_pmd(mm, vma, haddr, pmd);
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001529drop_hpage:
1530 unlock_page(hpage);
1531 put_page(hpage);
Song Liu27e1f822019-09-23 15:38:30 -07001532 return;
1533
1534abort:
1535 pte_unmap_unlock(start_pte, ptl);
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001536 goto drop_hpage;
Song Liu27e1f822019-09-23 15:38:30 -07001537}
1538
Miaohe Lin0edf61e2021-05-04 18:33:37 -07001539static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
Song Liu27e1f822019-09-23 15:38:30 -07001540{
1541 struct mm_struct *mm = mm_slot->mm;
1542 int i;
1543
1544 if (likely(mm_slot->nr_pte_mapped_thp == 0))
Miaohe Lin0edf61e2021-05-04 18:33:37 -07001545 return;
Song Liu27e1f822019-09-23 15:38:30 -07001546
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001547 if (!mmap_write_trylock(mm))
Miaohe Lin0edf61e2021-05-04 18:33:37 -07001548 return;
Song Liu27e1f822019-09-23 15:38:30 -07001549
1550 if (unlikely(khugepaged_test_exit(mm)))
1551 goto out;
1552
1553 for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
1554 collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);
1555
1556out:
1557 mm_slot->nr_pte_mapped_thp = 0;
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001558 mmap_write_unlock(mm);
Song Liu27e1f822019-09-23 15:38:30 -07001559}
1560
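/*
 * Called by collapse_file() once the page cache has been switched over to the
 * huge page: walk every VMA mapping this pgoff and drop the old page tables so
 * the THP can be refaulted as PMD-mapped. VMAs with an anon_vma (which may
 * hold private COW pages) are skipped, see below.
 */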
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001561static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1562{
1563 struct vm_area_struct *vma;
Hugh Dickins18e77602020-08-06 23:26:22 -07001564 struct mm_struct *mm;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001565 unsigned long addr;
Pasha Tatashine59a47b82022-02-03 20:49:20 -08001566 pmd_t *pmd;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001567
1568 i_mmap_lock_write(mapping);
1569 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
Song Liu27e1f822019-09-23 15:38:30 -07001570 /*
		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
		 * got written to. These VMAs are likely not worth the cost of
		 * mmap_write_lock(mm), as the PMD-mapping is likely to be
		 * split again later.
		 *
		 * Note that the vma->anon_vma check is racy: it can be set up
		 * by the fault path after the check but before we take
		 * mmap_lock. But the page lock prevents any new ptes of the
		 * page from being established, so we are safe.
		 *
		 * An alternative would be to drop the check and instead verify
		 * that the page table is clear before calling
		 * pmdp_collapse_flush() under ptl. That has a better chance of
		 * recovering a THP for the VMA, but at a higher cost.
1585 */
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001586 if (vma->anon_vma)
1587 continue;
1588 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1589 if (addr & ~HPAGE_PMD_MASK)
1590 continue;
1591 if (vma->vm_end < addr + HPAGE_PMD_SIZE)
1592 continue;
Hugh Dickins18e77602020-08-06 23:26:22 -07001593 mm = vma->vm_mm;
1594 pmd = mm_find_pmd(mm, addr);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001595 if (!pmd)
1596 continue;
1597 /*
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07001598 * We need exclusive mmap_lock to retract page table.
Song Liu27e1f822019-09-23 15:38:30 -07001599 *
1600 * We use trylock due to lock inversion: we need to acquire
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07001601 * mmap_lock while holding page lock. Fault path does it in
Song Liu27e1f822019-09-23 15:38:30 -07001602 * reverse order. Trylock is a way to avoid deadlock.
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001603 */
Hugh Dickins18e77602020-08-06 23:26:22 -07001604 if (mmap_write_trylock(mm)) {
Pasha Tatashine59a47b82022-02-03 20:49:20 -08001605 if (!khugepaged_test_exit(mm))
1606 collapse_and_free_pmd(mm, vma, addr, pmd);
Hugh Dickins18e77602020-08-06 23:26:22 -07001607 mmap_write_unlock(mm);
Song Liu27e1f822019-09-23 15:38:30 -07001608 } else {
1609 /* Try again later */
Hugh Dickins18e77602020-08-06 23:26:22 -07001610 khugepaged_add_pte_mapped_thp(mm, addr);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001611 }
1612 }
1613 i_mmap_unlock_write(mapping);
1614}
1615
1616/**
 * collapse_file - collapse filemap/tmpfs/shmem pages into a huge page.
 *
 * @mm: process address space where the collapse happens
 * @file: file that the collapse operates on
 * @start: collapse start address
 * @hpage: newly allocated huge page for the collapse
 * @node: node the new huge page is allocated from
1624 *
 * The basic scheme is simple; the details are more complex:
Hugh Dickins87c460a2018-11-30 14:10:43 -08001626 * - allocate and lock a new huge page;
Matthew Wilcox77da9382017-12-04 14:56:08 -05001627 * - scan page cache replacing old pages with the new one
Song Liu99cb0db2019-09-23 15:38:00 -07001628 * + swap/gup in pages if necessary;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001629 * + fill in gaps;
Matthew Wilcox77da9382017-12-04 14:56:08 -05001630 * + keep old pages around in case rollback is required;
1631 * - if replacing succeeds:
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001632 * + copy data over;
1633 * + free old pages;
Hugh Dickins87c460a2018-11-30 14:10:43 -08001634 * + unlock huge page;
 * - if replacing fails:
1636 * + put all pages back and unfreeze them;
Matthew Wilcox77da9382017-12-04 14:56:08 -05001637 * + restore gaps in the page cache;
Hugh Dickins87c460a2018-11-30 14:10:43 -08001638 * + unlock and free huge page;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001639 */
Song Liu579c5712019-09-23 15:37:57 -07001640static void collapse_file(struct mm_struct *mm,
1641 struct file *file, pgoff_t start,
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001642 struct page **hpage, int node)
1643{
Song Liu579c5712019-09-23 15:37:57 -07001644 struct address_space *mapping = file->f_mapping;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001645 gfp_t gfp;
Matthew Wilcox77da9382017-12-04 14:56:08 -05001646 struct page *new_page;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001647 pgoff_t index, end = start + HPAGE_PMD_NR;
1648 LIST_HEAD(pagelist);
Matthew Wilcox77da9382017-12-04 14:56:08 -05001649 XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001650 int nr_none = 0, result = SCAN_SUCCEED;
Song Liu99cb0db2019-09-23 15:38:00 -07001651 bool is_shmem = shmem_file(file);
Muchun Songbf9ecea2021-02-24 12:03:27 -08001652 int nr;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001653
Song Liu99cb0db2019-09-23 15:38:00 -07001654 VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001655 VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1656
1657 /* Only allocate from the target node */
Michal Hocko41b61672017-01-10 16:57:42 -08001658 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001659
1660 new_page = khugepaged_alloc_page(hpage, gfp, node);
1661 if (!new_page) {
1662 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1663 goto out;
1664 }
1665
Matthew Wilcox (Oracle)8f425e42021-06-25 09:27:04 -04001666 if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) {
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001667 result = SCAN_CGROUP_CHARGE_FAIL;
1668 goto out;
1669 }
Johannes Weiner9d82c692020-06-03 16:02:04 -07001670 count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001671
Matthew Wilcox (Oracle)6b24ca42020-06-27 22:19:08 -04001672 /*
1673 * Ensure we have slots for all the pages in the range. This is
	 * almost certainly a no-op because most of the pages must be present.
1675 */
Hugh Dickins95feeab2018-11-30 14:10:50 -08001676 do {
1677 xas_lock_irq(&xas);
1678 xas_create_range(&xas);
1679 if (!xas_error(&xas))
1680 break;
1681 xas_unlock_irq(&xas);
1682 if (!xas_nomem(&xas, GFP_KERNEL)) {
Hugh Dickins95feeab2018-11-30 14:10:50 -08001683 result = SCAN_FAIL;
1684 goto out;
1685 }
1686 } while (1);
1687
Hugh Dickins042a3082018-11-30 14:10:39 -08001688 __SetPageLocked(new_page);
Song Liu99cb0db2019-09-23 15:38:00 -07001689 if (is_shmem)
1690 __SetPageSwapBacked(new_page);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001691 new_page->index = start;
1692 new_page->mapping = mapping;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001693
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001694 /*
Hugh Dickins87c460a2018-11-30 14:10:43 -08001695 * At this point the new_page is locked and not up-to-date.
1696 * It's safe to insert it into the page cache, because nobody would
1697 * be able to map it or use it in another way until we unlock it.
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001698 */
1699
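	/*
	 * Walk the target range: pages already present are locked, isolated
	 * from the LRU and kept on pagelist for a possible rollback, while
	 * new_page is stored into every slot of the xarray (including holes,
	 * in the shmem case).
	 */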
Matthew Wilcox77da9382017-12-04 14:56:08 -05001700 xas_set(&xas, start);
1701 for (index = start; index < end; index++) {
1702 struct page *page = xas_next(&xas);
1703
1704 VM_BUG_ON(index != xas.xa_index);
Song Liu99cb0db2019-09-23 15:38:00 -07001705 if (is_shmem) {
1706 if (!page) {
1707 /*
				 * Stop if the extent has been truncated or
				 * hole-punched, and is now completely empty.
1711 */
1712 if (index == start) {
1713 if (!xas_next_entry(&xas, end - 1)) {
1714 result = SCAN_TRUNCATED;
1715 goto xa_locked;
1716 }
1717 xas_set(&xas, index);
1718 }
1719 if (!shmem_charge(mapping->host, 1)) {
1720 result = SCAN_FAIL;
Hugh Dickins042a3082018-11-30 14:10:39 -08001721 goto xa_locked;
Hugh Dickins701270f2018-11-30 14:10:25 -08001722 }
Song Liu99cb0db2019-09-23 15:38:00 -07001723 xas_store(&xas, new_page);
1724 nr_none++;
1725 continue;
Hugh Dickins701270f2018-11-30 14:10:25 -08001726 }
Song Liu99cb0db2019-09-23 15:38:00 -07001727
1728 if (xa_is_value(page) || !PageUptodate(page)) {
1729 xas_unlock_irq(&xas);
1730 /* swap in or instantiate fallocated page */
1731 if (shmem_getpage(mapping->host, index, &page,
Hugh Dickinsacdd9f8e2021-09-02 14:54:34 -07001732 SGP_NOALLOC)) {
Song Liu99cb0db2019-09-23 15:38:00 -07001733 result = SCAN_FAIL;
1734 goto xa_unlocked;
1735 }
1736 } else if (trylock_page(page)) {
1737 get_page(page);
1738 xas_unlock_irq(&xas);
1739 } else {
1740 result = SCAN_PAGE_LOCK;
Hugh Dickins042a3082018-11-30 14:10:39 -08001741 goto xa_locked;
Matthew Wilcox77da9382017-12-04 14:56:08 -05001742 }
Song Liu99cb0db2019-09-23 15:38:00 -07001743 } else { /* !is_shmem */
1744 if (!page || xa_is_value(page)) {
1745 xas_unlock_irq(&xas);
1746 page_cache_sync_readahead(mapping, &file->f_ra,
1747 file, index,
David Howellse5a59d32020-09-04 16:36:16 -07001748 end - index);
Song Liu99cb0db2019-09-23 15:38:00 -07001749 /* drain pagevecs to help isolate_lru_page() */
1750 lru_add_drain();
1751 page = find_lock_page(mapping, index);
1752 if (unlikely(page == NULL)) {
1753 result = SCAN_FAIL;
1754 goto xa_unlocked;
1755 }
Song Liu75f36062019-11-30 17:57:19 -08001756 } else if (PageDirty(page)) {
1757 /*
1758 * khugepaged only works on read-only fd,
1759 * so this page is dirty because it hasn't
1760 * been flushed since first write. There
1761 * won't be new dirty pages.
1762 *
1763 * Trigger async flush here and hope the
1764 * writeback is done when khugepaged
1765 * revisits this page.
1766 *
				 * This is a one-off situation. We are not
				 * forcing writeback in a loop.
1769 */
1770 xas_unlock_irq(&xas);
1771 filemap_flush(mapping);
1772 result = SCAN_FAIL;
1773 goto xa_unlocked;
Rongwei Wang74c42e12021-10-28 14:36:27 -07001774 } else if (PageWriteback(page)) {
1775 xas_unlock_irq(&xas);
1776 result = SCAN_FAIL;
1777 goto xa_unlocked;
Song Liu99cb0db2019-09-23 15:38:00 -07001778 } else if (trylock_page(page)) {
1779 get_page(page);
1780 xas_unlock_irq(&xas);
1781 } else {
1782 result = SCAN_PAGE_LOCK;
1783 goto xa_locked;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001784 }
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001785 }
1786
1787 /*
Matthew Wilcoxb93b0162018-04-10 16:36:56 -07001788 * The page must be locked, so we can drop the i_pages lock
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001789 * without racing with truncate.
1790 */
1791 VM_BUG_ON_PAGE(!PageLocked(page), page);
Song Liu4655e5e2019-11-15 17:34:53 -08001792
1793 /* make sure the page is up to date */
1794 if (unlikely(!PageUptodate(page))) {
1795 result = SCAN_FAIL;
1796 goto out_unlock;
1797 }
Hugh Dickins06a5e122018-11-30 14:10:47 -08001798
1799 /*
1800 * If file was truncated then extended, or hole-punched, before
1801 * we locked the first page, then a THP might be there already.
1802 */
1803 if (PageTransCompound(page)) {
1804 result = SCAN_PAGE_COMPOUND;
1805 goto out_unlock;
1806 }
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001807
1808 if (page_mapping(page) != mapping) {
1809 result = SCAN_TRUNCATED;
1810 goto out_unlock;
1811 }
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001812
Rongwei Wang74c42e12021-10-28 14:36:27 -07001813 if (!is_shmem && (PageDirty(page) ||
1814 PageWriteback(page))) {
Song Liu4655e5e2019-11-15 17:34:53 -08001815 /*
1816 * khugepaged only works on read-only fd, so this
1817 * page is dirty because it hasn't been flushed
1818 * since first write.
1819 */
1820 result = SCAN_FAIL;
1821 goto out_unlock;
1822 }
1823
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001824 if (isolate_lru_page(page)) {
1825 result = SCAN_DEL_PAGE_LRU;
Hugh Dickins042a3082018-11-30 14:10:39 -08001826 goto out_unlock;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001827 }
1828
Song Liu99cb0db2019-09-23 15:38:00 -07001829 if (page_has_private(page) &&
1830 !try_to_release_page(page, GFP_KERNEL)) {
1831 result = SCAN_PAGE_HAS_PRIVATE;
Hugh Dickins2f33a702020-05-27 22:20:43 -07001832 putback_lru_page(page);
Song Liu99cb0db2019-09-23 15:38:00 -07001833 goto out_unlock;
1834 }
1835
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001836 if (page_mapped(page))
Matthew Wilcox977fbdc2018-01-31 16:17:36 -08001837 unmap_mapping_pages(mapping, index, 1, false);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001838
Matthew Wilcox77da9382017-12-04 14:56:08 -05001839 xas_lock_irq(&xas);
1840 xas_set(&xas, index);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001841
Matthew Wilcox77da9382017-12-04 14:56:08 -05001842 VM_BUG_ON_PAGE(page != xas_load(&xas), page);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001843 VM_BUG_ON_PAGE(page_mapped(page), page);
1844
1845 /*
1846 * The page is expected to have page_count() == 3:
1847 * - we hold a pin on it;
Matthew Wilcox77da9382017-12-04 14:56:08 -05001848 * - one reference from page cache;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001849 * - one from isolate_lru_page;
1850 */
1851 if (!page_ref_freeze(page, 3)) {
1852 result = SCAN_PAGE_COUNT;
Hugh Dickins042a3082018-11-30 14:10:39 -08001853 xas_unlock_irq(&xas);
1854 putback_lru_page(page);
1855 goto out_unlock;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001856 }
1857
1858 /*
1859 * Add the page to the list to be able to undo the collapse if
		 * something goes wrong.
1861 */
1862 list_add_tail(&page->lru, &pagelist);
1863
1864 /* Finally, replace with the new page. */
Matthew Wilcox (Oracle)41011962019-09-23 15:34:52 -07001865 xas_store(&xas, new_page);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001866 continue;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001867out_unlock:
1868 unlock_page(page);
1869 put_page(page);
Hugh Dickins042a3082018-11-30 14:10:39 -08001870 goto xa_unlocked;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001871 }
Muchun Songbf9ecea2021-02-24 12:03:27 -08001872 nr = thp_nr_pages(new_page);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001873
Song Liu99cb0db2019-09-23 15:38:00 -07001874 if (is_shmem)
Muchun Song57b28472021-02-24 12:03:31 -08001875 __mod_lruvec_page_state(new_page, NR_SHMEM_THPS, nr);
Song Liu09d91cd2019-09-23 15:38:03 -07001876 else {
Muchun Songbf9ecea2021-02-24 12:03:27 -08001877 __mod_lruvec_page_state(new_page, NR_FILE_THPS, nr);
Song Liu09d91cd2019-09-23 15:38:03 -07001878 filemap_nr_thps_inc(mapping);
Collin Fijalkovicheb6ecbe2021-06-30 18:51:32 -07001879 /*
1880 * Paired with smp_mb() in do_dentry_open() to ensure
1881 * i_writecount is up to date and the update to nr_thps is
1882 * visible. Ensures the page cache will be truncated if the
1883 * file is opened writable.
1884 */
1885 smp_mb();
1886 if (inode_is_open_for_write(mapping->host)) {
1887 result = SCAN_FAIL;
1888 __mod_lruvec_page_state(new_page, NR_FILE_THPS, -nr);
1889 filemap_nr_thps_dec(mapping);
1890 goto xa_locked;
1891 }
Song Liu09d91cd2019-09-23 15:38:03 -07001892 }
Song Liu99cb0db2019-09-23 15:38:00 -07001893
Hugh Dickins042a3082018-11-30 14:10:39 -08001894 if (nr_none) {
Johannes Weiner9d82c692020-06-03 16:02:04 -07001895 __mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none);
Song Liu99cb0db2019-09-23 15:38:00 -07001896 if (is_shmem)
Johannes Weiner9d82c692020-06-03 16:02:04 -07001897 __mod_lruvec_page_state(new_page, NR_SHMEM, nr_none);
Hugh Dickins042a3082018-11-30 14:10:39 -08001898 }
1899
Matthew Wilcox (Oracle)6b24ca42020-06-27 22:19:08 -04001900 /* Join all the small entries into a single multi-index entry */
1901 xas_set_order(&xas, start, HPAGE_PMD_ORDER);
1902 xas_store(&xas, new_page);
Hugh Dickins042a3082018-11-30 14:10:39 -08001903xa_locked:
1904 xas_unlock_irq(&xas);
Matthew Wilcox77da9382017-12-04 14:56:08 -05001905xa_unlocked:
Hugh Dickins042a3082018-11-30 14:10:39 -08001906
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001907 if (result == SCAN_SUCCEED) {
Matthew Wilcox77da9382017-12-04 14:56:08 -05001908 struct page *page, *tmp;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001909
1910 /*
		 * Replacing the old pages with the new one has succeeded; now
		 * we need to copy the content and free the old pages.
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001913 */
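		/*
		 * Subpages that had no source page (the nr_none holes) are
		 * zero-filled rather than copied.
		 */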
Hugh Dickins2af8ff22018-11-30 14:10:35 -08001914 index = start;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001915 list_for_each_entry_safe(page, tmp, &pagelist, lru) {
Hugh Dickins2af8ff22018-11-30 14:10:35 -08001916 while (index < page->index) {
1917 clear_highpage(new_page + (index % HPAGE_PMD_NR));
1918 index++;
1919 }
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001920 copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
1921 page);
1922 list_del(&page->lru);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001923 page->mapping = NULL;
Hugh Dickins042a3082018-11-30 14:10:39 -08001924 page_ref_unfreeze(page, 1);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001925 ClearPageActive(page);
1926 ClearPageUnevictable(page);
Hugh Dickins042a3082018-11-30 14:10:39 -08001927 unlock_page(page);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001928 put_page(page);
Hugh Dickins2af8ff22018-11-30 14:10:35 -08001929 index++;
1930 }
1931 while (index < end) {
1932 clear_highpage(new_page + (index % HPAGE_PMD_NR));
1933 index++;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001934 }
1935
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001936 SetPageUptodate(new_page);
Hugh Dickins87c460a2018-11-30 14:10:43 -08001937 page_ref_add(new_page, HPAGE_PMD_NR - 1);
Johannes Weiner6058eae2020-06-03 16:02:40 -07001938 if (is_shmem)
Song Liu99cb0db2019-09-23 15:38:00 -07001939 set_page_dirty(new_page);
Johannes Weiner6058eae2020-06-03 16:02:40 -07001940 lru_cache_add(new_page);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001941
Hugh Dickins042a3082018-11-30 14:10:39 -08001942 /*
1943 * Remove pte page tables, so we can re-fault the page as huge.
1944 */
1945 retract_page_tables(mapping, start);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001946 *hpage = NULL;
Yang Shi87aa7522018-08-17 15:45:29 -07001947
1948 khugepaged_pages_collapsed++;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001949 } else {
Matthew Wilcox77da9382017-12-04 14:56:08 -05001950 struct page *page;
Hugh Dickinsaaa52e32018-11-30 14:10:29 -08001951
Matthew Wilcox77da9382017-12-04 14:56:08 -05001952 /* Something went wrong: roll back page cache changes */
Matthew Wilcox77da9382017-12-04 14:56:08 -05001953 xas_lock_irq(&xas);
Hugh Dickinsaaa52e32018-11-30 14:10:29 -08001954 mapping->nrpages -= nr_none;
Song Liu99cb0db2019-09-23 15:38:00 -07001955
1956 if (is_shmem)
1957 shmem_uncharge(mapping->host, nr_none);
Hugh Dickinsaaa52e32018-11-30 14:10:29 -08001958
Matthew Wilcox77da9382017-12-04 14:56:08 -05001959 xas_set(&xas, start);
1960 xas_for_each(&xas, page, end - 1) {
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001961 page = list_first_entry_or_null(&pagelist,
1962 struct page, lru);
Matthew Wilcox77da9382017-12-04 14:56:08 -05001963 if (!page || xas.xa_index < page->index) {
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001964 if (!nr_none)
1965 break;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001966 nr_none--;
Johannes Weiner59749e62016-12-12 16:43:35 -08001967 /* Put holes back where they were */
Matthew Wilcox77da9382017-12-04 14:56:08 -05001968 xas_store(&xas, NULL);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001969 continue;
1970 }
1971
Matthew Wilcox77da9382017-12-04 14:56:08 -05001972 VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001973
1974 /* Unfreeze the page. */
1975 list_del(&page->lru);
1976 page_ref_unfreeze(page, 2);
Matthew Wilcox77da9382017-12-04 14:56:08 -05001977 xas_store(&xas, page);
1978 xas_pause(&xas);
1979 xas_unlock_irq(&xas);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001980 unlock_page(page);
Hugh Dickins042a3082018-11-30 14:10:39 -08001981 putback_lru_page(page);
Matthew Wilcox77da9382017-12-04 14:56:08 -05001982 xas_lock_irq(&xas);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001983 }
1984 VM_BUG_ON(nr_none);
Matthew Wilcox77da9382017-12-04 14:56:08 -05001985 xas_unlock_irq(&xas);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001986
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001987 new_page->mapping = NULL;
1988 }
Hugh Dickins042a3082018-11-30 14:10:39 -08001989
1990 unlock_page(new_page);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001991out:
1992 VM_BUG_ON(!list_empty(&pagelist));
Johannes Weiner9d82c692020-06-03 16:02:04 -07001993 if (!IS_ERR_OR_NULL(*hpage))
Matthew Wilcox (Oracle)bbc6b702021-05-01 20:42:23 -04001994 mem_cgroup_uncharge(page_folio(*hpage));
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001995 /* TODO: tracepoints */
1996}
1997
Song Liu579c5712019-09-23 15:37:57 -07001998static void khugepaged_scan_file(struct mm_struct *mm,
1999 struct file *file, pgoff_t start, struct page **hpage)
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002000{
2001 struct page *page = NULL;
Song Liu579c5712019-09-23 15:37:57 -07002002 struct address_space *mapping = file->f_mapping;
Matthew Wilcox85b392d2017-12-04 15:06:23 -05002003 XA_STATE(xas, &mapping->i_pages, start);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002004 int present, swap;
2005 int node = NUMA_NO_NODE;
2006 int result = SCAN_SUCCEED;
2007
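	/*
	 * Scan the page cache range under RCU only: count present pages and
	 * swap entries, record their nodes, and abort on anything that rules
	 * out a collapse. The heavy lifting is left to collapse_file().
	 */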
2008 present = 0;
2009 swap = 0;
2010 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
2011 rcu_read_lock();
Matthew Wilcox85b392d2017-12-04 15:06:23 -05002012 xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
2013 if (xas_retry(&xas, page))
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002014 continue;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002015
Matthew Wilcox85b392d2017-12-04 15:06:23 -05002016 if (xa_is_value(page)) {
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002017 if (++swap > khugepaged_max_ptes_swap) {
2018 result = SCAN_EXCEED_SWAP_PTE;
Yang Yange9ea8742022-01-14 14:07:55 -08002019 count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002020 break;
2021 }
2022 continue;
2023 }
2024
Matthew Wilcox (Oracle)6b24ca42020-06-27 22:19:08 -04002025 /*
2026 * XXX: khugepaged should compact smaller compound pages
		 * into a PMD-sized page.
2028 */
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002029 if (PageTransCompound(page)) {
2030 result = SCAN_PAGE_COMPOUND;
2031 break;
2032 }
2033
2034 node = page_to_nid(page);
2035 if (khugepaged_scan_abort(node)) {
2036 result = SCAN_SCAN_ABORT;
2037 break;
2038 }
2039 khugepaged_node_load[node]++;
2040
2041 if (!PageLRU(page)) {
2042 result = SCAN_PAGE_LRU;
2043 break;
2044 }
2045
Song Liu99cb0db2019-09-23 15:38:00 -07002046 if (page_count(page) !=
2047 1 + page_mapcount(page) + page_has_private(page)) {
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002048 result = SCAN_PAGE_COUNT;
2049 break;
2050 }
2051
2052 /*
2053 * We probably should check if the page is referenced here, but
2054 * nobody would transfer pte_young() to PageReferenced() for us.
2055 * And rmap walk here is just too costly...
2056 */
2057
2058 present++;
2059
2060 if (need_resched()) {
Matthew Wilcox85b392d2017-12-04 15:06:23 -05002061 xas_pause(&xas);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002062 cond_resched_rcu();
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002063 }
2064 }
2065 rcu_read_unlock();
2066
2067 if (result == SCAN_SUCCEED) {
2068 if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
2069 result = SCAN_EXCEED_NONE_PTE;
Yang Yange9ea8742022-01-14 14:07:55 -08002070 count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002071 } else {
2072 node = khugepaged_find_target_node();
Song Liu579c5712019-09-23 15:37:57 -07002073 collapse_file(mm, file, start, hpage, node);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002074 }
2075 }
2076
2077 /* TODO: tracepoints */
2078}
2079#else
Song Liu579c5712019-09-23 15:37:57 -07002080static void khugepaged_scan_file(struct mm_struct *mm,
2081 struct file *file, pgoff_t start, struct page **hpage)
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002082{
2083 BUILD_BUG();
2084}
Song Liu27e1f822019-09-23 15:38:30 -07002085
Miaohe Lin0edf61e2021-05-04 18:33:37 -07002086static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
Song Liu27e1f822019-09-23 15:38:30 -07002087{
Song Liu27e1f822019-09-23 15:38:30 -07002088}
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002089#endif
2090
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002091static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2092 struct page **hpage)
2093 __releases(&khugepaged_mm_lock)
2094 __acquires(&khugepaged_mm_lock)
2095{
2096 struct mm_slot *mm_slot;
2097 struct mm_struct *mm;
2098 struct vm_area_struct *vma;
2099 int progress = 0;
2100
2101 VM_BUG_ON(!pages);
Lance Roy35f3aa32018-10-04 23:45:47 -07002102 lockdep_assert_held(&khugepaged_mm_lock);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002103
2104 if (khugepaged_scan.mm_slot)
2105 mm_slot = khugepaged_scan.mm_slot;
2106 else {
2107 mm_slot = list_entry(khugepaged_scan.mm_head.next,
2108 struct mm_slot, mm_node);
2109 khugepaged_scan.address = 0;
2110 khugepaged_scan.mm_slot = mm_slot;
2111 }
2112 spin_unlock(&khugepaged_mm_lock);
Song Liu27e1f822019-09-23 15:38:30 -07002113 khugepaged_collapse_pte_mapped_thps(mm_slot);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002114
2115 mm = mm_slot->mm;
Yang Shi3b454ad2018-01-31 16:18:28 -08002116 /*
2117 * Don't wait for semaphore (to avoid long wait times). Just move to
2118 * the next mm on the list.
2119 */
2120 vma = NULL;
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002121 if (unlikely(!mmap_read_trylock(mm)))
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07002122 goto breakouterloop_mmap_lock;
Yang Shi3b454ad2018-01-31 16:18:28 -08002123 if (likely(!khugepaged_test_exit(mm)))
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002124 vma = find_vma(mm, khugepaged_scan.address);
2125
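	/*
	 * Resume the walk at the saved scan address; stop once 'pages' PTEs
	 * have been covered, the mm is exiting, or the mmap_lock had to be
	 * dropped for a collapse.
	 */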
2126 progress++;
2127 for (; vma; vma = vma->vm_next) {
2128 unsigned long hstart, hend;
2129
2130 cond_resched();
2131 if (unlikely(khugepaged_test_exit(mm))) {
2132 progress++;
2133 break;
2134 }
Song Liu50f8b922018-08-17 15:47:00 -07002135 if (!hugepage_vma_check(vma, vma->vm_flags)) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002136skip:
2137 progress++;
2138 continue;
2139 }
2140 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2141 hend = vma->vm_end & HPAGE_PMD_MASK;
2142 if (hstart >= hend)
2143 goto skip;
2144 if (khugepaged_scan.address > hend)
2145 goto skip;
2146 if (khugepaged_scan.address < hstart)
2147 khugepaged_scan.address = hstart;
2148 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
Matthew Wilcox (Oracle)396bcc52020-04-06 20:04:35 -07002149 if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma))
2150 goto skip;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002151
2152 while (khugepaged_scan.address < hend) {
2153 int ret;
2154 cond_resched();
2155 if (unlikely(khugepaged_test_exit(mm)))
2156 goto breakouterloop;
2157
2158 VM_BUG_ON(khugepaged_scan.address < hstart ||
2159 khugepaged_scan.address + HPAGE_PMD_SIZE >
2160 hend);
Song Liu99cb0db2019-09-23 15:38:00 -07002161 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
Matthew Wilcox (Oracle)396bcc52020-04-06 20:04:35 -07002162 struct file *file = get_file(vma->vm_file);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002163 pgoff_t pgoff = linear_page_index(vma,
2164 khugepaged_scan.address);
Song Liu99cb0db2019-09-23 15:38:00 -07002165
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002166 mmap_read_unlock(mm);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002167 ret = 1;
Song Liu579c5712019-09-23 15:37:57 -07002168 khugepaged_scan_file(mm, file, pgoff, hpage);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002169 fput(file);
2170 } else {
2171 ret = khugepaged_scan_pmd(mm, vma,
2172 khugepaged_scan.address,
2173 hpage);
2174 }
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002175 /* move to next address */
2176 khugepaged_scan.address += HPAGE_PMD_SIZE;
2177 progress += HPAGE_PMD_NR;
2178 if (ret)
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07002179 /* we released mmap_lock so break loop */
2180 goto breakouterloop_mmap_lock;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002181 if (progress >= pages)
2182 goto breakouterloop;
2183 }
2184 }
2185breakouterloop:
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002186 mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07002187breakouterloop_mmap_lock:
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002188
2189 spin_lock(&khugepaged_mm_lock);
2190 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2191 /*
2192 * Release the current mm_slot if this mm is about to die, or
2193 * if we scanned all vmas of this mm.
2194 */
2195 if (khugepaged_test_exit(mm) || !vma) {
2196 /*
2197 * Make sure that if mm_users is reaching zero while
2198 * khugepaged runs here, khugepaged_exit will find
2199 * mm_slot not pointing to the exiting mm.
2200 */
2201 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2202 khugepaged_scan.mm_slot = list_entry(
2203 mm_slot->mm_node.next,
2204 struct mm_slot, mm_node);
2205 khugepaged_scan.address = 0;
2206 } else {
2207 khugepaged_scan.mm_slot = NULL;
2208 khugepaged_full_scans++;
2209 }
2210
2211 collect_mm_slot(mm_slot);
2212 }
2213
2214 return progress;
2215}
2216
2217static int khugepaged_has_work(void)
2218{
2219 return !list_empty(&khugepaged_scan.mm_head) &&
2220 khugepaged_enabled();
2221}
2222
2223static int khugepaged_wait_event(void)
2224{
2225 return !list_empty(&khugepaged_scan.mm_head) ||
2226 kthread_should_stop();
2227}
2228
2229static void khugepaged_do_scan(void)
2230{
2231 struct page *hpage = NULL;
2232 unsigned int progress = 0, pass_through_head = 0;
Yanfei Xu89dc6a92021-05-04 18:34:12 -07002233 unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002234 bool wait = true;
2235
Kirill A. Shutemova980df32020-06-03 16:00:12 -07002236 lru_add_drain_all();
2237
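	/*
	 * Keep scanning mm slots until 'pages' PTEs have been covered or the
	 * mm list has been passed through twice; bail out early if the
	 * thread should stop or freeze.
	 */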
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002238 while (progress < pages) {
2239 if (!khugepaged_prealloc_page(&hpage, &wait))
2240 break;
2241
2242 cond_resched();
2243
2244 if (unlikely(kthread_should_stop() || try_to_freeze()))
2245 break;
2246
2247 spin_lock(&khugepaged_mm_lock);
2248 if (!khugepaged_scan.mm_slot)
2249 pass_through_head++;
2250 if (khugepaged_has_work() &&
2251 pass_through_head < 2)
2252 progress += khugepaged_scan_mm_slot(pages - progress,
2253 &hpage);
2254 else
2255 progress = pages;
2256 spin_unlock(&khugepaged_mm_lock);
2257 }
2258
2259 if (!IS_ERR_OR_NULL(hpage))
2260 put_page(hpage);
2261}
2262
2263static bool khugepaged_should_wakeup(void)
2264{
2265 return kthread_should_stop() ||
2266 time_after_eq(jiffies, khugepaged_sleep_expire);
2267}
2268
2269static void khugepaged_wait_work(void)
2270{
2271 if (khugepaged_has_work()) {
2272 const unsigned long scan_sleep_jiffies =
2273 msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2274
2275 if (!scan_sleep_jiffies)
2276 return;
2277
2278 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2279 wait_event_freezable_timeout(khugepaged_wait,
2280 khugepaged_should_wakeup(),
2281 scan_sleep_jiffies);
2282 return;
2283 }
2284
2285 if (khugepaged_enabled())
2286 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2287}
2288
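/* Main loop of the khugepaged kernel thread: scan, then sleep, until stopped. */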
2289static int khugepaged(void *none)
2290{
2291 struct mm_slot *mm_slot;
2292
2293 set_freezable();
2294 set_user_nice(current, MAX_NICE);
2295
2296 while (!kthread_should_stop()) {
2297 khugepaged_do_scan();
2298 khugepaged_wait_work();
2299 }
2300
2301 spin_lock(&khugepaged_mm_lock);
2302 mm_slot = khugepaged_scan.mm_slot;
2303 khugepaged_scan.mm_slot = NULL;
2304 if (mm_slot)
2305 collect_mm_slot(mm_slot);
2306 spin_unlock(&khugepaged_mm_lock);
2307 return 0;
2308}
2309
2310static void set_recommended_min_free_kbytes(void)
2311{
2312 struct zone *zone;
2313 int nr_zones = 0;
2314 unsigned long recommended_min;
2315
Liangcai Fanbd3400e2021-11-05 13:41:36 -07002316 if (!khugepaged_enabled()) {
2317 calculate_min_free_kbytes();
2318 goto update_wmarks;
2319 }
2320
Joonsoo Kimb7d349c2018-04-10 16:30:27 -07002321 for_each_populated_zone(zone) {
2322 /*
2323 * We don't need to worry about fragmentation of
2324 * ZONE_MOVABLE since it only has movable pages.
2325 */
2326 if (zone_idx(zone) > gfp_zone(GFP_USER))
2327 continue;
2328
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002329 nr_zones++;
Joonsoo Kimb7d349c2018-04-10 16:30:27 -07002330 }
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002331
2332 /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2333 recommended_min = pageblock_nr_pages * nr_zones * 2;
2334
2335 /*
2336 * Make sure that on average at least two pageblocks are almost free
2337 * of another type, one for a migratetype to fall back to and a
	 * second to avoid subsequent fallbacks of other types. There are 3
	 * MIGRATE_TYPES we care about.
2340 */
2341 recommended_min += pageblock_nr_pages * nr_zones *
2342 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
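	/*
	 * Illustrative example (assuming 4KiB pages and 2MiB pageblocks, so
	 * pageblock_nr_pages == 512, with two populated zones):
	 * 512 * 2 * 2 + 512 * 2 * 3 * 3 = 11264 pages, i.e. ~44MiB before
	 * the 5%-of-lowmem cap below.
	 */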
2343
	/* don't ever allow reserving more than 5% of the lowmem */
2345 recommended_min = min(recommended_min,
2346 (unsigned long) nr_free_buffer_pages() / 20);
2347 recommended_min <<= (PAGE_SHIFT-10);
2348
2349 if (recommended_min > min_free_kbytes) {
2350 if (user_min_free_kbytes >= 0)
2351 pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2352 min_free_kbytes, recommended_min);
2353
2354 min_free_kbytes = recommended_min;
2355 }
Liangcai Fanbd3400e2021-11-05 13:41:36 -07002356
2357update_wmarks:
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002358 setup_per_zone_wmarks();
2359}
2360
2361int start_stop_khugepaged(void)
2362{
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002363 int err = 0;
2364
2365 mutex_lock(&khugepaged_mutex);
2366 if (khugepaged_enabled()) {
2367 if (!khugepaged_thread)
2368 khugepaged_thread = kthread_run(khugepaged, NULL,
2369 "khugepaged");
2370 if (IS_ERR(khugepaged_thread)) {
2371 pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2372 err = PTR_ERR(khugepaged_thread);
2373 khugepaged_thread = NULL;
2374 goto fail;
2375 }
2376
2377 if (!list_empty(&khugepaged_scan.mm_head))
2378 wake_up_interruptible(&khugepaged_wait);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002379 } else if (khugepaged_thread) {
2380 kthread_stop(khugepaged_thread);
2381 khugepaged_thread = NULL;
2382 }
Liangcai Fanbd3400e2021-11-05 13:41:36 -07002383 set_recommended_min_free_kbytes();
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002384fail:
2385 mutex_unlock(&khugepaged_mutex);
2386 return err;
2387}
Vijay Balakrishna4aab2be2020-10-10 23:16:40 -07002388
2389void khugepaged_min_free_kbytes_update(void)
2390{
2391 mutex_lock(&khugepaged_mutex);
2392 if (khugepaged_enabled() && khugepaged_thread)
2393 set_recommended_min_free_kbytes();
2394 mutex_unlock(&khugepaged_mutex);
2395}