// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009 Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/swapops.h>
#include <linux/dax.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/pfn_t.h>
#include <linux/mman.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/page_owner.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

/*
 * By default, transparent hugepage support is disabled in order to avoid
 * risking an increased memory footprint for applications that are not
 * guaranteed to benefit from it. When transparent hugepage support is
 * enabled, it is for all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);

static struct shrinker deferred_split_shrinker;

static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;
unsigned long huge_zero_pfn __read_mostly = ~0UL;

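/*
 * Whether a read-only file-backed VMA (CONFIG_READ_ONLY_THP_FOR_FS) may be
 * backed by huge pages: THP must be enabled for the VMA, the backing file
 * must not be open for write, and the mapping must be executable.
 */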
static inline bool file_thp_enabled(struct vm_area_struct *vma)
{
	return transhuge_vma_enabled(vma, vma->vm_flags) && vma->vm_file &&
	       !inode_is_open_for_write(vma->vm_file->f_inode) &&
	       (vma->vm_flags & VM_EXEC);
}

bool transparent_hugepage_active(struct vm_area_struct *vma)
{
	/* The addr is used to check if the vma size fits */
	unsigned long addr = (vma->vm_end & HPAGE_PMD_MASK) - HPAGE_PMD_SIZE;

	if (!transhuge_vma_suitable(vma, addr))
		return false;
	if (vma_is_anonymous(vma))
		return __transparent_hugepage_enabled(vma);
	if (vma_is_shmem(vma))
		return shmem_huge_enabled(vma);
	if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS))
		return file_thp_enabled(vma);

	return false;
}

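/*
 * Take a reference on the global huge zero page, allocating it on first
 * use.  Returns false if the allocation failed.
 */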
static bool get_huge_zero_page(void)
{
	struct page *zero_page;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return true;

	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_page) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return false;
	}
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	preempt_disable();
	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
		preempt_enable();
		__free_pages(zero_page, compound_order(zero_page));
		goto retry;
	}
	WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));

	/* We take additional reference here. It will be put back by shrinker */
	atomic_set(&huge_zero_refcount, 2);
	preempt_enable();
	return true;
}

static void put_huge_zero_page(void)
{
	/*
	 * Counter should never go to zero here. Only shrinker can put
	 * last reference.
	 */
	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		return READ_ONCE(huge_zero_page);

	if (!get_huge_zero_page())
		return NULL;

	if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();

	return READ_ONCE(huge_zero_page);
}

void mm_put_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();
}

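/*
 * Shrinker callbacks for the huge zero page: it can be freed only once the
 * extra reference taken at allocation time is the last one left.
 */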
static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	/* we can free zero page only if last reference remains */
	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}

static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct page *zero_page = xchg(&huge_zero_page, NULL);
		BUG_ON(zero_page == NULL);
		WRITE_ONCE(huge_zero_pfn, ~0UL);
		__free_pages(zero_page, compound_order(zero_page));
		return HPAGE_PMD_NR;
	}

	return 0;
}

static struct shrinker huge_zero_page_shrinker = {
	.count_objects = shrink_huge_zero_page_count,
	.scan_objects = shrink_huge_zero_page_scan,
	.seeks = DEFAULT_SEEKS,
};

#ifdef CONFIG_SYSFS
static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
		output = "[always] madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always [madvise] never";
	else
		output = "always madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret = count;

	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		ret = -EINVAL;

	if (ret > 0) {
		int err = start_stop_khugepaged();
		if (err)
			ret = err;
	}
	return ret;
}
static struct kobj_attribute enabled_attr =
	__ATTR(enabled, 0644, enabled_show, enabled_store);

ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag)
{
	return sysfs_emit(buf, "%d\n",
			  !!test_bit(flag, &transparent_hugepage_flags));
}

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}

static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
		     &transparent_hugepage_flags))
		output = "[always] defer defer+madvise madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
			  &transparent_hugepage_flags))
		output = "always [defer] defer+madvise madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always defer [defer+madvise] madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always defer defer+madvise [madvise] never";
	else
		output = "always defer defer+madvise madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer+madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}
static struct kobj_attribute defrag_attr =
	__ATTR(defrag, 0644, defrag_show, defrag_store);

static ssize_t use_zero_page_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
					 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static struct kobj_attribute use_zero_page_attr =
	__ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);

static ssize_t hpage_pmd_size_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", HPAGE_PMD_SIZE);
}
static struct kobj_attribute hpage_pmd_size_attr =
	__ATTR_RO(hpage_pmd_size);

static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
	&use_zero_page_attr.attr,
	&hpage_pmd_size_attr.attr,
#ifdef CONFIG_SHMEM
	&shmem_enabled_attr.attr,
#endif
	NULL,
};

static const struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};

static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	int err;

	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!*hugepage_kobj)) {
		pr_err("failed to create transparent hugepage kobject\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto delete_obj;
	}

	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto remove_hp_group;
	}

	return 0;

remove_hp_group:
	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
	kobject_put(*hugepage_kobj);
	return err;
}

static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
	kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */

static int __init hugepage_init(void)
{
	int err;
	struct kobject *hugepage_kobj;

	if (!has_transparent_hugepage()) {
		/*
		 * Hardware doesn't support hugepages, hence disable
		 * DAX PMD support.
		 */
		transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_NEVER_DAX;
		return -EINVAL;
	}

	/*
	 * hugepages can't be allocated by the buddy allocator
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER >= MAX_ORDER);
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);

	err = hugepage_init_sysfs(&hugepage_kobj);
	if (err)
		goto err_sysfs;

	err = khugepaged_init();
	if (err)
		goto err_slab;

	err = register_shrinker(&huge_zero_page_shrinker);
	if (err)
		goto err_hzp_shrinker;
	err = register_shrinker(&deferred_split_shrinker);
	if (err)
		goto err_split_shrinker;

	/*
	 * By default disable transparent hugepages on smaller systems,
	 * where the extra memory used could hurt more than TLB overhead
	 * is likely to save.  The admin can still enable it through /sys.
	 */
	if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
		transparent_hugepage_flags = 0;
		return 0;
	}

	err = start_stop_khugepaged();
	if (err)
		goto err_khugepaged;

	return 0;
err_khugepaged:
	unregister_shrinker(&deferred_split_shrinker);
err_split_shrinker:
	unregister_shrinker(&huge_zero_page_shrinker);
err_hzp_shrinker:
	khugepaged_destroy();
err_slab:
	hugepage_exit_sysfs(hugepage_kobj);
err_sysfs:
	return err;
}
subsys_initcall(hugepage_init);

static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;
	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);

pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd);
	return pmd;
}

#ifdef CONFIG_MEMCG
static inline struct deferred_split *get_deferred_split_queue(struct page *page)
{
	struct mem_cgroup *memcg = page_memcg(compound_head(page));
	struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));

	if (memcg)
		return &memcg->deferred_split_queue;
	else
		return &pgdat->deferred_split_queue;
}
#else
static inline struct deferred_split *get_deferred_split_queue(struct page *page)
{
	struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));

	return &pgdat->deferred_split_queue;
}
#endif

void prep_transhuge_page(struct page *page)
{
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */

	INIT_LIST_HEAD(page_deferred_list(page));
	set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
}

bool is_transparent_hugepage(struct page *page)
{
	if (!PageCompound(page))
		return false;

	page = compound_head(page);
	return is_huge_zero_page(page) ||
	       page[1].compound_dtor == TRANSHUGE_PAGE_DTOR;
}
EXPORT_SYMBOL_GPL(is_transparent_hugepage);

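/*
 * Ask for @len plus @size of unmapped area so the returned address can be
 * shifted to match the alignment of the file offset @off modulo @size.
 */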
static unsigned long __thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len,
		loff_t off, unsigned long flags, unsigned long size)
{
	loff_t off_end = off + len;
	loff_t off_align = round_up(off, size);
	unsigned long len_pad, ret;

	if (off_end <= off_align || (off_end - off_align) < size)
		return 0;

	len_pad = len + size;
	if (len_pad < len || (off + len_pad) < off)
		return 0;

	ret = current->mm->get_unmapped_area(filp, addr, len_pad,
					      off >> PAGE_SHIFT, flags);

	/*
	 * The failure might be due to length padding. The caller will retry
	 * without the padding.
	 */
	if (IS_ERR_VALUE(ret))
		return 0;

	/*
	 * Do not try to align to THP boundary if allocation at the address
	 * hint succeeds.
	 */
	if (ret == addr)
		return addr;

	ret += (off - ret) & (size - 1);
	return ret;
}

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	unsigned long ret;
	loff_t off = (loff_t)pgoff << PAGE_SHIFT;

	if (!IS_DAX(filp->f_mapping->host) || !IS_ENABLED(CONFIG_FS_DAX_PMD))
		goto out;

	ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);
	if (ret)
		return ret;
out:
	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(thp_get_unmapped_area);

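/*
 * Map a freshly allocated huge page at an anonymous fault: charge it to the
 * memcg, clear it, and install a huge PMD, handing the fault to userfaultfd
 * instead if the range is registered for missing-page events.
 */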
static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
			struct page *page, gfp_t gfp)
{
	struct vm_area_struct *vma = vmf->vma;
	pgtable_t pgtable;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	vm_fault_t ret = 0;

	VM_BUG_ON_PAGE(!PageCompound(page), page);

	if (mem_cgroup_charge(page, vma->vm_mm, gfp)) {
		put_page(page);
		count_vm_event(THP_FAULT_FALLBACK);
		count_vm_event(THP_FAULT_FALLBACK_CHARGE);
		return VM_FAULT_FALLBACK;
	}
	cgroup_throttle_swaprate(page, gfp);

	pgtable = pte_alloc_one(vma->vm_mm);
	if (unlikely(!pgtable)) {
		ret = VM_FAULT_OOM;
		goto release;
	}

	clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * clear_huge_page writes become visible before the set_pmd_at()
	 * write.
	 */
	__SetPageUptodate(page);

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_none(*vmf->pmd))) {
		goto unlock_release;
	} else {
		pmd_t entry;

		ret = check_stable_address_space(vma->vm_mm);
		if (ret)
			goto unlock_release;

		/* Deliver the page fault to userland */
		if (userfaultfd_missing(vma)) {
			spin_unlock(vmf->ptl);
			put_page(page);
			pte_free(vma->vm_mm, pgtable);
			ret = handle_userfault(vmf, VM_UFFD_MISSING);
			VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			return ret;
		}

		entry = mk_huge_pmd(page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		page_add_new_anon_rmap(page, vma, haddr, true);
		lru_cache_add_inactive_or_unevictable(page, vma);
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
		update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
		add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		mm_inc_nr_ptes(vma->vm_mm);
		spin_unlock(vmf->ptl);
		count_vm_event(THP_FAULT_ALLOC);
		count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
	}

	return 0;
unlock_release:
	spin_unlock(vmf->ptl);
release:
	if (pgtable)
		pte_free(vma->vm_mm, pgtable);
	put_page(page);
	return ret;
}

/*
 * always: directly stall for all thp allocations
 * defer: wake kswapd and fail if not immediately available
 * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
 *		  fail if not immediately available
 * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
 *	    available
 * never: never stall for any thp allocation
 */
gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma)
{
	const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE);

	/* Always do synchronous compaction */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);

	/* Kick kcompactd and fail quickly */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;

	/* Synchronous compaction if madvised, otherwise kick kcompactd */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT |
			(vma_madvised ? __GFP_DIRECT_RECLAIM :
					__GFP_KSWAPD_RECLAIM);

	/* Only do synchronous compaction if madvised */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT |
			(vma_madvised ? __GFP_DIRECT_RECLAIM : 0);

	return GFP_TRANSHUGE_LIGHT;
}

/* Caller must hold page table lock. */
static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
		struct page *zero_page)
{
	pmd_t entry;
	if (!pmd_none(*pmd))
		return;
	entry = mk_pmd(zero_page, vma->vm_page_prot);
	entry = pmd_mkhuge(entry);
	if (pgtable)
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, haddr, pmd, entry);
	mm_inc_nr_ptes(mm);
}

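/*
 * Anonymous huge page fault: use the shared huge zero page for read faults
 * when permitted, otherwise allocate a new huge page and map it.
 */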
vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	gfp_t gfp;
	struct page *page;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;

	if (!transhuge_vma_suitable(vma, haddr))
		return VM_FAULT_FALLBACK;
	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;
	if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
		return VM_FAULT_OOM;
	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
			!mm_forbids_zeropage(vma->vm_mm) &&
			transparent_hugepage_use_zero_page()) {
		pgtable_t pgtable;
		struct page *zero_page;
		vm_fault_t ret;
		pgtable = pte_alloc_one(vma->vm_mm);
		if (unlikely(!pgtable))
			return VM_FAULT_OOM;
		zero_page = mm_get_huge_zero_page(vma->vm_mm);
		if (unlikely(!zero_page)) {
			pte_free(vma->vm_mm, pgtable);
			count_vm_event(THP_FAULT_FALLBACK);
			return VM_FAULT_FALLBACK;
		}
		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
		ret = 0;
		if (pmd_none(*vmf->pmd)) {
			ret = check_stable_address_space(vma->vm_mm);
			if (ret) {
				spin_unlock(vmf->ptl);
				pte_free(vma->vm_mm, pgtable);
			} else if (userfaultfd_missing(vma)) {
				spin_unlock(vmf->ptl);
				pte_free(vma->vm_mm, pgtable);
				ret = handle_userfault(vmf, VM_UFFD_MISSING);
				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			} else {
				set_huge_zero_page(pgtable, vma->vm_mm, vma,
						   haddr, vmf->pmd, zero_page);
				update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
				spin_unlock(vmf->ptl);
			}
		} else {
			spin_unlock(vmf->ptl);
			pte_free(vma->vm_mm, pgtable);
		}
		return ret;
	}
	gfp = vma_thp_gfp_mask(vma);
	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
	if (unlikely(!page)) {
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}
	prep_transhuge_page(page);
	return __do_huge_pmd_anonymous_page(vmf, page, gfp);
}

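/*
 * Map @pfn with a huge PMD at @addr, or update the access and dirty bits if
 * a mapping is already present.
 */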
static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
		pgtable_t pgtable)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t entry;
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	if (!pmd_none(*pmd)) {
		if (write) {
			if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
				WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
				goto out_unlock;
			}
			entry = pmd_mkyoung(*pmd);
			entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
			if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
				update_mmu_cache_pmd(vma, addr, pmd);
		}

		goto out_unlock;
	}

	entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pmd_mkdevmap(entry);
	if (write) {
		entry = pmd_mkyoung(pmd_mkdirty(entry));
		entry = maybe_pmd_mkwrite(entry, vma);
	}

	if (pgtable) {
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
		mm_inc_nr_ptes(mm);
		pgtable = NULL;
	}

	set_pmd_at(mm, addr, pmd, entry);
	update_mmu_cache_pmd(vma, addr, pmd);

out_unlock:
	spin_unlock(ptl);
	if (pgtable)
		pte_free(mm, pgtable);
}

/**
 * vmf_insert_pfn_pmd_prot - insert a pmd size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @pgprot: page protection to use
 * @write: whether it's a write fault
 *
 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info and
 * also consult the vmf_insert_mixed_prot() documentation when
 * @pgprot != @vmf->vma->vm_page_prot.
 *
 * Return: vm_fault_t value.
 */
vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write)
{
	unsigned long addr = vmf->address & PMD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	pgtable_t pgtable = NULL;

	/*
	 * If we had pmd_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
			!pfn_t_devmap(pfn));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	if (arch_needs_pgtable_deposit()) {
		pgtable = pte_alloc_one(vma->vm_mm);
		if (!pgtable)
			return VM_FAULT_OOM;
	}

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd_prot);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pud = pud_mkwrite(pud);
	return pud;
}

static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, pfn_t pfn, pgprot_t prot, bool write)
{
	struct mm_struct *mm = vma->vm_mm;
	pud_t entry;
	spinlock_t *ptl;

	ptl = pud_lock(mm, pud);
	if (!pud_none(*pud)) {
		if (write) {
			if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
				WARN_ON_ONCE(!is_huge_zero_pud(*pud));
				goto out_unlock;
			}
			entry = pud_mkyoung(*pud);
			entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
			if (pudp_set_access_flags(vma, addr, pud, entry, 1))
				update_mmu_cache_pud(vma, addr, pud);
		}
		goto out_unlock;
	}

	entry = pud_mkhuge(pfn_t_pud(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pud_mkdevmap(entry);
	if (write) {
		entry = pud_mkyoung(pud_mkdirty(entry));
		entry = maybe_pud_mkwrite(entry, vma);
	}
	set_pud_at(mm, addr, pud, entry);
	update_mmu_cache_pud(vma, addr, pud);

out_unlock:
	spin_unlock(ptl);
}

/**
 * vmf_insert_pfn_pud_prot - insert a pud size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @pgprot: page protection to use
 * @write: whether it's a write fault
 *
 * Insert a pud size pfn. See vmf_insert_pfn() for additional info and
 * also consult the vmf_insert_mixed_prot() documentation when
 * @pgprot != @vmf->vma->vm_page_prot.
 *
 * Return: vm_fault_t value.
 */
vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write)
{
	unsigned long addr = vmf->address & PUD_MASK;
	struct vm_area_struct *vma = vmf->vma;

	/*
	 * If we had pud_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
			!pfn_t_devmap(pfn));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pud(vma, addr, vmf->pud, pfn, pgprot, write);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud_prot);
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

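/* Mark the PMD young (and dirty for FOLL_WRITE) on a FOLL_TOUCH lookup. */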
static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags)
{
	pmd_t _pmd;

	_pmd = pmd_mkyoung(*pmd);
	if (flags & FOLL_WRITE)
		_pmd = pmd_mkdirty(_pmd);
	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
				pmd, _pmd, flags & FOLL_WRITE))
		update_mmu_cache_pmd(vma, addr, pmd);
}

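/*
 * Return the page backing a device-mapped (devmap) PMD for get_user_pages.
 * The caller must hold the PMD lock and manage the page reference.
 */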
struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	unsigned long pfn = pmd_pfn(*pmd);
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;

	assert_spin_locked(pmd_lockptr(mm, pmd));

	/*
	 * When we COW a devmap PMD entry, we split it into PTEs, so we should
	 * not be in this function with `flags & FOLL_COW` set.
	 */
	WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");

	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
			 (FOLL_PIN | FOLL_GET)))
		return NULL;

	if (flags & FOLL_WRITE && !pmd_write(*pmd))
		return NULL;

	if (pmd_present(*pmd) && pmd_devmap(*pmd))
		/* pass */;
	else
		return NULL;

	if (flags & FOLL_TOUCH)
		touch_pmd(vma, addr, pmd, flags);

	/*
	 * device mapped pages can only be returned if the
	 * caller will manage the page reference count.
	 */
	if (!(flags & (FOLL_GET | FOLL_PIN)))
		return ERR_PTR(-EEXIST);

	pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
	*pgmap = get_dev_pagemap(pfn, *pgmap);
	if (!*pgmap)
		return ERR_PTR(-EFAULT);
	page = pfn_to_page(pfn);
	if (!try_grab_page(page, flags))
		page = ERR_PTR(-ENOMEM);

	return page;
}

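/*
 * Copy a huge PMD at fork: migration entries and the huge zero page are
 * handled specially, normal THPs are made read-only in both mms.
 */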
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001027int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1028 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
Peter Xu8f34f1e2021-06-30 18:49:02 -07001029 struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001030{
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08001031 spinlock_t *dst_ptl, *src_ptl;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001032 struct page *src_page;
1033 pmd_t pmd;
Matthew Wilcox12c9d702016-02-02 16:57:57 -08001034 pgtable_t pgtable = NULL;
Kirill A. Shutemov628d47c2016-07-26 15:25:42 -07001035 int ret = -ENOMEM;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001036
Kirill A. Shutemov628d47c2016-07-26 15:25:42 -07001037 /* Skip if can be re-fill on fault */
Peter Xu8f34f1e2021-06-30 18:49:02 -07001038 if (!vma_is_anonymous(dst_vma))
Kirill A. Shutemov628d47c2016-07-26 15:25:42 -07001039 return 0;
1040
Joel Fernandes (Google)4cf58922019-01-03 15:28:34 -08001041 pgtable = pte_alloc_one(dst_mm);
Kirill A. Shutemov628d47c2016-07-26 15:25:42 -07001042 if (unlikely(!pgtable))
1043 goto out;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001044
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08001045 dst_ptl = pmd_lock(dst_mm, dst_pmd);
1046 src_ptl = pmd_lockptr(src_mm, src_pmd);
1047 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001048
1049 ret = -EAGAIN;
1050 pmd = *src_pmd;
Zi Yan84c3fc42017-09-08 16:11:01 -07001051
1052#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1053 if (unlikely(is_swap_pmd(pmd))) {
1054 swp_entry_t entry = pmd_to_swp_entry(pmd);
1055
1056 VM_BUG_ON(!is_pmd_migration_entry(pmd));
1057 if (is_write_migration_entry(entry)) {
1058 make_migration_entry_read(&entry);
1059 pmd = swp_entry_to_pmd(entry);
Naoya Horiguchiab6e3d02017-09-08 16:11:04 -07001060 if (pmd_swp_soft_dirty(*src_pmd))
1061 pmd = pmd_swp_mksoft_dirty(pmd);
Peter Xu8f34f1e2021-06-30 18:49:02 -07001062 if (pmd_swp_uffd_wp(*src_pmd))
1063 pmd = pmd_swp_mkuffd_wp(pmd);
Zi Yan84c3fc42017-09-08 16:11:01 -07001064 set_pmd_at(src_mm, addr, src_pmd, pmd);
1065 }
Zi Yandd8a67f2017-11-02 15:59:47 -07001066 add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
Kirill A. Shutemovaf5b0f62017-11-15 17:35:40 -08001067 mm_inc_nr_ptes(dst_mm);
Zi Yandd8a67f2017-11-02 15:59:47 -07001068 pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
Peter Xu8f34f1e2021-06-30 18:49:02 -07001069 if (!userfaultfd_wp(dst_vma))
1070 pmd = pmd_swp_clear_uffd_wp(pmd);
Zi Yan84c3fc42017-09-08 16:11:01 -07001071 set_pmd_at(dst_mm, addr, dst_pmd, pmd);
1072 ret = 0;
1073 goto out_unlock;
1074 }
1075#endif
1076
Kirill A. Shutemov628d47c2016-07-26 15:25:42 -07001077 if (unlikely(!pmd_trans_huge(pmd))) {
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001078 pte_free(dst_mm, pgtable);
1079 goto out_unlock;
1080 }
Kirill A. Shutemovfc9fe822012-12-12 13:50:51 -08001081 /*
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08001082 * When page table lock is held, the huge zero pmd should not be
Kirill A. Shutemovfc9fe822012-12-12 13:50:51 -08001083 * under splitting since we don't split the page itself, only pmd to
1084 * a page table.
1085 */
1086 if (is_huge_zero_pmd(pmd)) {
Kirill A. Shutemov97ae1742012-12-12 13:51:06 -08001087 /*
1088 * get_huge_zero_page() will never allocate a new page here,
1089 * since we already have a zero page to copy. It just takes a
1090 * reference.
1091 */
Peter Xu5fc7a5f2021-06-30 18:48:59 -07001092 mm_get_huge_zero_page(dst_mm);
1093 goto out_zero_page;
Kirill A. Shutemovfc9fe822012-12-12 13:50:51 -08001094 }
Mel Gormande466bd2013-12-18 17:08:42 -08001095
Kirill A. Shutemov628d47c2016-07-26 15:25:42 -07001096 src_page = pmd_page(pmd);
1097 VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
Peter Xud0420352020-09-25 18:26:00 -04001098
1099 /*
1100 * If this page is a potentially pinned page, split and retry the fault
 1101	 * with a smaller page size. Normally this should not happen because
 1102	 * userspace should use MADV_DONTFORK on pinned regions. This is a
 1103	 * best-effort attempt to ensure the pinned pages won't be replaced by
 1104	 * another random page during the coming copy-on-write.
1105 */
Peter Xu8f34f1e2021-06-30 18:49:02 -07001106 if (unlikely(page_needs_cow_for_dma(src_vma, src_page))) {
Peter Xud0420352020-09-25 18:26:00 -04001107 pte_free(dst_mm, pgtable);
1108 spin_unlock(src_ptl);
1109 spin_unlock(dst_ptl);
Peter Xu8f34f1e2021-06-30 18:49:02 -07001110 __split_huge_pmd(src_vma, src_pmd, addr, false, NULL);
Peter Xud0420352020-09-25 18:26:00 -04001111 return -EAGAIN;
1112 }
1113
Kirill A. Shutemov628d47c2016-07-26 15:25:42 -07001114 get_page(src_page);
1115 page_dup_rmap(src_page, true);
1116 add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
Peter Xu5fc7a5f2021-06-30 18:48:59 -07001117out_zero_page:
Kirill A. Shutemovc4812902017-11-15 17:35:37 -08001118 mm_inc_nr_ptes(dst_mm);
Kirill A. Shutemov628d47c2016-07-26 15:25:42 -07001119 pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001120 pmdp_set_wrprotect(src_mm, addr, src_pmd);
Peter Xu8f34f1e2021-06-30 18:49:02 -07001121 if (!userfaultfd_wp(dst_vma))
1122 pmd = pmd_clear_uffd_wp(pmd);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001123 pmd = pmd_mkold(pmd_wrprotect(pmd));
1124 set_pmd_at(dst_mm, addr, dst_pmd, pmd);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001125
1126 ret = 0;
1127out_unlock:
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08001128 spin_unlock(src_ptl);
1129 spin_unlock(dst_ptl);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001130out:
1131 return ret;
1132}
1133
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08001134#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
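/*
 * Mark a huge pud young (and dirty for FOLL_WRITE) when it is looked up
 * with FOLL_TOUCH, mirroring what touch_pmd() does for huge pmds.
 */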
1135static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
Kirill A. Shutemova8f97362017-11-27 06:21:25 +03001136 pud_t *pud, int flags)
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08001137{
1138 pud_t _pud;
1139
Kirill A. Shutemova8f97362017-11-27 06:21:25 +03001140 _pud = pud_mkyoung(*pud);
1141 if (flags & FOLL_WRITE)
1142 _pud = pud_mkdirty(_pud);
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08001143 if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
Kirill A. Shutemova8f97362017-11-27 06:21:25 +03001144 pud, _pud, flags & FOLL_WRITE))
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08001145 update_mmu_cache_pud(vma, addr, pud);
1146}
1147
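/*
 * Resolve a huge devmap pud to the struct page backing @addr on behalf of
 * get_user_pages(): the caller must hold the pud lock and pass FOLL_GET or
 * FOLL_PIN so that the device page's reference count is managed here.
 */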
1148struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
Keith Buschdf06b372018-10-26 15:10:28 -07001149 pud_t *pud, int flags, struct dev_pagemap **pgmap)
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08001150{
1151 unsigned long pfn = pud_pfn(*pud);
1152 struct mm_struct *mm = vma->vm_mm;
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08001153 struct page *page;
1154
1155 assert_spin_locked(pud_lockptr(mm, pud));
1156
Linus Torvaldsf6f37322017-12-15 18:53:22 -08001157 if (flags & FOLL_WRITE && !pud_write(*pud))
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08001158 return NULL;
1159
John Hubbard3faa52c2020-04-01 21:05:29 -07001160 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
1161 if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
1162 (FOLL_PIN | FOLL_GET)))
1163 return NULL;
1164
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08001165 if (pud_present(*pud) && pud_devmap(*pud))
1166 /* pass */;
1167 else
1168 return NULL;
1169
1170 if (flags & FOLL_TOUCH)
Kirill A. Shutemova8f97362017-11-27 06:21:25 +03001171 touch_pud(vma, addr, pud, flags);
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08001172
1173 /*
1174 * device mapped pages can only be returned if the
1175 * caller will manage the page reference count.
John Hubbard3faa52c2020-04-01 21:05:29 -07001176 *
1177 * At least one of FOLL_GET | FOLL_PIN must be set, so assert that here:
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08001178 */
John Hubbard3faa52c2020-04-01 21:05:29 -07001179 if (!(flags & (FOLL_GET | FOLL_PIN)))
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08001180 return ERR_PTR(-EEXIST);
1181
1182 pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
Keith Buschdf06b372018-10-26 15:10:28 -07001183 *pgmap = get_dev_pagemap(pfn, *pgmap);
1184 if (!*pgmap)
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08001185 return ERR_PTR(-EFAULT);
1186 page = pfn_to_page(pfn);
John Hubbard3faa52c2020-04-01 21:05:29 -07001187 if (!try_grab_page(page, flags))
1188 page = ERR_PTR(-ENOMEM);
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08001189
1190 return page;
1191}
1192
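/*
 * Copy a huge pud from the parent to the child mm at fork time: the source
 * pud is write-protected and the child receives an old, write-protected
 * copy, so the next write in either mm will take a fault.
 */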
1193int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1194 pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
1195 struct vm_area_struct *vma)
1196{
1197 spinlock_t *dst_ptl, *src_ptl;
1198 pud_t pud;
1199 int ret;
1200
1201 dst_ptl = pud_lock(dst_mm, dst_pud);
1202 src_ptl = pud_lockptr(src_mm, src_pud);
1203 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1204
1205 ret = -EAGAIN;
1206 pud = *src_pud;
1207 if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
1208 goto out_unlock;
1209
1210 /*
 1211	 * While the page table lock is held, the huge zero pud cannot be split
 1212	 * under us, since we never split the zero page itself, only the pud
 1213	 * into a page table.
1214 */
1215 if (is_huge_zero_pud(pud)) {
1216 /* No huge zero pud yet */
1217 }
1218
Peter Xud0420352020-09-25 18:26:00 -04001219 /* Please refer to comments in copy_huge_pmd() */
Peter Xu97a7e472021-03-12 21:07:26 -08001220 if (unlikely(page_needs_cow_for_dma(vma, pud_page(pud)))) {
Peter Xud0420352020-09-25 18:26:00 -04001221 spin_unlock(src_ptl);
1222 spin_unlock(dst_ptl);
1223 __split_huge_pud(vma, src_pud, addr);
1224 return -EAGAIN;
1225 }
1226
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08001227 pudp_set_wrprotect(src_mm, addr, src_pud);
1228 pud = pud_mkold(pud_wrprotect(pud));
1229 set_pud_at(dst_mm, addr, dst_pud, pud);
1230
1231 ret = 0;
1232out_unlock:
1233 spin_unlock(src_ptl);
1234 spin_unlock(dst_ptl);
1235 return ret;
1236}
1237
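/*
 * Handle an access fault on a huge pud: mark the entry young (and dirty
 * for a write) and update the MMU cache, provided the pud is unchanged.
 */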
1238void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
1239{
1240 pud_t entry;
1241 unsigned long haddr;
1242 bool write = vmf->flags & FAULT_FLAG_WRITE;
1243
1244 vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
1245 if (unlikely(!pud_same(*vmf->pud, orig_pud)))
1246 goto unlock;
1247
1248 entry = pud_mkyoung(orig_pud);
1249 if (write)
1250 entry = pud_mkdirty(entry);
1251 haddr = vmf->address & HPAGE_PUD_MASK;
1252 if (pudp_set_access_flags(vmf->vma, haddr, vmf->pud, entry, write))
1253 update_mmu_cache_pud(vmf->vma, vmf->address, vmf->pud);
1254
1255unlock:
1256 spin_unlock(vmf->ptl);
1257}
1258#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1259
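/*
 * Handle an access fault on a huge pmd: mark the entry young (and dirty
 * for a write) and update the MMU cache, provided the pmd is unchanged.
 */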
Yang Shi5db4f152021-06-30 18:51:35 -07001260void huge_pmd_set_accessed(struct vm_fault *vmf)
Will Deacona1dd4502012-12-11 16:01:27 -08001261{
1262 pmd_t entry;
1263 unsigned long haddr;
Minchan Kim20f664a2017-01-10 16:57:51 -08001264 bool write = vmf->flags & FAULT_FLAG_WRITE;
Yang Shi5db4f152021-06-30 18:51:35 -07001265 pmd_t orig_pmd = vmf->orig_pmd;
Will Deacona1dd4502012-12-11 16:01:27 -08001266
Jan Kara82b0f8c2016-12-14 15:06:58 -08001267 vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1268 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
Will Deacona1dd4502012-12-11 16:01:27 -08001269 goto unlock;
1270
1271 entry = pmd_mkyoung(orig_pmd);
Minchan Kim20f664a2017-01-10 16:57:51 -08001272 if (write)
1273 entry = pmd_mkdirty(entry);
Jan Kara82b0f8c2016-12-14 15:06:58 -08001274 haddr = vmf->address & HPAGE_PMD_MASK;
Minchan Kim20f664a2017-01-10 16:57:51 -08001275 if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write))
Jan Kara82b0f8c2016-12-14 15:06:58 -08001276 update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd);
Will Deacona1dd4502012-12-11 16:01:27 -08001277
1278unlock:
Jan Kara82b0f8c2016-12-14 15:06:58 -08001279 spin_unlock(vmf->ptl);
Will Deacona1dd4502012-12-11 16:01:27 -08001280}
1281
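/*
 * Write fault on a write-protected anonymous huge pmd: reuse the huge page
 * in place if we are its only user, otherwise split the pmd and fall back
 * so the fault is retried with normal pages.
 */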
Yang Shi5db4f152021-06-30 18:51:35 -07001282vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001283{
Jan Kara82b0f8c2016-12-14 15:06:58 -08001284 struct vm_area_struct *vma = vmf->vma;
Kirill A. Shutemov3917c802020-06-03 16:00:27 -07001285 struct page *page;
Jan Kara82b0f8c2016-12-14 15:06:58 -08001286 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
Yang Shi5db4f152021-06-30 18:51:35 -07001287 pmd_t orig_pmd = vmf->orig_pmd;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001288
Jan Kara82b0f8c2016-12-14 15:06:58 -08001289 vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
Sasha Levin81d1b092014-10-09 15:28:10 -07001290 VM_BUG_ON_VMA(!vma->anon_vma, vma);
Kirill A. Shutemov3917c802020-06-03 16:00:27 -07001291
Kirill A. Shutemov93b47962012-12-12 13:50:54 -08001292 if (is_huge_zero_pmd(orig_pmd))
Kirill A. Shutemov3917c802020-06-03 16:00:27 -07001293 goto fallback;
1294
Jan Kara82b0f8c2016-12-14 15:06:58 -08001295 spin_lock(vmf->ptl);
Kirill A. Shutemov3917c802020-06-03 16:00:27 -07001296
1297 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
1298 spin_unlock(vmf->ptl);
1299 return 0;
1300 }
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001301
1302 page = pmd_page(orig_pmd);
Miaohe Linf6004e72021-05-04 18:34:02 -07001303 VM_BUG_ON_PAGE(!PageHead(page), page);
Kirill A. Shutemov3917c802020-06-03 16:00:27 -07001304
1305 /* Lock page for reuse_swap_page() */
Huang Yingba3c4ce2017-09-06 16:22:19 -07001306 if (!trylock_page(page)) {
1307 get_page(page);
1308 spin_unlock(vmf->ptl);
1309 lock_page(page);
1310 spin_lock(vmf->ptl);
1311 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
Kirill A. Shutemov3917c802020-06-03 16:00:27 -07001312 spin_unlock(vmf->ptl);
Huang Yingba3c4ce2017-09-06 16:22:19 -07001313 unlock_page(page);
1314 put_page(page);
Kirill A. Shutemov3917c802020-06-03 16:00:27 -07001315 return 0;
Huang Yingba3c4ce2017-09-06 16:22:19 -07001316 }
1317 put_page(page);
1318 }
Kirill A. Shutemov3917c802020-06-03 16:00:27 -07001319
1320 /*
 1321	 * We can only reuse the page if nobody else maps the huge page or any
 1322	 * part of it.
1323 */
Huang Yingba3c4ce2017-09-06 16:22:19 -07001324 if (reuse_swap_page(page, NULL)) {
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001325 pmd_t entry;
1326 entry = pmd_mkyoung(orig_pmd);
Linus Torvaldsf55e1012017-11-29 09:01:01 -08001327 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
Kirill A. Shutemov3917c802020-06-03 16:00:27 -07001328 if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
Jan Kara82b0f8c2016-12-14 15:06:58 -08001329 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
Huang Yingba3c4ce2017-09-06 16:22:19 -07001330 unlock_page(page);
Jan Kara82b0f8c2016-12-14 15:06:58 -08001331 spin_unlock(vmf->ptl);
Kirill A. Shutemov3917c802020-06-03 16:00:27 -07001332 return VM_FAULT_WRITE;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001333 }
Kirill A. Shutemov3917c802020-06-03 16:00:27 -07001334
1335 unlock_page(page);
Jan Kara82b0f8c2016-12-14 15:06:58 -08001336 spin_unlock(vmf->ptl);
Kirill A. Shutemov3917c802020-06-03 16:00:27 -07001337fallback:
1338 __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
1339 return VM_FAULT_FALLBACK;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001340}
1341
Keno Fischer8310d482017-01-24 15:17:48 -08001342/*
Peter Xua308c712020-08-21 19:49:57 -04001343 * FOLL_FORCE can write to even unwritable pmds, but only
1344 * after we've gone through a COW cycle and they are dirty.
Keno Fischer8310d482017-01-24 15:17:48 -08001345 */
1346static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
1347{
Peter Xua308c712020-08-21 19:49:57 -04001348 return pmd_write(pmd) ||
1349 ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
Keno Fischer8310d482017-01-24 15:17:48 -08001350}
1351
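/*
 * Look up the page mapped by a huge pmd for get_user_pages(), taking a
 * reference with try_grab_page() and handling FOLL_TOUCH and FOLL_MLOCK.
 * The caller must hold the pmd lock.
 */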
David Rientjesb676b292012-10-08 16:34:03 -07001352struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001353 unsigned long addr,
1354 pmd_t *pmd,
1355 unsigned int flags)
1356{
David Rientjesb676b292012-10-08 16:34:03 -07001357 struct mm_struct *mm = vma->vm_mm;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001358 struct page *page = NULL;
1359
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08001360 assert_spin_locked(pmd_lockptr(mm, pmd));
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001361
Keno Fischer8310d482017-01-24 15:17:48 -08001362 if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001363 goto out;
1364
Kirill A. Shutemov85facf22013-02-04 14:28:42 -08001365 /* Avoid dumping huge zero page */
1366 if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
1367 return ERR_PTR(-EFAULT);
1368
Mel Gorman2b4847e2013-12-18 17:08:32 -08001369 /* Full NUMA hinting faults to serialise migration in fault paths */
Mel Gorman8a0516e2015-02-12 14:58:22 -08001370 if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
Mel Gorman2b4847e2013-12-18 17:08:32 -08001371 goto out;
1372
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001373 page = pmd_page(*pmd);
Dan Williamsca120cf2016-09-03 10:38:03 -07001374 VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
John Hubbard3faa52c2020-04-01 21:05:29 -07001375
1376 if (!try_grab_page(page, flags))
1377 return ERR_PTR(-ENOMEM);
1378
Dan Williams3565fce2016-01-15 16:56:55 -08001379 if (flags & FOLL_TOUCH)
Kirill A. Shutemova8f97362017-11-27 06:21:25 +03001380 touch_pmd(vma, addr, pmd, flags);
John Hubbard3faa52c2020-04-01 21:05:29 -07001381
Eric B Munsonde60f5f2015-11-05 18:51:36 -08001382 if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
Kirill A. Shutemove90309c2016-01-15 16:54:33 -08001383 /*
1384 * We don't mlock() pte-mapped THPs. This way we can avoid
1385 * leaking mlocked pages into non-VM_LOCKED VMAs.
1386 *
Kirill A. Shutemov9a73f612016-07-26 15:25:53 -07001387 * For anon THP:
1388 *
Kirill A. Shutemove90309c2016-01-15 16:54:33 -08001389 * In most cases the pmd is the only mapping of the page as we
1390 * break COW for the mlock() -- see gup_flags |= FOLL_WRITE for
1391 * writable private mappings in populate_vma_page_range().
1392 *
 1393	 * The only scenario where we have the page shared here is if we're
 1394	 * mlocking a read-only mapping shared over fork(). We skip
 1395	 * mlocking such pages.
Kirill A. Shutemov9a73f612016-07-26 15:25:53 -07001396 *
1397 * For file THP:
1398 *
1399 * We can expect PageDoubleMap() to be stable under page lock:
1400 * for file pages we set it in page_add_file_rmap(), which
 1401	 * requires the page to be locked.
Kirill A. Shutemove90309c2016-01-15 16:54:33 -08001402 */
Kirill A. Shutemov9a73f612016-07-26 15:25:53 -07001403
1404 if (PageAnon(page) && compound_mapcount(page) != 1)
1405 goto skip_mlock;
1406 if (PageDoubleMap(page) || !page->mapping)
1407 goto skip_mlock;
1408 if (!trylock_page(page))
1409 goto skip_mlock;
Kirill A. Shutemov9a73f612016-07-26 15:25:53 -07001410 if (page->mapping && !PageDoubleMap(page))
1411 mlock_vma_page(page);
1412 unlock_page(page);
David Rientjesb676b292012-10-08 16:34:03 -07001413 }
Kirill A. Shutemov9a73f612016-07-26 15:25:53 -07001414skip_mlock:
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001415 page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
Dan Williamsca120cf2016-09-03 10:38:03 -07001416 VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001417
1418out:
1419 return page;
1420}
1421
Mel Gormand10e63f2012-10-25 14:16:31 +02001422/* NUMA hinting page fault entry point for trans huge pmds */
Yang Shi5db4f152021-06-30 18:51:35 -07001423vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
Mel Gormand10e63f2012-10-25 14:16:31 +02001424{
Jan Kara82b0f8c2016-12-14 15:06:58 -08001425 struct vm_area_struct *vma = vmf->vma;
Yang Shic5b5a3d2021-06-30 18:51:42 -07001426 pmd_t oldpmd = vmf->orig_pmd;
1427 pmd_t pmd;
Mel Gormanb32967f2012-11-19 12:35:47 +00001428 struct page *page;
Jan Kara82b0f8c2016-12-14 15:06:58 -08001429 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
Yang Shic5b5a3d2021-06-30 18:51:42 -07001430 int page_nid = NUMA_NO_NODE;
Peter Zijlstra90572892013-10-07 11:29:20 +01001431 int target_nid, last_cpupid = -1;
Mel Gorman8191acb2013-10-07 11:28:45 +01001432 bool migrated = false;
Yang Shic5b5a3d2021-06-30 18:51:42 -07001433 bool was_writable = pmd_savedwrite(oldpmd);
Peter Zijlstra6688cc02013-10-07 11:29:24 +01001434 int flags = 0;
Mel Gormand10e63f2012-10-25 14:16:31 +02001435
Jan Kara82b0f8c2016-12-14 15:06:58 -08001436 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
Yang Shic5b5a3d2021-06-30 18:51:42 -07001437 if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
Jan Kara82b0f8c2016-12-14 15:06:58 -08001438 spin_unlock(vmf->ptl);
Mel Gormande466bd2013-12-18 17:08:42 -08001439 goto out;
1440 }
1441
Mel Gormana54a4072013-10-07 11:28:46 +01001442 /*
Peter Zijlstra8b1b4362017-06-07 18:05:07 +02001443 * Since we took the NUMA fault, we must have observed the !accessible
1444 * bit. Make sure all other CPUs agree with that, to avoid them
1445 * modifying the page we're about to migrate.
1446 *
1447 * Must be done under PTL such that we'll observe the relevant
Peter Zijlstraccde85b2017-08-11 14:29:01 +02001448 * inc_tlb_flush_pending().
1449 *
1450 * We are not sure a pending tlb flush here is for a huge page
 1451	 * mapping or not. Hence use the tlb range variant.
Peter Zijlstra8b1b4362017-06-07 18:05:07 +02001452 */
Andrea Arcangeli7066f0f2018-10-26 15:10:40 -07001453 if (mm_tlb_flush_pending(vma->vm_mm)) {
Peter Zijlstraccde85b2017-08-11 14:29:01 +02001454 flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
Andrea Arcangeli7066f0f2018-10-26 15:10:40 -07001455 /*
1456 * change_huge_pmd() released the pmd lock before
1457 * invalidating the secondary MMUs sharing the primary
1458 * MMU pagetables (with ->invalidate_range()). The
1459 * mmu_notifier_invalidate_range_end() (which
1460 * internally calls ->invalidate_range()) in
1461 * change_pmd_range() will run after us, so we can't
1462 * rely on it here and we need an explicit invalidate.
1463 */
1464 mmu_notifier_invalidate_range(vma->vm_mm, haddr,
1465 haddr + HPAGE_PMD_SIZE);
1466 }
Peter Zijlstra8b1b4362017-06-07 18:05:07 +02001467
Yang Shic5b5a3d2021-06-30 18:51:42 -07001468 pmd = pmd_modify(oldpmd, vma->vm_page_prot);
1469 page = vm_normal_page_pmd(vma, haddr, pmd);
1470 if (!page)
1471 goto out_map;
1472
1473 /* See similar comment in do_numa_page for explanation */
1474 if (!was_writable)
1475 flags |= TNF_NO_GROUP;
1476
1477 page_nid = page_to_nid(page);
1478 last_cpupid = page_cpupid_last(page);
1479 target_nid = numa_migrate_prep(page, vma, haddr, page_nid,
1480 &flags);
1481
1482 if (target_nid == NUMA_NO_NODE) {
1483 put_page(page);
1484 goto out_map;
1485 }
1486
Jan Kara82b0f8c2016-12-14 15:06:58 -08001487 spin_unlock(vmf->ptl);
Peter Zijlstra8b1b4362017-06-07 18:05:07 +02001488
Yang Shic5b5a3d2021-06-30 18:51:42 -07001489 migrated = migrate_misplaced_page(page, vma, target_nid);
Peter Zijlstra6688cc02013-10-07 11:29:24 +01001490 if (migrated) {
1491 flags |= TNF_MIGRATED;
Mel Gorman8191acb2013-10-07 11:28:45 +01001492 page_nid = target_nid;
Yang Shic5b5a3d2021-06-30 18:51:42 -07001493 } else {
Mel Gorman074c2382015-03-25 15:55:42 -07001494 flags |= TNF_MIGRATE_FAIL;
Yang Shic5b5a3d2021-06-30 18:51:42 -07001495 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1496 if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
1497 spin_unlock(vmf->ptl);
1498 goto out;
1499 }
1500 goto out_map;
1501 }
Mel Gormanb8916632013-10-07 11:28:44 +01001502
1503out:
Anshuman Khandual98fa15f2019-03-05 15:42:58 -08001504 if (page_nid != NUMA_NO_NODE)
Jan Kara82b0f8c2016-12-14 15:06:58 -08001505 task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR,
Aneesh Kumar K.V9a8b3002017-02-24 14:59:56 -08001506 flags);
Mel Gorman8191acb2013-10-07 11:28:45 +01001507
Mel Gormand10e63f2012-10-25 14:16:31 +02001508 return 0;
Yang Shic5b5a3d2021-06-30 18:51:42 -07001509
1510out_map:
1511 /* Restore the PMD */
1512 pmd = pmd_modify(oldpmd, vma->vm_page_prot);
1513 pmd = pmd_mkyoung(pmd);
1514 if (was_writable)
1515 pmd = pmd_mkwrite(pmd);
1516 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
1517 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1518 spin_unlock(vmf->ptl);
1519 goto out;
Mel Gormand10e63f2012-10-25 14:16:31 +02001520}
1521
Huang Ying319904a2016-07-28 15:48:03 -07001522/*
 1523	 * Return true if we do MADV_FREE successfully on the entire pmd page.
1524 * Otherwise, return false.
1525 */
1526bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
Minchan Kimb8d3c4c2016-01-15 16:55:42 -08001527 pmd_t *pmd, unsigned long addr, unsigned long next)
Minchan Kimb8d3c4c2016-01-15 16:55:42 -08001528{
1529 spinlock_t *ptl;
1530 pmd_t orig_pmd;
1531 struct page *page;
1532 struct mm_struct *mm = tlb->mm;
Huang Ying319904a2016-07-28 15:48:03 -07001533 bool ret = false;
Minchan Kimb8d3c4c2016-01-15 16:55:42 -08001534
Peter Zijlstraed6a7932018-08-31 14:46:08 +02001535 tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
Aneesh Kumar K.V07e32662016-12-12 16:42:40 -08001536
Kirill A. Shutemovb6ec57f2016-01-21 16:40:25 -08001537 ptl = pmd_trans_huge_lock(pmd, vma);
1538 if (!ptl)
Linus Torvalds25eedab2016-01-17 18:33:15 -08001539 goto out_unlocked;
Minchan Kimb8d3c4c2016-01-15 16:55:42 -08001540
1541 orig_pmd = *pmd;
Huang Ying319904a2016-07-28 15:48:03 -07001542 if (is_huge_zero_pmd(orig_pmd))
Minchan Kimb8d3c4c2016-01-15 16:55:42 -08001543 goto out;
Minchan Kimb8d3c4c2016-01-15 16:55:42 -08001544
Zi Yan84c3fc42017-09-08 16:11:01 -07001545 if (unlikely(!pmd_present(orig_pmd))) {
1546 VM_BUG_ON(thp_migration_supported() &&
1547 !is_pmd_migration_entry(orig_pmd));
1548 goto out;
1549 }
1550
Minchan Kimb8d3c4c2016-01-15 16:55:42 -08001551 page = pmd_page(orig_pmd);
1552 /*
 1553	 * If other processes are mapping this page, we can't discard
 1554	 * the page unless they all do MADV_FREE, so let's skip the page.
1555 */
Miaohe Linbabbbdd2021-06-30 18:47:57 -07001556 if (total_mapcount(page) != 1)
Minchan Kimb8d3c4c2016-01-15 16:55:42 -08001557 goto out;
1558
1559 if (!trylock_page(page))
1560 goto out;
1561
1562 /*
 1563	 * If the user wants to discard only part of the THP, split it so
 1564	 * MADV_FREE will deactivate only those pages.
1565 */
1566 if (next - addr != HPAGE_PMD_SIZE) {
1567 get_page(page);
1568 spin_unlock(ptl);
Huang Ying9818b8c2016-07-14 12:07:12 -07001569 split_huge_page(page);
Minchan Kimb8d3c4c2016-01-15 16:55:42 -08001570 unlock_page(page);
Kirill A. Shutemovbbf29ff2017-07-06 15:35:28 -07001571 put_page(page);
Minchan Kimb8d3c4c2016-01-15 16:55:42 -08001572 goto out_unlocked;
1573 }
1574
1575 if (PageDirty(page))
1576 ClearPageDirty(page);
1577 unlock_page(page);
1578
Minchan Kimb8d3c4c2016-01-15 16:55:42 -08001579 if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
Kirill A. Shutemov58ceeb62017-04-13 14:56:26 -07001580 pmdp_invalidate(vma, addr, pmd);
Minchan Kimb8d3c4c2016-01-15 16:55:42 -08001581 orig_pmd = pmd_mkold(orig_pmd);
1582 orig_pmd = pmd_mkclean(orig_pmd);
1583
1584 set_pmd_at(mm, addr, pmd, orig_pmd);
1585 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1586 }
Shaohua Li802a3a92017-05-03 14:52:32 -07001587
1588 mark_page_lazyfree(page);
Huang Ying319904a2016-07-28 15:48:03 -07001589 ret = true;
Minchan Kimb8d3c4c2016-01-15 16:55:42 -08001590out:
1591 spin_unlock(ptl);
1592out_unlocked:
1593 return ret;
1594}
1595
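/*
 * Withdraw and free the page table that was deposited for a huge pmd,
 * and drop the mm's page table accounting for it.
 */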
Aneesh Kumar K.V953c66c2016-12-12 16:44:32 -08001596static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
1597{
1598 pgtable_t pgtable;
1599
1600 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
1601 pte_free(mm, pgtable);
Kirill A. Shutemovc4812902017-11-15 17:35:37 -08001602 mm_dec_nr_ptes(mm);
Aneesh Kumar K.V953c66c2016-12-12 16:44:32 -08001603}
1604
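/*
 * Tear down one huge pmd during unmap: clear the entry, release the
 * deposited page table where needed, and unmap or flush the mapped page.
 * Returns 1 if a huge pmd was cleared, or 0 so the caller falls back to
 * the pte level.
 */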
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001605int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
Shaohua Lif21760b2012-01-12 17:19:16 -08001606 pmd_t *pmd, unsigned long addr)
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001607{
Kirill A. Shutemovda146762015-09-08 14:59:31 -07001608 pmd_t orig_pmd;
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001609 spinlock_t *ptl;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001610
Peter Zijlstraed6a7932018-08-31 14:46:08 +02001611 tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
Aneesh Kumar K.V07e32662016-12-12 16:42:40 -08001612
Kirill A. Shutemovb6ec57f2016-01-21 16:40:25 -08001613 ptl = __pmd_trans_huge_lock(pmd, vma);
1614 if (!ptl)
Kirill A. Shutemovda146762015-09-08 14:59:31 -07001615 return 0;
1616 /*
1617 * For architectures like ppc64 we look at deposited pgtable
1618 * when calling pmdp_huge_get_and_clear. So do the
1619 * pgtable_trans_huge_withdraw after finishing pmdp related
1620 * operations.
1621 */
Aneesh Kumar K.V93a98692020-05-05 12:47:28 +05301622 orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd,
1623 tlb->fullmm);
Kirill A. Shutemovda146762015-09-08 14:59:31 -07001624 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
Thomas Hellstrom (VMware)2484ca92020-03-24 18:47:17 +01001625 if (vma_is_special_huge(vma)) {
Oliver O'Halloran3b6521f2017-05-08 15:59:43 -07001626 if (arch_needs_pgtable_deposit())
1627 zap_deposited_table(tlb->mm, pmd);
Kirill A. Shutemovda146762015-09-08 14:59:31 -07001628 spin_unlock(ptl);
Kirill A. Shutemovda146762015-09-08 14:59:31 -07001629 } else if (is_huge_zero_pmd(orig_pmd)) {
Oliver O'Halloranc14a6eb2017-05-08 15:59:40 -07001630 zap_deposited_table(tlb->mm, pmd);
Kirill A. Shutemovda146762015-09-08 14:59:31 -07001631 spin_unlock(ptl);
Kirill A. Shutemovda146762015-09-08 14:59:31 -07001632 } else {
Zi Yan616b8372017-09-08 16:10:57 -07001633 struct page *page = NULL;
1634 int flush_needed = 1;
1635
1636 if (pmd_present(orig_pmd)) {
1637 page = pmd_page(orig_pmd);
1638 page_remove_rmap(page, true);
1639 VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
1640 VM_BUG_ON_PAGE(!PageHead(page), page);
1641 } else if (thp_migration_supported()) {
1642 swp_entry_t entry;
1643
1644 VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
1645 entry = pmd_to_swp_entry(orig_pmd);
Miaohe Lina44f89d2021-05-04 18:34:08 -07001646 page = migration_entry_to_page(entry);
Zi Yan616b8372017-09-08 16:10:57 -07001647 flush_needed = 0;
1648 } else
1649 WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
1650
Kirill A. Shutemovb5072382016-07-26 15:25:34 -07001651 if (PageAnon(page)) {
Oliver O'Halloranc14a6eb2017-05-08 15:59:40 -07001652 zap_deposited_table(tlb->mm, pmd);
Kirill A. Shutemovb5072382016-07-26 15:25:34 -07001653 add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
1654 } else {
Aneesh Kumar K.V953c66c2016-12-12 16:44:32 -08001655 if (arch_needs_pgtable_deposit())
1656 zap_deposited_table(tlb->mm, pmd);
Yang Shifadae292018-08-17 15:44:55 -07001657 add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR);
Kirill A. Shutemovb5072382016-07-26 15:25:34 -07001658 }
Zi Yan616b8372017-09-08 16:10:57 -07001659
Kirill A. Shutemovda146762015-09-08 14:59:31 -07001660 spin_unlock(ptl);
Zi Yan616b8372017-09-08 16:10:57 -07001661 if (flush_needed)
1662 tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
Naoya Horiguchi025c5b22012-03-21 16:33:57 -07001663 }
Kirill A. Shutemovda146762015-09-08 14:59:31 -07001664 return 1;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001665}
1666
Aneesh Kumar K.V1dd38b62016-12-12 16:44:29 -08001667#ifndef pmd_move_must_withdraw
1668static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
1669 spinlock_t *old_pmd_ptl,
1670 struct vm_area_struct *vma)
1671{
1672 /*
 1673	 * With split pmd locks we also need to move the preallocated
 1674	 * PTE page table if new_pmd is on a different PMD page table.
1675 *
1676 * We also don't deposit and withdraw tables for file pages.
1677 */
1678 return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
1679}
1680#endif
1681
Naoya Horiguchiab6e3d02017-09-08 16:11:04 -07001682static pmd_t move_soft_dirty_pmd(pmd_t pmd)
1683{
1684#ifdef CONFIG_MEM_SOFT_DIRTY
1685 if (unlikely(is_pmd_migration_entry(pmd)))
1686 pmd = pmd_swp_mksoft_dirty(pmd);
1687 else if (pmd_present(pmd))
1688 pmd = pmd_mksoft_dirty(pmd);
1689#endif
1690 return pmd;
1691}
1692
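/*
 * Move a huge pmd to a new address for mremap(): clear the old entry,
 * move the deposited page table along with it when required, and flush
 * the TLB for the old range if the entry was present.
 */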
Hugh Dickinsbf8616d2016-05-19 17:12:54 -07001693bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
Wei Yangb8aa9d92020-08-06 23:23:40 -07001694 unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
Andrea Arcangeli37a1c492011-10-31 17:08:30 -07001695{
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001696 spinlock_t *old_ptl, *new_ptl;
Andrea Arcangeli37a1c492011-10-31 17:08:30 -07001697 pmd_t pmd;
Andrea Arcangeli37a1c492011-10-31 17:08:30 -07001698 struct mm_struct *mm = vma->vm_mm;
Aaron Lu5d190422016-11-10 17:16:33 +08001699 bool force_flush = false;
Andrea Arcangeli37a1c492011-10-31 17:08:30 -07001700
Andrea Arcangeli37a1c492011-10-31 17:08:30 -07001701 /*
1702 * The destination pmd shouldn't be established, free_pgtables()
 1703	 * should have released it.
1704 */
1705 if (WARN_ON(!pmd_none(*new_pmd))) {
1706 VM_BUG_ON(pmd_trans_huge(*new_pmd));
Kirill A. Shutemov4b471e82016-01-15 16:53:39 -08001707 return false;
Andrea Arcangeli37a1c492011-10-31 17:08:30 -07001708 }
1709
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001710 /*
1711 * We don't have to worry about the ordering of src and dst
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07001712 * ptlocks because exclusive mmap_lock prevents deadlock.
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001713 */
Kirill A. Shutemovb6ec57f2016-01-21 16:40:25 -08001714 old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
1715 if (old_ptl) {
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001716 new_ptl = pmd_lockptr(mm, new_pmd);
1717 if (new_ptl != old_ptl)
1718 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
Aneesh Kumar K.V8809aa22015-06-24 16:57:44 -07001719 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
Linus Torvaldseb66ae02018-10-12 15:22:59 -07001720 if (pmd_present(pmd))
Aaron Lua2ce2662016-11-29 13:27:31 +08001721 force_flush = true;
Naoya Horiguchi025c5b22012-03-21 16:33:57 -07001722 VM_BUG_ON(!pmd_none(*new_pmd));
Kirill A. Shutemov35928062013-12-12 17:12:33 -08001723
Aneesh Kumar K.V1dd38b62016-12-12 16:44:29 -08001724 if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
Aneesh Kumar K.Vb3084f42014-01-13 11:34:24 +05301725 pgtable_t pgtable;
Kirill A. Shutemov35928062013-12-12 17:12:33 -08001726 pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
1727 pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
Kirill A. Shutemov35928062013-12-12 17:12:33 -08001728 }
Naoya Horiguchiab6e3d02017-09-08 16:11:04 -07001729 pmd = move_soft_dirty_pmd(pmd);
1730 set_pmd_at(mm, new_addr, new_pmd, pmd);
Aaron Lu5d190422016-11-10 17:16:33 +08001731 if (force_flush)
1732 flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
Linus Torvaldseb66ae02018-10-12 15:22:59 -07001733 if (new_ptl != old_ptl)
1734 spin_unlock(new_ptl);
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001735 spin_unlock(old_ptl);
Kirill A. Shutemov4b471e82016-01-15 16:53:39 -08001736 return true;
Andrea Arcangeli37a1c492011-10-31 17:08:30 -07001737 }
Kirill A. Shutemov4b471e82016-01-15 16:53:39 -08001738 return false;
Andrea Arcangeli37a1c492011-10-31 17:08:30 -07001739}
1740
Mel Gormanf123d742013-10-07 11:28:49 +01001741/*
1742 * Returns
1743 * - 0 if PMD could not be locked
Ingo Molnarf0953a12021-05-06 18:06:47 -07001744 * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
Yang Shie346e662021-06-30 18:51:55 -07001745 * or if prot_numa but THP migration is not supported
Ingo Molnarf0953a12021-05-06 18:06:47 -07001746 * - HPAGE_PMD_NR if protections changed and TLB flush necessary
Mel Gormanf123d742013-10-07 11:28:49 +01001747 */
Johannes Weinercd7548a2011-01-13 15:47:04 -08001748int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
Peter Xu58705442020-04-06 20:05:45 -07001749 unsigned long addr, pgprot_t newprot, unsigned long cp_flags)
Johannes Weinercd7548a2011-01-13 15:47:04 -08001750{
1751 struct mm_struct *mm = vma->vm_mm;
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001752 spinlock_t *ptl;
Kirill A. Shutemov0a85e51d2017-04-13 14:56:17 -07001753 pmd_t entry;
1754 bool preserve_write;
1755 int ret;
Peter Xu58705442020-04-06 20:05:45 -07001756 bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
Peter Xu292924b2020-04-06 20:05:49 -07001757 bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
1758 bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
Johannes Weinercd7548a2011-01-13 15:47:04 -08001759
Yang Shie346e662021-06-30 18:51:55 -07001760 if (prot_numa && !thp_migration_supported())
1761 return 1;
1762
Kirill A. Shutemovb6ec57f2016-01-21 16:40:25 -08001763 ptl = __pmd_trans_huge_lock(pmd, vma);
Kirill A. Shutemov0a85e51d2017-04-13 14:56:17 -07001764 if (!ptl)
1765 return 0;
Mel Gormane944fd62015-02-12 14:58:35 -08001766
Kirill A. Shutemov0a85e51d2017-04-13 14:56:17 -07001767 preserve_write = prot_numa && pmd_write(*pmd);
1768 ret = 1;
Mel Gormane944fd62015-02-12 14:58:35 -08001769
Zi Yan84c3fc42017-09-08 16:11:01 -07001770#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1771 if (is_swap_pmd(*pmd)) {
1772 swp_entry_t entry = pmd_to_swp_entry(*pmd);
1773
1774 VM_BUG_ON(!is_pmd_migration_entry(*pmd));
1775 if (is_write_migration_entry(entry)) {
1776 pmd_t newpmd;
1777 /*
1778 * A protection check is difficult so
1779 * just be safe and disable write
1780 */
1781 make_migration_entry_read(&entry);
1782 newpmd = swp_entry_to_pmd(entry);
Naoya Horiguchiab6e3d02017-09-08 16:11:04 -07001783 if (pmd_swp_soft_dirty(*pmd))
1784 newpmd = pmd_swp_mksoft_dirty(newpmd);
Peter Xu8f34f1e2021-06-30 18:49:02 -07001785 if (pmd_swp_uffd_wp(*pmd))
1786 newpmd = pmd_swp_mkuffd_wp(newpmd);
Zi Yan84c3fc42017-09-08 16:11:01 -07001787 set_pmd_at(mm, addr, pmd, newpmd);
1788 }
1789 goto unlock;
1790 }
1791#endif
1792
Kirill A. Shutemov0a85e51d2017-04-13 14:56:17 -07001793 /*
1794 * Avoid trapping faults against the zero page. The read-only
1795 * data is likely to be read-cached on the local CPU and
1796 * local/remote hits to the zero page are not interesting.
1797 */
1798 if (prot_numa && is_huge_zero_pmd(*pmd))
1799 goto unlock;
Johannes Weinercd7548a2011-01-13 15:47:04 -08001800
Kirill A. Shutemov0a85e51d2017-04-13 14:56:17 -07001801 if (prot_numa && pmd_protnone(*pmd))
1802 goto unlock;
1803
Kirill A. Shutemovced10802017-04-13 14:56:20 -07001804 /*
Michel Lespinasse3e4e28c2020-06-08 21:33:51 -07001805	 * In the prot_numa case, we are under mmap_read_lock(mm). It's critical
Kirill A. Shutemovced10802017-04-13 14:56:20 -07001806	 * not to clear the pmd intermittently, to avoid racing with MADV_DONTNEED,
Michel Lespinasse3e4e28c2020-06-08 21:33:51 -07001807	 * which is also under mmap_read_lock(mm):
Kirill A. Shutemovced10802017-04-13 14:56:20 -07001808 *
1809 * CPU0: CPU1:
1810 * change_huge_pmd(prot_numa=1)
1811 * pmdp_huge_get_and_clear_notify()
1812 * madvise_dontneed()
1813 * zap_pmd_range()
1814 * pmd_trans_huge(*pmd) == 0 (without ptl)
1815 * // skip the pmd
1816 * set_pmd_at();
1817 * // pmd is re-established
1818 *
 1819	 * The race makes MADV_DONTNEED miss the huge pmd and not clear it,
1820 * which may break userspace.
1821 *
1822 * pmdp_invalidate() is required to make sure we don't miss
1823 * dirty/young flags set by hardware.
1824 */
Kirill A. Shutemova3cf9882018-01-31 16:18:20 -08001825 entry = pmdp_invalidate(vma, addr, pmd);
Kirill A. Shutemovced10802017-04-13 14:56:20 -07001826
Kirill A. Shutemov0a85e51d2017-04-13 14:56:17 -07001827 entry = pmd_modify(entry, newprot);
1828 if (preserve_write)
1829 entry = pmd_mk_savedwrite(entry);
Peter Xu292924b2020-04-06 20:05:49 -07001830 if (uffd_wp) {
1831 entry = pmd_wrprotect(entry);
1832 entry = pmd_mkuffd_wp(entry);
1833 } else if (uffd_wp_resolve) {
1834 /*
1835 * Leave the write bit to be handled by PF interrupt
1836 * handler, then things like COW could be properly
1837 * handled.
1838 */
1839 entry = pmd_clear_uffd_wp(entry);
1840 }
Kirill A. Shutemov0a85e51d2017-04-13 14:56:17 -07001841 ret = HPAGE_PMD_NR;
1842 set_pmd_at(mm, addr, pmd, entry);
1843 BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry));
1844unlock:
1845 spin_unlock(ptl);
Johannes Weinercd7548a2011-01-13 15:47:04 -08001846 return ret;
1847}
1848
Naoya Horiguchi025c5b22012-03-21 16:33:57 -07001849/*
Huang Ying8f19b0c2016-07-26 15:27:04 -07001850 * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
Naoya Horiguchi025c5b22012-03-21 16:33:57 -07001851 *
Huang Ying8f19b0c2016-07-26 15:27:04 -07001852 * Note that if it returns page table lock pointer, this routine returns without
1853 * unlocking page table lock. So callers must unlock it.
Naoya Horiguchi025c5b22012-03-21 16:33:57 -07001854 */
Kirill A. Shutemovb6ec57f2016-01-21 16:40:25 -08001855spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
Naoya Horiguchi025c5b22012-03-21 16:33:57 -07001856{
Kirill A. Shutemovb6ec57f2016-01-21 16:40:25 -08001857 spinlock_t *ptl;
1858 ptl = pmd_lock(vma->vm_mm, pmd);
Zi Yan84c3fc42017-09-08 16:11:01 -07001859 if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
1860 pmd_devmap(*pmd)))
Kirill A. Shutemovb6ec57f2016-01-21 16:40:25 -08001861 return ptl;
1862 spin_unlock(ptl);
1863 return NULL;
Naoya Horiguchi025c5b22012-03-21 16:33:57 -07001864}
1865
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08001866/*
 1867	 * Returns page table lock pointer if a given pud maps a thp, NULL otherwise.
 1868	 *
 1869	 * Note that if it returns page table lock pointer, this routine returns
 1870	 * without unlocking page table lock. So callers must unlock it.
1871 */
1872spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
1873{
1874 spinlock_t *ptl;
1875
1876 ptl = pud_lock(vma->vm_mm, pud);
1877 if (likely(pud_trans_huge(*pud) || pud_devmap(*pud)))
1878 return ptl;
1879 spin_unlock(ptl);
1880 return NULL;
1881}
1882
1883#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1884int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
1885 pud_t *pud, unsigned long addr)
1886{
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08001887 spinlock_t *ptl;
1888
1889 ptl = __pud_trans_huge_lock(pud, vma);
1890 if (!ptl)
1891 return 0;
1892 /*
1893 * For architectures like ppc64 we look at deposited pgtable
1894 * when calling pudp_huge_get_and_clear. So do the
1895 * pgtable_trans_huge_withdraw after finishing pudp related
1896 * operations.
1897 */
Qian Cai70516b92019-03-05 15:50:00 -08001898 pudp_huge_get_and_clear_full(tlb->mm, addr, pud, tlb->fullmm);
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08001899 tlb_remove_pud_tlb_entry(tlb, pud, addr);
Thomas Hellstrom (VMware)2484ca92020-03-24 18:47:17 +01001900 if (vma_is_special_huge(vma)) {
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08001901 spin_unlock(ptl);
1902 /* No zero page support yet */
1903 } else {
1904 /* No support for anonymous PUD pages yet */
1905 BUG();
1906 }
1907 return 1;
1908}
1909
1910static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
1911 unsigned long haddr)
1912{
1913 VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
1914 VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
1915 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
1916 VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));
1917
Yisheng Xiece9311c2017-03-09 16:17:00 -08001918 count_vm_event(THP_SPLIT_PUD);
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08001919
1920 pudp_huge_clear_flush_notify(vma, haddr, pud);
1921}
1922
1923void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
1924 unsigned long address)
1925{
1926 spinlock_t *ptl;
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08001927 struct mmu_notifier_range range;
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08001928
Jérôme Glisse7269f992019-05-13 17:20:53 -07001929 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
Jérôme Glisse6f4f13e2019-05-13 17:20:49 -07001930 address & HPAGE_PUD_MASK,
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08001931 (address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
1932 mmu_notifier_invalidate_range_start(&range);
1933 ptl = pud_lock(vma->vm_mm, pud);
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08001934 if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
1935 goto out;
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08001936 __split_huge_pud_locked(vma, pud, range.start);
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08001937
1938out:
1939 spin_unlock(ptl);
Jérôme Glisse4645b9f2017-11-15 17:34:11 -08001940 /*
1941 * No need to double call mmu_notifier->invalidate_range() callback as
1942 * the above pudp_huge_clear_flush_notify() did already call it.
1943 */
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08001944 mmu_notifier_invalidate_range_only_end(&range);
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08001945}
1946#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1947
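/*
 * Split a huge zero-page pmd: replace it with a page table whose ptes all
 * map the small zero page.
 */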
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08001948static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
1949 unsigned long haddr, pmd_t *pmd)
1950{
1951 struct mm_struct *mm = vma->vm_mm;
1952 pgtable_t pgtable;
1953 pmd_t _pmd;
1954 int i;
1955
Jérôme Glisse0f108512017-11-15 17:34:07 -08001956 /*
 1957	 * Leave the pmd empty until the ptes are filled. Note that it is fine
 1958	 * to delay notification until mmu_notifier_invalidate_range_end(), as
 1959	 * we are replacing a write-protected zero pmd page with write-protected
 1960	 * zero pte pages.
1961 *
Mike Rapoportad56b732018-03-21 21:22:47 +02001962 * See Documentation/vm/mmu_notifier.rst
Jérôme Glisse0f108512017-11-15 17:34:07 -08001963 */
1964 pmdp_huge_clear_flush(vma, haddr, pmd);
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08001965
1966 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
1967 pmd_populate(mm, &_pmd, pgtable);
1968
1969 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
1970 pte_t *pte, entry;
1971 entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
1972 entry = pte_mkspecial(entry);
1973 pte = pte_offset_map(&_pmd, haddr);
1974 VM_BUG_ON(!pte_none(*pte));
1975 set_pte_at(mm, haddr, pte, entry);
1976 pte_unmap(pte);
1977 }
1978 smp_wmb(); /* make pte visible before pmd */
1979 pmd_populate(mm, pmd, pgtable);
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08001980}
1981
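/*
 * Split one huge pmd with the pmd lock held by the caller: file pmds are
 * simply zapped, the huge zero pmd is replaced by zero-page ptes, and
 * anonymous pmds are remapped with normal ptes (or migration entries when
 * @freeze is true).
 */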
1982static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
Kirill A. Shutemovba988282016-01-15 16:53:56 -08001983 unsigned long haddr, bool freeze)
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08001984{
1985 struct mm_struct *mm = vma->vm_mm;
1986 struct page *page;
1987 pgtable_t pgtable;
Aneesh Kumar K.V423ac9a2018-01-31 16:18:24 -08001988 pmd_t old_pmd, _pmd;
Peter Xu292924b2020-04-06 20:05:49 -07001989 bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false;
Kirill A. Shutemov2ac015e2016-02-24 18:58:03 +03001990 unsigned long addr;
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08001991 int i;
1992
1993 VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
1994 VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
1995 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
Zi Yan84c3fc42017-09-08 16:11:01 -07001996 VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
1997 && !pmd_devmap(*pmd));
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08001998
1999 count_vm_event(THP_SPLIT_PMD);
2000
Kirill A. Shutemovd21b9e52016-07-26 15:25:37 -07002001 if (!vma_is_anonymous(vma)) {
Hugh Dickins99fa8a42021-06-15 18:23:45 -07002002 old_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
Aneesh Kumar K.V953c66c2016-12-12 16:44:32 -08002003 /*
2004 * We are going to unmap this huge page. So
2005 * just go ahead and zap it
2006 */
2007 if (arch_needs_pgtable_deposit())
2008 zap_deposited_table(mm, pmd);
Thomas Hellstrom (VMware)2484ca92020-03-24 18:47:17 +01002009 if (vma_is_special_huge(vma))
Kirill A. Shutemovd21b9e52016-07-26 15:25:37 -07002010 return;
Hugh Dickins99fa8a42021-06-15 18:23:45 -07002011 if (unlikely(is_pmd_migration_entry(old_pmd))) {
2012 swp_entry_t entry;
2013
2014 entry = pmd_to_swp_entry(old_pmd);
2015 page = migration_entry_to_page(entry);
2016 } else {
2017 page = pmd_page(old_pmd);
2018 if (!PageDirty(page) && pmd_dirty(old_pmd))
2019 set_page_dirty(page);
2020 if (!PageReferenced(page) && pmd_young(old_pmd))
2021 SetPageReferenced(page);
2022 page_remove_rmap(page, true);
2023 put_page(page);
2024 }
Yang Shifadae292018-08-17 15:44:55 -07002025 add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002026 return;
Hugh Dickins99fa8a42021-06-15 18:23:45 -07002027 }
2028
Hugh Dickins3b77e8c2021-06-15 18:23:49 -07002029 if (is_huge_zero_pmd(*pmd)) {
Jérôme Glisse4645b9f2017-11-15 17:34:11 -08002030 /*
2031 * FIXME: Do we want to invalidate secondary mmu by calling
2032 * mmu_notifier_invalidate_range() see comments below inside
2033 * __split_huge_pmd() ?
2034 *
2035 * We are going from a zero huge page write protected to zero
2036 * small page also write protected so it does not seems useful
2037 * to invalidate secondary mmu at this time.
2038 */
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002039 return __split_huge_zero_page_pmd(vma, haddr, pmd);
2040 }
2041
Aneesh Kumar K.V423ac9a2018-01-31 16:18:24 -08002042 /*
2043 * Up to this point the pmd is present and huge and userland has the
2044 * whole access to the hugepage during the split (which happens in
2045 * place). If we overwrite the pmd with the not-huge version pointing
2046 * to the pte here (which of course we could if all CPUs were bug
2047 * free), userland could trigger a small page size TLB miss on the
2048 * small sized TLB while the hugepage TLB entry is still established in
2049 * the huge TLB. Some CPU doesn't like that.
Alexander A. Klimov42742d92020-08-06 23:26:08 -07002050 * See http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum
 2051	 * 383 on page 105. Intel should be safe but it also warns that it's
Aneesh Kumar K.V423ac9a2018-01-31 16:18:24 -08002052 * only safe if the permission and cache attributes of the two entries
2053 * loaded in the two TLB is identical (which should be the case here).
2054 * But it is generally safer to never allow small and huge TLB entries
2055 * for the same virtual address to be loaded simultaneously. So instead
2056 * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
2057 * current pmd notpresent (atomically because here the pmd_trans_huge
2058 * must remain set at all times on the pmd until the split is complete
2059 * for this pmd), then we flush the SMP TLB and finally we write the
2060 * non-huge version of the pmd entry with pmd_populate.
2061 */
2062 old_pmd = pmdp_invalidate(vma, haddr, pmd);
2063
Aneesh Kumar K.V423ac9a2018-01-31 16:18:24 -08002064 pmd_migration = is_pmd_migration_entry(old_pmd);
Peter Xu2e83ee12018-12-21 14:30:50 -08002065 if (unlikely(pmd_migration)) {
Zi Yan84c3fc42017-09-08 16:11:01 -07002066 swp_entry_t entry;
2067
Aneesh Kumar K.V423ac9a2018-01-31 16:18:24 -08002068 entry = pmd_to_swp_entry(old_pmd);
Miaohe Lina44f89d2021-05-04 18:34:08 -07002069 page = migration_entry_to_page(entry);
Peter Xu2e83ee12018-12-21 14:30:50 -08002070 write = is_write_migration_entry(entry);
2071 young = false;
2072 soft_dirty = pmd_swp_soft_dirty(old_pmd);
Peter Xuf45ec5f2020-04-06 20:06:01 -07002073 uffd_wp = pmd_swp_uffd_wp(old_pmd);
Peter Xu2e83ee12018-12-21 14:30:50 -08002074 } else {
Aneesh Kumar K.V423ac9a2018-01-31 16:18:24 -08002075 page = pmd_page(old_pmd);
Peter Xu2e83ee12018-12-21 14:30:50 -08002076 if (pmd_dirty(old_pmd))
2077 SetPageDirty(page);
2078 write = pmd_write(old_pmd);
2079 young = pmd_young(old_pmd);
2080 soft_dirty = pmd_soft_dirty(old_pmd);
Peter Xu292924b2020-04-06 20:05:49 -07002081 uffd_wp = pmd_uffd_wp(old_pmd);
Peter Xu2e83ee12018-12-21 14:30:50 -08002082 }
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002083 VM_BUG_ON_PAGE(!page_count(page), page);
Joonsoo Kimfe896d12016-03-17 14:19:26 -07002084 page_ref_add(page, HPAGE_PMD_NR - 1);
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002085
Aneesh Kumar K.V423ac9a2018-01-31 16:18:24 -08002086 /*
2087 * Withdraw the table only after we mark the pmd entry invalid.
 2088	 * This is critical for some architectures (Power).
2089 */
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002090 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2091 pmd_populate(mm, &_pmd, pgtable);
2092
Kirill A. Shutemov2ac015e2016-02-24 18:58:03 +03002093 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002094 pte_t entry, *pte;
2095 /*
2096 * Note that NUMA hinting access restrictions are not
2097 * transferred to avoid any possibility of altering
2098 * permissions across VMAs.
2099 */
Zi Yan84c3fc42017-09-08 16:11:01 -07002100 if (freeze || pmd_migration) {
Kirill A. Shutemovba988282016-01-15 16:53:56 -08002101 swp_entry_t swp_entry;
2102 swp_entry = make_migration_entry(page + i, write);
2103 entry = swp_entry_to_pte(swp_entry);
Andrea Arcangeli804dd152016-08-25 15:16:57 -07002104 if (soft_dirty)
2105 entry = pte_swp_mksoft_dirty(entry);
Peter Xuf45ec5f2020-04-06 20:06:01 -07002106 if (uffd_wp)
2107 entry = pte_swp_mkuffd_wp(entry);
Kirill A. Shutemovba988282016-01-15 16:53:56 -08002108 } else {
Andrea Arcangeli6d2329f2016-10-07 17:01:22 -07002109 entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
Minchan Kimb8d3c4c2016-01-15 16:55:42 -08002110 entry = maybe_mkwrite(entry, vma);
Kirill A. Shutemovba988282016-01-15 16:53:56 -08002111 if (!write)
2112 entry = pte_wrprotect(entry);
2113 if (!young)
2114 entry = pte_mkold(entry);
Andrea Arcangeli804dd152016-08-25 15:16:57 -07002115 if (soft_dirty)
2116 entry = pte_mksoft_dirty(entry);
Peter Xu292924b2020-04-06 20:05:49 -07002117 if (uffd_wp)
2118 entry = pte_mkuffd_wp(entry);
Kirill A. Shutemovba988282016-01-15 16:53:56 -08002119 }
Kirill A. Shutemov2ac015e2016-02-24 18:58:03 +03002120 pte = pte_offset_map(&_pmd, addr);
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002121 BUG_ON(!pte_none(*pte));
Kirill A. Shutemov2ac015e2016-02-24 18:58:03 +03002122 set_pte_at(mm, addr, pte, entry);
Ralph Campbellec0abae2020-09-18 21:20:24 -07002123 if (!pmd_migration)
2124 atomic_inc(&page[i]._mapcount);
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002125 pte_unmap(pte);
2126 }
2127
Ralph Campbellec0abae2020-09-18 21:20:24 -07002128 if (!pmd_migration) {
2129 /*
2130 * Set PG_double_map before dropping compound_mapcount to avoid
2131 * false-negative page_mapped().
2132 */
2133 if (compound_mapcount(page) > 1 &&
2134 !TestSetPageDoubleMap(page)) {
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002135 for (i = 0; i < HPAGE_PMD_NR; i++)
Ralph Campbellec0abae2020-09-18 21:20:24 -07002136 atomic_inc(&page[i]._mapcount);
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002137 }
Ralph Campbellec0abae2020-09-18 21:20:24 -07002138
2139 lock_page_memcg(page);
2140 if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
2141 /* Last compound_mapcount is gone. */
Muchun Song69473e52021-02-24 12:03:23 -08002142 __mod_lruvec_page_state(page, NR_ANON_THPS,
2143 -HPAGE_PMD_NR);
Ralph Campbellec0abae2020-09-18 21:20:24 -07002144 if (TestClearPageDoubleMap(page)) {
 2145				/* No need for the mapcount references anymore */
2146 for (i = 0; i < HPAGE_PMD_NR; i++)
2147 atomic_dec(&page[i]._mapcount);
2148 }
2149 }
2150 unlock_page_memcg(page);
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002151 }
2152
2153 smp_wmb(); /* make pte visible before pmd */
2154 pmd_populate(mm, pmd, pgtable);
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002155
2156 if (freeze) {
Kirill A. Shutemov2ac015e2016-02-24 18:58:03 +03002157 for (i = 0; i < HPAGE_PMD_NR; i++) {
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002158 page_remove_rmap(page + i, false);
2159 put_page(page + i);
2160 }
2161 }
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002162}
2163
2164void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
Naoya Horiguchi33f47512016-07-14 12:07:32 -07002165 unsigned long address, bool freeze, struct page *page)
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002166{
2167 spinlock_t *ptl;
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08002168 struct mmu_notifier_range range;
Hugh Dickins1c2f6732021-02-04 18:32:31 -08002169 bool do_unlock_page = false;
Andrea Arcangelic444eb52020-05-27 19:06:24 -04002170 pmd_t _pmd;
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002171
Jérôme Glisse7269f992019-05-13 17:20:53 -07002172 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
Jérôme Glisse6f4f13e2019-05-13 17:20:49 -07002173 address & HPAGE_PMD_MASK,
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08002174 (address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
2175 mmu_notifier_invalidate_range_start(&range);
2176 ptl = pmd_lock(vma->vm_mm, pmd);
Naoya Horiguchi33f47512016-07-14 12:07:32 -07002177
2178 /*
 2179	 * If the caller asks us to set up migration entries, we need a page to
 2180	 * check the pmd against. Otherwise we can end up replacing the wrong page.
2181 */
2182 VM_BUG_ON(freeze && !page);
Andrea Arcangelic444eb52020-05-27 19:06:24 -04002183 if (page) {
2184 VM_WARN_ON_ONCE(!PageLocked(page));
Andrea Arcangelic444eb52020-05-27 19:06:24 -04002185 if (page != pmd_page(*pmd))
2186 goto out;
2187 }
Naoya Horiguchi33f47512016-07-14 12:07:32 -07002188
Andrea Arcangelic444eb52020-05-27 19:06:24 -04002189repeat:
Dan Williams5c7fb562016-01-15 16:56:52 -08002190 if (pmd_trans_huge(*pmd)) {
Andrea Arcangelic444eb52020-05-27 19:06:24 -04002191 if (!page) {
2192 page = pmd_page(*pmd);
Hugh Dickins1c2f6732021-02-04 18:32:31 -08002193 /*
2194 * An anonymous page must be locked, to ensure that a
2195 * concurrent reuse_swap_page() sees stable mapcount;
2196 * but reuse_swap_page() is not used on shmem or file,
2197 * and page lock must not be taken when zap_pmd_range()
2198 * calls __split_huge_pmd() while i_mmap_lock is held.
2199 */
2200 if (PageAnon(page)) {
2201 if (unlikely(!trylock_page(page))) {
2202 get_page(page);
2203 _pmd = *pmd;
2204 spin_unlock(ptl);
2205 lock_page(page);
2206 spin_lock(ptl);
2207 if (unlikely(!pmd_same(*pmd, _pmd))) {
2208 unlock_page(page);
2209 put_page(page);
2210 page = NULL;
2211 goto repeat;
2212 }
Andrea Arcangelic444eb52020-05-27 19:06:24 -04002213 put_page(page);
Andrea Arcangelic444eb52020-05-27 19:06:24 -04002214 }
Hugh Dickins1c2f6732021-02-04 18:32:31 -08002215 do_unlock_page = true;
Andrea Arcangelic444eb52020-05-27 19:06:24 -04002216 }
2217 }
Dan Williams5c7fb562016-01-15 16:56:52 -08002218 if (PageMlocked(page))
Kirill A. Shutemov5f737712016-03-17 14:20:13 -07002219 clear_page_mlock(page);
Zi Yan84c3fc42017-09-08 16:11:01 -07002220 } else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd)))
Kirill A. Shutemove90309c2016-01-15 16:54:33 -08002221 goto out;
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08002222 __split_huge_pmd_locked(vma, pmd, range.start, freeze);
Kirill A. Shutemove90309c2016-01-15 16:54:33 -08002223out:
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002224 spin_unlock(ptl);
Hugh Dickins1c2f6732021-02-04 18:32:31 -08002225 if (do_unlock_page)
Andrea Arcangelic444eb52020-05-27 19:06:24 -04002226 unlock_page(page);
Jérôme Glisse4645b9f2017-11-15 17:34:11 -08002227 /*
2228 * No need to double call mmu_notifier->invalidate_range() callback.
 2229	 * There are 3 cases to consider inside __split_huge_pmd_locked():
 2230	 * 1) pmdp_huge_clear_flush_notify() calls invalidate_range() itself,
 2231	 *    which is the obvious case.
 2232	 * 2) __split_huge_zero_page_pmd() reads only the zero page, and any write
 2233	 *    fault will trigger a flush_notify before pointing to a new page
 2234	 *    (it is fine if the secondary mmu keeps pointing to the old zero
 2235	 *    page in the meantime).
 2236	 * 3) A huge pmd is split into ptes pointing to the same page. The
 2237	 *    secondary tlb entries are all still valid, and any further change
 2238	 *    to an individual pte will notify, so no extra invalidate is needed.
2239 */
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08002240 mmu_notifier_invalidate_range_only_end(&range);
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002241}
2242
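/*
 * Walk the page tables down to the pmd covering @address and call
 * __split_huge_pmd() on it if all intermediate levels are present.
 */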
Kirill A. Shutemovfec89c12016-03-17 14:20:10 -07002243void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
2244 bool freeze, struct page *page)
Andrea Arcangeli94fcc582011-01-13 15:47:08 -08002245{
Hugh Dickinsf72e7dc2014-06-23 13:22:05 -07002246 pgd_t *pgd;
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002247 p4d_t *p4d;
Hugh Dickinsf72e7dc2014-06-23 13:22:05 -07002248 pud_t *pud;
Andrea Arcangeli94fcc582011-01-13 15:47:08 -08002249 pmd_t *pmd;
2250
Kirill A. Shutemov78ddc532016-01-15 16:52:42 -08002251 pgd = pgd_offset(vma->vm_mm, address);
Hugh Dickinsf72e7dc2014-06-23 13:22:05 -07002252 if (!pgd_present(*pgd))
2253 return;
2254
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002255 p4d = p4d_offset(pgd, address);
2256 if (!p4d_present(*p4d))
2257 return;
2258
2259 pud = pud_offset(p4d, address);
Hugh Dickinsf72e7dc2014-06-23 13:22:05 -07002260 if (!pud_present(*pud))
2261 return;
2262
2263 pmd = pmd_offset(pud, address);
Kirill A. Shutemovfec89c12016-03-17 14:20:10 -07002264
Naoya Horiguchi33f47512016-07-14 12:07:32 -07002265 __split_huge_pmd(vma, pmd, address, freeze, page);
Andrea Arcangeli94fcc582011-01-13 15:47:08 -08002266}
2267
Miaohe Lin71f9e582021-05-04 18:33:52 -07002268static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address)
2269{
2270 /*
2271 * If the new address isn't hpage aligned and it could previously
2272	 * contain a hugepage: check if we need to split a huge pmd.
2273 */
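	/*
	 * Worked example (illustrative only, assuming HPAGE_PMD_SIZE is 2MB):
	 * for address 0x201000 the range checked below is [0x200000, 0x400000);
	 * only if that whole range lies inside the vma can a PMD-mapped
	 * hugepage straddle the new boundary and need splitting here.
	 */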
2274 if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) &&
2275 range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE),
2276 ALIGN(address, HPAGE_PMD_SIZE)))
2277 split_huge_pmd_address(vma, address, false, NULL);
2278}
2279
Kirill A. Shutemove1b99962015-09-08 14:58:37 -07002280void vma_adjust_trans_huge(struct vm_area_struct *vma,
Andrea Arcangeli94fcc582011-01-13 15:47:08 -08002281 unsigned long start,
2282 unsigned long end,
2283 long adjust_next)
2284{
Miaohe Lin71f9e582021-05-04 18:33:52 -07002285 /* Check if we need to split start first. */
2286 split_huge_pmd_if_needed(vma, start);
2287
2288 /* Check if we need to split end next. */
2289 split_huge_pmd_if_needed(vma, end);
Andrea Arcangeli94fcc582011-01-13 15:47:08 -08002290
2291 /*
Miaohe Lin71f9e582021-05-04 18:33:52 -07002292 * If we're also updating the vma->vm_next->vm_start,
2293 * check if we need to split it.
Andrea Arcangeli94fcc582011-01-13 15:47:08 -08002294 */
2295 if (adjust_next > 0) {
2296 struct vm_area_struct *next = vma->vm_next;
2297 unsigned long nstart = next->vm_start;
Wei Yangf9d86a62020-10-13 16:53:57 -07002298 nstart += adjust_next;
Miaohe Lin71f9e582021-05-04 18:33:52 -07002299 split_huge_pmd_if_needed(next, nstart);
Andrea Arcangeli94fcc582011-01-13 15:47:08 -08002300 }
2301}
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002302
Hugh Dickins906f9cd2018-11-30 14:10:13 -08002303static void unmap_page(struct page *page)
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002304{
Hugh Dickins732ed552021-06-15 18:23:53 -07002305 enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_SYNC |
Kirill A. Shutemovc7ab0d22017-02-24 14:58:01 -08002306 TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002307
2308 VM_BUG_ON_PAGE(!PageHead(page), page);
2309
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002310 if (PageAnon(page))
Naoya Horiguchib5ff8162017-09-08 16:10:49 -07002311 ttu_flags |= TTU_SPLIT_FREEZE;
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002312
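	/*
	 * For anon pages, TTU_SPLIT_FREEZE makes try_to_unmap() install
	 * migration entries, keeping the mappings frozen until remap_page()
	 * restores them via remove_migration_ptes() once the split is done.
	 */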
Yang Shi504e0702021-06-15 18:24:07 -07002313 try_to_unmap(page, ttu_flags);
2314
2315 VM_WARN_ON_ONCE_PAGE(page_mapped(page), page);
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002316}
2317
Kirill A. Shutemov8cce5472020-10-15 20:05:36 -07002318static void remap_page(struct page *page, unsigned int nr)
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002319{
Kirill A. Shutemovfec89c12016-03-17 14:20:10 -07002320 int i;
Kirill A. Shutemovace71a12017-02-24 14:57:45 -08002321 if (PageTransHuge(page)) {
2322 remove_migration_ptes(page, page, true);
2323 } else {
Kirill A. Shutemov8cce5472020-10-15 20:05:36 -07002324 for (i = 0; i < nr; i++)
Kirill A. Shutemovace71a12017-02-24 14:57:45 -08002325 remove_migration_ptes(page + i, page + i, true);
2326 }
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002327}
2328
Alex Shi94866632020-12-15 12:33:24 -08002329static void lru_add_page_tail(struct page *head, struct page *tail,
Alex Shi88dcb9a2020-12-15 12:33:20 -08002330 struct lruvec *lruvec, struct list_head *list)
2331{
Alex Shi94866632020-12-15 12:33:24 -08002332 VM_BUG_ON_PAGE(!PageHead(head), head);
2333 VM_BUG_ON_PAGE(PageCompound(tail), head);
2334 VM_BUG_ON_PAGE(PageLRU(tail), head);
Alex Shi6168d0d2020-12-15 12:34:29 -08002335 lockdep_assert_held(&lruvec->lru_lock);
Alex Shi88dcb9a2020-12-15 12:33:20 -08002336
Alex Shi6dbb5742020-12-15 12:33:29 -08002337 if (list) {
Alex Shi88dcb9a2020-12-15 12:33:20 -08002338 /* page reclaim is reclaiming a huge page */
Alex Shi6dbb5742020-12-15 12:33:29 -08002339 VM_WARN_ON(PageLRU(head));
Alex Shi94866632020-12-15 12:33:24 -08002340 get_page(tail);
2341 list_add_tail(&tail->lru, list);
Alex Shi88dcb9a2020-12-15 12:33:20 -08002342 } else {
Alex Shi6dbb5742020-12-15 12:33:29 -08002343 /* head is still on lru (and we have it frozen) */
2344 VM_WARN_ON(!PageLRU(head));
2345 SetPageLRU(tail);
2346 list_add_tail(&tail->lru, &head->lru);
Alex Shi88dcb9a2020-12-15 12:33:20 -08002347 }
2348}
2349
Kirill A. Shutemov8df651c2016-03-15 14:57:30 -07002350static void __split_huge_page_tail(struct page *head, int tail,
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002351 struct lruvec *lruvec, struct list_head *list)
2352{
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002353 struct page *page_tail = head + tail;
2354
Kirill A. Shutemov8df651c2016-03-15 14:57:30 -07002355 VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002356
2357 /*
Konstantin Khlebnikov605ca5e2018-04-05 16:23:28 -07002358 * Clone page flags before unfreezing refcount.
2359 *
2360	 * A successful get_page_unless_zero() might be followed by a flags
Haitao Shi8958b242020-12-15 20:47:26 -08002361	 * change, for example lock_page(), which sets PG_waiters.
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002362 */
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002363 page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
2364 page_tail->flags |= (head->flags &
2365 ((1L << PG_referenced) |
2366 (1L << PG_swapbacked) |
Huang Ying38d8b4e2017-07-06 15:37:18 -07002367 (1L << PG_swapcache) |
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002368 (1L << PG_mlocked) |
2369 (1L << PG_uptodate) |
2370 (1L << PG_active) |
Johannes Weiner1899ad12018-10-26 15:06:04 -07002371 (1L << PG_workingset) |
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002372 (1L << PG_locked) |
Minchan Kimb8d3c4c2016-01-15 16:55:42 -08002373 (1L << PG_unevictable) |
Catalin Marinas72e6afa2020-07-02 10:19:30 +01002374#ifdef CONFIG_64BIT
2375 (1L << PG_arch_2) |
2376#endif
Minchan Kimb8d3c4c2016-01-15 16:55:42 -08002377 (1L << PG_dirty)));
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002378
Hugh Dickins173d9d92018-11-30 14:10:16 -08002379 /* ->mapping in first tail page is compound_mapcount */
2380 VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
2381 page_tail);
2382 page_tail->mapping = head->mapping;
2383 page_tail->index = head->index + tail;
2384
Konstantin Khlebnikov605ca5e2018-04-05 16:23:28 -07002385 /* Page flags must be visible before we make the page non-compound. */
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002386 smp_wmb();
2387
Konstantin Khlebnikov605ca5e2018-04-05 16:23:28 -07002388 /*
2389 * Clear PageTail before unfreezing page refcount.
2390 *
2391	 * A successful get_page_unless_zero() might be followed by a put_page(),
2392	 * which needs a correct compound_head().
2393 */
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002394 clear_compound_head(page_tail);
2395
Konstantin Khlebnikov605ca5e2018-04-05 16:23:28 -07002396 /* Finally unfreeze refcount. Additional reference from page cache. */
2397 page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) ||
2398 PageSwapCache(head)));
2399
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002400 if (page_is_young(head))
2401 set_page_young(page_tail);
2402 if (page_is_idle(head))
2403 set_page_idle(page_tail);
2404
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002405 page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
Michal Hocko94723aa2018-04-10 16:30:07 -07002406
2407 /*
2408 * always add to the tail because some iterators expect new
2409 * pages to show after the currently processed elements - e.g.
2410 * migrate_pages
2411 */
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002412 lru_add_page_tail(head, page_tail, lruvec, list);
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002413}
2414
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002415static void __split_huge_page(struct page *page, struct list_head *list,
Alex Shib6769832020-12-15 12:33:33 -08002416 pgoff_t end)
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002417{
2418 struct page *head = compound_head(page);
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002419 struct lruvec *lruvec;
Matthew Wilcox (Oracle)41011962019-09-23 15:34:52 -07002420 struct address_space *swap_cache = NULL;
2421 unsigned long offset = 0;
Kirill A. Shutemov8cce5472020-10-15 20:05:36 -07002422 unsigned int nr = thp_nr_pages(head);
Kirill A. Shutemov8df651c2016-03-15 14:57:30 -07002423 int i;
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002424
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002425 /* complete memcg works before add pages to LRU */
Zhou Guanghuibe6c8982021-03-12 21:08:30 -08002426 split_page_memcg(head, nr);
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002427
Matthew Wilcox (Oracle)41011962019-09-23 15:34:52 -07002428 if (PageAnon(head) && PageSwapCache(head)) {
2429 swp_entry_t entry = { .val = page_private(head) };
2430
2431 offset = swp_offset(entry);
2432 swap_cache = swap_address_space(entry);
2433 xa_lock(&swap_cache->i_pages);
2434 }
2435
Ingo Molnarf0953a12021-05-06 18:06:47 -07002436 /* lock lru list/PageCompound, ref frozen by page_ref_freeze */
Alex Shi6168d0d2020-12-15 12:34:29 -08002437 lruvec = lock_page_lruvec(head);
Alex Shib6769832020-12-15 12:33:33 -08002438
Kirill A. Shutemov8cce5472020-10-15 20:05:36 -07002439 for (i = nr - 1; i >= 1; i--) {
Kirill A. Shutemov8df651c2016-03-15 14:57:30 -07002440 __split_huge_page_tail(head, i, lruvec, list);
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002441 /* Some pages can be beyond i_size: drop them from page cache */
2442 if (head[i].index >= end) {
Hugh Dickins2d077d42018-06-01 16:50:45 -07002443 ClearPageDirty(head + i);
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002444 __delete_from_page_cache(head + i, NULL);
Kirill A. Shutemov800d8c62016-07-26 15:26:18 -07002445 if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head))
2446 shmem_uncharge(head->mapping->host, 1);
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002447 put_page(head + i);
Matthew Wilcox (Oracle)41011962019-09-23 15:34:52 -07002448 } else if (!PageAnon(page)) {
2449 __xa_store(&head->mapping->i_pages, head[i].index,
2450 head + i, 0);
2451 } else if (swap_cache) {
2452 __xa_store(&swap_cache->i_pages, offset + i,
2453 head + i, 0);
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002454 }
2455 }
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002456
2457 ClearPageCompound(head);
Alex Shi6168d0d2020-12-15 12:34:29 -08002458 unlock_page_lruvec(lruvec);
Alex Shib6769832020-12-15 12:33:33 -08002459 /* Caller disabled irqs, so they are still disabled here */
Vlastimil Babkaf7da6772019-08-24 17:54:59 -07002460
Kirill A. Shutemov8cce5472020-10-15 20:05:36 -07002461 split_page_owner(head, nr);
Vlastimil Babkaf7da6772019-08-24 17:54:59 -07002462
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002463 /* See comment in __split_huge_page_tail() */
2464 if (PageAnon(head)) {
Matthew Wilcoxaa5dc072017-12-04 10:16:10 -05002465 /* Additional pin to swap cache */
Matthew Wilcox (Oracle)41011962019-09-23 15:34:52 -07002466 if (PageSwapCache(head)) {
Huang Ying38d8b4e2017-07-06 15:37:18 -07002467 page_ref_add(head, 2);
Matthew Wilcox (Oracle)41011962019-09-23 15:34:52 -07002468 xa_unlock(&swap_cache->i_pages);
2469 } else {
Huang Ying38d8b4e2017-07-06 15:37:18 -07002470 page_ref_inc(head);
Matthew Wilcox (Oracle)41011962019-09-23 15:34:52 -07002471 }
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002472 } else {
Matthew Wilcoxaa5dc072017-12-04 10:16:10 -05002473 /* Additional pin to page cache */
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002474 page_ref_add(head, 2);
Matthew Wilcoxb93b0162018-04-10 16:36:56 -07002475 xa_unlock(&head->mapping->i_pages);
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002476 }
Alex Shib6769832020-12-15 12:33:33 -08002477 local_irq_enable();
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002478
Kirill A. Shutemov8cce5472020-10-15 20:05:36 -07002479 remap_page(head, nr);
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002480
Huang Yingc4f9c702020-10-15 20:06:07 -07002481 if (PageSwapCache(head)) {
2482 swp_entry_t entry = { .val = page_private(head) };
2483
2484 split_swap_cluster(entry);
2485 }
2486
Kirill A. Shutemov8cce5472020-10-15 20:05:36 -07002487 for (i = 0; i < nr; i++) {
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002488 struct page *subpage = head + i;
2489 if (subpage == page)
2490 continue;
2491 unlock_page(subpage);
2492
2493 /*
2494 * Subpages may be freed if there wasn't any mapping
2495 * like if add_to_swap() is running on a lru page that
2496 * had its mapping zapped. And freeing these pages
2497 * requires taking the lru_lock so we do the put_page
2498 * of the tail pages after the split is complete.
2499 */
2500 put_page(subpage);
2501 }
2502}
2503
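/*
 * Illustrative accounting sketch for total_mapcount() below (not exhaustive):
 * for an anon THP that is PMD-mapped exactly once and has no PTE mappings,
 * compound_mapcount() is 1 and every subpage _mapcount reads as -1, so the
 * sums below yield a total mapcount of 1.
 */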
Kirill A. Shutemovb20ce5e2016-01-15 16:54:37 -08002504int total_mapcount(struct page *page)
2505{
Kirill A. Shutemov86b562b2020-10-15 20:05:33 -07002506 int i, compound, nr, ret;
Kirill A. Shutemovb20ce5e2016-01-15 16:54:37 -08002507
2508 VM_BUG_ON_PAGE(PageTail(page), page);
2509
2510 if (likely(!PageCompound(page)))
2511 return atomic_read(&page->_mapcount) + 1;
2512
Kirill A. Shutemovdd78fed2016-07-26 15:25:26 -07002513 compound = compound_mapcount(page);
Kirill A. Shutemov86b562b2020-10-15 20:05:33 -07002514 nr = compound_nr(page);
Kirill A. Shutemovb20ce5e2016-01-15 16:54:37 -08002515 if (PageHuge(page))
Kirill A. Shutemovdd78fed2016-07-26 15:25:26 -07002516 return compound;
2517 ret = compound;
Kirill A. Shutemov86b562b2020-10-15 20:05:33 -07002518 for (i = 0; i < nr; i++)
Kirill A. Shutemovb20ce5e2016-01-15 16:54:37 -08002519 ret += atomic_read(&page[i]._mapcount) + 1;
Kirill A. Shutemovdd78fed2016-07-26 15:25:26 -07002520	/* File pages have compound_mapcount included in _mapcount */
2521 if (!PageAnon(page))
Kirill A. Shutemov86b562b2020-10-15 20:05:33 -07002522 return ret - compound * nr;
Kirill A. Shutemovb20ce5e2016-01-15 16:54:37 -08002523 if (PageDoubleMap(page))
Kirill A. Shutemov86b562b2020-10-15 20:05:33 -07002524 ret -= nr;
Kirill A. Shutemovb20ce5e2016-01-15 16:54:37 -08002525 return ret;
2526}
2527
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002528/*
Andrea Arcangeli6d0a07e2016-05-12 15:42:25 -07002529 * This calculates accurately how many mappings a transparent hugepage
2530 * has (unlike page_mapcount() which isn't fully accurate). This full
2531 * accuracy is primarily needed to know if copy-on-write faults can
2532 * reuse the page and change the mapping to read-write instead of
2533 * copying it. At the same time this returns the total_mapcount too.
2534 *
2535 * The function returns the highest mapcount any one of the subpages
2536 * has. If the return value is one, even if different processes are
2537 * mapping different subpages of the transparent hugepage, they can
2538 * all reuse it, because each process is reusing a different subpage.
2539 *
2540 * The total_mapcount is instead counting all virtual mappings of the
2541 * subpages. If the total_mapcount is equal to "one", it tells the
2542 * caller all mappings belong to the same "mm" and in turn the
2543 * anon_vma of the transparent hugepage can become the vma->anon_vma
2544 * local one as no other process may be mapping any of the subpages.
2545 *
2546 * It would be more accurate to replace page_mapcount() with
2547 * page_trans_huge_mapcount(), however we only use
2548 * page_trans_huge_mapcount() in the copy-on-write faults where we
2549 * need full accuracy to avoid breaking page pinning, because
2550 * page_trans_huge_mapcount() is slower than page_mapcount().
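 *
 * Illustrative example (numbers assume a 2MB THP with 512 subpages):
 * if every subpage is PTE-mapped exactly once and there is no PMD
 * mapping (compound_mapcount() == 0, PageDoubleMap clear), the loop
 * below sees a mapcount of 1 on each subpage, so the function returns
 * 1 while *total_mapcount is set to 512.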
2551 */
2552int page_trans_huge_mapcount(struct page *page, int *total_mapcount)
2553{
2554 int i, ret, _total_mapcount, mapcount;
2555
2556 /* hugetlbfs shouldn't call it */
2557 VM_BUG_ON_PAGE(PageHuge(page), page);
2558
2559 if (likely(!PageTransCompound(page))) {
2560 mapcount = atomic_read(&page->_mapcount) + 1;
2561 if (total_mapcount)
2562 *total_mapcount = mapcount;
2563 return mapcount;
2564 }
2565
2566 page = compound_head(page);
2567
2568 _total_mapcount = ret = 0;
Matthew Wilcox (Oracle)65dfe3c2020-10-15 20:05:39 -07002569 for (i = 0; i < thp_nr_pages(page); i++) {
Andrea Arcangeli6d0a07e2016-05-12 15:42:25 -07002570 mapcount = atomic_read(&page[i]._mapcount) + 1;
2571 ret = max(ret, mapcount);
2572 _total_mapcount += mapcount;
2573 }
2574 if (PageDoubleMap(page)) {
2575 ret -= 1;
Matthew Wilcox (Oracle)65dfe3c2020-10-15 20:05:39 -07002576 _total_mapcount -= thp_nr_pages(page);
Andrea Arcangeli6d0a07e2016-05-12 15:42:25 -07002577 }
2578 mapcount = compound_mapcount(page);
2579 ret += mapcount;
2580 _total_mapcount += mapcount;
2581 if (total_mapcount)
2582 *total_mapcount = _total_mapcount;
2583 return ret;
2584}
2585
Huang Yingb8f593c2017-07-06 15:37:28 -07002586/* Racy check whether the huge page can be split */
2587bool can_split_huge_page(struct page *page, int *pextra_pins)
2588{
2589 int extra_pins;
2590
Matthew Wilcoxaa5dc072017-12-04 10:16:10 -05002591 /* Additional pins from page cache */
Huang Yingb8f593c2017-07-06 15:37:28 -07002592 if (PageAnon(page))
Matthew Wilcox (Oracle)e2333da2020-10-15 20:05:43 -07002593 extra_pins = PageSwapCache(page) ? thp_nr_pages(page) : 0;
Huang Yingb8f593c2017-07-06 15:37:28 -07002594 else
Matthew Wilcox (Oracle)e2333da2020-10-15 20:05:43 -07002595 extra_pins = thp_nr_pages(page);
Huang Yingb8f593c2017-07-06 15:37:28 -07002596 if (pextra_pins)
2597 *pextra_pins = extra_pins;
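	/*
	 * The split can (racily) proceed only when the remaining references
	 * are the mappings, the page/swap cache pins counted above and the
	 * single pin held by the caller (hence the "- 1" below).
	 */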
2598 return total_mapcount(page) == page_count(page) - extra_pins - 1;
2599}
2600
Andrea Arcangeli6d0a07e2016-05-12 15:42:25 -07002601/*
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002602 * This function splits huge page into normal pages. @page can point to any
2603 * subpage of huge page to split. Split doesn't change the position of @page.
2604 *
2605 * The caller must hold a pin on the @page; otherwise the split fails with -EBUSY.
2606 * The huge page must be locked.
2607 *
2608 * If @list is null, tail pages will be added to LRU list, otherwise, to @list.
2609 *
2610 * Both head page and tail pages will inherit mapping, flags, and so on from
2611 * the hugepage.
2612 *
2613 * The GUP pin and PG_locked are transferred to @page. The remaining subpages
2614 * can be freed if they are not mapped.
2615 *
2616 * Returns 0 if the hugepage is split successfully.
2617 * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under
2618 * us.
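 *
 * A minimal calling sketch (illustrative only; it assumes the caller already
 * holds a reference on @page, as e.g. deferred_split_scan() below does):
 *
 *	if (!trylock_page(page))
 *		return -EAGAIN;
 *	ret = split_huge_page(page);	- wrapper for split_huge_page_to_list(page, NULL)
 *	unlock_page(page);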
2619 */
2620int split_huge_page_to_list(struct page *page, struct list_head *list)
2621{
2622 struct page *head = compound_head(page);
Wei Yanga8803e62020-01-30 22:14:32 -08002623 struct deferred_split *ds_queue = get_deferred_split_queue(head);
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002624 struct anon_vma *anon_vma = NULL;
2625 struct address_space *mapping = NULL;
Yang Shi504e0702021-06-15 18:24:07 -07002626 int extra_pins, ret;
Hugh Dickins006d3ff2018-11-30 14:10:21 -08002627 pgoff_t end;
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002628
Wei Yangcb829622020-01-30 22:14:29 -08002629 VM_BUG_ON_PAGE(is_huge_zero_page(head), head);
Wei Yanga8803e62020-01-30 22:14:32 -08002630 VM_BUG_ON_PAGE(!PageLocked(head), head);
2631 VM_BUG_ON_PAGE(!PageCompound(head), head);
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002632
Wei Yanga8803e62020-01-30 22:14:32 -08002633 if (PageWriteback(head))
Huang Ying59807682017-09-06 16:22:34 -07002634 return -EBUSY;
2635
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002636 if (PageAnon(head)) {
2637 /*
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07002638 * The caller does not necessarily hold an mmap_lock that would
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002639		 * prevent the anon_vma disappearing, so we first take a
2640 * reference to it and then lock the anon_vma for write. This
2641 * is similar to page_lock_anon_vma_read except the write lock
2642 * is taken to serialise against parallel split or collapse
2643 * operations.
2644 */
2645 anon_vma = page_get_anon_vma(head);
2646 if (!anon_vma) {
2647 ret = -EBUSY;
2648 goto out;
2649 }
Hugh Dickins006d3ff2018-11-30 14:10:21 -08002650 end = -1;
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002651 mapping = NULL;
2652 anon_vma_lock_write(anon_vma);
2653 } else {
2654 mapping = head->mapping;
2655
2656 /* Truncated ? */
2657 if (!mapping) {
2658 ret = -EBUSY;
2659 goto out;
2660 }
2661
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002662 anon_vma = NULL;
2663 i_mmap_lock_read(mapping);
Hugh Dickins006d3ff2018-11-30 14:10:21 -08002664
2665 /*
2666		 * __split_huge_page() may need to trim off pages beyond EOF:
2667 * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
2668 * which cannot be nested inside the page tree lock. So note
2669 * end now: i_size itself may be changed at any moment, but
2670 * head page lock is good enough to serialize the trimming.
2671 */
2672 end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002673 }
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002674
2675 /*
Hugh Dickins906f9cd2018-11-30 14:10:13 -08002676 * Racy check if we can split the page, before unmap_page() will
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002677 * split PMDs
2678 */
Huang Yingb8f593c2017-07-06 15:37:28 -07002679 if (!can_split_huge_page(head, &extra_pins)) {
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002680 ret = -EBUSY;
2681 goto out_unlock;
2682 }
2683
Hugh Dickins906f9cd2018-11-30 14:10:13 -08002684 unmap_page(head);
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002685
Alex Shib6769832020-12-15 12:33:33 -08002686 /* block interrupt reentry in xa_lock and spinlock */
2687 local_irq_disable();
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002688 if (mapping) {
Matthew Wilcoxaa5dc072017-12-04 10:16:10 -05002689 XA_STATE(xas, &mapping->i_pages, page_index(head));
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002690
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002691 /*
Matthew Wilcoxaa5dc072017-12-04 10:16:10 -05002692 * Check if the head page is present in page cache.
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002693		 * We assume all tails are present too, if the head is there.
2694 */
Matthew Wilcoxaa5dc072017-12-04 10:16:10 -05002695 xa_lock(&mapping->i_pages);
2696 if (xas_load(&xas) != head)
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002697 goto fail;
2698 }
2699
Joonsoo Kim0139aa72016-05-19 17:10:49 -07002700 /* Prevent deferred_split_scan() touching ->_refcount */
Yang Shi364c1ee2019-09-23 15:38:06 -07002701 spin_lock(&ds_queue->split_queue_lock);
Yang Shi504e0702021-06-15 18:24:07 -07002702 if (page_ref_freeze(head, 1 + extra_pins)) {
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002703 if (!list_empty(page_deferred_list(head))) {
Yang Shi364c1ee2019-09-23 15:38:06 -07002704 ds_queue->split_queue_len--;
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002705 list_del(page_deferred_list(head));
2706 }
Wei Yangafb97172020-01-30 22:14:35 -08002707 spin_unlock(&ds_queue->split_queue_lock);
Kirill A. Shutemov06d3eff2019-10-18 20:20:30 -07002708 if (mapping) {
Muchun Songbf9ecea2021-02-24 12:03:27 -08002709 int nr = thp_nr_pages(head);
2710
Wei Yanga8803e62020-01-30 22:14:32 -08002711 if (PageSwapBacked(head))
Muchun Song57b28472021-02-24 12:03:31 -08002712 __mod_lruvec_page_state(head, NR_SHMEM_THPS,
2713 -nr);
Kirill A. Shutemov06d3eff2019-10-18 20:20:30 -07002714 else
Muchun Songbf9ecea2021-02-24 12:03:27 -08002715 __mod_lruvec_page_state(head, NR_FILE_THPS,
2716 -nr);
Kirill A. Shutemov06d3eff2019-10-18 20:20:30 -07002717 }
2718
Alex Shib6769832020-12-15 12:33:33 -08002719 __split_huge_page(page, list, end);
Huang Yingc4f9c702020-10-15 20:06:07 -07002720 ret = 0;
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002721 } else {
Yang Shi364c1ee2019-09-23 15:38:06 -07002722 spin_unlock(&ds_queue->split_queue_lock);
Yang Shi504e0702021-06-15 18:24:07 -07002723fail:
2724 if (mapping)
Matthew Wilcoxb93b0162018-04-10 16:36:56 -07002725 xa_unlock(&mapping->i_pages);
Alex Shib6769832020-12-15 12:33:33 -08002726 local_irq_enable();
Kirill A. Shutemov8cce5472020-10-15 20:05:36 -07002727 remap_page(head, thp_nr_pages(head));
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002728 ret = -EBUSY;
2729 }
2730
2731out_unlock:
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002732 if (anon_vma) {
2733 anon_vma_unlock_write(anon_vma);
2734 put_anon_vma(anon_vma);
2735 }
2736 if (mapping)
2737 i_mmap_unlock_read(mapping);
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002738out:
2739 count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
2740 return ret;
2741}
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002742
2743void free_transhuge_page(struct page *page)
2744{
Yang Shi87eaceb2019-09-23 15:38:15 -07002745 struct deferred_split *ds_queue = get_deferred_split_queue(page);
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002746 unsigned long flags;
2747
Yang Shi364c1ee2019-09-23 15:38:06 -07002748 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002749 if (!list_empty(page_deferred_list(page))) {
Yang Shi364c1ee2019-09-23 15:38:06 -07002750 ds_queue->split_queue_len--;
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002751 list_del(page_deferred_list(page));
2752 }
Yang Shi364c1ee2019-09-23 15:38:06 -07002753 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002754 free_compound_page(page);
2755}
2756
2757void deferred_split_huge_page(struct page *page)
2758{
Yang Shi87eaceb2019-09-23 15:38:15 -07002759 struct deferred_split *ds_queue = get_deferred_split_queue(page);
2760#ifdef CONFIG_MEMCG
Roman Gushchinbcfe06b2020-12-01 13:58:27 -08002761 struct mem_cgroup *memcg = page_memcg(compound_head(page));
Yang Shi87eaceb2019-09-23 15:38:15 -07002762#endif
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002763 unsigned long flags;
2764
2765 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
2766
Yang Shi87eaceb2019-09-23 15:38:15 -07002767 /*
2768	 * The try_to_unmap() in the page reclaim path might reach here too;
2769	 * this could race with us and corrupt the deferred split queue.
2770	 * And, if page reclaim is already handling the same page, it is
2771	 * unnecessary to handle it again in the shrinker.
2772 *
2773 * Check PageSwapCache to determine if the page is being
2774 * handled by page reclaim since THP swap would add the page into
2775 * swap cache before calling try_to_unmap().
2776 */
2777 if (PageSwapCache(page))
2778 return;
2779
Yang Shi364c1ee2019-09-23 15:38:06 -07002780 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002781 if (list_empty(page_deferred_list(page))) {
Kirill A. Shutemovf9719a02016-03-17 14:18:45 -07002782 count_vm_event(THP_DEFERRED_SPLIT_PAGE);
Yang Shi364c1ee2019-09-23 15:38:06 -07002783 list_add_tail(page_deferred_list(page), &ds_queue->split_queue);
2784 ds_queue->split_queue_len++;
Yang Shi87eaceb2019-09-23 15:38:15 -07002785#ifdef CONFIG_MEMCG
2786 if (memcg)
Yang Shi2bfd3632021-05-04 18:36:11 -07002787 set_shrinker_bit(memcg, page_to_nid(page),
2788 deferred_split_shrinker.id);
Yang Shi87eaceb2019-09-23 15:38:15 -07002789#endif
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002790 }
Yang Shi364c1ee2019-09-23 15:38:06 -07002791 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002792}
2793
2794static unsigned long deferred_split_count(struct shrinker *shrink,
2795 struct shrink_control *sc)
2796{
Kirill A. Shutemova3d0a9182016-02-02 16:57:08 -08002797 struct pglist_data *pgdata = NODE_DATA(sc->nid);
Yang Shi364c1ee2019-09-23 15:38:06 -07002798 struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
Yang Shi87eaceb2019-09-23 15:38:15 -07002799
2800#ifdef CONFIG_MEMCG
2801 if (sc->memcg)
2802 ds_queue = &sc->memcg->deferred_split_queue;
2803#endif
Yang Shi364c1ee2019-09-23 15:38:06 -07002804 return READ_ONCE(ds_queue->split_queue_len);
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002805}
2806
2807static unsigned long deferred_split_scan(struct shrinker *shrink,
2808 struct shrink_control *sc)
2809{
Kirill A. Shutemova3d0a9182016-02-02 16:57:08 -08002810 struct pglist_data *pgdata = NODE_DATA(sc->nid);
Yang Shi364c1ee2019-09-23 15:38:06 -07002811 struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002812 unsigned long flags;
2813 LIST_HEAD(list), *pos, *next;
2814 struct page *page;
2815 int split = 0;
2816
Yang Shi87eaceb2019-09-23 15:38:15 -07002817#ifdef CONFIG_MEMCG
2818 if (sc->memcg)
2819 ds_queue = &sc->memcg->deferred_split_queue;
2820#endif
2821
Yang Shi364c1ee2019-09-23 15:38:06 -07002822 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002823 /* Take pin on all head pages to avoid freeing them under us */
Yang Shi364c1ee2019-09-23 15:38:06 -07002824 list_for_each_safe(pos, next, &ds_queue->split_queue) {
Miaohe Lindfe5c512021-06-30 18:47:46 -07002825 page = list_entry((void *)pos, struct page, deferred_list);
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002826 page = compound_head(page);
Kirill A. Shutemove3ae1952016-02-02 16:57:15 -08002827 if (get_page_unless_zero(page)) {
2828 list_move(page_deferred_list(page), &list);
2829 } else {
2830 /* We lost race with put_compound_page() */
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002831 list_del_init(page_deferred_list(page));
Yang Shi364c1ee2019-09-23 15:38:06 -07002832 ds_queue->split_queue_len--;
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002833 }
Kirill A. Shutemove3ae1952016-02-02 16:57:15 -08002834 if (!--sc->nr_to_scan)
2835 break;
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002836 }
Yang Shi364c1ee2019-09-23 15:38:06 -07002837 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002838
2839 list_for_each_safe(pos, next, &list) {
Miaohe Lindfe5c512021-06-30 18:47:46 -07002840 page = list_entry((void *)pos, struct page, deferred_list);
Kirill A. Shutemovfa41b902018-03-22 16:17:31 -07002841 if (!trylock_page(page))
2842 goto next;
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002843 /* split_huge_page() removes page from list on success */
2844 if (!split_huge_page(page))
2845 split++;
2846 unlock_page(page);
Kirill A. Shutemovfa41b902018-03-22 16:17:31 -07002847next:
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002848 put_page(page);
2849 }
2850
Yang Shi364c1ee2019-09-23 15:38:06 -07002851 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
2852 list_splice_tail(&list, &ds_queue->split_queue);
2853 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002854
Kirill A. Shutemovcb8d68e2016-02-02 16:57:12 -08002855 /*
2856	 * Stop the shrinker if we didn't split any page and the queue is empty.
2857 * This can happen if pages were freed under us.
2858 */
Yang Shi364c1ee2019-09-23 15:38:06 -07002859 if (!split && list_empty(&ds_queue->split_queue))
Kirill A. Shutemovcb8d68e2016-02-02 16:57:12 -08002860 return SHRINK_STOP;
2861 return split;
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002862}
2863
2864static struct shrinker deferred_split_shrinker = {
2865 .count_objects = deferred_split_count,
2866 .scan_objects = deferred_split_scan,
2867 .seeks = DEFAULT_SEEKS,
Yang Shi87eaceb2019-09-23 15:38:15 -07002868 .flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE |
2869 SHRINKER_NONSLAB,
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002870};
Kirill A. Shutemov49071d42016-01-15 16:54:40 -08002871
2872#ifdef CONFIG_DEBUG_FS
Zi Yanfa6c0232021-05-04 18:34:23 -07002873static void split_huge_pages_all(void)
Kirill A. Shutemov49071d42016-01-15 16:54:40 -08002874{
2875 struct zone *zone;
2876 struct page *page;
2877 unsigned long pfn, max_zone_pfn;
2878 unsigned long total = 0, split = 0;
2879
Zi Yanfa6c0232021-05-04 18:34:23 -07002880 pr_debug("Split all THPs\n");
Kirill A. Shutemov49071d42016-01-15 16:54:40 -08002881 for_each_populated_zone(zone) {
2882 max_zone_pfn = zone_end_pfn(zone);
2883 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
2884 if (!pfn_valid(pfn))
2885 continue;
2886
2887 page = pfn_to_page(pfn);
2888 if (!get_page_unless_zero(page))
2889 continue;
2890
2891 if (zone != page_zone(page))
2892 goto next;
2893
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002894 if (!PageHead(page) || PageHuge(page) || !PageLRU(page))
Kirill A. Shutemov49071d42016-01-15 16:54:40 -08002895 goto next;
2896
2897 total++;
2898 lock_page(page);
2899 if (!split_huge_page(page))
2900 split++;
2901 unlock_page(page);
2902next:
2903 put_page(page);
Zi Yanfa6c0232021-05-04 18:34:23 -07002904 cond_resched();
Kirill A. Shutemov49071d42016-01-15 16:54:40 -08002905 }
2906 }
2907
Zi Yanfa6c0232021-05-04 18:34:23 -07002908 pr_debug("%lu of %lu THP split\n", split, total);
Kirill A. Shutemov49071d42016-01-15 16:54:40 -08002909}
Zi Yanfa6c0232021-05-04 18:34:23 -07002910
2911static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma)
2912{
2913 return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) ||
2914 is_vm_hugetlb_page(vma);
2915}
2916
2917static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
2918 unsigned long vaddr_end)
2919{
2920 int ret = 0;
2921 struct task_struct *task;
2922 struct mm_struct *mm;
2923 unsigned long total = 0, split = 0;
2924 unsigned long addr;
2925
2926 vaddr_start &= PAGE_MASK;
2927 vaddr_end &= PAGE_MASK;
2928
2929 /* Find the task_struct from pid */
2930 rcu_read_lock();
2931 task = find_task_by_vpid(pid);
2932 if (!task) {
2933 rcu_read_unlock();
2934 ret = -ESRCH;
2935 goto out;
2936 }
2937 get_task_struct(task);
2938 rcu_read_unlock();
2939
2940 /* Find the mm_struct */
2941 mm = get_task_mm(task);
2942 put_task_struct(task);
2943
2944 if (!mm) {
2945 ret = -EINVAL;
2946 goto out;
2947 }
2948
2949 pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx]\n",
2950 pid, vaddr_start, vaddr_end);
2951
2952 mmap_read_lock(mm);
2953 /*
2954 * always increase addr by PAGE_SIZE, since we could have a PTE page
2955 * table filled with PTE-mapped THPs, each of which is distinct.
2956 */
2957 for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) {
2958 struct vm_area_struct *vma = find_vma(mm, addr);
2959 unsigned int follflags;
2960 struct page *page;
2961
2962 if (!vma || addr < vma->vm_start)
2963 break;
2964
2965 /* skip special VMA and hugetlb VMA */
2966 if (vma_not_suitable_for_thp_split(vma)) {
2967 addr = vma->vm_end;
2968 continue;
2969 }
2970
2971 /* FOLL_DUMP to ignore special (like zero) pages */
2972 follflags = FOLL_GET | FOLL_DUMP;
2973 page = follow_page(vma, addr, follflags);
2974
2975 if (IS_ERR(page))
2976 continue;
2977 if (!page)
2978 continue;
2979
2980 if (!is_transparent_hugepage(page))
2981 goto next;
2982
2983 total++;
2984 if (!can_split_huge_page(compound_head(page), NULL))
2985 goto next;
2986
2987 if (!trylock_page(page))
2988 goto next;
2989
2990 if (!split_huge_page(page))
2991 split++;
2992
2993 unlock_page(page);
2994next:
2995 put_page(page);
2996 cond_resched();
2997 }
2998 mmap_read_unlock(mm);
2999 mmput(mm);
3000
3001 pr_debug("%lu of %lu THP split\n", split, total);
3002
3003out:
3004 return ret;
3005}
3006
Zi Yanfbe37502021-05-04 18:34:26 -07003007static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
3008 pgoff_t off_end)
3009{
3010 struct filename *file;
3011 struct file *candidate;
3012 struct address_space *mapping;
3013 int ret = -EINVAL;
3014 pgoff_t index;
3015 int nr_pages = 1;
3016 unsigned long total = 0, split = 0;
3017
3018 file = getname_kernel(file_path);
3019 if (IS_ERR(file))
3020 return ret;
3021
3022 candidate = file_open_name(file, O_RDONLY, 0);
3023 if (IS_ERR(candidate))
3024 goto out;
3025
3026 pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n",
3027 file_path, off_start, off_end);
3028
3029 mapping = candidate->f_mapping;
3030
3031 for (index = off_start; index < off_end; index += nr_pages) {
3032 struct page *fpage = pagecache_get_page(mapping, index,
3033 FGP_ENTRY | FGP_HEAD, 0);
3034
3035 nr_pages = 1;
3036 if (xa_is_value(fpage) || !fpage)
3037 continue;
3038
3039 if (!is_transparent_hugepage(fpage))
3040 goto next;
3041
3042 total++;
3043 nr_pages = thp_nr_pages(fpage);
3044
3045 if (!trylock_page(fpage))
3046 goto next;
3047
3048 if (!split_huge_page(fpage))
3049 split++;
3050
3051 unlock_page(fpage);
3052next:
3053 put_page(fpage);
3054 cond_resched();
3055 }
3056
3057 filp_close(candidate, NULL);
3058 ret = 0;
3059
3060 pr_debug("%lu of %lu file-backed THP split\n", split, total);
3061out:
3062 putname(file);
3063 return ret;
3064}
3065
Zi Yanfa6c0232021-05-04 18:34:23 -07003066#define MAX_INPUT_BUF_SZ 255
3067
3068static ssize_t split_huge_pages_write(struct file *file, const char __user *buf,
3069 size_t count, loff_t *ppops)
3070{
3071 static DEFINE_MUTEX(split_debug_mutex);
3072 ssize_t ret;
Zi Yanfbe37502021-05-04 18:34:26 -07003073 /* hold pid, start_vaddr, end_vaddr or file_path, off_start, off_end */
3074 char input_buf[MAX_INPUT_BUF_SZ];
Zi Yanfa6c0232021-05-04 18:34:23 -07003075 int pid;
3076 unsigned long vaddr_start, vaddr_end;
3077
3078 ret = mutex_lock_interruptible(&split_debug_mutex);
3079 if (ret)
3080 return ret;
3081
3082 ret = -EFAULT;
3083
3084 memset(input_buf, 0, MAX_INPUT_BUF_SZ);
3085 if (copy_from_user(input_buf, buf, min_t(size_t, count, MAX_INPUT_BUF_SZ)))
3086 goto out;
3087
3088 input_buf[MAX_INPUT_BUF_SZ - 1] = '\0';
Zi Yanfbe37502021-05-04 18:34:26 -07003089
3090 if (input_buf[0] == '/') {
3091 char *tok;
3092 char *buf = input_buf;
3093 char file_path[MAX_INPUT_BUF_SZ];
3094 pgoff_t off_start = 0, off_end = 0;
3095 size_t input_len = strlen(input_buf);
3096
3097 tok = strsep(&buf, ",");
3098 if (tok) {
3099 strncpy(file_path, tok, MAX_INPUT_BUF_SZ);
3100 } else {
3101 ret = -EINVAL;
3102 goto out;
3103 }
3104
3105 ret = sscanf(buf, "0x%lx,0x%lx", &off_start, &off_end);
3106 if (ret != 2) {
3107 ret = -EINVAL;
3108 goto out;
3109 }
3110 ret = split_huge_pages_in_file(file_path, off_start, off_end);
3111 if (!ret)
3112 ret = input_len;
3113
3114 goto out;
3115 }
3116
Zi Yanfa6c0232021-05-04 18:34:23 -07003117 ret = sscanf(input_buf, "%d,0x%lx,0x%lx", &pid, &vaddr_start, &vaddr_end);
3118 if (ret == 1 && pid == 1) {
3119 split_huge_pages_all();
3120 ret = strlen(input_buf);
3121 goto out;
3122 } else if (ret != 3) {
3123 ret = -EINVAL;
3124 goto out;
3125 }
3126
3127 ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end);
3128 if (!ret)
3129 ret = strlen(input_buf);
3130out:
3131 mutex_unlock(&split_debug_mutex);
3132 return ret;
3133
3134}
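
/*
 * Illustrative usage of the debugfs knob implemented above (the file is
 * created below as <debugfs>/split_huge_pages, typically mounted at
 * /sys/kernel/debug). The paths, pid and ranges here are made-up examples;
 * offsets and addresses must be hex with a 0x prefix, matching the sscanf()
 * formats above:
 *
 *	echo 1 > /sys/kernel/debug/split_huge_pages
 *		split every THP in the system (split_huge_pages_all())
 *	echo "1234,0x400000,0x800000" > /sys/kernel/debug/split_huge_pages
 *		split THPs mapped by pid 1234 in that virtual address range
 *	echo "/mnt/test/file,0x0,0x200" > /sys/kernel/debug/split_huge_pages
 *		split file-backed THPs in that page-offset range
 */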
3135
3136static const struct file_operations split_huge_pages_fops = {
3137 .owner = THIS_MODULE,
3138 .write = split_huge_pages_write,
3139 .llseek = no_llseek,
3140};
Kirill A. Shutemov49071d42016-01-15 16:54:40 -08003141
3142static int __init split_huge_pages_debugfs(void)
3143{
Greg Kroah-Hartmand9f79792019-03-05 15:46:09 -08003144 debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
3145 &split_huge_pages_fops);
Kirill A. Shutemov49071d42016-01-15 16:54:40 -08003146 return 0;
3147}
3148late_initcall(split_huge_pages_debugfs);
3149#endif
Zi Yan616b8372017-09-08 16:10:57 -07003150
3151#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
3152void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
3153 struct page *page)
3154{
3155 struct vm_area_struct *vma = pvmw->vma;
3156 struct mm_struct *mm = vma->vm_mm;
3157 unsigned long address = pvmw->address;
3158 pmd_t pmdval;
3159 swp_entry_t entry;
Naoya Horiguchiab6e3d02017-09-08 16:11:04 -07003160 pmd_t pmdswp;
Zi Yan616b8372017-09-08 16:10:57 -07003161
3162 if (!(pvmw->pmd && !pvmw->pte))
3163 return;
3164
Zi Yan616b8372017-09-08 16:10:57 -07003165 flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
Huang Ying8a8683a2020-03-05 22:28:29 -08003166 pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
Zi Yan616b8372017-09-08 16:10:57 -07003167 if (pmd_dirty(pmdval))
3168 set_page_dirty(page);
3169 entry = make_migration_entry(page, pmd_write(pmdval));
Naoya Horiguchiab6e3d02017-09-08 16:11:04 -07003170 pmdswp = swp_entry_to_pmd(entry);
3171 if (pmd_soft_dirty(pmdval))
3172 pmdswp = pmd_swp_mksoft_dirty(pmdswp);
3173 set_pmd_at(mm, address, pvmw->pmd, pmdswp);
Zi Yan616b8372017-09-08 16:10:57 -07003174 page_remove_rmap(page, true);
3175 put_page(page);
Zi Yan616b8372017-09-08 16:10:57 -07003176}
3177
3178void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
3179{
3180 struct vm_area_struct *vma = pvmw->vma;
3181 struct mm_struct *mm = vma->vm_mm;
3182 unsigned long address = pvmw->address;
3183 unsigned long mmun_start = address & HPAGE_PMD_MASK;
3184 pmd_t pmde;
3185 swp_entry_t entry;
3186
3187 if (!(pvmw->pmd && !pvmw->pte))
3188 return;
3189
3190 entry = pmd_to_swp_entry(*pvmw->pmd);
3191 get_page(new);
3192 pmde = pmd_mkold(mk_huge_pmd(new, vma->vm_page_prot));
Naoya Horiguchiab6e3d02017-09-08 16:11:04 -07003193 if (pmd_swp_soft_dirty(*pvmw->pmd))
3194 pmde = pmd_mksoft_dirty(pmde);
Zi Yan616b8372017-09-08 16:10:57 -07003195 if (is_write_migration_entry(entry))
Linus Torvaldsf55e1012017-11-29 09:01:01 -08003196 pmde = maybe_pmd_mkwrite(pmde, vma);
Peter Xu8f34f1e2021-06-30 18:49:02 -07003197 if (pmd_swp_uffd_wp(*pvmw->pmd))
3198 pmde = pmd_wrprotect(pmd_mkuffd_wp(pmde));
Zi Yan616b8372017-09-08 16:10:57 -07003199
3200 flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE);
Naoya Horiguchie71769a2018-04-20 14:55:45 -07003201 if (PageAnon(new))
3202 page_add_anon_rmap(new, vma, mmun_start, true);
3203 else
3204 page_add_file_rmap(new, true);
Zi Yan616b8372017-09-08 16:10:57 -07003205 set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
Kirill A. Shutemove125fe42018-10-05 15:51:41 -07003206 if ((vma->vm_flags & VM_LOCKED) && !PageDoubleMap(new))
Zi Yan616b8372017-09-08 16:10:57 -07003207 mlock_vma_page(new);
3208 update_mmu_cache_pmd(vma, address, pvmw->pmd);
3209}
3210#endif