// SPDX-License-Identifier: GPL-2.0-only
/*
 * This kernel test validates architecture page table helpers and
 * accessors and helps in verifying their continued compliance with
 * expected generic MM semantics.
 *
 * Copyright (C) 2019 ARM Ltd.
 *
 * Author: Anshuman Khandual <anshuman.khandual@arm.com>
 */
#define pr_fmt(fmt) "debug_vm_pgtable: [%-25s]: " fmt, __func__

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/printk.h>
#include <linux/pgtable.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/start_kernel.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * Please refer to Documentation/vm/arch_pgtable_helpers.rst for the semantics
 * expectations that are being validated here. All future changes in here
 * or the documentation need to be in sync.
 */

#define VMFLAGS	(VM_READ|VM_WRITE|VM_EXEC)

/*
 * On s390, the lower 4 bits are used to identify the page table entry type.
 * But these bits might affect the ability to clear entries with pxx_clear()
 * because of how dynamic page table folding works on s390. So while loading
 * up the entries do not change the lower 4 bits. This does not affect any
 * other platform. Also avoid the 62nd bit on ppc64, which is used to mark
 * a pte entry.
 */
#define S390_SKIP_MASK		GENMASK(3, 0)
#if __BITS_PER_LONG == 64
#define PPC64_SKIP_MASK		GENMASK(62, 62)
#else
#define PPC64_SKIP_MASK		0x0
#endif
#define ARCH_SKIP_MASK (S390_SKIP_MASK | PPC64_SKIP_MASK)
#define RANDOM_ORVALUE (GENMASK(BITS_PER_LONG - 1, 0) & ~ARCH_SKIP_MASK)
#define RANDOM_NZVALUE	GENMASK(7, 0)

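/*
 * The pxx_basic_tests() below construct an entry from a kernel pfn and then
 * exercise complementary helper pairs (mkyoung/mkold, mkdirty/mkclean,
 * mkwrite/wrprotect), warning whenever a modifier is not reflected by the
 * corresponding query helper.
 */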
static void __init pte_basic_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	pr_debug("Validating PTE basic\n");
	WARN_ON(!pte_same(pte, pte));
	WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
	WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
	WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte))));
	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
	WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte))));
}

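/*
 * Unlike the basic tests above, the advanced tests below install entries in
 * a live page table via set_pte_at() and friends, so they are invoked from
 * debug_vm_pgtable() with the page table lock held.
 */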
static void __init pte_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pte_t *ptep,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	/*
	 * Architectures optimize set_pte_at() by avoiding TLB flushes.
	 * This requires set_pte_at() to not be used for updating an
	 * existing pte entry. Clear the pte before doing set_pte_at().
	 */

	pr_debug("Validating PTE advanced\n");
	pte = pfn_pte(pfn, prot);
	set_pte_at(mm, vaddr, ptep, pte);
	ptep_set_wrprotect(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(pte_write(pte));
	ptep_get_and_clear(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(pfn, prot);
	pte = pte_wrprotect(pte);
	pte = pte_mkclean(pte);
	set_pte_at(mm, vaddr, ptep, pte);
	pte = pte_mkwrite(pte);
	pte = pte_mkdirty(pte);
	ptep_set_access_flags(vma, vaddr, ptep, pte, 1);
	pte = ptep_get(ptep);
	WARN_ON(!(pte_write(pte) && pte_dirty(pte)));
	ptep_get_and_clear_full(mm, vaddr, ptep, 1);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(pfn, prot);
	pte = pte_mkyoung(pte);
	set_pte_at(mm, vaddr, ptep, pte);
	ptep_test_and_clear_young(vma, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(pte_young(pte));
}

static void __init pte_savedwrite_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE saved write\n");
	WARN_ON(!pte_savedwrite(pte_mk_savedwrite(pte_clear_savedwrite(pte))));
	WARN_ON(pte_savedwrite(pte_clear_savedwrite(pte_mk_savedwrite(pte))));
}

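/*
 * The PMD/PUD helpers below are only built with CONFIG_TRANSPARENT_HUGEPAGE
 * (and CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD for the PUD variants);
 * otherwise empty stubs are provided further down.
 */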
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_basic_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd = pfn_pmd(pfn, prot);

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD basic\n");
	WARN_ON(!pmd_same(pmd, pmd));
	WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
	WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
	WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd))));
	WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
	WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
	WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd))));
	/*
	 * A huge page does not point to the next level page table
	 * entry. Hence this must qualify as pmd_bad().
	 */
	WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
}

static void __init pmd_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pmd_t *pmdp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
	pmd_t pmd = pfn_pmd(pfn, prot);

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD advanced\n");
	/* Align the address wrt HPAGE_PMD_SIZE */
	vaddr = (vaddr & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE;

	pmd = pfn_pmd(pfn, prot);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmdp_set_wrprotect(mm, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_write(pmd));
	pmdp_huge_get_and_clear(mm, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pfn_pmd(pfn, prot);
	pmd = pmd_wrprotect(pmd);
	pmd = pmd_mkclean(pmd);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmd = pmd_mkwrite(pmd);
	pmd = pmd_mkdirty(pmd);
	pmdp_set_access_flags(vma, vaddr, pmdp, pmd, 1);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd)));
	pmdp_huge_get_and_clear_full(vma, vaddr, pmdp, 1);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pmd_mkhuge(pfn_pmd(pfn, prot));
	pmd = pmd_mkyoung(pmd);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmdp_test_and_clear_young(vma, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_young(pmd));
}

static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd = pfn_pmd(pfn, prot);

	pr_debug("Validating PMD leaf\n");
	/*
	 * PMD based THP is a leaf entry.
	 */
	pmd = pmd_mkhuge(pmd);
	WARN_ON(!pmd_leaf(pmd));
}

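/*
 * pmd_huge_tests() exercises pmd_set_huge()/pmd_clear_huge(), the helpers
 * that back huge ioremap()/vmap() mappings, so it is gated on
 * CONFIG_HAVE_ARCH_HUGE_VMAP and arch_ioremap_pmd_supported().
 */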
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!arch_ioremap_pmd_supported())
		return;

	pr_debug("Validating PMD huge\n");
	/*
	 * The x86 implementation of pmd_set_huge() verifies that the
	 * given PMD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*pmdp, __pmd(0));
	WARN_ON(!pmd_set_huge(pmdp, __pfn_to_phys(pfn), prot));
	WARN_ON(!pmd_clear_huge(pmdp));
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));
}
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd = pfn_pmd(pfn, prot);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PMD saved write\n");
	WARN_ON(!pmd_savedwrite(pmd_mk_savedwrite(pmd_clear_savedwrite(pmd))));
	WARN_ON(pmd_savedwrite(pmd_clear_savedwrite(pmd_mk_savedwrite(pmd))));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_basic_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud = pfn_pud(pfn, prot);

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD basic\n");
	WARN_ON(!pud_same(pud, pud));
	WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
	WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
	WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
	WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));

	if (mm_pmd_folded(mm))
		return;

	/*
	 * A huge page does not point to the next level page table
	 * entry. Hence this must qualify as pud_bad().
	 */
	WARN_ON(!pud_bad(pud_mkhuge(pud)));
}

static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
	pud_t pud = pfn_pud(pfn, prot);

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD advanced\n");
	/* Align the address wrt HPAGE_PUD_SIZE */
	vaddr = (vaddr & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE;

	set_pud_at(mm, vaddr, pudp, pud);
	pudp_set_wrprotect(mm, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_write(pud));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear(mm, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */
	pud = pfn_pud(pfn, prot);
	pud = pud_wrprotect(pud);
	pud = pud_mkclean(pud);
	set_pud_at(mm, vaddr, pudp, pud);
	pud = pud_mkwrite(pud);
	pud = pud_mkdirty(pud);
	pudp_set_access_flags(vma, vaddr, pudp, pud, 1);
	pud = READ_ONCE(*pudp);
	WARN_ON(!(pud_write(pud) && pud_dirty(pud)));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear_full(mm, vaddr, pudp, 1);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */

	pud = pfn_pud(pfn, prot);
	pud = pud_mkyoung(pud);
	set_pud_at(mm, vaddr, pudp, pud);
	pudp_test_and_clear_young(vma, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_young(pud));
}

static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud = pfn_pud(pfn, prot);

	pr_debug("Validating PUD leaf\n");
	/*
	 * PUD based THP is a leaf entry.
	 */
	pud = pud_mkhuge(pud);
	WARN_ON(!pud_leaf(pud));
}

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!arch_ioremap_pud_supported())
		return;

	pr_debug("Validating PUD huge\n");
	/*
	 * The x86 implementation of pud_set_huge() verifies that the
	 * given PUD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*pudp, __pud(0));
	WARN_ON(!pud_set_huge(pudp, __pfn_to_phys(pfn), prot));
	WARN_ON(!pud_clear_huge(pudp));
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
}
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_basic_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
}
static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
{
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_basic_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_basic_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pmd_t *pmdp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
}
static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
}
static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
{
}
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
{
}
static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

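/*
 * The P4D and PGD levels are folded on many configurations, so their basic
 * tests only check that pxx_same() holds for an arbitrary non-zero entry.
 */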
static void __init p4d_basic_tests(unsigned long pfn, pgprot_t prot)
{
	p4d_t p4d;

	pr_debug("Validating P4D basic\n");
	memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
	WARN_ON(!p4d_same(p4d, p4d));
}

static void __init pgd_basic_tests(unsigned long pfn, pgprot_t prot)
{
	pgd_t pgd;

	pr_debug("Validating PGD basic\n");
	memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
	WARN_ON(!pgd_same(pgd, pgd));
}

#ifndef __PAGETABLE_PUD_FOLDED
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp)
{
	pud_t pud = READ_ONCE(*pudp);

	if (mm_pmd_folded(mm))
		return;

	pr_debug("Validating PUD clear\n");
	pud = __pud(pud_val(pud) | RANDOM_ORVALUE);
	WRITE_ONCE(*pudp, pud);
	pud_clear(pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
}

static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
	pud_t pud;

	if (mm_pmd_folded(mm))
		return;

	pr_debug("Validating PUD populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as pud_bad().
	 */
	pmd_clear(pmdp);
	pud_clear(pudp);
	pud_populate(mm, pudp, pmdp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_bad(pud));
}
#else /* !__PAGETABLE_PUD_FOLDED */
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp) { }
static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
}
#endif /* PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_P4D_FOLDED
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp)
{
	p4d_t p4d = READ_ONCE(*p4dp);

	if (mm_pud_folded(mm))
		return;

	pr_debug("Validating P4D clear\n");
	p4d = __p4d(p4d_val(p4d) | RANDOM_ORVALUE);
	WRITE_ONCE(*p4dp, p4d);
	p4d_clear(p4dp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(!p4d_none(p4d));
}

static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
	p4d_t p4d;

	if (mm_pud_folded(mm))
		return;

	pr_debug("Validating P4D populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as p4d_bad().
	 */
	pud_clear(pudp);
	p4d_clear(p4dp);
	p4d_populate(mm, p4dp, pudp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(p4d_bad(p4d));
}

static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp)
{
	pgd_t pgd = READ_ONCE(*pgdp);

	if (mm_p4d_folded(mm))
		return;

	pr_debug("Validating PGD clear\n");
	pgd = __pgd(pgd_val(pgd) | RANDOM_ORVALUE);
	WRITE_ONCE(*pgdp, pgd);
	pgd_clear(pgdp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(!pgd_none(pgd));
}

static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
	pgd_t pgd;

	if (mm_p4d_folded(mm))
		return;

	pr_debug("Validating PGD populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as pgd_bad().
	 */
	p4d_clear(p4dp);
	pgd_clear(pgdp);
	pgd_populate(mm, pgdp, p4dp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(pgd_bad(pgd));
}
#else /* !__PAGETABLE_P4D_FOLDED */
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp) { }
static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp) { }
static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
}
static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
}
#endif /* PAGETABLE_P4D_FOLDED */

static void __init pte_clear_tests(struct mm_struct *mm, pte_t *ptep,
				   unsigned long vaddr)
{
	pte_t pte = ptep_get(ptep);

	pr_debug("Validating PTE clear\n");
	pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
	set_pte_at(mm, vaddr, ptep, pte);
	barrier();
	pte_clear(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));
}

static void __init pmd_clear_tests(struct mm_struct *mm, pmd_t *pmdp)
{
	pmd_t pmd = READ_ONCE(*pmdp);

	pr_debug("Validating PMD clear\n");
	pmd = __pmd(pmd_val(pmd) | RANDOM_ORVALUE);
	WRITE_ONCE(*pmdp, pmd);
	pmd_clear(pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pmd_populate_tests(struct mm_struct *mm, pmd_t *pmdp,
				      pgtable_t pgtable)
{
	pmd_t pmd;

	pr_debug("Validating PMD populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as pmd_bad().
	 */
	pmd_clear(pmdp);
	pmd_populate(mm, pmdp, pgtable);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_bad(pmd));
}

static void __init pte_special_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL))
		return;

	pr_debug("Validating PTE special\n");
	WARN_ON(!pte_special(pte_mkspecial(pte)));
}

static void __init pte_protnone_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE protnone\n");
	WARN_ON(!pte_protnone(pte));
	WARN_ON(!pte_present(pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_protnone_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd = pmd_mkhuge(pfn_pmd(pfn, prot));

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PMD protnone\n");
	WARN_ON(!pmd_protnone(pmd));
	WARN_ON(!pmd_present(pmd));
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_protnone_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

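/*
 * pxx_mkdevmap() tags entries mapping ZONE_DEVICE pages (e.g. DAX); the
 * corresponding helpers only exist with CONFIG_ARCH_HAS_PTE_DEVMAP.
 */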
#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static void __init pte_devmap_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	pr_debug("Validating PTE devmap\n");
	WARN_ON(!pte_devmap(pte_mkdevmap(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd = pfn_pmd(pfn, prot);

	pr_debug("Validating PMD devmap\n");
	WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd)));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud = pfn_pud(pfn, prot);

	pr_debug("Validating PUD devmap\n");
	WARN_ON(!pud_devmap(pud_mkdevmap(pud)));
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#else /* !CONFIG_ARCH_HAS_PTE_DEVMAP */
static void __init pte_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */

static void __init pte_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE soft dirty\n");
	WARN_ON(!pte_soft_dirty(pte_mksoft_dirty(pte)));
	WARN_ON(pte_soft_dirty(pte_clear_soft_dirty(pte)));
}

static void __init pte_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE swap soft dirty\n");
	WARN_ON(!pte_swp_soft_dirty(pte_swp_mksoft_dirty(pte)));
	WARN_ON(pte_swp_soft_dirty(pte_swp_clear_soft_dirty(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd = pfn_pmd(pfn, prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PMD soft dirty\n");
	WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
	WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
}

static void __init pmd_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd = pfn_pmd(pfn, prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) ||
	    !IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
		return;

	pr_debug("Validating PMD swap soft dirty\n");
	WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
	WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_soft_dirty_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

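/*
 * The swap tests verify that a pfn encoded into an entry survives the round
 * trip through __pxx_to_swp_entry() and __swp_entry_to_pxx().
 */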
static void __init pte_swap_tests(unsigned long pfn, pgprot_t prot)
{
	swp_entry_t swp;
	pte_t pte;

	pr_debug("Validating PTE swap\n");
	pte = pfn_pte(pfn, prot);
	swp = __pte_to_swp_entry(pte);
	pte = __swp_entry_to_pte(swp);
	WARN_ON(pfn != pte_pfn(pte));
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static void __init pmd_swap_tests(unsigned long pfn, pgprot_t prot)
{
	swp_entry_t swp;
	pmd_t pmd;

	pr_debug("Validating PMD swap\n");
	pmd = pfn_pmd(pfn, prot);
	swp = __pmd_to_swp_entry(pmd);
	pmd = __swp_entry_to_pmd(swp);
	WARN_ON(pfn != pmd_pfn(pmd));
}
#else /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
static void __init pmd_swap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

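/*
 * Migration entries are non-present swap style entries that reference the
 * page being migrated, with a bit recording whether the original mapping
 * was writable.
 */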
static void __init swap_migration_tests(void)
{
	struct page *page;
	swp_entry_t swp;

	if (!IS_ENABLED(CONFIG_MIGRATION))
		return;

	pr_debug("Validating swap migration\n");
	/*
	 * swap_migration_tests() requires a dedicated page as it needs to
	 * be locked before creating a migration entry from it. Locking the
	 * page that actually maps kernel text ('start_kernel') can be really
	 * problematic. Let's allocate a dedicated page explicitly for this
	 * purpose that will be freed subsequently.
	 */
	page = alloc_page(GFP_KERNEL);
	if (!page) {
		pr_err("page allocation failed\n");
		return;
	}

	/*
	 * make_migration_entry() expects the given page to be
	 * locked, otherwise it stumbles upon a BUG_ON().
	 */
	__SetPageLocked(page);
	swp = make_migration_entry(page, 1);
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(!is_write_migration_entry(swp));

	make_migration_entry_read(&swp);
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_write_migration_entry(swp));

	swp = make_migration_entry(page, 0);
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_write_migration_entry(swp));
	__ClearPageLocked(page);
	__free_page(page);
}

#ifdef CONFIG_HUGETLB_PAGE
static void __init hugetlb_basic_tests(unsigned long pfn, pgprot_t prot)
{
	struct page *page;
	pte_t pte;

	pr_debug("Validating HugeTLB basic\n");
	/*
	 * Accessing the page associated with the pfn is safe here,
	 * as it was previously derived from a real kernel symbol.
	 */
	page = pfn_to_page(pfn);
	pte = mk_huge_pte(page, prot);

	WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte)));
	WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte))));
	WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte))));

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
	pte = pfn_pte(pfn, prot);

	WARN_ON(!pte_huge(pte_mkhuge(pte)));
#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
}

static void __init hugetlb_advanced_tests(struct mm_struct *mm,
					  struct vm_area_struct *vma,
					  pte_t *ptep, unsigned long pfn,
					  unsigned long vaddr, pgprot_t prot)
{
	struct page *page = pfn_to_page(pfn);
	pte_t pte = ptep_get(ptep);
	unsigned long paddr = __pfn_to_phys(pfn) & PMD_MASK;

	pr_debug("Validating HugeTLB advanced\n");
	pte = pte_mkhuge(mk_pte(pfn_to_page(PHYS_PFN(paddr)), prot));
	set_huge_pte_at(mm, vaddr, ptep, pte);
	barrier();
	WARN_ON(!pte_same(pte, huge_ptep_get(ptep)));
	huge_pte_clear(mm, vaddr, ptep, PMD_SIZE);
	pte = huge_ptep_get(ptep);
	WARN_ON(!huge_pte_none(pte));

	pte = mk_huge_pte(page, prot);
	set_huge_pte_at(mm, vaddr, ptep, pte);
	barrier();
	huge_ptep_set_wrprotect(mm, vaddr, ptep);
	pte = huge_ptep_get(ptep);
	WARN_ON(huge_pte_write(pte));

	pte = mk_huge_pte(page, prot);
	set_huge_pte_at(mm, vaddr, ptep, pte);
	barrier();
	huge_ptep_get_and_clear(mm, vaddr, ptep);
	pte = huge_ptep_get(ptep);
	WARN_ON(!huge_pte_none(pte));

	pte = mk_huge_pte(page, prot);
	pte = huge_pte_wrprotect(pte);
	set_huge_pte_at(mm, vaddr, ptep, pte);
	barrier();
	pte = huge_pte_mkwrite(pte);
	pte = huge_pte_mkdirty(pte);
	huge_ptep_set_access_flags(vma, vaddr, ptep, pte, 1);
	pte = huge_ptep_get(ptep);
	WARN_ON(!(huge_pte_write(pte) && huge_pte_dirty(pte)));
}
#else /* !CONFIG_HUGETLB_PAGE */
static void __init hugetlb_basic_tests(unsigned long pfn, pgprot_t prot) { }
static void __init hugetlb_advanced_tests(struct mm_struct *mm,
					  struct vm_area_struct *vma,
					  pte_t *ptep, unsigned long pfn,
					  unsigned long vaddr, pgprot_t prot)
{
}
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_thp_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD based THP\n");
	/*
	 * pmd_trans_huge() and pmd_present() must return positive after
	 * MMU invalidation with pmd_mkinvalid(). This behavior is an
	 * optimization for transparent huge pages. pmd_trans_huge() must
	 * be true if pmd_page() returns a valid THP to avoid taking the
	 * pmd_lock when others walk over non-transhuge pmds (i.e. there
	 * are no THPs allocated). Especially when splitting a THP and
	 * removing the present bit from the pmd, pmd_trans_huge() still
	 * needs to return true. pmd_present() should be true whenever
	 * pmd_trans_huge() returns true.
	 */
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_trans_huge(pmd_mkhuge(pmd)));

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
	WARN_ON(!pmd_trans_huge(pmd_mkinvalid(pmd_mkhuge(pmd))));
	WARN_ON(!pmd_present(pmd_mkinvalid(pmd_mkhuge(pmd))));
#endif /* __HAVE_ARCH_PMDP_INVALIDATE */
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD based THP\n");
	pud = pfn_pud(pfn, prot);
	WARN_ON(!pud_trans_huge(pud_mkhuge(pud)));

	/*
	 * pud_mkinvalid() has been dropped for now. Re-enable
	 * these tests when it comes back with a modified pud_present().
	 *
	 * WARN_ON(!pud_trans_huge(pud_mkinvalid(pud_mkhuge(pud))));
	 * WARN_ON(!pud_present(pud_mkinvalid(pud_mkhuge(pud))));
	 */
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_thp_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static unsigned long __init get_random_vaddr(void)
{
	unsigned long random_vaddr, random_pages, total_user_pages;

	total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;

	random_pages = get_random_long() % total_user_pages;
	random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;

	return random_vaddr;
}

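/*
 * debug_vm_pgtable() allocates a scratch mm with one page table page at each
 * level, runs the individual helpers above against it and then tears the
 * whole hierarchy down again. It runs once from late_initcall().
 */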
static int __init debug_vm_pgtable(void)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	pgd_t *pgdp;
	p4d_t *p4dp, *saved_p4dp;
	pud_t *pudp, *saved_pudp;
	pmd_t *pmdp, *saved_pmdp, pmd;
	pte_t *ptep;
	pgtable_t saved_ptep;
	pgprot_t prot, protnone;
	phys_addr_t paddr;
	unsigned long vaddr, pte_aligned, pmd_aligned;
	unsigned long pud_aligned, p4d_aligned, pgd_aligned;
	spinlock_t *ptl = NULL;

	pr_info("Validating architecture page table helpers\n");
	prot = vm_get_page_prot(VMFLAGS);
	vaddr = get_random_vaddr();
	mm = mm_alloc();
	if (!mm) {
		pr_err("mm_struct allocation failed\n");
		return 1;
	}

	/*
	 * __P000 (or even __S000) will help create page table entries with
	 * PROT_NONE permission as required for pxx_protnone_tests().
	 */
	protnone = __P000;

	vma = vm_area_alloc(mm);
	if (!vma) {
		pr_err("vma allocation failed\n");
		return 1;
	}

	/*
	 * PFN for mapping at PTE level is determined from a standard kernel
	 * text symbol. But pfns for higher page table levels are derived by
	 * masking lower bits of this real pfn. These derived pfns might not
	 * exist on the platform but that does not really matter as pfn_pxx()
	 * helpers will still create appropriate entries for the test. This
	 * helps avoid large memory block allocations for mappings at higher
	 * page table levels.
	 */
	paddr = __pa_symbol(&start_kernel);

	pte_aligned = (paddr & PAGE_MASK) >> PAGE_SHIFT;
	pmd_aligned = (paddr & PMD_MASK) >> PAGE_SHIFT;
	pud_aligned = (paddr & PUD_MASK) >> PAGE_SHIFT;
	p4d_aligned = (paddr & P4D_MASK) >> PAGE_SHIFT;
	pgd_aligned = (paddr & PGDIR_MASK) >> PAGE_SHIFT;
	WARN_ON(!pfn_valid(pte_aligned));

	pgdp = pgd_offset(mm, vaddr);
	p4dp = p4d_alloc(mm, pgdp, vaddr);
	pudp = pud_alloc(mm, p4dp, vaddr);
	pmdp = pmd_alloc(mm, pudp, vaddr);
	ptep = pte_alloc_map(mm, pmdp, vaddr);

	/*
	 * Save all the page table page addresses as the page table
	 * entries will be used for testing with random or garbage
	 * values. These saved addresses will be used for freeing
	 * page table pages.
	 */
	pmd = READ_ONCE(*pmdp);
	saved_p4dp = p4d_offset(pgdp, 0UL);
	saved_pudp = pud_offset(p4dp, 0UL);
	saved_pmdp = pmd_offset(pudp, 0UL);
	saved_ptep = pmd_pgtable(pmd);

	pte_basic_tests(pte_aligned, prot);
	pmd_basic_tests(pmd_aligned, prot);
	pud_basic_tests(pud_aligned, prot);
	p4d_basic_tests(p4d_aligned, prot);
	pgd_basic_tests(pgd_aligned, prot);

	pmd_leaf_tests(pmd_aligned, prot);
	pud_leaf_tests(pud_aligned, prot);

	pte_savedwrite_tests(pte_aligned, protnone);
	pmd_savedwrite_tests(pmd_aligned, protnone);

	pte_special_tests(pte_aligned, prot);
	pte_protnone_tests(pte_aligned, protnone);
	pmd_protnone_tests(pmd_aligned, protnone);

	pte_devmap_tests(pte_aligned, prot);
	pmd_devmap_tests(pmd_aligned, prot);
	pud_devmap_tests(pud_aligned, prot);

	pte_soft_dirty_tests(pte_aligned, prot);
	pmd_soft_dirty_tests(pmd_aligned, prot);
	pte_swap_soft_dirty_tests(pte_aligned, prot);
	pmd_swap_soft_dirty_tests(pmd_aligned, prot);

	pte_swap_tests(pte_aligned, prot);
	pmd_swap_tests(pmd_aligned, prot);

	swap_migration_tests();

	pmd_thp_tests(pmd_aligned, prot);
	pud_thp_tests(pud_aligned, prot);

	hugetlb_basic_tests(pte_aligned, prot);

	pte_clear_tests(mm, ptep, vaddr);
	pmd_clear_tests(mm, pmdp);
	pud_clear_tests(mm, pudp);
	p4d_clear_tests(mm, p4dp);
	pgd_clear_tests(mm, pgdp);

	ptl = pte_lockptr(mm, pmdp);
	spin_lock(ptl);

	pte_advanced_tests(mm, vma, ptep, pte_aligned, vaddr, prot);
	pmd_advanced_tests(mm, vma, pmdp, pmd_aligned, vaddr, prot);
	pud_advanced_tests(mm, vma, pudp, pud_aligned, vaddr, prot);
	hugetlb_advanced_tests(mm, vma, ptep, pte_aligned, vaddr, prot);

	pmd_huge_tests(pmdp, pmd_aligned, prot);
	pud_huge_tests(pudp, pud_aligned, prot);

	pte_unmap_unlock(ptep, ptl);

	pmd_populate_tests(mm, pmdp, saved_ptep);
	pud_populate_tests(mm, pudp, saved_pmdp);
	p4d_populate_tests(mm, p4dp, saved_pudp);
	pgd_populate_tests(mm, pgdp, saved_p4dp);

	p4d_free(mm, saved_p4dp);
	pud_free(mm, saved_pudp);
	pmd_free(mm, saved_pmdp);
	pte_free(mm, saved_ptep);

	vm_area_free(vma);
	mm_dec_nr_puds(mm);
	mm_dec_nr_pmds(mm);
	mm_dec_nr_ptes(mm);
	mmdrop(mm);
	return 0;
}
late_initcall(debug_vm_pgtable);