// SPDX-License-Identifier: GPL-2.0-only
/*
 * This kernel test validates architecture page table helpers and
 * accessors and helps in verifying their continued compliance with
 * expected generic MM semantics.
 *
 * Copyright (C) 2019 ARM Ltd.
 *
 * Author: Anshuman Khandual <anshuman.khandual@arm.com>
 */
#define pr_fmt(fmt)		"debug_vm_pgtable: [%-25s]: " fmt, __func__

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/printk.h>
#include <linux/pgtable.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/start_kernel.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * Please refer to Documentation/vm/arch_pgtable_helpers.rst for the semantics
 * expectations being validated here. All future changes here or in the
 * documentation need to be kept in sync.
 */

#define VMFLAGS	(VM_READ|VM_WRITE|VM_EXEC)

/*
 * On the s390 platform, the lower 4 bits are used to identify a given page
 * table entry type. These bits can also affect the ability to clear entries
 * with pxx_clear() because of how dynamic page table folding works on s390.
 * So while loading up the entries, do not change the lower 4 bits. This does
 * not affect any other platform. Also avoid the 62nd bit on ppc64, which is
 * used to mark a pte entry.
 */
#define S390_SKIP_MASK		GENMASK(3, 0)
#if __BITS_PER_LONG == 64
#define PPC64_SKIP_MASK		GENMASK(62, 62)
#else
#define PPC64_SKIP_MASK		0x0
#endif
#define ARCH_SKIP_MASK (S390_SKIP_MASK | PPC64_SKIP_MASK)
#define RANDOM_ORVALUE (GENMASK(BITS_PER_LONG - 1, 0) & ~ARCH_SKIP_MASK)
#define RANDOM_NZVALUE	GENMASK(7, 0)

static void __init pte_basic_tests(unsigned long pfn, int idx)
{
	pgprot_t prot = protection_map[idx];
	pte_t pte = pfn_pte(pfn, prot);
	unsigned long val = idx, *ptr = &val;

	pr_debug("Validating PTE basic (%pGv)\n", ptr);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pte() to make sure that protection_map[idx]
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pte_dirty(pte_wrprotect(pte)));

	WARN_ON(!pte_same(pte, pte));
	WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
	WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
	WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte))));
	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
	WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte))));
	WARN_ON(pte_dirty(pte_wrprotect(pte_mkclean(pte))));
	WARN_ON(!pte_dirty(pte_wrprotect(pte_mkdirty(pte))));
}

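/*
 * Advanced PTE tests exercise the ptep_*() helpers (wrprotect,
 * get_and_clear, set_access_flags, test_and_clear_young) against an
 * entry installed with set_pte_at(), and are therefore called with
 * the PTE page table lock held.
 */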
static void __init pte_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pte_t *ptep,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	/*
	 * Architectures optimize set_pte_at() by avoiding TLB flush.
	 * This requires set_pte_at() not to be used to update an
	 * existing pte entry. Clear the pte before calling set_pte_at().
	 */

	pr_debug("Validating PTE advanced\n");
	pte = pfn_pte(pfn, prot);
	set_pte_at(mm, vaddr, ptep, pte);
	ptep_set_wrprotect(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(pte_write(pte));
	ptep_get_and_clear(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(pfn, prot);
	pte = pte_wrprotect(pte);
	pte = pte_mkclean(pte);
	set_pte_at(mm, vaddr, ptep, pte);
	pte = pte_mkwrite(pte);
	pte = pte_mkdirty(pte);
	ptep_set_access_flags(vma, vaddr, ptep, pte, 1);
	pte = ptep_get(ptep);
	WARN_ON(!(pte_write(pte) && pte_dirty(pte)));
	ptep_get_and_clear_full(mm, vaddr, ptep, 1);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(pfn, prot);
	pte = pte_mkyoung(pte);
	set_pte_at(mm, vaddr, ptep, pte);
	ptep_test_and_clear_young(vma, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(pte_young(pte));
}

static void __init pte_savedwrite_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE saved write\n");
	WARN_ON(!pte_savedwrite(pte_mk_savedwrite(pte_clear_savedwrite(pte))));
	WARN_ON(pte_savedwrite(pte_clear_savedwrite(pte_mk_savedwrite(pte))));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_basic_tests(unsigned long pfn, int idx)
{
	pgprot_t prot = protection_map[idx];
	pmd_t pmd = pfn_pmd(pfn, prot);
	unsigned long val = idx, *ptr = &val;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD basic (%pGv)\n", ptr);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pmd() to make sure that protection_map[idx]
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd)));

	WARN_ON(!pmd_same(pmd, pmd));
	WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
	WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
	WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd))));
	WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
	WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
	WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd))));
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd_mkclean(pmd))));
	WARN_ON(!pmd_dirty(pmd_wrprotect(pmd_mkdirty(pmd))));
	/*
	 * A huge page does not point to next level page table
	 * entry. Hence this must qualify as pmd_bad().
	 */
	WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
}

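/*
 * Advanced PMD tests install huge entries with set_pmd_at() and exercise
 * the pmdp_*() helpers. The given pgtable is deposited up front and
 * withdrawn again at the end. These tests run with the PMD lock held.
 */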
static void __init pmd_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pmd_t *pmdp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot, pgtable_t pgtable)
{
	pmd_t pmd = pfn_pmd(pfn, prot);

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD advanced\n");
	/* Align the address wrt HPAGE_PMD_SIZE */
	vaddr = (vaddr & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE;

	pgtable_trans_huge_deposit(mm, pmdp, pgtable);

	pmd = pfn_pmd(pfn, prot);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmdp_set_wrprotect(mm, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_write(pmd));
	pmdp_huge_get_and_clear(mm, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pfn_pmd(pfn, prot);
	pmd = pmd_wrprotect(pmd);
	pmd = pmd_mkclean(pmd);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmd = pmd_mkwrite(pmd);
	pmd = pmd_mkdirty(pmd);
	pmdp_set_access_flags(vma, vaddr, pmdp, pmd, 1);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd)));
	pmdp_huge_get_and_clear_full(vma, vaddr, pmdp, 1);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pmd_mkhuge(pfn_pmd(pfn, prot));
	pmd = pmd_mkyoung(pmd);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmdp_test_and_clear_young(vma, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_young(pmd));

	/* Clear the pmd entry and withdraw the deposited page table */
	pmdp_huge_get_and_clear(mm, vaddr, pmdp);
	pgtable = pgtable_trans_huge_withdraw(mm, pmdp);
}

static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd = pfn_pmd(pfn, prot);

	pr_debug("Validating PMD leaf\n");
	/*
	 * PMD based THP is a leaf entry.
	 */
	pmd = pmd_mkhuge(pmd);
	WARN_ON(!pmd_leaf(pmd));
}

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!arch_ioremap_pmd_supported())
		return;

	pr_debug("Validating PMD huge\n");
	/*
	 * X86 defined pmd_set_huge() verifies that the given
	 * PMD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*pmdp, __pmd(0));
	WARN_ON(!pmd_set_huge(pmdp, __pfn_to_phys(pfn), prot));
	WARN_ON(!pmd_clear_huge(pmdp));
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));
}
#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd = pfn_pmd(pfn, prot);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PMD saved write\n");
	WARN_ON(!pmd_savedwrite(pmd_mk_savedwrite(pmd_clear_savedwrite(pmd))));
	WARN_ON(pmd_savedwrite(pmd_clear_savedwrite(pmd_mk_savedwrite(pmd))));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn, int idx)
{
	pgprot_t prot = protection_map[idx];
	pud_t pud = pfn_pud(pfn, prot);
	unsigned long val = idx, *ptr = &val;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD basic (%pGv)\n", ptr);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pud() to make sure that protection_map[idx]
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pud_dirty(pud_wrprotect(pud)));

	WARN_ON(!pud_same(pud, pud));
	WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
	WARN_ON(!pud_dirty(pud_mkdirty(pud_mkclean(pud))));
	WARN_ON(pud_dirty(pud_mkclean(pud_mkdirty(pud))));
	WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
	WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
	WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));
	WARN_ON(pud_dirty(pud_wrprotect(pud_mkclean(pud))));
	WARN_ON(!pud_dirty(pud_wrprotect(pud_mkdirty(pud))));

	if (mm_pmd_folded(mm))
		return;

	/*
	 * A huge page does not point to next level page table
	 * entry. Hence this must qualify as pud_bad().
	 */
	WARN_ON(!pud_bad(pud_mkhuge(pud)));
}

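/*
 * Advanced PUD tests mirror the PMD ones, installing entries with
 * set_pud_at() and exercising the pudp_*() helpers while the PUD
 * lock is held.
 */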
static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
	pud_t pud = pfn_pud(pfn, prot);

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD advanced\n");
	/* Align the address wrt HPAGE_PUD_SIZE */
	vaddr = (vaddr & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE;

	set_pud_at(mm, vaddr, pudp, pud);
	pudp_set_wrprotect(mm, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_write(pud));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear(mm, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */
	pud = pfn_pud(pfn, prot);
	pud = pud_wrprotect(pud);
	pud = pud_mkclean(pud);
	set_pud_at(mm, vaddr, pudp, pud);
	pud = pud_mkwrite(pud);
	pud = pud_mkdirty(pud);
	pudp_set_access_flags(vma, vaddr, pudp, pud, 1);
	pud = READ_ONCE(*pudp);
	WARN_ON(!(pud_write(pud) && pud_dirty(pud)));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear_full(mm, vaddr, pudp, 1);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */

	pud = pfn_pud(pfn, prot);
	pud = pud_mkyoung(pud);
	set_pud_at(mm, vaddr, pudp, pud);
	pudp_test_and_clear_young(vma, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_young(pud));

	pudp_huge_get_and_clear(mm, vaddr, pudp);
}

static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud = pfn_pud(pfn, prot);

	pr_debug("Validating PUD leaf\n");
	/*
	 * PUD based THP is a leaf entry.
	 */
	pud = pud_mkhuge(pud);
	WARN_ON(!pud_leaf(pud));
}

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!arch_ioremap_pud_supported())
		return;

	pr_debug("Validating PUD huge\n");
	/*
	 * X86 defined pud_set_huge() verifies that the given
	 * PUD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*pudp, __pud(0));
	WARN_ON(!pud_set_huge(pudp, __pfn_to_phys(pfn), prot));
	WARN_ON(!pud_clear_huge(pudp));
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
}
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot) { }
#endif /* !CONFIG_HAVE_ARCH_HUGE_VMAP */

#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn, int idx) { }
static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
}
static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
{
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_basic_tests(unsigned long pfn, int idx) { }
static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn, int idx) { }
static void __init pmd_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pmd_t *pmdp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot, pgtable_t pgtable)
{
}
static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
}
static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
{
}
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
{
}
static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

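/*
 * P4D and PGD level tests are very basic. They only check pxx_same()
 * on an entry filled with RANDOM_NZVALUE, as entries at these levels
 * are not created from a pfn and protection value here.
 */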
static void __init p4d_basic_tests(unsigned long pfn, pgprot_t prot)
{
	p4d_t p4d;

	pr_debug("Validating P4D basic\n");
	memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
	WARN_ON(!p4d_same(p4d, p4d));
}

static void __init pgd_basic_tests(unsigned long pfn, pgprot_t prot)
{
	pgd_t pgd;

	pr_debug("Validating PGD basic\n");
	memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
	WARN_ON(!pgd_same(pgd, pgd));
}

#ifndef __PAGETABLE_PUD_FOLDED
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp)
{
	pud_t pud = READ_ONCE(*pudp);

	if (mm_pmd_folded(mm))
		return;

	pr_debug("Validating PUD clear\n");
	pud = __pud(pud_val(pud) | RANDOM_ORVALUE);
	WRITE_ONCE(*pudp, pud);
	pud_clear(pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
}

static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
	pud_t pud;

	if (mm_pmd_folded(mm))
		return;

	pr_debug("Validating PUD populate\n");
	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as pud_bad().
	 */
	pud_populate(mm, pudp, pmdp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_bad(pud));
}
#else /* !__PAGETABLE_PUD_FOLDED */
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp) { }
static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
}
#endif /* PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_P4D_FOLDED
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp)
{
	p4d_t p4d = READ_ONCE(*p4dp);

	if (mm_pud_folded(mm))
		return;

	pr_debug("Validating P4D clear\n");
	p4d = __p4d(p4d_val(p4d) | RANDOM_ORVALUE);
	WRITE_ONCE(*p4dp, p4d);
	p4d_clear(p4dp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(!p4d_none(p4d));
}

static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
	p4d_t p4d;

	if (mm_pud_folded(mm))
		return;

	pr_debug("Validating P4D populate\n");
	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as p4d_bad().
	 */
	pud_clear(pudp);
	p4d_clear(p4dp);
	p4d_populate(mm, p4dp, pudp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(p4d_bad(p4d));
}

static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp)
{
	pgd_t pgd = READ_ONCE(*pgdp);

	if (mm_p4d_folded(mm))
		return;

	pr_debug("Validating PGD clear\n");
	pgd = __pgd(pgd_val(pgd) | RANDOM_ORVALUE);
	WRITE_ONCE(*pgdp, pgd);
	pgd_clear(pgdp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(!pgd_none(pgd));
}

static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
	pgd_t pgd;

	if (mm_p4d_folded(mm))
		return;

	pr_debug("Validating PGD populate\n");
	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as pgd_bad().
	 */
	p4d_clear(p4dp);
	pgd_clear(pgdp);
	pgd_populate(mm, pgdp, p4dp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(pgd_bad(pgd));
}
#else /* !__PAGETABLE_P4D_FOLDED */
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp) { }
static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp) { }
static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
}
static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
}
#endif /* PAGETABLE_P4D_FOLDED */

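/*
 * pte_clear_tests() seeds the entry with RANDOM_ORVALUE (skipped on
 * riscv) and then verifies that pte_clear() leaves it pte_none().
 */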
static void __init pte_clear_tests(struct mm_struct *mm, pte_t *ptep,
				   unsigned long pfn, unsigned long vaddr,
				   pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	pr_debug("Validating PTE clear\n");
#ifndef CONFIG_RISCV
	pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
#endif
	set_pte_at(mm, vaddr, ptep, pte);
	barrier();
	pte_clear(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));
}

static void __init pmd_clear_tests(struct mm_struct *mm, pmd_t *pmdp)
{
	pmd_t pmd = READ_ONCE(*pmdp);

	pr_debug("Validating PMD clear\n");
	pmd = __pmd(pmd_val(pmd) | RANDOM_ORVALUE);
	WRITE_ONCE(*pmdp, pmd);
	pmd_clear(pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pmd_populate_tests(struct mm_struct *mm, pmd_t *pmdp,
				      pgtable_t pgtable)
{
	pmd_t pmd;

	pr_debug("Validating PMD populate\n");
	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as pmd_bad().
	 */
	pmd_populate(mm, pmdp, pgtable);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_bad(pmd));
}

static void __init pte_special_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL))
		return;

	pr_debug("Validating PTE special\n");
	WARN_ON(!pte_special(pte_mkspecial(pte)));
}

static void __init pte_protnone_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE protnone\n");
	WARN_ON(!pte_protnone(pte));
	WARN_ON(!pte_present(pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_protnone_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd = pmd_mkhuge(pfn_pmd(pfn, prot));

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PMD protnone\n");
	WARN_ON(!pmd_protnone(pmd));
	WARN_ON(!pmd_present(pmd));
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_protnone_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

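/*
 * The devmap tests verify that pxx_mkdevmap() produces an entry for
 * which pxx_devmap() holds true. They are only built when the platform
 * selects CONFIG_ARCH_HAS_PTE_DEVMAP.
 */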
#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static void __init pte_devmap_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	pr_debug("Validating PTE devmap\n");
	WARN_ON(!pte_devmap(pte_mkdevmap(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd = pfn_pmd(pfn, prot);

	pr_debug("Validating PMD devmap\n");
	WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd)));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud = pfn_pud(pfn, prot);

	pr_debug("Validating PUD devmap\n");
	WARN_ON(!pud_devmap(pud_mkdevmap(pud)));
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#else
static void __init pte_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */

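/*
 * Soft dirty tests validate the pxx_mksoft_dirty()/pxx_clear_soft_dirty()
 * helpers and their swap variants. They bail out early unless
 * CONFIG_MEM_SOFT_DIRTY is enabled.
 */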
static void __init pte_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE soft dirty\n");
	WARN_ON(!pte_soft_dirty(pte_mksoft_dirty(pte)));
	WARN_ON(pte_soft_dirty(pte_clear_soft_dirty(pte)));
}

static void __init pte_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE swap soft dirty\n");
	WARN_ON(!pte_swp_soft_dirty(pte_swp_mksoft_dirty(pte)));
	WARN_ON(pte_swp_soft_dirty(pte_swp_clear_soft_dirty(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd = pfn_pmd(pfn, prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PMD soft dirty\n");
	WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
	WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
}

static void __init pmd_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd = pfn_pmd(pfn, prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) ||
	    !IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
		return;

	pr_debug("Validating PMD swap soft dirty\n");
	WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
	WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_soft_dirty_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

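/*
 * The swap tests round-trip an entry through __pxx_to_swp_entry() and
 * __swp_entry_to_pxx() and verify that the original pfn survives the
 * conversion.
 */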
static void __init pte_swap_tests(unsigned long pfn, pgprot_t prot)
{
	swp_entry_t swp;
	pte_t pte;

	pr_debug("Validating PTE swap\n");
	pte = pfn_pte(pfn, prot);
	swp = __pte_to_swp_entry(pte);
	pte = __swp_entry_to_pte(swp);
	WARN_ON(pfn != pte_pfn(pte));
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static void __init pmd_swap_tests(unsigned long pfn, pgprot_t prot)
{
	swp_entry_t swp;
	pmd_t pmd;

	pr_debug("Validating PMD swap\n");
	pmd = pfn_pmd(pfn, prot);
	swp = __pmd_to_swp_entry(pmd);
	pmd = __swp_entry_to_pmd(swp);
	WARN_ON(pfn != pmd_pfn(pmd));
}
#else /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
static void __init pmd_swap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

static void __init swap_migration_tests(void)
{
	struct page *page;
	swp_entry_t swp;

	if (!IS_ENABLED(CONFIG_MIGRATION))
		return;

	pr_debug("Validating swap migration\n");
	/*
	 * swap_migration_tests() requires a dedicated page as it needs to
	 * be locked before creating a migration entry from it. Locking the
	 * page that actually maps kernel text ('start_kernel') can be really
	 * problematic. Let's allocate a dedicated page explicitly for this
	 * purpose that will be freed subsequently.
	 */
	page = alloc_page(GFP_KERNEL);
	if (!page) {
		pr_err("page allocation failed\n");
		return;
	}

	/*
	 * make_migration_entry() expects the given page to be
	 * locked, otherwise it stumbles upon a BUG_ON().
	 */
	__SetPageLocked(page);
	swp = make_migration_entry(page, 1);
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(!is_write_migration_entry(swp));

	make_migration_entry_read(&swp);
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_write_migration_entry(swp));

	swp = make_migration_entry(page, 0);
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_write_migration_entry(swp));
	__ClearPageLocked(page);
	__free_page(page);
}

#ifdef CONFIG_HUGETLB_PAGE
static void __init hugetlb_basic_tests(unsigned long pfn, pgprot_t prot)
{
	struct page *page;
	pte_t pte;

	pr_debug("Validating HugeTLB basic\n");
	/*
	 * Accessing the page associated with the pfn is safe here,
	 * as it was previously derived from a real kernel symbol.
	 */
	page = pfn_to_page(pfn);
	pte = mk_huge_pte(page, prot);

	WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte)));
	WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte))));
	WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte))));

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
	pte = pfn_pte(pfn, prot);

	WARN_ON(!pte_huge(pte_mkhuge(pte)));
#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
}
#else /* !CONFIG_HUGETLB_PAGE */
static void __init hugetlb_basic_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_thp_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD based THP\n");
	/*
	 * pmd_trans_huge() and pmd_present() must return positive after
	 * MMU invalidation with pmd_mkinvalid(). This behavior is an
	 * optimization for transparent huge pages. pmd_trans_huge() must
	 * be true if pmd_page() returns a valid THP to avoid taking the
	 * pmd_lock when others walk over non-transhuge pmds (i.e. there
	 * are no THPs allocated). Especially when splitting a THP and
	 * removing the present bit from the pmd, pmd_trans_huge() still
	 * needs to return true. pmd_present() should be true whenever
	 * pmd_trans_huge() returns true.
	 */
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_trans_huge(pmd_mkhuge(pmd)));

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
	WARN_ON(!pmd_trans_huge(pmd_mkinvalid(pmd_mkhuge(pmd))));
	WARN_ON(!pmd_present(pmd_mkinvalid(pmd_mkhuge(pmd))));
#endif /* __HAVE_ARCH_PMDP_INVALIDATE */
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD based THP\n");
	pud = pfn_pud(pfn, prot);
	WARN_ON(!pud_trans_huge(pud_mkhuge(pud)));

	/*
	 * pud_mkinvalid() has been dropped for now. Enable back
	 * these tests when it comes back with a modified pud_present().
	 *
	 * WARN_ON(!pud_trans_huge(pud_mkinvalid(pud_mkhuge(pud))));
	 * WARN_ON(!pud_present(pud_mkinvalid(pud_mkhuge(pud))));
	 */
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_thp_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

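/*
 * Pick a random user virtual address in the range from
 * FIRST_USER_ADDRESS up to TASK_SIZE, which is then used while
 * building the page table entries under test.
 */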
static unsigned long __init get_random_vaddr(void)
{
	unsigned long random_vaddr, random_pages, total_user_pages;

	total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;

	random_pages = get_random_long() % total_user_pages;
	random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;

	return random_vaddr;
}

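/*
 * debug_vm_pgtable() is the test entry point. It allocates an mm_struct,
 * a vma and the page table pages for a random user virtual address, runs
 * the basic tests across every protection_map[] entry, then the remaining
 * standalone and lock-protected tests, and finally releases all the
 * resources it allocated.
 */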
static int __init debug_vm_pgtable(void)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	pgd_t *pgdp;
	p4d_t *p4dp, *saved_p4dp;
	pud_t *pudp, *saved_pudp;
	pmd_t *pmdp, *saved_pmdp, pmd;
	pte_t *ptep;
	pgtable_t saved_ptep;
	pgprot_t prot, protnone;
	phys_addr_t paddr;
	unsigned long vaddr, pte_aligned, pmd_aligned;
	unsigned long pud_aligned, p4d_aligned, pgd_aligned;
	spinlock_t *ptl = NULL;
	int idx;

	pr_info("Validating architecture page table helpers\n");
	prot = vm_get_page_prot(VMFLAGS);
	vaddr = get_random_vaddr();
	mm = mm_alloc();
	if (!mm) {
		pr_err("mm_struct allocation failed\n");
		return 1;
	}

	/*
	 * __P000 (or even __S000) will help create page table entries with
	 * PROT_NONE permission as required for pxx_protnone_tests().
	 */
	protnone = __P000;

	vma = vm_area_alloc(mm);
	if (!vma) {
		pr_err("vma allocation failed\n");
		return 1;
	}

	/*
	 * PFN for mapping at PTE level is determined from a standard kernel
	 * text symbol. But pfns for higher page table levels are derived by
	 * masking lower bits of this real pfn. These derived pfns might not
	 * exist on the platform but that does not really matter as pfn_pxx()
	 * helpers will still create appropriate entries for the test. This
	 * helps avoid allocating large memory blocks for mappings at higher
	 * page table levels.
	 */
	paddr = __pa_symbol(&start_kernel);

	pte_aligned = (paddr & PAGE_MASK) >> PAGE_SHIFT;
	pmd_aligned = (paddr & PMD_MASK) >> PAGE_SHIFT;
	pud_aligned = (paddr & PUD_MASK) >> PAGE_SHIFT;
	p4d_aligned = (paddr & P4D_MASK) >> PAGE_SHIFT;
	pgd_aligned = (paddr & PGDIR_MASK) >> PAGE_SHIFT;
	WARN_ON(!pfn_valid(pte_aligned));

	pgdp = pgd_offset(mm, vaddr);
	p4dp = p4d_alloc(mm, pgdp, vaddr);
	pudp = pud_alloc(mm, p4dp, vaddr);
	pmdp = pmd_alloc(mm, pudp, vaddr);
	/*
	 * Allocate pgtable_t
	 */
	if (pte_alloc(mm, pmdp)) {
		pr_err("pgtable allocation failed\n");
		return 1;
	}

	/*
	 * Save all the page table page addresses as the page table
	 * entries will be used for testing with random or garbage
	 * values. These saved addresses will be used for freeing
	 * page table pages.
	 */
	pmd = READ_ONCE(*pmdp);
	saved_p4dp = p4d_offset(pgdp, 0UL);
	saved_pudp = pud_offset(p4dp, 0UL);
	saved_pmdp = pmd_offset(pudp, 0UL);
	saved_ptep = pmd_pgtable(pmd);

	/*
	 * Iterate over the protection_map[] to make sure that all
	 * the basic page table transformation validations hold true
	 * irrespective of the starting protection value for a given
	 * page table entry.
	 */
	for (idx = 0; idx < ARRAY_SIZE(protection_map); idx++) {
		pte_basic_tests(pte_aligned, idx);
		pmd_basic_tests(pmd_aligned, idx);
		pud_basic_tests(mm, pud_aligned, idx);
	}

	/*
	 * Both P4D and PGD level tests are very basic and do not
	 * involve creating page table entries from the protection
	 * value and the given pfn. Hence just keep them out of the
	 * above iteration for now to save some test execution time.
	 */
	p4d_basic_tests(p4d_aligned, prot);
	pgd_basic_tests(pgd_aligned, prot);

	pmd_leaf_tests(pmd_aligned, prot);
	pud_leaf_tests(pud_aligned, prot);

	pte_savedwrite_tests(pte_aligned, protnone);
	pmd_savedwrite_tests(pmd_aligned, protnone);

	pte_special_tests(pte_aligned, prot);
	pte_protnone_tests(pte_aligned, protnone);
	pmd_protnone_tests(pmd_aligned, protnone);

	pte_devmap_tests(pte_aligned, prot);
	pmd_devmap_tests(pmd_aligned, prot);
	pud_devmap_tests(pud_aligned, prot);

	pte_soft_dirty_tests(pte_aligned, prot);
	pmd_soft_dirty_tests(pmd_aligned, prot);
	pte_swap_soft_dirty_tests(pte_aligned, prot);
	pmd_swap_soft_dirty_tests(pmd_aligned, prot);

	pte_swap_tests(pte_aligned, prot);
	pmd_swap_tests(pmd_aligned, prot);

	swap_migration_tests();

	pmd_thp_tests(pmd_aligned, prot);
	pud_thp_tests(pud_aligned, prot);

	hugetlb_basic_tests(pte_aligned, prot);

	/*
	 * Page table modifying tests. They need to hold
	 * the proper page table lock.
	 */

	ptep = pte_offset_map_lock(mm, pmdp, vaddr, &ptl);
	pte_clear_tests(mm, ptep, pte_aligned, vaddr, prot);
	pte_advanced_tests(mm, vma, ptep, pte_aligned, vaddr, prot);
	pte_unmap_unlock(ptep, ptl);

	ptl = pmd_lock(mm, pmdp);
	pmd_clear_tests(mm, pmdp);
	pmd_advanced_tests(mm, vma, pmdp, pmd_aligned, vaddr, prot, saved_ptep);
	pmd_huge_tests(pmdp, pmd_aligned, prot);
	pmd_populate_tests(mm, pmdp, saved_ptep);
	spin_unlock(ptl);

	ptl = pud_lock(mm, pudp);
	pud_clear_tests(mm, pudp);
	pud_advanced_tests(mm, vma, pudp, pud_aligned, vaddr, prot);
	pud_huge_tests(pudp, pud_aligned, prot);
	pud_populate_tests(mm, pudp, saved_pmdp);
	spin_unlock(ptl);

	spin_lock(&mm->page_table_lock);
	p4d_clear_tests(mm, p4dp);
	pgd_clear_tests(mm, pgdp);
	p4d_populate_tests(mm, p4dp, saved_pudp);
	pgd_populate_tests(mm, pgdp, saved_p4dp);
	spin_unlock(&mm->page_table_lock);

	p4d_free(mm, saved_p4dp);
	pud_free(mm, saved_pudp);
	pmd_free(mm, saved_pmdp);
	pte_free(mm, saved_ptep);

	vm_area_free(vma);
	mm_dec_nr_puds(mm);
	mm_dec_nr_pmds(mm);
	mm_dec_nr_ptes(mm);
	mmdrop(mm);
	return 0;
}
late_initcall(debug_vm_pgtable);