// SPDX-License-Identifier: GPL-2.0-only
/*
 * This kernel test validates architecture page table helpers and
 * accessors and helps in verifying their continued compliance with
 * expected generic MM semantics.
 *
 * Copyright (C) 2019 ARM Ltd.
 *
 * Author: Anshuman Khandual <anshuman.khandual@arm.com>
 */
#define pr_fmt(fmt) "debug_vm_pgtable: [%-25s]: " fmt, __func__

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/printk.h>
#include <linux/pgtable.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/start_kernel.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * Please refer to Documentation/vm/arch_pgtable_helpers.rst for the
 * semantics expected of these helpers, which are being validated here.
 * All future changes made here or in the documentation need to be kept
 * in sync.
 */

#define VMFLAGS	(VM_READ|VM_WRITE|VM_EXEC)

/*
 * On the s390 platform, the lower 4 bits are used to identify a given
 * page table entry type. But these bits might affect the ability to
 * clear entries with pxx_clear() because of how dynamic page table
 * folding works on s390. So while loading up the entries do not change
 * the lower 4 bits. This does not affect any other platform. Also
 * avoid bit 62 on ppc64, which is used to mark a pte entry.
 */
#define S390_SKIP_MASK		GENMASK(3, 0)
#if __BITS_PER_LONG == 64
#define PPC64_SKIP_MASK		GENMASK(62, 62)
#else
#define PPC64_SKIP_MASK		0x0
#endif
#define ARCH_SKIP_MASK (S390_SKIP_MASK | PPC64_SKIP_MASK)
#define RANDOM_ORVALUE (GENMASK(BITS_PER_LONG - 1, 0) & ~ARCH_SKIP_MASK)
#define RANDOM_NZVALUE	GENMASK(7, 0)
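
/*
 * A minimal sketch of how RANDOM_ORVALUE is used by the pxx_clear()
 * tests further down (e.g. pud_clear_tests()): stuff the entry with
 * random garbage outside ARCH_SKIP_MASK first, then verify that the
 * clear helper still leaves a none entry behind:
 *
 *	pud = __pud(pud_val(pud) | RANDOM_ORVALUE);
 *	WRITE_ONCE(*pudp, pud);
 *	pud_clear(pudp);
 *	WARN_ON(!pud_none(READ_ONCE(*pudp)));
 */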

static void __init pte_basic_tests(unsigned long pfn, int idx)
{
	pgprot_t prot = protection_map[idx];
	pte_t pte = pfn_pte(pfn, prot);
	unsigned long val = idx, *ptr = &val;

	pr_debug("Validating PTE basic (%pGv)\n", ptr);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pte() to make sure that protection_map[idx]
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * that the dirty bit is set.
	 */
	WARN_ON(pte_dirty(pte_wrprotect(pte)));

	WARN_ON(!pte_same(pte, pte));
	WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
	WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
	WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte))));
	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
	WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte))));
	WARN_ON(pte_dirty(pte_wrprotect(pte_mkclean(pte))));
	WARN_ON(!pte_dirty(pte_wrprotect(pte_mkdirty(pte))));
}

static void __init pte_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pte_t *ptep,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
	pte_t pte;

	/*
	 * Architectures optimize set_pte_at() by avoiding the TLB flush.
	 * This requires set_pte_at() not to be used to update an existing
	 * pte entry. Clear the pte before each set_pte_at().
	 */

	pr_debug("Validating PTE advanced\n");
	pte = pfn_pte(pfn, prot);
	set_pte_at(mm, vaddr, ptep, pte);
	ptep_set_wrprotect(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(pte_write(pte));
	ptep_get_and_clear(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(pfn, prot);
	pte = pte_wrprotect(pte);
	pte = pte_mkclean(pte);
	set_pte_at(mm, vaddr, ptep, pte);
	pte = pte_mkwrite(pte);
	pte = pte_mkdirty(pte);
	ptep_set_access_flags(vma, vaddr, ptep, pte, 1);
	pte = ptep_get(ptep);
	WARN_ON(!(pte_write(pte) && pte_dirty(pte)));
	ptep_get_and_clear_full(mm, vaddr, ptep, 1);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(pfn, prot);
	pte = pte_mkyoung(pte);
	set_pte_at(mm, vaddr, ptep, pte);
	ptep_test_and_clear_young(vma, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(pte_young(pte));
}

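/*
 * Saved write is only exercised with CONFIG_NUMA_BALANCING (hence the
 * IS_ENABLED() gate below): when automatic NUMA balancing makes an
 * entry PROT_NONE to trigger a hinting fault, the saved-write state
 * remembers whether the entry was writable so that write permission
 * can be restored once the fault has been handled.
 */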
static void __init pte_savedwrite_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE saved write\n");
	WARN_ON(!pte_savedwrite(pte_mk_savedwrite(pte_clear_savedwrite(pte))));
	WARN_ON(pte_savedwrite(pte_clear_savedwrite(pte_mk_savedwrite(pte))));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_basic_tests(unsigned long pfn, int idx)
{
	pgprot_t prot = protection_map[idx];
	unsigned long val = idx, *ptr = &val;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD basic (%pGv)\n", ptr);
	pmd = pfn_pmd(pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pmd() to make sure that protection_map[idx]
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * that the dirty bit is set.
	 */
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd)));

	WARN_ON(!pmd_same(pmd, pmd));
	WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
	WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
	WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd))));
	WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
	WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
	WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd))));
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd_mkclean(pmd))));
	WARN_ON(!pmd_dirty(pmd_wrprotect(pmd_mkdirty(pmd))));
	/*
	 * A huge page does not point to the next level page table
	 * entry. Hence this must qualify as pmd_bad().
	 */
	WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
}

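/*
 * The deposit/withdraw pair below mirrors what the THP code expects:
 * a page table page is pre-deposited via pgtable_trans_huge_deposit()
 * before huge entries are installed (so that a later split always has
 * a page table available), and is handed back at the end with
 * pgtable_trans_huge_withdraw().
 */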
static void __init pmd_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pmd_t *pmdp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot, pgtable_t pgtable)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD advanced\n");
	/* Align the address wrt HPAGE_PMD_SIZE */
	vaddr &= HPAGE_PMD_MASK;

	pgtable_trans_huge_deposit(mm, pmdp, pgtable);

	pmd = pfn_pmd(pfn, prot);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmdp_set_wrprotect(mm, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_write(pmd));
	pmdp_huge_get_and_clear(mm, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pfn_pmd(pfn, prot);
	pmd = pmd_wrprotect(pmd);
	pmd = pmd_mkclean(pmd);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmd = pmd_mkwrite(pmd);
	pmd = pmd_mkdirty(pmd);
	pmdp_set_access_flags(vma, vaddr, pmdp, pmd, 1);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd)));
	pmdp_huge_get_and_clear_full(vma, vaddr, pmdp, 1);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pmd_mkhuge(pfn_pmd(pfn, prot));
	pmd = pmd_mkyoung(pmd);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmdp_test_and_clear_young(vma, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_young(pmd));

	/* Clear the pmd entry and withdraw the deposited page table */
	pmdp_huge_get_and_clear(mm, vaddr, pmdp);
	pgtable = pgtable_trans_huge_withdraw(mm, pmdp);
}

static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD leaf\n");
	pmd = pfn_pmd(pfn, prot);

	/*
	 * PMD based THP is a leaf entry.
	 */
	pmd = pmd_mkhuge(pmd);
	WARN_ON(!pmd_leaf(pmd));
}

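/*
 * Unlike the THP helpers above, pmd_huge_tests() exercises the huge
 * vmap path (pmd_set_huge()/pmd_clear_huge()) that ioremap() uses,
 * which is why it is gated on CONFIG_HAVE_ARCH_HUGE_VMAP and
 * arch_ioremap_pmd_supported() rather than on THP support.
 */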
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!arch_ioremap_pmd_supported())
		return;

	pr_debug("Validating PMD huge\n");
	/*
	 * The x86 implementation of pmd_set_huge() verifies that the
	 * given PMD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*pmdp, __pmd(0));
	WARN_ON(!pmd_set_huge(pmdp, __pfn_to_phys(pfn), prot));
	WARN_ON(!pmd_clear_huge(pmdp));
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));
}
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD saved write\n");
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_savedwrite(pmd_mk_savedwrite(pmd_clear_savedwrite(pmd))));
	WARN_ON(pmd_savedwrite(pmd_clear_savedwrite(pmd_mk_savedwrite(pmd))));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn, int idx)
{
	pgprot_t prot = protection_map[idx];
	unsigned long val = idx, *ptr = &val;
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD basic (%pGv)\n", ptr);
	pud = pfn_pud(pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pud() to make sure that protection_map[idx]
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * that the dirty bit is set.
	 */
	WARN_ON(pud_dirty(pud_wrprotect(pud)));

	WARN_ON(!pud_same(pud, pud));
	WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
	WARN_ON(!pud_dirty(pud_mkdirty(pud_mkclean(pud))));
	WARN_ON(pud_dirty(pud_mkclean(pud_mkdirty(pud))));
	WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
	WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
	WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));
	WARN_ON(pud_dirty(pud_wrprotect(pud_mkclean(pud))));
	WARN_ON(!pud_dirty(pud_wrprotect(pud_mkdirty(pud))));

	if (mm_pmd_folded(mm))
		return;

	/*
	 * A huge page does not point to the next level page table
	 * entry. Hence this must qualify as pud_bad().
	 */
	WARN_ON(!pud_bad(pud_mkhuge(pud)));
}

static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD advanced\n");
	/* Align the address wrt HPAGE_PUD_SIZE */
	vaddr &= HPAGE_PUD_MASK;

	pud = pfn_pud(pfn, prot);
	set_pud_at(mm, vaddr, pudp, pud);
	pudp_set_wrprotect(mm, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_write(pud));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear(mm, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */
	pud = pfn_pud(pfn, prot);
	pud = pud_wrprotect(pud);
	pud = pud_mkclean(pud);
	set_pud_at(mm, vaddr, pudp, pud);
	pud = pud_mkwrite(pud);
	pud = pud_mkdirty(pud);
	pudp_set_access_flags(vma, vaddr, pudp, pud, 1);
	pud = READ_ONCE(*pudp);
	WARN_ON(!(pud_write(pud) && pud_dirty(pud)));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear_full(mm, vaddr, pudp, 1);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */

	pud = pfn_pud(pfn, prot);
	pud = pud_mkyoung(pud);
	set_pud_at(mm, vaddr, pudp, pud);
	pudp_test_and_clear_young(vma, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_young(pud));

	pudp_huge_get_and_clear(mm, vaddr, pudp);
}

static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD leaf\n");
	pud = pfn_pud(pfn, prot);
	/*
	 * PUD based THP is a leaf entry.
	 */
	pud = pud_mkhuge(pud);
	WARN_ON(!pud_leaf(pud));
}

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!arch_ioremap_pud_supported())
		return;

	pr_debug("Validating PUD huge\n");
	/*
	 * The x86 implementation of pud_set_huge() verifies that the
	 * given PUD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*pudp, __pud(0));
	WARN_ON(!pud_set_huge(pudp, __pfn_to_phys(pfn), prot));
	WARN_ON(!pud_clear_huge(pudp));
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
}
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn, int idx) { }
static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
}
static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
{
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_basic_tests(unsigned long pfn, int idx) { }
static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn, int idx) { }
static void __init pmd_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pmd_t *pmdp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot, pgtable_t pgtable)
{
}
static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
}
static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
{
}
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
{
}
static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

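/*
 * P4D and PGD level tests are limited to pxx_same() on a random
 * value: unlike the levels above, there are no generic helpers here
 * to construct a real entry from a pfn and protection value (see the
 * corresponding comment in debug_vm_pgtable() below).
 */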
static void __init p4d_basic_tests(unsigned long pfn, pgprot_t prot)
{
	p4d_t p4d;

	pr_debug("Validating P4D basic\n");
	memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
	WARN_ON(!p4d_same(p4d, p4d));
}

static void __init pgd_basic_tests(unsigned long pfn, pgprot_t prot)
{
	pgd_t pgd;

	pr_debug("Validating PGD basic\n");
	memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
	WARN_ON(!pgd_same(pgd, pgd));
}

#ifndef __PAGETABLE_PUD_FOLDED
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp)
{
	pud_t pud = READ_ONCE(*pudp);

	if (mm_pmd_folded(mm))
		return;

	pr_debug("Validating PUD clear\n");
	pud = __pud(pud_val(pud) | RANDOM_ORVALUE);
	WRITE_ONCE(*pudp, pud);
	pud_clear(pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
}

static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
	pud_t pud;

	if (mm_pmd_folded(mm))
		return;

	pr_debug("Validating PUD populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as pud_bad().
	 */
	pud_populate(mm, pudp, pmdp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_bad(pud));
}
#else /* !__PAGETABLE_PUD_FOLDED */
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp) { }
static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
}
#endif /* !__PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_P4D_FOLDED
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp)
{
	p4d_t p4d = READ_ONCE(*p4dp);

	if (mm_pud_folded(mm))
		return;

	pr_debug("Validating P4D clear\n");
	p4d = __p4d(p4d_val(p4d) | RANDOM_ORVALUE);
	WRITE_ONCE(*p4dp, p4d);
	p4d_clear(p4dp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(!p4d_none(p4d));
}

static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
	p4d_t p4d;

	if (mm_pud_folded(mm))
		return;

	pr_debug("Validating P4D populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as p4d_bad().
	 */
	pud_clear(pudp);
	p4d_clear(p4dp);
	p4d_populate(mm, p4dp, pudp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(p4d_bad(p4d));
}

static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp)
{
	pgd_t pgd = READ_ONCE(*pgdp);

	if (mm_p4d_folded(mm))
		return;

	pr_debug("Validating PGD clear\n");
	pgd = __pgd(pgd_val(pgd) | RANDOM_ORVALUE);
	WRITE_ONCE(*pgdp, pgd);
	pgd_clear(pgdp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(!pgd_none(pgd));
}

static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
	pgd_t pgd;

	if (mm_p4d_folded(mm))
		return;

	pr_debug("Validating PGD populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as pgd_bad().
	 */
	p4d_clear(p4dp);
	pgd_clear(pgdp);
	pgd_populate(mm, pgdp, p4dp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(pgd_bad(pgd));
}
#else /* !__PAGETABLE_P4D_FOLDED */
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp) { }
static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp) { }
static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
}
static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
}
#endif /* !__PAGETABLE_P4D_FOLDED */

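/*
 * The pxx_clear() tests below follow the RANDOM_ORVALUE pattern
 * sketched near the top of this file: load the entry with noise,
 * clear it, and insist that a none entry remains. riscv skips the
 * noise for the pte case (see the #ifndef below), presumably because
 * the extra random bits do not form a valid pte there.
 */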
static void __init pte_clear_tests(struct mm_struct *mm, pte_t *ptep,
				   unsigned long pfn, unsigned long vaddr,
				   pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	pr_debug("Validating PTE clear\n");
#ifndef CONFIG_RISCV
	pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
#endif
	set_pte_at(mm, vaddr, ptep, pte);
	barrier();
	pte_clear(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));
}

static void __init pmd_clear_tests(struct mm_struct *mm, pmd_t *pmdp)
{
	pmd_t pmd = READ_ONCE(*pmdp);

	pr_debug("Validating PMD clear\n");
	pmd = __pmd(pmd_val(pmd) | RANDOM_ORVALUE);
	WRITE_ONCE(*pmdp, pmd);
	pmd_clear(pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pmd_populate_tests(struct mm_struct *mm, pmd_t *pmdp,
				      pgtable_t pgtable)
{
	pmd_t pmd;

	pr_debug("Validating PMD populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as pmd_bad().
	 */
	pmd_populate(mm, pmdp, pgtable);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_bad(pmd));
}

static void __init pte_special_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL))
		return;

	pr_debug("Validating PTE special\n");
	WARN_ON(!pte_special(pte_mkspecial(pte)));
}

static void __init pte_protnone_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE protnone\n");
	WARN_ON(!pte_protnone(pte));
	WARN_ON(!pte_present(pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_protnone_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD protnone\n");
	pmd = pmd_mkhuge(pfn_pmd(pfn, prot));
	WARN_ON(!pmd_protnone(pmd));
	WARN_ON(!pmd_present(pmd));
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_protnone_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static void __init pte_devmap_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	pr_debug("Validating PTE devmap\n");
	WARN_ON(!pte_devmap(pte_mkdevmap(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD devmap\n");
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd)));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD devmap\n");
	pud = pfn_pud(pfn, prot);
	WARN_ON(!pud_devmap(pud_mkdevmap(pud)));
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#else /* !CONFIG_ARCH_HAS_PTE_DEVMAP */
static void __init pte_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */

static void __init pte_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE soft dirty\n");
	WARN_ON(!pte_soft_dirty(pte_mksoft_dirty(pte)));
	WARN_ON(pte_soft_dirty(pte_clear_soft_dirty(pte)));
}

static void __init pte_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE swap soft dirty\n");
	WARN_ON(!pte_swp_soft_dirty(pte_swp_mksoft_dirty(pte)));
	WARN_ON(pte_swp_soft_dirty(pte_swp_clear_soft_dirty(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD soft dirty\n");
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
	WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
}

static void __init pmd_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) ||
	    !IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap soft dirty\n");
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
	WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_soft_dirty_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __init pte_swap_tests(unsigned long pfn, pgprot_t prot)
{
	swp_entry_t swp;
	pte_t pte;

	pr_debug("Validating PTE swap\n");
	pte = pfn_pte(pfn, prot);
	swp = __pte_to_swp_entry(pte);
	pte = __swp_entry_to_pte(swp);
	WARN_ON(pfn != pte_pfn(pte));
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static void __init pmd_swap_tests(unsigned long pfn, pgprot_t prot)
{
	swp_entry_t swp;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap\n");
	pmd = pfn_pmd(pfn, prot);
	swp = __pmd_to_swp_entry(pmd);
	pmd = __swp_entry_to_pmd(swp);
	WARN_ON(pfn != pmd_pfn(pmd));
}
#else /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
static void __init pmd_swap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

static void __init swap_migration_tests(void)
{
	struct page *page;
	swp_entry_t swp;

	if (!IS_ENABLED(CONFIG_MIGRATION))
		return;

	pr_debug("Validating swap migration\n");
	/*
	 * swap_migration_tests() requires a dedicated page as it needs to
	 * be locked before creating a migration entry from it. Locking the
	 * page that actually maps kernel text ('start_kernel') can be really
	 * problematic. Let's allocate a dedicated page explicitly for this
	 * purpose that will be freed subsequently.
	 */
	page = alloc_page(GFP_KERNEL);
	if (!page) {
		pr_err("page allocation failed\n");
		return;
	}

	/*
	 * make_migration_entry() expects the given page to be
	 * locked, otherwise it stumbles upon a BUG_ON().
	 */
	__SetPageLocked(page);
	swp = make_migration_entry(page, 1);
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(!is_write_migration_entry(swp));

	make_migration_entry_read(&swp);
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_write_migration_entry(swp));

	swp = make_migration_entry(page, 0);
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_write_migration_entry(swp));
	__ClearPageLocked(page);
	__free_page(page);
}

#ifdef CONFIG_HUGETLB_PAGE
static void __init hugetlb_basic_tests(unsigned long pfn, pgprot_t prot)
{
	struct page *page;
	pte_t pte;

	pr_debug("Validating HugeTLB basic\n");
	/*
	 * Accessing the page associated with the pfn is safe here,
	 * as it was previously derived from a real kernel symbol.
	 */
	page = pfn_to_page(pfn);
	pte = mk_huge_pte(page, prot);

	WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte)));
	WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte))));
	WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte))));

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
	pte = pfn_pte(pfn, prot);

	WARN_ON(!pte_huge(pte_mkhuge(pte)));
#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
}
#else /* !CONFIG_HUGETLB_PAGE */
static void __init hugetlb_basic_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_thp_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD based THP\n");
	/*
	 * pmd_trans_huge() and pmd_present() must return true after
	 * MMU invalidation with pmd_mkinvalid(). This behavior is an
	 * optimization for transparent huge pages. pmd_trans_huge() must
	 * be true if pmd_page() returns a valid THP to avoid taking the
	 * pmd_lock when others walk over non transhuge pmds (i.e. there
	 * are no THP allocated). Especially when splitting a THP and
	 * removing the present bit from the pmd, pmd_trans_huge() still
	 * needs to return true. pmd_present() should be true whenever
	 * pmd_trans_huge() returns true.
	 */
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_trans_huge(pmd_mkhuge(pmd)));

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
	WARN_ON(!pmd_trans_huge(pmd_mkinvalid(pmd_mkhuge(pmd))));
	WARN_ON(!pmd_present(pmd_mkinvalid(pmd_mkhuge(pmd))));
#endif /* __HAVE_ARCH_PMDP_INVALIDATE */
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD based THP\n");
	pud = pfn_pud(pfn, prot);
	WARN_ON(!pud_trans_huge(pud_mkhuge(pud)));

	/*
	 * pud_mkinvalid() has been dropped for now. Enable back
	 * these tests when it comes back with a modified pud_present().
	 *
	 * WARN_ON(!pud_trans_huge(pud_mkinvalid(pud_mkhuge(pud))));
	 * WARN_ON(!pud_present(pud_mkinvalid(pud_mkhuge(pud))));
	 */
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_thp_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static unsigned long __init get_random_vaddr(void)
{
	unsigned long random_vaddr, random_pages, total_user_pages;

	total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;

	random_pages = get_random_long() % total_user_pages;
	random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;

	return random_vaddr;
}

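/*
 * debug_vm_pgtable() drives all of the tests above: it allocates a
 * scratch mm_struct and vma, allocates the page table levels for a
 * random user vaddr, derives the test pfns from the kernel text
 * symbol 'start_kernel', runs the lockless basic tests first, then
 * the page table modifying tests under the proper page table locks,
 * and finally frees the saved page table pages again.
 */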
static int __init debug_vm_pgtable(void)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	pgd_t *pgdp;
	p4d_t *p4dp, *saved_p4dp;
	pud_t *pudp, *saved_pudp;
	pmd_t *pmdp, *saved_pmdp, pmd;
	pte_t *ptep;
	pgtable_t saved_ptep;
	pgprot_t prot, protnone;
	phys_addr_t paddr;
	unsigned long vaddr, pte_aligned, pmd_aligned;
	unsigned long pud_aligned, p4d_aligned, pgd_aligned;
	spinlock_t *ptl = NULL;
	int idx;

	pr_info("Validating architecture page table helpers\n");
	prot = vm_get_page_prot(VMFLAGS);
	vaddr = get_random_vaddr();
	mm = mm_alloc();
	if (!mm) {
		pr_err("mm_struct allocation failed\n");
		return 1;
	}

	/*
	 * __P000 (or even __S000) will help create page table entries with
	 * PROT_NONE permission as required for pxx_protnone_tests().
	 */
	protnone = __P000;

	vma = vm_area_alloc(mm);
	if (!vma) {
		pr_err("vma allocation failed\n");
		return 1;
	}

	/*
	 * The PFN for mapping at PTE level is determined from a standard
	 * kernel text symbol. But pfns for the higher page table levels are
	 * derived by masking the lower bits of this real pfn. These derived
	 * pfns might not exist on the platform but that does not really
	 * matter as the pfn_pxx() helpers will still create appropriate
	 * entries for the test. This helps avoid large memory block
	 * allocations for mappings at the higher page table levels.
	 */
	paddr = __pa_symbol(&start_kernel);

	pte_aligned = (paddr & PAGE_MASK) >> PAGE_SHIFT;
	pmd_aligned = (paddr & PMD_MASK) >> PAGE_SHIFT;
	pud_aligned = (paddr & PUD_MASK) >> PAGE_SHIFT;
	p4d_aligned = (paddr & P4D_MASK) >> PAGE_SHIFT;
	pgd_aligned = (paddr & PGDIR_MASK) >> PAGE_SHIFT;
	WARN_ON(!pfn_valid(pte_aligned));
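	/*
	 * For example (illustrative values only, assuming 4K pages): a
	 * paddr of 0x40a12345 would give pte_aligned == 0x40a12, and with
	 * a 2M PMD_MASK, pmd_aligned == 0x40a00 -- the same pfn with more
	 * of its lower bits cleared at each level.
	 */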

	pgdp = pgd_offset(mm, vaddr);
	p4dp = p4d_alloc(mm, pgdp, vaddr);
	pudp = pud_alloc(mm, p4dp, vaddr);
	pmdp = pmd_alloc(mm, pudp, vaddr);
	/*
	 * Allocate pgtable_t
	 */
	if (pte_alloc(mm, pmdp)) {
		pr_err("pgtable allocation failed\n");
		return 1;
	}

	/*
	 * Save all the page table page addresses as the page table
	 * entries will be used for testing with random or garbage
	 * values. These saved addresses will be used for freeing
	 * page table pages.
	 */
	pmd = READ_ONCE(*pmdp);
	saved_p4dp = p4d_offset(pgdp, 0UL);
	saved_pudp = pud_offset(p4dp, 0UL);
	saved_pmdp = pmd_offset(pudp, 0UL);
	saved_ptep = pmd_pgtable(pmd);

	/*
	 * Iterate over the protection_map[] to make sure that all
	 * the basic page table transformation validations hold true
	 * irrespective of the starting protection value for a given
	 * page table entry.
	 */
	for (idx = 0; idx < ARRAY_SIZE(protection_map); idx++) {
		pte_basic_tests(pte_aligned, idx);
		pmd_basic_tests(pmd_aligned, idx);
		pud_basic_tests(mm, pud_aligned, idx);
	}

	/*
	 * Both P4D and PGD level tests are very basic and do not
	 * involve creating page table entries from the protection
	 * value and the given pfn. Hence just keep them out of
	 * the above iteration for now to save some test execution
	 * time.
	 */
	p4d_basic_tests(p4d_aligned, prot);
	pgd_basic_tests(pgd_aligned, prot);

	pmd_leaf_tests(pmd_aligned, prot);
	pud_leaf_tests(pud_aligned, prot);

	pte_savedwrite_tests(pte_aligned, protnone);
	pmd_savedwrite_tests(pmd_aligned, protnone);

	pte_special_tests(pte_aligned, prot);
	pte_protnone_tests(pte_aligned, protnone);
	pmd_protnone_tests(pmd_aligned, protnone);

	pte_devmap_tests(pte_aligned, prot);
	pmd_devmap_tests(pmd_aligned, prot);
	pud_devmap_tests(pud_aligned, prot);

	pte_soft_dirty_tests(pte_aligned, prot);
	pmd_soft_dirty_tests(pmd_aligned, prot);
	pte_swap_soft_dirty_tests(pte_aligned, prot);
	pmd_swap_soft_dirty_tests(pmd_aligned, prot);

	pte_swap_tests(pte_aligned, prot);
	pmd_swap_tests(pmd_aligned, prot);

	swap_migration_tests();

	pmd_thp_tests(pmd_aligned, prot);
	pud_thp_tests(pud_aligned, prot);

	hugetlb_basic_tests(pte_aligned, prot);

	/*
	 * Page table modifying tests. They need to hold
	 * the proper page table lock.
	 */

	ptep = pte_offset_map_lock(mm, pmdp, vaddr, &ptl);
	pte_clear_tests(mm, ptep, pte_aligned, vaddr, prot);
	pte_advanced_tests(mm, vma, ptep, pte_aligned, vaddr, prot);
	pte_unmap_unlock(ptep, ptl);

	ptl = pmd_lock(mm, pmdp);
	pmd_clear_tests(mm, pmdp);
	pmd_advanced_tests(mm, vma, pmdp, pmd_aligned, vaddr, prot, saved_ptep);
	pmd_huge_tests(pmdp, pmd_aligned, prot);
	pmd_populate_tests(mm, pmdp, saved_ptep);
	spin_unlock(ptl);

	ptl = pud_lock(mm, pudp);
	pud_clear_tests(mm, pudp);
	pud_advanced_tests(mm, vma, pudp, pud_aligned, vaddr, prot);
	pud_huge_tests(pudp, pud_aligned, prot);
	pud_populate_tests(mm, pudp, saved_pmdp);
	spin_unlock(ptl);

	spin_lock(&mm->page_table_lock);
	p4d_clear_tests(mm, p4dp);
	pgd_clear_tests(mm, pgdp);
	p4d_populate_tests(mm, p4dp, saved_pudp);
	pgd_populate_tests(mm, pgdp, saved_p4dp);
	spin_unlock(&mm->page_table_lock);

	p4d_free(mm, saved_p4dp);
	pud_free(mm, saved_pudp);
	pmd_free(mm, saved_pmdp);
	pte_free(mm, saved_ptep);

	vm_area_free(vma);
	mm_dec_nr_puds(mm);
	mm_dec_nr_pmds(mm);
	mm_dec_nr_ptes(mm);
	mmdrop(mm);
	return 0;
}
late_initcall(debug_vm_pgtable);