// SPDX-License-Identifier: GPL-2.0-only
/*
 * This kernel test validates architecture page table helpers and
 * accessors and helps in verifying their continued compliance with
 * expected generic MM semantics.
 *
 * Copyright (C) 2019 ARM Ltd.
 *
 * Author: Anshuman Khandual <anshuman.khandual@arm.com>
 */
#define pr_fmt(fmt) "debug_vm_pgtable: [%-25s]: " fmt, __func__

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/printk.h>
#include <linux/pgtable.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/start_kernel.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * Please refer to Documentation/vm/arch_pgtable_helpers.rst for the semantics
 * expectations that are being validated here. All future changes in here
 * or the documentation need to be in sync.
 */

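/*
 * The vm_flags combination used, via vm_get_page_prot(), to derive the
 * default page protection value for the entries created in these tests.
 */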
#define VMFLAGS	(VM_READ|VM_WRITE|VM_EXEC)

/*
 * On the s390 platform, the lower 4 bits are used to identify a given page
 * table entry type. But these bits might affect the ability to clear entries
 * with pxx_clear() because of how dynamic page table folding works on s390.
 * So, while loading up the entries, do not change the lower 4 bits. This does
 * not affect any other platform. Also avoid the 62nd bit on ppc64, which is
 * used to mark a pte entry.
 */
#define S390_SKIP_MASK		GENMASK(3, 0)
#if __BITS_PER_LONG == 64
#define PPC64_SKIP_MASK		GENMASK(62, 62)
#else
#define PPC64_SKIP_MASK		0x0
#endif
#define ARCH_SKIP_MASK (S390_SKIP_MASK | PPC64_SKIP_MASK)
#define RANDOM_ORVALUE (GENMASK(BITS_PER_LONG - 1, 0) & ~ARCH_SKIP_MASK)
#define RANDOM_NZVALUE	GENMASK(7, 0)

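/*
 * The test context: the mm/vma used for the tests, pointers to the page
 * table entries under test, the saved page table pages for later freeing,
 * the protection values, and the pfns backing the entries at each level.
 */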
struct pgtable_debug_args {
	struct mm_struct	*mm;
	struct vm_area_struct	*vma;

	pgd_t			*pgdp;
	p4d_t			*p4dp;
	pud_t			*pudp;
	pmd_t			*pmdp;
	pte_t			*ptep;

	p4d_t			*start_p4dp;
	pud_t			*start_pudp;
	pmd_t			*start_pmdp;
	pgtable_t		start_ptep;

	unsigned long		vaddr;
	pgprot_t		page_prot;
	pgprot_t		page_prot_none;

	bool			is_contiguous_page;
	unsigned long		pud_pfn;
	unsigned long		pmd_pfn;
	unsigned long		pte_pfn;

	unsigned long		fixed_pgd_pfn;
	unsigned long		fixed_p4d_pfn;
	unsigned long		fixed_pud_pfn;
	unsigned long		fixed_pmd_pfn;
	unsigned long		fixed_pte_pfn;
};

static void __init pte_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = protection_map[idx];
	pte_t pte = pfn_pte(args->fixed_pte_pfn, prot);
	unsigned long val = idx, *ptr = &val;

	pr_debug("Validating PTE basic (%pGv)\n", ptr);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pte() to make sure that protection_map[idx]
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pte_dirty(pte_wrprotect(pte)));

	WARN_ON(!pte_same(pte, pte));
	WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
	WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
	WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte))));
	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
	WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte))));
	WARN_ON(pte_dirty(pte_wrprotect(pte_mkclean(pte))));
	WARN_ON(!pte_dirty(pte_wrprotect(pte_mkdirty(pte))));
}

static void __init pte_advanced_tests(struct pgtable_debug_args *args)
{
	pte_t pte;

	/*
	 * Architectures optimize set_pte_at() by avoiding TLB flushes.
	 * This requires set_pte_at() not to be used to update an existing
	 * pte entry. Clear the pte before doing set_pte_at().
	 */
	if (args->pte_pfn == ULONG_MAX)
		return;

	pr_debug("Validating PTE advanced\n");
	pte = pfn_pte(args->pte_pfn, args->page_prot);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	ptep_set_wrprotect(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(pte_write(pte));
	ptep_get_and_clear(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(args->pte_pfn, args->page_prot);
	pte = pte_wrprotect(pte);
	pte = pte_mkclean(pte);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	pte = pte_mkwrite(pte);
	pte = pte_mkdirty(pte);
	ptep_set_access_flags(args->vma, args->vaddr, args->ptep, pte, 1);
	pte = ptep_get(args->ptep);
	WARN_ON(!(pte_write(pte) && pte_dirty(pte)));
	ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(args->pte_pfn, args->page_prot);
	pte = pte_mkyoung(pte);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	ptep_test_and_clear_young(args->vma, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(pte_young(pte));
}

static void __init pte_savedwrite_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot_none);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE saved write\n");
	WARN_ON(!pte_savedwrite(pte_mk_savedwrite(pte_clear_savedwrite(pte))));
	WARN_ON(pte_savedwrite(pte_clear_savedwrite(pte_mk_savedwrite(pte))));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = protection_map[idx];
	unsigned long val = idx, *ptr = &val;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD basic (%pGv)\n", ptr);
	pmd = pfn_pmd(args->fixed_pmd_pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pmd() to make sure that protection_map[idx]
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd)));

	WARN_ON(!pmd_same(pmd, pmd));
	WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
	WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
	WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd))));
	WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
	WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
	WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd))));
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd_mkclean(pmd))));
	WARN_ON(!pmd_dirty(pmd_wrprotect(pmd_mkdirty(pmd))));
	/*
	 * A huge page does not point to next level page table
	 * entry. Hence this must qualify as pmd_bad().
	 */
	WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
}

static void __init pmd_advanced_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;
	unsigned long vaddr = args->vaddr;

	if (!has_transparent_hugepage())
		return;

	if (args->pmd_pfn == ULONG_MAX)
		return;

	pr_debug("Validating PMD advanced\n");
	/* Align the address wrt HPAGE_PMD_SIZE */
	vaddr &= HPAGE_PMD_MASK;

	pgtable_trans_huge_deposit(args->mm, args->pmdp, args->start_ptep);

	pmd = pfn_pmd(args->pmd_pfn, args->page_prot);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	pmdp_set_wrprotect(args->mm, vaddr, args->pmdp);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(pmd_write(pmd));
	pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pfn_pmd(args->pmd_pfn, args->page_prot);
	pmd = pmd_wrprotect(pmd);
	pmd = pmd_mkclean(pmd);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	pmd = pmd_mkwrite(pmd);
	pmd = pmd_mkdirty(pmd);
	pmdp_set_access_flags(args->vma, vaddr, args->pmdp, pmd, 1);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd)));
	pmdp_huge_get_and_clear_full(args->vma, vaddr, args->pmdp, 1);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pmd_mkhuge(pfn_pmd(args->pmd_pfn, args->page_prot));
	pmd = pmd_mkyoung(pmd);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	pmdp_test_and_clear_young(args->vma, vaddr, args->pmdp);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(pmd_young(pmd));

	/* Clear the pte entries */
	pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
	pgtable_trans_huge_withdraw(args->mm, args->pmdp);
}

static void __init pmd_leaf_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD leaf\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);

	/*
	 * PMD based THP is a leaf entry.
	 */
	pmd = pmd_mkhuge(pmd);
	WARN_ON(!pmd_leaf(pmd));
}

static void __init pmd_savedwrite_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD saved write\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot_none);
	WARN_ON(!pmd_savedwrite(pmd_mk_savedwrite(pmd_clear_savedwrite(pmd))));
	WARN_ON(pmd_savedwrite(pmd_clear_savedwrite(pmd_mk_savedwrite(pmd))));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = protection_map[idx];
	unsigned long val = idx, *ptr = &val;
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD basic (%pGv)\n", ptr);
	pud = pfn_pud(args->fixed_pud_pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pud() to make sure that protection_map[idx]
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pud_dirty(pud_wrprotect(pud)));

	WARN_ON(!pud_same(pud, pud));
	WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
	WARN_ON(!pud_dirty(pud_mkdirty(pud_mkclean(pud))));
	WARN_ON(pud_dirty(pud_mkclean(pud_mkdirty(pud))));
	WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
	WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
	WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));
	WARN_ON(pud_dirty(pud_wrprotect(pud_mkclean(pud))));
	WARN_ON(!pud_dirty(pud_wrprotect(pud_mkdirty(pud))));

	if (mm_pmd_folded(args->mm))
		return;

	/*
	 * A huge page does not point to next level page table
	 * entry. Hence this must qualify as pud_bad().
	 */
	WARN_ON(!pud_bad(pud_mkhuge(pud)));
}

static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD advanced\n");
	/* Align the address wrt HPAGE_PUD_SIZE */
	vaddr &= HPAGE_PUD_MASK;

	pud = pfn_pud(pfn, prot);
	set_pud_at(mm, vaddr, pudp, pud);
	pudp_set_wrprotect(mm, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_write(pud));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear(mm, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */
	pud = pfn_pud(pfn, prot);
	pud = pud_wrprotect(pud);
	pud = pud_mkclean(pud);
	set_pud_at(mm, vaddr, pudp, pud);
	pud = pud_mkwrite(pud);
	pud = pud_mkdirty(pud);
	pudp_set_access_flags(vma, vaddr, pudp, pud, 1);
	pud = READ_ONCE(*pudp);
	WARN_ON(!(pud_write(pud) && pud_dirty(pud)));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear_full(mm, vaddr, pudp, 1);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */

	pud = pfn_pud(pfn, prot);
	pud = pud_mkyoung(pud);
	set_pud_at(mm, vaddr, pudp, pud);
	pudp_test_and_clear_young(vma, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_young(pud));

	pudp_huge_get_and_clear(mm, vaddr, pudp);
}

static void __init pud_leaf_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD leaf\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	/*
	 * PUD based THP is a leaf entry.
	 */
	pud = pud_mkhuge(pud);
	WARN_ON(!pud_leaf(pud));
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
}
static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
416#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
Gavin Shan36b77d12021-09-02 14:52:22 -0700417static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx) { }
418static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
Gavin Shanc0fe07b2021-09-02 14:52:41 -0700419static void __init pmd_advanced_tests(struct pgtable_debug_args *args) { }
Shixin Liu5fe77be2021-06-30 18:47:37 -0700420static void __init pud_advanced_tests(struct mm_struct *mm,
421 struct vm_area_struct *vma, pud_t *pudp,
422 unsigned long pfn, unsigned long vaddr,
423 pgprot_t prot)
424{
425}
Gavin Shan8983d232021-09-02 14:52:25 -0700426static void __init pmd_leaf_tests(struct pgtable_debug_args *args) { }
427static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
428static void __init pmd_savedwrite_tests(struct pgtable_debug_args *args) { }
Shixin Liu5fe77be2021-06-30 18:47:37 -0700429#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
Anshuman Khanduala5c3b9f2020-08-06 23:19:20 -0700430
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static void __init pmd_huge_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!arch_vmap_pmd_supported(args->page_prot))
		return;

	pr_debug("Validating PMD huge\n");
	/*
	 * X86 defined pmd_set_huge() verifies that the given
	 * PMD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*args->pmdp, __pmd(0));
	WARN_ON(!pmd_set_huge(args->pmdp, __pfn_to_phys(args->fixed_pmd_pfn), args->page_prot));
	WARN_ON(!pmd_clear_huge(args->pmdp));
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!arch_vmap_pud_supported(prot))
		return;

	pr_debug("Validating PUD huge\n");
	/*
	 * X86 defined pud_set_huge() verifies that the given
	 * PUD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*pudp, __pud(0));
	WARN_ON(!pud_set_huge(pudp, __pfn_to_phys(pfn), prot));
	WARN_ON(!pud_clear_huge(pudp));
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
}
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static void __init pmd_huge_tests(struct pgtable_debug_args *args) { }
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

static void __init p4d_basic_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d;

	pr_debug("Validating P4D basic\n");
	memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
	WARN_ON(!p4d_same(p4d, p4d));
}

static void __init pgd_basic_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd;

	pr_debug("Validating PGD basic\n");
	memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
	WARN_ON(!pgd_same(pgd, pgd));
}

#ifndef __PAGETABLE_PUD_FOLDED
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp)
{
	pud_t pud = READ_ONCE(*pudp);

	if (mm_pmd_folded(mm))
		return;

	pr_debug("Validating PUD clear\n");
	pud = __pud(pud_val(pud) | RANDOM_ORVALUE);
	WRITE_ONCE(*pudp, pud);
	pud_clear(pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
}

static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
	pud_t pud;

	if (mm_pmd_folded(mm))
		return;

	pr_debug("Validating PUD populate\n");
	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as pud_bad().
	 */
	pud_populate(mm, pudp, pmdp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_bad(pud));
}
#else /* !__PAGETABLE_PUD_FOLDED */
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp) { }
static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
}
#endif /* PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_P4D_FOLDED
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp)
{
	p4d_t p4d = READ_ONCE(*p4dp);

	if (mm_pud_folded(mm))
		return;

	pr_debug("Validating P4D clear\n");
	p4d = __p4d(p4d_val(p4d) | RANDOM_ORVALUE);
	WRITE_ONCE(*p4dp, p4d);
	p4d_clear(p4dp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(!p4d_none(p4d));
}

static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
	p4d_t p4d;

	if (mm_pud_folded(mm))
		return;

	pr_debug("Validating P4D populate\n");
	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as p4d_bad().
	 */
	pud_clear(pudp);
	p4d_clear(p4dp);
	p4d_populate(mm, p4dp, pudp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(p4d_bad(p4d));
}

static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp)
{
	pgd_t pgd = READ_ONCE(*pgdp);

	if (mm_p4d_folded(mm))
		return;

	pr_debug("Validating PGD clear\n");
	pgd = __pgd(pgd_val(pgd) | RANDOM_ORVALUE);
	WRITE_ONCE(*pgdp, pgd);
	pgd_clear(pgdp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(!pgd_none(pgd));
}

static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
	pgd_t pgd;

	if (mm_p4d_folded(mm))
		return;

	pr_debug("Validating PGD populate\n");
	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as pgd_bad().
	 */
	p4d_clear(p4dp);
	pgd_clear(pgdp);
	pgd_populate(mm, pgdp, p4dp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(pgd_bad(pgd));
}
#else /* !__PAGETABLE_P4D_FOLDED */
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp) { }
static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp) { }
static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
}
static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
}
#endif /* PAGETABLE_P4D_FOLDED */

static void __init pte_clear_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->pte_pfn, args->page_prot);

	if (args->pte_pfn == ULONG_MAX)
		return;

	pr_debug("Validating PTE clear\n");
#ifndef CONFIG_RISCV
	pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
#endif
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	barrier();
	pte_clear(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));
}

static void __init pmd_clear_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd = READ_ONCE(*args->pmdp);

	pr_debug("Validating PMD clear\n");
	pmd = __pmd(pmd_val(pmd) | RANDOM_ORVALUE);
	WRITE_ONCE(*args->pmdp, pmd);
	pmd_clear(args->pmdp);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pmd_populate_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	pr_debug("Validating PMD populate\n");
	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as pmd_bad().
	 */
	pmd_populate(args->mm, args->pmdp, args->start_ptep);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(pmd_bad(pmd));
}

static void __init pte_special_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL))
		return;

	pr_debug("Validating PTE special\n");
	WARN_ON(!pte_special(pte_mkspecial(pte)));
}

static void __init pte_protnone_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot_none);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE protnone\n");
	WARN_ON(!pte_protnone(pte));
	WARN_ON(!pte_present(pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_protnone_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD protnone\n");
	pmd = pmd_mkhuge(pfn_pmd(args->fixed_pmd_pfn, args->page_prot_none));
	WARN_ON(!pmd_protnone(pmd));
	WARN_ON(!pmd_present(pmd));
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_protnone_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static void __init pte_devmap_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	pr_debug("Validating PTE devmap\n");
	WARN_ON(!pte_devmap(pte_mkdevmap(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_devmap_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD devmap\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd)));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_devmap_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD devmap\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	WARN_ON(!pud_devmap(pud_mkdevmap(pud)));
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#else
static void __init pte_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */

static void __init pte_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE soft dirty\n");
	WARN_ON(!pte_soft_dirty(pte_mksoft_dirty(pte)));
	WARN_ON(pte_soft_dirty(pte_clear_soft_dirty(pte)));
}

static void __init pte_swap_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE swap soft dirty\n");
	WARN_ON(!pte_swp_soft_dirty(pte_swp_mksoft_dirty(pte)));
	WARN_ON(pte_swp_soft_dirty(pte_swp_clear_soft_dirty(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD soft dirty\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
	WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
}

static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) ||
	    !IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap soft dirty\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
	WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args) { }
static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __init pte_swap_tests(struct pgtable_debug_args *args)
{
	swp_entry_t swp;
	pte_t pte;

	pr_debug("Validating PTE swap\n");
	pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);
	swp = __pte_to_swp_entry(pte);
	pte = __swp_entry_to_pte(swp);
	WARN_ON(args->fixed_pte_pfn != pte_pfn(pte));
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static void __init pmd_swap_tests(struct pgtable_debug_args *args)
{
	swp_entry_t swp;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	swp = __pmd_to_swp_entry(pmd);
	pmd = __swp_entry_to_pmd(swp);
	WARN_ON(args->fixed_pmd_pfn != pmd_pfn(pmd));
}
#else /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
static void __init pmd_swap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

static void __init swap_migration_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	swp_entry_t swp;

	if (!IS_ENABLED(CONFIG_MIGRATION))
		return;

	/*
	 * swap_migration_tests() requires a dedicated page as it needs to
	 * be locked before creating a migration entry from it. Locking the
	 * page that actually maps kernel text ('start_kernel') can be really
	 * problematic. Let's use the allocated page explicitly for this
	 * purpose.
	 */
	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	pr_debug("Validating swap migration\n");

	/*
	 * make_migration_entry() expects given page to be
	 * locked, otherwise it stumbles upon a BUG_ON().
	 */
	__SetPageLocked(page);
	swp = make_writable_migration_entry(page_to_pfn(page));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(!is_writable_migration_entry(swp));

	swp = make_readable_migration_entry(swp_offset(swp));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_writable_migration_entry(swp));

	swp = make_readable_migration_entry(page_to_pfn(page));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_writable_migration_entry(swp));
	__ClearPageLocked(page);
}

#ifdef CONFIG_HUGETLB_PAGE
static void __init hugetlb_basic_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pte_t pte;

	pr_debug("Validating HugeTLB basic\n");
	/*
	 * Accessing the page associated with the pfn is safe here,
	 * as it was previously derived from a real kernel symbol.
	 */
	page = pfn_to_page(args->fixed_pmd_pfn);
	pte = mk_huge_pte(page, args->page_prot);

	WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte)));
	WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte))));
	WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte))));

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
	pte = pfn_pte(args->fixed_pmd_pfn, args->page_prot);

	WARN_ON(!pte_huge(pte_mkhuge(pte)));
#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
}
#else /* !CONFIG_HUGETLB_PAGE */
static void __init hugetlb_basic_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_thp_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD based THP\n");
	/*
	 * pmd_trans_huge() and pmd_present() must return positive after
	 * MMU invalidation with pmd_mkinvalid(). This behavior is an
	 * optimization for transparent huge page. pmd_trans_huge() must
	 * be true if pmd_page() returns a valid THP to avoid taking the
	 * pmd_lock when others walk over non transhuge pmds (i.e. there
	 * are no THP allocated). Especially when splitting a THP and
	 * removing the present bit from the pmd, pmd_trans_huge() still
	 * needs to return true. pmd_present() should be true whenever
	 * pmd_trans_huge() returns true.
	 */
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_trans_huge(pmd_mkhuge(pmd)));

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
	WARN_ON(!pmd_trans_huge(pmd_mkinvalid(pmd_mkhuge(pmd))));
	WARN_ON(!pmd_present(pmd_mkinvalid(pmd_mkhuge(pmd))));
#endif /* __HAVE_ARCH_PMDP_INVALIDATE */
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_thp_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD based THP\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	WARN_ON(!pud_trans_huge(pud_mkhuge(pud)));

	/*
	 * pud_mkinvalid() has been dropped for now. Enable back
	 * these tests when it comes back with a modified pud_present().
	 *
	 * WARN_ON(!pud_trans_huge(pud_mkinvalid(pud_mkhuge(pud))));
	 * WARN_ON(!pud_present(pud_mkinvalid(pud_mkhuge(pud))));
	 */
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_thp_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_thp_tests(struct pgtable_debug_args *args) { }
static void __init pud_thp_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

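/* Pick a random, page aligned user virtual address for the tests to run at. */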
static unsigned long __init get_random_vaddr(void)
{
	unsigned long random_vaddr, random_pages, total_user_pages;

	total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;

	random_pages = get_random_long() % total_user_pages;
	random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;

	return random_vaddr;
}

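/*
 * Release everything that init_args() set up: the (huge) pages backing the
 * entries, the allocated page table pages, and finally the vma and the mm.
 */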
static void __init destroy_args(struct pgtable_debug_args *args)
{
	struct page *page = NULL;

	/* Free (huge) page */
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
	    has_transparent_hugepage() &&
	    args->pud_pfn != ULONG_MAX) {
		if (args->is_contiguous_page) {
			free_contig_range(args->pud_pfn,
					  (1 << (HPAGE_PUD_SHIFT - PAGE_SHIFT)));
		} else {
			page = pfn_to_page(args->pud_pfn);
			__free_pages(page, HPAGE_PUD_SHIFT - PAGE_SHIFT);
		}

		args->pud_pfn = ULONG_MAX;
		args->pmd_pfn = ULONG_MAX;
		args->pte_pfn = ULONG_MAX;
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_hugepage() &&
	    args->pmd_pfn != ULONG_MAX) {
		if (args->is_contiguous_page) {
			free_contig_range(args->pmd_pfn, (1 << HPAGE_PMD_ORDER));
		} else {
			page = pfn_to_page(args->pmd_pfn);
			__free_pages(page, HPAGE_PMD_ORDER);
		}

		args->pmd_pfn = ULONG_MAX;
		args->pte_pfn = ULONG_MAX;
	}

	if (args->pte_pfn != ULONG_MAX) {
		page = pfn_to_page(args->pte_pfn);
		__free_pages(page, 0);

		args->pte_pfn = ULONG_MAX;
	}

	/* Free page table entries */
	if (args->start_ptep) {
		pte_free(args->mm, args->start_ptep);
		mm_dec_nr_ptes(args->mm);
	}

	if (args->start_pmdp) {
		pmd_free(args->mm, args->start_pmdp);
		mm_dec_nr_pmds(args->mm);
	}

	if (args->start_pudp) {
		pud_free(args->mm, args->start_pudp);
		mm_dec_nr_puds(args->mm);
	}

	if (args->start_p4dp)
		p4d_free(args->mm, args->start_p4dp);

	/* Free vma and mm struct */
	if (args->vma)
		vm_area_free(args->vma);

	if (args->mm)
		mmdrop(args->mm);
}

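/*
 * Allocate the backing page for a huge mapping. Orders beyond what the
 * buddy allocator can serve (>= MAX_ORDER) are attempted through
 * alloc_contig_pages() when CONFIG_CONTIG_ALLOC is available.
 */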
static struct page * __init
debug_vm_pgtable_alloc_huge_page(struct pgtable_debug_args *args, int order)
{
	struct page *page = NULL;

#ifdef CONFIG_CONTIG_ALLOC
	if (order >= MAX_ORDER) {
		page = alloc_contig_pages((1 << order), GFP_KERNEL,
					  first_online_node, NULL);
		if (page) {
			args->is_contiguous_page = true;
			return page;
		}
	}
#endif

	if (order < MAX_ORDER)
		page = alloc_pages(GFP_KERNEL, order);

	return page;
}

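/*
 * Build the test context: allocate the mm, vma and one page table page at
 * each level for the random test address, derive the fixed pfns from the
 * kernel text symbol, and try to allocate real (huge) backing pages for the
 * tests that dereference them. Backing pages that cannot be allocated leave
 * the corresponding pfn at ULONG_MAX so the dependent tests are skipped.
 */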
static int __init init_args(struct pgtable_debug_args *args)
{
	struct page *page = NULL;
	phys_addr_t phys;
	int ret = 0;

	/*
	 * Initialize the debugging data.
	 *
	 * __P000 (or even __S000) will help create page table entries with
	 * PROT_NONE permission as required for pxx_protnone_tests().
	 */
	memset(args, 0, sizeof(*args));
	args->vaddr = get_random_vaddr();
	args->page_prot = vm_get_page_prot(VMFLAGS);
	args->page_prot_none = __P000;
	args->is_contiguous_page = false;
	args->pud_pfn = ULONG_MAX;
	args->pmd_pfn = ULONG_MAX;
	args->pte_pfn = ULONG_MAX;
	args->fixed_pgd_pfn = ULONG_MAX;
	args->fixed_p4d_pfn = ULONG_MAX;
	args->fixed_pud_pfn = ULONG_MAX;
	args->fixed_pmd_pfn = ULONG_MAX;
	args->fixed_pte_pfn = ULONG_MAX;

	/* Allocate mm and vma */
	args->mm = mm_alloc();
	if (!args->mm) {
		pr_err("Failed to allocate mm struct\n");
		ret = -ENOMEM;
		goto error;
	}

	args->vma = vm_area_alloc(args->mm);
	if (!args->vma) {
		pr_err("Failed to allocate vma\n");
		ret = -ENOMEM;
		goto error;
	}

	/*
	 * Allocate page table entries. They will be modified in the tests.
	 * Let's save the page table entries so that they can be released
	 * when the tests are completed.
	 */
	args->pgdp = pgd_offset(args->mm, args->vaddr);
	args->p4dp = p4d_alloc(args->mm, args->pgdp, args->vaddr);
	if (!args->p4dp) {
		pr_err("Failed to allocate p4d entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_p4dp = p4d_offset(args->pgdp, 0UL);
	WARN_ON(!args->start_p4dp);

	args->pudp = pud_alloc(args->mm, args->p4dp, args->vaddr);
	if (!args->pudp) {
		pr_err("Failed to allocate pud entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_pudp = pud_offset(args->p4dp, 0UL);
	WARN_ON(!args->start_pudp);

	args->pmdp = pmd_alloc(args->mm, args->pudp, args->vaddr);
	if (!args->pmdp) {
		pr_err("Failed to allocate pmd entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_pmdp = pmd_offset(args->pudp, 0UL);
	WARN_ON(!args->start_pmdp);

	if (pte_alloc(args->mm, args->pmdp)) {
		pr_err("Failed to allocate pte entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_ptep = pmd_pgtable(READ_ONCE(*args->pmdp));
	WARN_ON(!args->start_ptep);

	/*
	 * PFN for mapping at PTE level is determined from a standard kernel
	 * text symbol. But pfns for higher page table levels are derived by
	 * masking lower bits of this real pfn. These derived pfns might not
	 * exist on the platform but that does not really matter as pfn_pxx()
	 * helpers will still create appropriate entries for the test. This
	 * helps avoid large memory block allocations to be used for mapping
	 * at higher page table levels in some of the tests.
	 */
	phys = __pa_symbol(&start_kernel);
	args->fixed_pgd_pfn = __phys_to_pfn(phys & PGDIR_MASK);
	args->fixed_p4d_pfn = __phys_to_pfn(phys & P4D_MASK);
	args->fixed_pud_pfn = __phys_to_pfn(phys & PUD_MASK);
	args->fixed_pmd_pfn = __phys_to_pfn(phys & PMD_MASK);
	args->fixed_pte_pfn = __phys_to_pfn(phys & PAGE_MASK);
	WARN_ON(!pfn_valid(args->fixed_pte_pfn));

	/*
	 * Allocate (huge) pages because some of the tests need to access
	 * the data in the pages. The corresponding tests will be skipped
	 * if we fail to allocate (huge) pages.
	 */
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
	    has_transparent_hugepage()) {
		page = debug_vm_pgtable_alloc_huge_page(args,
				HPAGE_PUD_SHIFT - PAGE_SHIFT);
		if (page) {
			args->pud_pfn = page_to_pfn(page);
			args->pmd_pfn = args->pud_pfn;
			args->pte_pfn = args->pud_pfn;
			return 0;
		}
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_hugepage()) {
		page = debug_vm_pgtable_alloc_huge_page(args, HPAGE_PMD_ORDER);
		if (page) {
			args->pmd_pfn = page_to_pfn(page);
			args->pte_pfn = args->pmd_pfn;
			return 0;
		}
	}

	page = alloc_pages(GFP_KERNEL, 0);
	if (page)
		args->pte_pfn = page_to_pfn(page);

	return 0;

error:
	destroy_args(args);
	return ret;
}

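/*
 * Entry point: set up the pgtable_debug_args context (plus the remaining
 * locally allocated mm/vma still used by the PUD, P4D and PGD level tests),
 * run the lockless basic tests over every protection_map[] combination,
 * then the page table modifying tests under their respective page table
 * locks, and finally release everything that was set up.
 */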
static int __init debug_vm_pgtable(void)
{
	struct pgtable_debug_args args;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	pgd_t *pgdp;
	p4d_t *p4dp, *saved_p4dp;
	pud_t *pudp, *saved_pudp;
	pmd_t *pmdp, *saved_pmdp, pmd;
	pgtable_t saved_ptep;
	pgprot_t prot;
	phys_addr_t paddr;
	unsigned long vaddr;
	unsigned long pud_aligned;
	spinlock_t *ptl = NULL;
	int idx, ret;

	pr_info("Validating architecture page table helpers\n");
	ret = init_args(&args);
	if (ret)
		return ret;

	prot = vm_get_page_prot(VMFLAGS);
	vaddr = get_random_vaddr();
	mm = mm_alloc();
	if (!mm) {
		pr_err("mm_struct allocation failed\n");
		return 1;
	}

	vma = vm_area_alloc(mm);
	if (!vma) {
		pr_err("vma allocation failed\n");
		return 1;
	}

	/*
	 * PFN for mapping at PTE level is determined from a standard kernel
	 * text symbol. But pfns for higher page table levels are derived by
	 * masking lower bits of this real pfn. These derived pfns might not
	 * exist on the platform but that does not really matter as pfn_pxx()
	 * helpers will still create appropriate entries for the test. This
	 * helps avoid large memory block allocations to be used for mapping
	 * at higher page table levels.
	 */
	paddr = __pa_symbol(&start_kernel);

	pud_aligned = (paddr & PUD_MASK) >> PAGE_SHIFT;

	pgdp = pgd_offset(mm, vaddr);
	p4dp = p4d_alloc(mm, pgdp, vaddr);
	pudp = pud_alloc(mm, p4dp, vaddr);
	pmdp = pmd_alloc(mm, pudp, vaddr);
	/*
	 * Allocate pgtable_t
	 */
	if (pte_alloc(mm, pmdp)) {
		pr_err("pgtable allocation failed\n");
		return 1;
	}

	/*
	 * Save all the page table page addresses as the page table
	 * entries will be used for testing with random or garbage
	 * values. These saved addresses will be used for freeing
	 * page table pages.
	 */
	pmd = READ_ONCE(*pmdp);
	saved_p4dp = p4d_offset(pgdp, 0UL);
	saved_pudp = pud_offset(p4dp, 0UL);
	saved_pmdp = pmd_offset(pudp, 0UL);
	saved_ptep = pmd_pgtable(pmd);

	/*
	 * Iterate over the protection_map[] to make sure that all
	 * the basic page table transformation validations just hold
	 * true irrespective of the starting protection value for a
	 * given page table entry.
	 */
	for (idx = 0; idx < ARRAY_SIZE(protection_map); idx++) {
		pte_basic_tests(&args, idx);
		pmd_basic_tests(&args, idx);
		pud_basic_tests(&args, idx);
	}

	/*
	 * Both P4D and PGD level tests are very basic which do not
	 * involve creating page table entries from the protection
	 * value and the given pfn. Hence just keep them out from
	 * the above iteration for now to save some test execution
	 * time.
	 */
	p4d_basic_tests(&args);
	pgd_basic_tests(&args);

	pmd_leaf_tests(&args);
	pud_leaf_tests(&args);

	pte_savedwrite_tests(&args);
	pmd_savedwrite_tests(&args);

	pte_special_tests(&args);
	pte_protnone_tests(&args);
	pmd_protnone_tests(&args);

	pte_devmap_tests(&args);
	pmd_devmap_tests(&args);
	pud_devmap_tests(&args);

	pte_soft_dirty_tests(&args);
	pmd_soft_dirty_tests(&args);
	pte_swap_soft_dirty_tests(&args);
	pmd_swap_soft_dirty_tests(&args);

	pte_swap_tests(&args);
	pmd_swap_tests(&args);

	swap_migration_tests(&args);

	pmd_thp_tests(&args);
	pud_thp_tests(&args);

	hugetlb_basic_tests(&args);

	/*
	 * Page table modifying tests. They need to hold
	 * proper page table lock.
	 */

	args.ptep = pte_offset_map_lock(args.mm, args.pmdp, args.vaddr, &ptl);
	pte_clear_tests(&args);
	pte_advanced_tests(&args);
	pte_unmap_unlock(args.ptep, ptl);

	ptl = pmd_lock(args.mm, args.pmdp);
	pmd_clear_tests(&args);
	pmd_advanced_tests(&args);
	pmd_huge_tests(&args);
	pmd_populate_tests(&args);
	spin_unlock(ptl);

	ptl = pud_lock(mm, pudp);
	pud_clear_tests(mm, pudp);
	pud_advanced_tests(mm, vma, pudp, pud_aligned, vaddr, prot);
	pud_huge_tests(pudp, pud_aligned, prot);
	pud_populate_tests(mm, pudp, saved_pmdp);
	spin_unlock(ptl);

	spin_lock(&mm->page_table_lock);
	p4d_clear_tests(mm, p4dp);
	pgd_clear_tests(mm, pgdp);
	p4d_populate_tests(mm, p4dp, saved_pudp);
	pgd_populate_tests(mm, pgdp, saved_p4dp);
	spin_unlock(&mm->page_table_lock);

	p4d_free(mm, saved_p4dp);
	pud_free(mm, saved_pudp);
	pmd_free(mm, saved_pmdp);
	pte_free(mm, saved_ptep);

	vm_area_free(vma);
	mm_dec_nr_puds(mm);
	mm_dec_nr_pmds(mm);
	mm_dec_nr_ptes(mm);
	mmdrop(mm);

	destroy_args(&args);
	return 0;
}
late_initcall(debug_vm_pgtable);