// SPDX-License-Identifier: GPL-2.0-only
/*
 * This kernel test validates architecture page table helpers and
 * accessors and helps in verifying their continued compliance with
 * expected generic MM semantics.
 *
 * Copyright (C) 2019 ARM Ltd.
 *
 * Author: Anshuman Khandual <anshuman.khandual@arm.com>
 */
#define pr_fmt(fmt) "debug_vm_pgtable: [%-25s]: " fmt, __func__

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/printk.h>
#include <linux/pgtable.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/start_kernel.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * Please refer to Documentation/vm/arch_pgtable_helpers.rst for the semantics
 * expectations that are being validated here. All future changes in here
 * or the documentation need to be in sync.
 */

#define VMFLAGS	(VM_READ|VM_WRITE|VM_EXEC)

/*
 * On the s390 platform, the lower 4 bits are used to identify a given page
 * table entry type. But these bits might affect the ability to clear entries
 * with pxx_clear() because of how dynamic page table folding works on s390.
 * So while loading up the entries do not change the lower 4 bits. It does
 * not affect any other platform. Also avoid bit 62 on ppc64, which is used
 * to mark a pte entry.
 */
#define S390_SKIP_MASK		GENMASK(3, 0)
#if __BITS_PER_LONG == 64
#define PPC64_SKIP_MASK		GENMASK(62, 62)
#else
#define PPC64_SKIP_MASK		0x0
#endif
#define ARCH_SKIP_MASK		(S390_SKIP_MASK | PPC64_SKIP_MASK)
#define RANDOM_ORVALUE		(GENMASK(BITS_PER_LONG - 1, 0) & ~ARCH_SKIP_MASK)
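/* Any non-zero byte pattern will do for populating throwaway entries */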
#define RANDOM_NZVALUE	GENMASK(7, 0)

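/*
 * Everything the individual tests need: the mm and vma under test, pointers
 * into each page table level, and the pfns (dynamically allocated and fixed)
 * that entries are built from. A pfn left at ULONG_MAX means the backing
 * page could not be allocated, and tests that depend on it bail out early.
 */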
struct pgtable_debug_args {
	struct mm_struct	*mm;
	struct vm_area_struct	*vma;

	pgd_t			*pgdp;
	p4d_t			*p4dp;
	pud_t			*pudp;
	pmd_t			*pmdp;
	pte_t			*ptep;

	p4d_t			*start_p4dp;
	pud_t			*start_pudp;
	pmd_t			*start_pmdp;
	pgtable_t		start_ptep;

	unsigned long		vaddr;
	pgprot_t		page_prot;
	pgprot_t		page_prot_none;

	bool			is_contiguous_page;
	unsigned long		pud_pfn;
	unsigned long		pmd_pfn;
	unsigned long		pte_pfn;

	unsigned long		fixed_pgd_pfn;
	unsigned long		fixed_p4d_pfn;
	unsigned long		fixed_pud_pfn;
	unsigned long		fixed_pmd_pfn;
	unsigned long		fixed_pte_pfn;
};

static void __init pte_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = protection_map[idx];
	pte_t pte = pfn_pte(args->fixed_pte_pfn, prot);
	unsigned long val = idx, *ptr = &val;

	pr_debug("Validating PTE basic (%pGv)\n", ptr);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pte() to make sure that protection_map[idx]
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pte_dirty(pte_wrprotect(pte)));

	WARN_ON(!pte_same(pte, pte));
	WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
	WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
	WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte))));
	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
	WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte))));
	WARN_ON(pte_dirty(pte_wrprotect(pte_mkclean(pte))));
	WARN_ON(!pte_dirty(pte_wrprotect(pte_mkdirty(pte))));
}

static void __init pte_advanced_tests(struct pgtable_debug_args *args)
{
	pte_t pte;

	/*
	 * Architectures optimize set_pte_at() by avoiding TLB flush.
	 * This requires set_pte_at() to not be used to update an
	 * existing pte entry. Clear the pte before we do set_pte_at().
	 */
	if (args->pte_pfn == ULONG_MAX)
		return;

	pr_debug("Validating PTE advanced\n");
	pte = pfn_pte(args->pte_pfn, args->page_prot);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	ptep_set_wrprotect(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(pte_write(pte));
	ptep_get_and_clear(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));

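	/* Start read-only and clean, then mark writable/dirty via ptep_set_access_flags() */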
	pte = pfn_pte(args->pte_pfn, args->page_prot);
	pte = pte_wrprotect(pte);
	pte = pte_mkclean(pte);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	pte = pte_mkwrite(pte);
	pte = pte_mkdirty(pte);
	ptep_set_access_flags(args->vma, args->vaddr, args->ptep, pte, 1);
	pte = ptep_get(args->ptep);
	WARN_ON(!(pte_write(pte) && pte_dirty(pte)));
	ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));

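	/* ptep_test_and_clear_young() must clear the accessed bit */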
	pte = pfn_pte(args->pte_pfn, args->page_prot);
	pte = pte_mkyoung(pte);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	ptep_test_and_clear_young(args->vma, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(pte_young(pte));
}

static void __init pte_savedwrite_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot_none);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE saved write\n");
	WARN_ON(!pte_savedwrite(pte_mk_savedwrite(pte_clear_savedwrite(pte))));
	WARN_ON(pte_savedwrite(pte_clear_savedwrite(pte_mk_savedwrite(pte))));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = protection_map[idx];
	unsigned long val = idx, *ptr = &val;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD basic (%pGv)\n", ptr);
	pmd = pfn_pmd(args->fixed_pmd_pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pmd() to make sure that protection_map[idx]
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd)));

	WARN_ON(!pmd_same(pmd, pmd));
	WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
	WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
	WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd))));
	WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
	WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
	WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd))));
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd_mkclean(pmd))));
	WARN_ON(!pmd_dirty(pmd_wrprotect(pmd_mkdirty(pmd))));
	/*
	 * A huge page does not point to the next level page table
	 * entry. Hence this must qualify as pmd_bad().
	 */
	WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
}

static void __init pmd_advanced_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;
	unsigned long vaddr = args->vaddr;

	if (!has_transparent_hugepage())
		return;

	if (args->pmd_pfn == ULONG_MAX)
		return;

	pr_debug("Validating PMD advanced\n");
	/* Align the address wrt HPAGE_PMD_SIZE */
	vaddr &= HPAGE_PMD_MASK;

	pgtable_trans_huge_deposit(args->mm, args->pmdp, args->start_ptep);

	pmd = pfn_pmd(args->pmd_pfn, args->page_prot);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	pmdp_set_wrprotect(args->mm, vaddr, args->pmdp);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(pmd_write(pmd));
	pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!pmd_none(pmd));

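	/* As for PTEs above: dirty a clean, write-protected PMD via pmdp_set_access_flags() */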
	pmd = pfn_pmd(args->pmd_pfn, args->page_prot);
	pmd = pmd_wrprotect(pmd);
	pmd = pmd_mkclean(pmd);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	pmd = pmd_mkwrite(pmd);
	pmd = pmd_mkdirty(pmd);
	pmdp_set_access_flags(args->vma, vaddr, args->pmdp, pmd, 1);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd)));
	pmdp_huge_get_and_clear_full(args->vma, vaddr, args->pmdp, 1);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!pmd_none(pmd));

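	/* pmdp_test_and_clear_young() must clear the accessed bit */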
	pmd = pmd_mkhuge(pfn_pmd(args->pmd_pfn, args->page_prot));
	pmd = pmd_mkyoung(pmd);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	pmdp_test_and_clear_young(args->vma, vaddr, args->pmdp);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(pmd_young(pmd));

	/* Clear the pte entries */
	pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
	pgtable_trans_huge_withdraw(args->mm, args->pmdp);
}

static void __init pmd_leaf_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD leaf\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);

	/*
	 * PMD based THP is a leaf entry.
	 */
	pmd = pmd_mkhuge(pmd);
	WARN_ON(!pmd_leaf(pmd));
}

static void __init pmd_savedwrite_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD saved write\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot_none);
	WARN_ON(!pmd_savedwrite(pmd_mk_savedwrite(pmd_clear_savedwrite(pmd))));
	WARN_ON(pmd_savedwrite(pmd_clear_savedwrite(pmd_mk_savedwrite(pmd))));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = protection_map[idx];
	unsigned long val = idx, *ptr = &val;
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD basic (%pGv)\n", ptr);
	pud = pfn_pud(args->fixed_pud_pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pud() to make sure that protection_map[idx]
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pud_dirty(pud_wrprotect(pud)));

	WARN_ON(!pud_same(pud, pud));
	WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
	WARN_ON(!pud_dirty(pud_mkdirty(pud_mkclean(pud))));
	WARN_ON(pud_dirty(pud_mkclean(pud_mkdirty(pud))));
	WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
	WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
	WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));
	WARN_ON(pud_dirty(pud_wrprotect(pud_mkclean(pud))));
	WARN_ON(!pud_dirty(pud_wrprotect(pud_mkdirty(pud))));

	if (mm_pmd_folded(args->mm))
		return;

	/*
	 * A huge page does not point to the next level page table
	 * entry. Hence this must qualify as pud_bad().
	 */
	WARN_ON(!pud_bad(pud_mkhuge(pud)));
}

static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD advanced\n");
	/* Align the address wrt HPAGE_PUD_SIZE */
	vaddr &= HPAGE_PUD_MASK;

	pud = pfn_pud(pfn, prot);
	set_pud_at(mm, vaddr, pudp, pud);
	pudp_set_wrprotect(mm, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_write(pud));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear(mm, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */
	pud = pfn_pud(pfn, prot);
	pud = pud_wrprotect(pud);
	pud = pud_mkclean(pud);
	set_pud_at(mm, vaddr, pudp, pud);
	pud = pud_mkwrite(pud);
	pud = pud_mkdirty(pud);
	pudp_set_access_flags(vma, vaddr, pudp, pud, 1);
	pud = READ_ONCE(*pudp);
	WARN_ON(!(pud_write(pud) && pud_dirty(pud)));

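	/* Clear with the _full variant, as done when the whole mm is being torn down */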
#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear_full(mm, vaddr, pudp, 1);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */

	pud = pfn_pud(pfn, prot);
	pud = pud_mkyoung(pud);
	set_pud_at(mm, vaddr, pudp, pud);
	pudp_test_and_clear_young(vma, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_young(pud));

	pudp_huge_get_and_clear(mm, vaddr, pudp);
}

static void __init pud_leaf_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD leaf\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	/*
	 * PUD based THP is a leaf entry.
	 */
	pud = pud_mkhuge(pud);
	WARN_ON(!pud_leaf(pud));
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
}
static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pmd_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
}
static void __init pmd_leaf_tests(struct pgtable_debug_args *args) { }
static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
static void __init pmd_savedwrite_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static void __init pmd_huge_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!arch_vmap_pmd_supported(args->page_prot))
		return;

	pr_debug("Validating PMD huge\n");
	/*
	 * X86 defined pmd_set_huge() verifies that the given
	 * PMD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*args->pmdp, __pmd(0));
	WARN_ON(!pmd_set_huge(args->pmdp, __pfn_to_phys(args->fixed_pmd_pfn), args->page_prot));
	WARN_ON(!pmd_clear_huge(args->pmdp));
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!arch_vmap_pud_supported(prot))
		return;

	pr_debug("Validating PUD huge\n");
	/*
	 * X86 defined pud_set_huge() verifies that the given
	 * PUD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*pudp, __pud(0));
	WARN_ON(!pud_set_huge(pudp, __pfn_to_phys(pfn), prot));
	WARN_ON(!pud_clear_huge(pudp));
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
}
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static void __init pmd_huge_tests(struct pgtable_debug_args *args) { }
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

static void __init p4d_basic_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d;

	pr_debug("Validating P4D basic\n");
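	/* An arbitrary non-zero entry is enough to exercise p4d_same() reflexivity */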
	memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
	WARN_ON(!p4d_same(p4d, p4d));
}

static void __init pgd_basic_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd;

	pr_debug("Validating PGD basic\n");
	memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
	WARN_ON(!pgd_same(pgd, pgd));
}

#ifndef __PAGETABLE_PUD_FOLDED
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp)
{
	pud_t pud = READ_ONCE(*pudp);

	if (mm_pmd_folded(mm))
		return;

	pr_debug("Validating PUD clear\n");
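	/* Pollute the entry first so that pud_clear() has real work to do */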
	pud = __pud(pud_val(pud) | RANDOM_ORVALUE);
	WRITE_ONCE(*pudp, pud);
	pud_clear(pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
}

static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
	pud_t pud;

	if (mm_pmd_folded(mm))
		return;

	pr_debug("Validating PUD populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as pud_bad().
	 */
	pud_populate(mm, pudp, pmdp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_bad(pud));
}
#else /* !__PAGETABLE_PUD_FOLDED */
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp) { }
static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
}
#endif /* __PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_P4D_FOLDED
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp)
{
	p4d_t p4d = READ_ONCE(*p4dp);

	if (mm_pud_folded(mm))
		return;

	pr_debug("Validating P4D clear\n");
	p4d = __p4d(p4d_val(p4d) | RANDOM_ORVALUE);
	WRITE_ONCE(*p4dp, p4d);
	p4d_clear(p4dp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(!p4d_none(p4d));
}

static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
	p4d_t p4d;

	if (mm_pud_folded(mm))
		return;

	pr_debug("Validating P4D populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as p4d_bad().
	 */
	pud_clear(pudp);
	p4d_clear(p4dp);
	p4d_populate(mm, p4dp, pudp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(p4d_bad(p4d));
}

static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp)
{
	pgd_t pgd = READ_ONCE(*pgdp);

	if (mm_p4d_folded(mm))
		return;

	pr_debug("Validating PGD clear\n");
	pgd = __pgd(pgd_val(pgd) | RANDOM_ORVALUE);
	WRITE_ONCE(*pgdp, pgd);
	pgd_clear(pgdp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(!pgd_none(pgd));
}

static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
	pgd_t pgd;

	if (mm_p4d_folded(mm))
		return;

	pr_debug("Validating PGD populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as pgd_bad().
	 */
	p4d_clear(p4dp);
	pgd_clear(pgdp);
	pgd_populate(mm, pgdp, p4dp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(pgd_bad(pgd));
}
#else /* !__PAGETABLE_P4D_FOLDED */
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp) { }
static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp) { }
static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
}
static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
}
#endif /* __PAGETABLE_P4D_FOLDED */

static void __init pte_clear_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->pte_pfn, args->page_prot);

	if (args->pte_pfn == ULONG_MAX)
		return;

	pr_debug("Validating PTE clear\n");
#ifndef CONFIG_RISCV
	pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
#endif
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	barrier();
	pte_clear(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));
}

static void __init pmd_clear_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd = READ_ONCE(*args->pmdp);

	pr_debug("Validating PMD clear\n");
	pmd = __pmd(pmd_val(pmd) | RANDOM_ORVALUE);
	WRITE_ONCE(*args->pmdp, pmd);
	pmd_clear(args->pmdp);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pmd_populate_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	pr_debug("Validating PMD populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as pmd_bad().
	 */
	pmd_populate(args->mm, args->pmdp, args->start_ptep);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(pmd_bad(pmd));
}

static void __init pte_special_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL))
		return;

	pr_debug("Validating PTE special\n");
	WARN_ON(!pte_special(pte_mkspecial(pte)));
}

static void __init pte_protnone_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot_none);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE protnone\n");
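	/* A protnone entry is inaccessible but still present, which NUMA hinting relies on */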
	WARN_ON(!pte_protnone(pte));
	WARN_ON(!pte_present(pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_protnone_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD protnone\n");
	pmd = pmd_mkhuge(pfn_pmd(args->fixed_pmd_pfn, args->page_prot_none));
	WARN_ON(!pmd_protnone(pmd));
	WARN_ON(!pmd_present(pmd));
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_protnone_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static void __init pte_devmap_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	pr_debug("Validating PTE devmap\n");
	WARN_ON(!pte_devmap(pte_mkdevmap(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_devmap_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD devmap\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd)));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_devmap_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD devmap\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	WARN_ON(!pud_devmap(pud_mkdevmap(pud)));
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#else /* !CONFIG_ARCH_HAS_PTE_DEVMAP */
static void __init pte_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */

static void __init pte_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE soft dirty\n");
	WARN_ON(!pte_soft_dirty(pte_mksoft_dirty(pte)));
	WARN_ON(pte_soft_dirty(pte_clear_soft_dirty(pte)));
}

static void __init pte_swap_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE swap soft dirty\n");
	WARN_ON(!pte_swp_soft_dirty(pte_swp_mksoft_dirty(pte)));
	WARN_ON(pte_swp_soft_dirty(pte_swp_clear_soft_dirty(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD soft dirty\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
	WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
}

static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) ||
	    !IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap soft dirty\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
	WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args) { }
static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __init pte_swap_tests(struct pgtable_debug_args *args)
{
	swp_entry_t swp;
	pte_t pte;

	pr_debug("Validating PTE swap\n");
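	/* Round-trip the pte through a swp_entry_t and check that the pfn survives */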
	pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);
	swp = __pte_to_swp_entry(pte);
	pte = __swp_entry_to_pte(swp);
	WARN_ON(args->fixed_pte_pfn != pte_pfn(pte));
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static void __init pmd_swap_tests(struct pgtable_debug_args *args)
{
	swp_entry_t swp;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	swp = __pmd_to_swp_entry(pmd);
	pmd = __swp_entry_to_pmd(swp);
	WARN_ON(args->fixed_pmd_pfn != pmd_pfn(pmd));
}
#else /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
static void __init pmd_swap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

static void __init swap_migration_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	swp_entry_t swp;

	if (!IS_ENABLED(CONFIG_MIGRATION))
		return;

	/*
	 * swap_migration_tests() requires a dedicated page as it needs to
	 * be locked before creating a migration entry from it. Locking the
	 * page that actually maps kernel text ('start_kernel') can be really
	 * problematic. Let's use the allocated page explicitly for this
	 * purpose.
	 */
	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	pr_debug("Validating swap migration\n");
Anshuman Khandual | 0528940 | 2020-08-06 23:19:16 -0700 | [diff] [blame] | 863 | |
| 864 | /* |
| 865 | * make_migration_entry() expects given page to be |
| 866 | * locked, otherwise it stumbles upon a BUG_ON(). |
| 867 | */ |
| 868 | __SetPageLocked(page); |
Alistair Popple | 4dd845b | 2021-06-30 18:54:09 -0700 | [diff] [blame] | 869 | swp = make_writable_migration_entry(page_to_pfn(page)); |
Anshuman Khandual | 0528940 | 2020-08-06 23:19:16 -0700 | [diff] [blame] | 870 | WARN_ON(!is_migration_entry(swp)); |
Alistair Popple | 4dd845b | 2021-06-30 18:54:09 -0700 | [diff] [blame] | 871 | WARN_ON(!is_writable_migration_entry(swp)); |
Anshuman Khandual | 0528940 | 2020-08-06 23:19:16 -0700 | [diff] [blame] | 872 | |
Alistair Popple | 4dd845b | 2021-06-30 18:54:09 -0700 | [diff] [blame] | 873 | swp = make_readable_migration_entry(swp_offset(swp)); |
Anshuman Khandual | 0528940 | 2020-08-06 23:19:16 -0700 | [diff] [blame] | 874 | WARN_ON(!is_migration_entry(swp)); |
Alistair Popple | 4dd845b | 2021-06-30 18:54:09 -0700 | [diff] [blame] | 875 | WARN_ON(is_writable_migration_entry(swp)); |
Anshuman Khandual | 0528940 | 2020-08-06 23:19:16 -0700 | [diff] [blame] | 876 | |
Alistair Popple | 4dd845b | 2021-06-30 18:54:09 -0700 | [diff] [blame] | 877 | swp = make_readable_migration_entry(page_to_pfn(page)); |
Anshuman Khandual | 0528940 | 2020-08-06 23:19:16 -0700 | [diff] [blame] | 878 | WARN_ON(!is_migration_entry(swp)); |
Alistair Popple | 4dd845b | 2021-06-30 18:54:09 -0700 | [diff] [blame] | 879 | WARN_ON(is_writable_migration_entry(swp)); |
Anshuman Khandual | 0528940 | 2020-08-06 23:19:16 -0700 | [diff] [blame] | 880 | __ClearPageLocked(page); |
Anshuman Khandual | 0528940 | 2020-08-06 23:19:16 -0700 | [diff] [blame] | 881 | } |
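
/*
 * For reference, a hedged sketch of what the generic migration entry
 * encoding guarantees (architectures overriding the swap layout may
 * differ): the pfn is carried in the swap offset and the access
 * direction in the swap type, so a writable entry can be downgraded
 * to a readable one without losing the pfn.
 *
 *	swp_entry_t ent = make_writable_migration_entry(pfn);
 *	WARN_ON(swp_offset(ent) != pfn);
 *	ent = make_readable_migration_entry(swp_offset(ent));
 *	WARN_ON(swp_offset(ent) != pfn || is_writable_migration_entry(ent));
 */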
| 882 | |
| 883 | #ifdef CONFIG_HUGETLB_PAGE |
Gavin Shan | 36b77d1 | 2021-09-02 14:52:22 -0700 | [diff] [blame] | 884 | static void __init hugetlb_basic_tests(struct pgtable_debug_args *args) |
Anshuman Khandual | 0528940 | 2020-08-06 23:19:16 -0700 | [diff] [blame] | 885 | { |
| 886 | struct page *page; |
| 887 | pte_t pte; |
| 888 | |
Anshuman Khandual | 6315df4 | 2020-08-06 23:19:25 -0700 | [diff] [blame] | 889 | pr_debug("Validating HugeTLB basic\n"); |
Anshuman Khandual | 0528940 | 2020-08-06 23:19:16 -0700 | [diff] [blame] | 890 | /* |
| 891 | * Accessing the page associated with the pfn is safe here, |
| 892 | * as it was previously derived from a real kernel symbol. |
| 893 | */ |
Gavin Shan | 36b77d1 | 2021-09-02 14:52:22 -0700 | [diff] [blame] | 894 | page = pfn_to_page(args->fixed_pmd_pfn); |
| 895 | pte = mk_huge_pte(page, args->page_prot); |
Anshuman Khandual | 0528940 | 2020-08-06 23:19:16 -0700 | [diff] [blame] | 896 | |
| 897 | WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte))); |
| 898 | WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte)))); |
| 899 | WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte)))); |
| 900 | |
| 901 | #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB |
Gavin Shan | 36b77d1 | 2021-09-02 14:52:22 -0700 | [diff] [blame] | 902 | pte = pfn_pte(args->fixed_pmd_pfn, args->page_prot); |
Anshuman Khandual | 0528940 | 2020-08-06 23:19:16 -0700 | [diff] [blame] | 903 | |
| 904 | WARN_ON(!pte_huge(pte_mkhuge(pte))); |
| 905 | #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */ |
| 906 | } |
| 907 | #else /* !CONFIG_HUGETLB_PAGE */ |
Gavin Shan | 36b77d1 | 2021-09-02 14:52:22 -0700 | [diff] [blame] | 908 | static void __init hugetlb_basic_tests(struct pgtable_debug_args *args) { } |
Anshuman Khandual | 0528940 | 2020-08-06 23:19:16 -0700 | [diff] [blame] | 909 | #endif /* CONFIG_HUGETLB_PAGE */ |
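
/*
 * Note the use of args->fixed_pmd_pfn in hugetlb_basic_tests() above:
 * because that pfn was derived by masking a real physical address with
 * PMD_MASK, the resulting entry models a legal, aligned huge mapping.
 * A sketch of the alignment invariant being relied upon:
 *
 *	WARN_ON(args->fixed_pmd_pfn & ((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1));
 */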
| 910 | |
| 911 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
Gavin Shan | 4878a88 | 2021-09-02 14:52:35 -0700 | [diff] [blame] | 912 | static void __init pmd_thp_tests(struct pgtable_debug_args *args) |
Anshuman Khandual | 0528940 | 2020-08-06 23:19:16 -0700 | [diff] [blame] | 913 | { |
| 914 | pmd_t pmd; |
| 915 | |
| 916 | if (!has_transparent_hugepage()) |
| 917 | return; |
| 918 | |
Anshuman Khandual | 6315df4 | 2020-08-06 23:19:25 -0700 | [diff] [blame] | 919 | pr_debug("Validating PMD based THP\n"); |
Anshuman Khandual | 0528940 | 2020-08-06 23:19:16 -0700 | [diff] [blame] | 920 | /* |
| 921 | * pmd_trans_huge() and pmd_present() must return true after MMU |
| 922 | * invalidation with pmd_mkinvalid(). This behavior is an |
| 923 | * optimization for transparent huge pages. pmd_trans_huge() must |
| 924 | * be true if pmd_page() returns a valid THP, so that walkers can |
| 925 | * avoid taking the pmd_lock when passing over non-trans-huge pmds |
| 926 | * (i.e. when no THP is allocated). In particular, when splitting |
| 927 | * a THP and removing the present bit from the pmd, |
| 928 | * pmd_trans_huge() still needs to return true. pmd_present() |
| 929 | * should be true whenever pmd_trans_huge() returns true. |
| 930 | */ |
Gavin Shan | 4878a88 | 2021-09-02 14:52:35 -0700 | [diff] [blame] | 931 | pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot); |
Anshuman Khandual | 0528940 | 2020-08-06 23:19:16 -0700 | [diff] [blame] | 932 | WARN_ON(!pmd_trans_huge(pmd_mkhuge(pmd))); |
| 933 | |
| 934 | #ifndef __HAVE_ARCH_PMDP_INVALIDATE |
| 935 | WARN_ON(!pmd_trans_huge(pmd_mkinvalid(pmd_mkhuge(pmd)))); |
| 936 | WARN_ON(!pmd_present(pmd_mkinvalid(pmd_mkhuge(pmd)))); |
| 937 | #endif /* __HAVE_ARCH_PMDP_INVALIDATE */ |
| 938 | } |
| 939 | |
| 940 | #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD |
Gavin Shan | 4878a88 | 2021-09-02 14:52:35 -0700 | [diff] [blame] | 941 | static void __init pud_thp_tests(struct pgtable_debug_args *args) |
Anshuman Khandual | 0528940 | 2020-08-06 23:19:16 -0700 | [diff] [blame] | 942 | { |
| 943 | pud_t pud; |
| 944 | |
| 945 | if (!has_transparent_hugepage()) |
| 946 | return; |
| 947 | |
Anshuman Khandual | 6315df4 | 2020-08-06 23:19:25 -0700 | [diff] [blame] | 948 | pr_debug("Validating PUD based THP\n"); |
Gavin Shan | 4878a88 | 2021-09-02 14:52:35 -0700 | [diff] [blame] | 949 | pud = pfn_pud(args->fixed_pud_pfn, args->page_prot); |
Anshuman Khandual | 0528940 | 2020-08-06 23:19:16 -0700 | [diff] [blame] | 950 | WARN_ON(!pud_trans_huge(pud_mkhuge(pud))); |
| 951 | |
| 952 | /* |
| 953 | * pud_mkinvalid() has been dropped for now. Re-enable these |
| 954 | * tests when it is brought back with a modified pud_present(). |
| 955 | * |
| 956 | * WARN_ON(!pud_trans_huge(pud_mkinvalid(pud_mkhuge(pud)))); |
| 957 | * WARN_ON(!pud_present(pud_mkinvalid(pud_mkhuge(pud)))); |
| 958 | */ |
| 959 | } |
| 960 | #else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ |
Gavin Shan | 4878a88 | 2021-09-02 14:52:35 -0700 | [diff] [blame] | 961 | static void __init pud_thp_tests(struct pgtable_debug_args *args) { } |
Anshuman Khandual | 0528940 | 2020-08-06 23:19:16 -0700 | [diff] [blame] | 962 | #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ |
| 963 | #else /* !CONFIG_TRANSPARENT_HUGEPAGE */ |
Gavin Shan | 4878a88 | 2021-09-02 14:52:35 -0700 | [diff] [blame] | 964 | static void __init pmd_thp_tests(struct pgtable_debug_args *args) { } |
| 965 | static void __init pud_thp_tests(struct pgtable_debug_args *args) { } |
Anshuman Khandual | 0528940 | 2020-08-06 23:19:16 -0700 | [diff] [blame] | 966 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ |
| 967 | |
Anshuman Khandual | 399145f | 2020-06-04 16:47:15 -0700 | [diff] [blame] | 968 | static unsigned long __init get_random_vaddr(void) |
| 969 | { |
| 970 | unsigned long random_vaddr, random_pages, total_user_pages; |
| 971 | |
| 972 | total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE; |
| 973 | |
| 974 | random_pages = get_random_long() % total_user_pages; |
| 975 | random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE; |
| 976 | |
| 977 | return random_vaddr; |
| 978 | } |
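
/*
 * Worked example for get_random_vaddr(), assuming 4K pages and
 * FIRST_USER_ADDRESS == 0: a draw of random_pages == 0x12345 yields
 * random_vaddr == 0x12345000, i.e. a page aligned user address that is
 * guaranteed to stay below TASK_SIZE by the modulo above.
 */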
| 979 | |
Gavin Shan | 3c9b84f | 2021-09-02 14:52:19 -0700 | [diff] [blame] | 980 | static void __init destroy_args(struct pgtable_debug_args *args) |
| 981 | { |
| 982 | struct page *page = NULL; |
| 983 | |
| 984 | /* Free (huge) page */ |
| 985 | if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && |
| 986 | IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) && |
| 987 | has_transparent_hugepage() && |
| 988 | args->pud_pfn != ULONG_MAX) { |
| 989 | if (args->is_contiguous_page) { |
| 990 | free_contig_range(args->pud_pfn, |
| 991 | (1 << (HPAGE_PUD_SHIFT - PAGE_SHIFT))); |
| 992 | } else { |
| 993 | page = pfn_to_page(args->pud_pfn); |
| 994 | __free_pages(page, HPAGE_PUD_SHIFT - PAGE_SHIFT); |
| 995 | } |
| 996 | |
| 997 | args->pud_pfn = ULONG_MAX; |
| 998 | args->pmd_pfn = ULONG_MAX; |
| 999 | args->pte_pfn = ULONG_MAX; |
| 1000 | } |
| 1001 | |
| 1002 | if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && |
| 1003 | has_transparent_hugepage() && |
| 1004 | args->pmd_pfn != ULONG_MAX) { |
| 1005 | if (args->is_contiguous_page) { |
| 1006 | free_contig_range(args->pmd_pfn, (1 << HPAGE_PMD_ORDER)); |
| 1007 | } else { |
| 1008 | page = pfn_to_page(args->pmd_pfn); |
| 1009 | __free_pages(page, HPAGE_PMD_ORDER); |
| 1010 | } |
| 1011 | |
| 1012 | args->pmd_pfn = ULONG_MAX; |
| 1013 | args->pte_pfn = ULONG_MAX; |
| 1014 | } |
| 1015 | |
| 1016 | if (args->pte_pfn != ULONG_MAX) { |
| 1017 | page = pfn_to_page(args->pte_pfn); |
| 1018 | __free_pages(page, 0); |
| 1019 | |
| 1020 | args->pte_pfn = ULONG_MAX; |
| 1021 | } |
| 1022 | |
| 1023 | /* Free page table entries */ |
| 1024 | if (args->start_ptep) { |
| 1025 | pte_free(args->mm, args->start_ptep); |
| 1026 | mm_dec_nr_ptes(args->mm); |
| 1027 | } |
| 1028 | |
| 1029 | if (args->start_pmdp) { |
| 1030 | pmd_free(args->mm, args->start_pmdp); |
| 1031 | mm_dec_nr_pmds(args->mm); |
| 1032 | } |
| 1033 | |
| 1034 | if (args->start_pudp) { |
| 1035 | pud_free(args->mm, args->start_pudp); |
| 1036 | mm_dec_nr_puds(args->mm); |
| 1037 | } |
| 1038 | |
| 1039 | if (args->start_p4dp) |
| 1040 | p4d_free(args->mm, args->start_p4dp); |
| 1041 | |
| 1042 | /* Free vma and mm struct */ |
| 1043 | if (args->vma) |
| 1044 | vm_area_free(args->vma); |
| 1045 | |
| 1046 | if (args->mm) |
| 1047 | mmdrop(args->mm); |
| 1048 | } |
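
/*
 * Note on the teardown order in destroy_args() above: the (huge) test
 * pages are released first, via free_contig_range() when they came from
 * the contiguous allocator, then the saved page table pages along with
 * the matching mm_dec_nr_*() accounting, and finally the vma and the
 * mm itself.
 */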
| 1049 | |
| 1050 | static struct page * __init |
| 1051 | debug_vm_pgtable_alloc_huge_page(struct pgtable_debug_args *args, int order) |
| 1052 | { |
| 1053 | struct page *page = NULL; |
| 1054 | |
| 1055 | #ifdef CONFIG_CONTIG_ALLOC |
| 1056 | if (order >= MAX_ORDER) { |
| 1057 | page = alloc_contig_pages((1 << order), GFP_KERNEL, |
| 1058 | first_online_node, NULL); |
| 1059 | if (page) { |
| 1060 | args->is_contiguous_page = true; |
| 1061 | return page; |
| 1062 | } |
| 1063 | } |
| 1064 | #endif |
| 1065 | |
| 1066 | if (order < MAX_ORDER) |
| 1067 | page = alloc_pages(GFP_KERNEL, order); |
| 1068 | |
| 1069 | return page; |
| 1070 | } |
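
/*
 * Typical usage, mirroring init_args() below: ask for a PUD sized
 * allocation first and fall back to smaller orders. Orders at or above
 * MAX_ORDER can only be satisfied by the contiguous allocator, hence
 * the CONFIG_CONTIG_ALLOC path above.
 *
 *	page = debug_vm_pgtable_alloc_huge_page(args,
 *						HPAGE_PUD_SHIFT - PAGE_SHIFT);
 *	if (page)
 *		args->pud_pfn = page_to_pfn(page);
 */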
| 1071 | |
| 1072 | static int __init init_args(struct pgtable_debug_args *args) |
| 1073 | { |
| 1074 | struct page *page = NULL; |
| 1075 | phys_addr_t phys; |
| 1076 | int ret = 0; |
| 1077 | |
| 1078 | /* |
| 1079 | * Initialize the debugging data. |
| 1080 | * |
| 1081 | * __P000 (or even __S000) will help create page table entries with |
| 1082 | * PROT_NONE permission as required for pxx_protnone_tests(). |
| 1083 | */ |
| 1084 | memset(args, 0, sizeof(*args)); |
| 1085 | args->vaddr = get_random_vaddr(); |
| 1086 | args->page_prot = vm_get_page_prot(VMFLAGS); |
| 1087 | args->page_prot_none = __P000; |
| 1088 | args->is_contiguous_page = false; |
| 1089 | args->pud_pfn = ULONG_MAX; |
| 1090 | args->pmd_pfn = ULONG_MAX; |
| 1091 | args->pte_pfn = ULONG_MAX; |
| 1092 | args->fixed_pgd_pfn = ULONG_MAX; |
| 1093 | args->fixed_p4d_pfn = ULONG_MAX; |
| 1094 | args->fixed_pud_pfn = ULONG_MAX; |
| 1095 | args->fixed_pmd_pfn = ULONG_MAX; |
| 1096 | args->fixed_pte_pfn = ULONG_MAX; |
| 1097 | |
| 1098 | /* Allocate mm and vma */ |
| 1099 | args->mm = mm_alloc(); |
| 1100 | if (!args->mm) { |
| 1101 | pr_err("Failed to allocate mm struct\n"); |
| 1102 | ret = -ENOMEM; |
| 1103 | goto error; |
| 1104 | } |
| 1105 | |
| 1106 | args->vma = vm_area_alloc(args->mm); |
| 1107 | if (!args->vma) { |
| 1108 | pr_err("Failed to allocate vma\n"); |
| 1109 | ret = -ENOMEM; |
| 1110 | goto error; |
| 1111 | } |
| 1112 | |
| 1113 | /* |
| 1114 | * Allocate page table entries. They will be modified in the tests. |
| 1115 | * Let's save the page table entries so that they can be released |
| 1116 | * when the tests are completed. |
| 1117 | */ |
| 1118 | args->pgdp = pgd_offset(args->mm, args->vaddr); |
| 1119 | args->p4dp = p4d_alloc(args->mm, args->pgdp, args->vaddr); |
| 1120 | if (!args->p4dp) { |
| 1121 | pr_err("Failed to allocate p4d entries\n"); |
| 1122 | ret = -ENOMEM; |
| 1123 | goto error; |
| 1124 | } |
| 1125 | args->start_p4dp = p4d_offset(args->pgdp, 0UL); |
| 1126 | WARN_ON(!args->start_p4dp); |
| 1127 | |
| 1128 | args->pudp = pud_alloc(args->mm, args->p4dp, args->vaddr); |
| 1129 | if (!args->pudp) { |
| 1130 | pr_err("Failed to allocate pud entries\n"); |
| 1131 | ret = -ENOMEM; |
| 1132 | goto error; |
| 1133 | } |
| 1134 | args->start_pudp = pud_offset(args->p4dp, 0UL); |
| 1135 | WARN_ON(!args->start_pudp); |
| 1136 | |
| 1137 | args->pmdp = pmd_alloc(args->mm, args->pudp, args->vaddr); |
| 1138 | if (!args->pmdp) { |
| 1139 | pr_err("Failed to allocate pmd entries\n"); |
| 1140 | ret = -ENOMEM; |
| 1141 | goto error; |
| 1142 | } |
| 1143 | args->start_pmdp = pmd_offset(args->pudp, 0UL); |
| 1144 | WARN_ON(!args->start_pmdp); |
| 1145 | |
| 1146 | if (pte_alloc(args->mm, args->pmdp)) { |
| 1147 | pr_err("Failed to allocate pte entries\n"); |
| 1148 | ret = -ENOMEM; |
| 1149 | goto error; |
| 1150 | } |
| 1151 | args->start_ptep = pmd_pgtable(READ_ONCE(*args->pmdp)); |
| 1152 | WARN_ON(!args->start_ptep); |
| 1153 | |
| 1154 | /* |
| 1155 | * PFN for mapping at PTE level is determined from a standard kernel |
| 1156 | * text symbol. But pfns for higher page table levels are derived by |
| 1157 | * masking lower bits of this real pfn. These derived pfns might not |
| 1158 | * exist on the platform but that does not really matter as pfn_pxx() |
| 1159 | * helpers will still create appropriate entries for the test. This |
| 1160 | * helps avoid requiring large memory block allocations for mappings |
| 1161 | * at higher page table levels in some of the tests. |
| 1162 | */ |
| 1163 | phys = __pa_symbol(&start_kernel); |
| 1164 | args->fixed_pgd_pfn = __phys_to_pfn(phys & PGDIR_MASK); |
| 1165 | args->fixed_p4d_pfn = __phys_to_pfn(phys & P4D_MASK); |
| 1166 | args->fixed_pud_pfn = __phys_to_pfn(phys & PUD_MASK); |
| 1167 | args->fixed_pmd_pfn = __phys_to_pfn(phys & PMD_MASK); |
| 1168 | args->fixed_pte_pfn = __phys_to_pfn(phys & PAGE_MASK); |
| 1169 | WARN_ON(!pfn_valid(args->fixed_pte_pfn)); |
| 1170 | |
| 1171 | /* |
| 1172 | * Allocate (huge) pages because some of the tests need to access |
| 1173 | * the data in the pages. The corresponding tests will be skipped |
| 1174 | * if we fail to allocate (huge) pages. |
| 1175 | */ |
| 1176 | if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && |
| 1177 | IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) && |
| 1178 | has_transparent_hugepage()) { |
| 1179 | page = debug_vm_pgtable_alloc_huge_page(args, |
| 1180 | HPAGE_PUD_SHIFT - PAGE_SHIFT); |
| 1181 | if (page) { |
| 1182 | args->pud_pfn = page_to_pfn(page); |
| 1183 | args->pmd_pfn = args->pud_pfn; |
| 1184 | args->pte_pfn = args->pud_pfn; |
| 1185 | return 0; |
| 1186 | } |
| 1187 | } |
| 1188 | |
| 1189 | if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && |
| 1190 | has_transparent_hugepage()) { |
| 1191 | page = debug_vm_pgtable_alloc_huge_page(args, HPAGE_PMD_ORDER); |
| 1192 | if (page) { |
| 1193 | args->pmd_pfn = page_to_pfn(page); |
| 1194 | args->pte_pfn = args->pmd_pfn; |
| 1195 | return 0; |
| 1196 | } |
| 1197 | } |
| 1198 | |
| 1199 | page = alloc_pages(GFP_KERNEL, 0); |
| 1200 | if (page) |
| 1201 | args->pte_pfn = page_to_pfn(page); |
| 1202 | |
| 1203 | return 0; |
| 1204 | |
| 1205 | error: |
| 1206 | destroy_args(args); |
| 1207 | return ret; |
| 1208 | } |
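
/*
 * Worked example of the fixed pfn derivation in init_args() above,
 * assuming 4K base pages and 2M PMDs with a hypothetical
 * __pa_symbol(&start_kernel) == 0x4321f567: fixed_pte_pfn == 0x4321f
 * (phys & PAGE_MASK == 0x4321f000) while fixed_pmd_pfn == 0x43200
 * (phys & PMD_MASK == 0x43200000). Only the PTE level pfn is known to
 * be pfn_valid(); the higher level pfns are synthetic but sufficient
 * for constructing test entries.
 */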
| 1209 | |
Anshuman Khandual | 399145f | 2020-06-04 16:47:15 -0700 | [diff] [blame] | 1210 | static int __init debug_vm_pgtable(void) |
| 1211 | { |
Gavin Shan | 3c9b84f | 2021-09-02 14:52:19 -0700 | [diff] [blame] | 1212 | struct pgtable_debug_args args; |
Anshuman Khandual | a5c3b9f | 2020-08-06 23:19:20 -0700 | [diff] [blame] | 1213 | struct vm_area_struct *vma; |
Anshuman Khandual | 399145f | 2020-06-04 16:47:15 -0700 | [diff] [blame] | 1214 | struct mm_struct *mm; |
| 1215 | pgd_t *pgdp; |
| 1216 | p4d_t *p4dp, *saved_p4dp; |
| 1217 | pud_t *pudp, *saved_pudp; |
| 1218 | pmd_t *pmdp, *saved_pmdp, pmd; |
Anshuman Khandual | 399145f | 2020-06-04 16:47:15 -0700 | [diff] [blame] | 1219 | pgtable_t saved_ptep; |
Gavin Shan | 8cb183f | 2021-09-02 14:52:28 -0700 | [diff] [blame] | 1220 | pgprot_t prot; |
Anshuman Khandual | 399145f | 2020-06-04 16:47:15 -0700 | [diff] [blame] | 1221 | phys_addr_t paddr; |
Gavin Shan | c0fe07b | 2021-09-02 14:52:41 -0700 | [diff] [blame^] | 1222 | unsigned long vaddr; |
Gavin Shan | 36b77d1 | 2021-09-02 14:52:22 -0700 | [diff] [blame] | 1223 | unsigned long pud_aligned; |
Kees Cook | fea1120 | 2020-06-03 13:28:45 -0700 | [diff] [blame] | 1224 | spinlock_t *ptl = NULL; |
Gavin Shan | 3c9b84f | 2021-09-02 14:52:19 -0700 | [diff] [blame] | 1225 | int idx, ret; |
Anshuman Khandual | 399145f | 2020-06-04 16:47:15 -0700 | [diff] [blame] | 1226 | |
| 1227 | pr_info("Validating architecture page table helpers\n"); |
Gavin Shan | 3c9b84f | 2021-09-02 14:52:19 -0700 | [diff] [blame] | 1228 | ret = init_args(&args); |
| 1229 | if (ret) |
| 1230 | return ret; |
| 1231 | |
Anshuman Khandual | 399145f | 2020-06-04 16:47:15 -0700 | [diff] [blame] | 1232 | prot = vm_get_page_prot(VMFLAGS); |
| 1233 | vaddr = get_random_vaddr(); |
| 1234 | mm = mm_alloc(); |
| 1235 | if (!mm) { |
| 1236 | pr_err("mm_struct allocation failed\n"); |
		destroy_args(&args);
| 1237 | return 1; |
| 1238 | } |
| 1239 | |
Anshuman Khandual | a5c3b9f | 2020-08-06 23:19:20 -0700 | [diff] [blame] | 1240 | vma = vm_area_alloc(mm); |
| 1241 | if (!vma) { |
| 1242 | pr_err("vma allocation failed\n"); |
		mmdrop(mm);
		destroy_args(&args);
| 1243 | return 1; |
| 1244 | } |
| 1245 | |
Anshuman Khandual | 0528940 | 2020-08-06 23:19:16 -0700 | [diff] [blame] | 1246 | /* |
Anshuman Khandual | 399145f | 2020-06-04 16:47:15 -0700 | [diff] [blame] | 1247 | * PFN for mapping at PTE level is determined from a standard kernel |
| 1248 | * text symbol. But pfns for higher page table levels are derived by |
| 1249 | * masking lower bits of this real pfn. These derived pfns might not |
| 1250 | * exist on the platform but that does not really matter as pfn_pxx() |
| 1251 | * helpers will still create appropriate entries for the test. This |
| 1252 | * helps avoid requiring large memory block allocations for mappings |
| 1253 | * at higher page table levels. |
| 1254 | */ |
| 1255 | paddr = __pa_symbol(&start_kernel); |
| 1256 | |
Anshuman Khandual | 399145f | 2020-06-04 16:47:15 -0700 | [diff] [blame] | 1257 | pud_aligned = (paddr & PUD_MASK) >> PAGE_SHIFT; |
Anshuman Khandual | 399145f | 2020-06-04 16:47:15 -0700 | [diff] [blame] | 1258 | |
| 1259 | pgdp = pgd_offset(mm, vaddr); |
| 1260 | p4dp = p4d_alloc(mm, pgdp, vaddr); |
| 1261 | pudp = pud_alloc(mm, p4dp, vaddr); |
| 1262 | pmdp = pmd_alloc(mm, pudp, vaddr); |
Aneesh Kumar K.V | f14312e | 2020-10-15 20:05:10 -0700 | [diff] [blame] | 1263 | /* |
| 1264 | * Allocate pgtable_t |
| 1265 | */ |
| 1266 | if (pte_alloc(mm, pmdp)) { |
| 1267 | pr_err("pgtable allocation failed\n"); |
		destroy_args(&args);
| 1268 | return 1; |
| 1269 | } |
Anshuman Khandual | 399145f | 2020-06-04 16:47:15 -0700 | [diff] [blame] | 1270 | |
| 1271 | /* |
| 1272 | * Save all the page table page addresses as the page table |
| 1273 | * entries will be used for testing with random or garbage |
| 1274 | * values. These saved addresses will be used for freeing |
| 1275 | * page table pages. |
| 1276 | */ |
| 1277 | pmd = READ_ONCE(*pmdp); |
| 1278 | saved_p4dp = p4d_offset(pgdp, 0UL); |
| 1279 | saved_pudp = pud_offset(p4dp, 0UL); |
| 1280 | saved_pmdp = pmd_offset(pudp, 0UL); |
| 1281 | saved_ptep = pmd_pgtable(pmd); |
| 1282 | |
Anshuman Khandual | 2e326c0 | 2021-02-24 12:01:36 -0800 | [diff] [blame] | 1283 | /* |
| 1284 | * Iterate over the protection_map[] to make sure that all |
| 1285 | * the basic page table transformation validations hold true |
| 1286 | * irrespective of the starting protection value for a |
| 1287 | * given page table entry. |
| 1288 | */ |
| 1289 | for (idx = 0; idx < ARRAY_SIZE(protection_map); idx++) { |
Gavin Shan | 36b77d1 | 2021-09-02 14:52:22 -0700 | [diff] [blame] | 1290 | pte_basic_tests(&args, idx); |
| 1291 | pmd_basic_tests(&args, idx); |
| 1292 | pud_basic_tests(&args, idx); |
Anshuman Khandual | 2e326c0 | 2021-02-24 12:01:36 -0800 | [diff] [blame] | 1293 | } |
| 1294 | |
| 1295 | /* |
| 1296 | * Both P4D and PGD level tests are very basic and do not |
| 1297 | * involve creating page table entries from the protection |
| 1298 | * value and the given pfn. Hence keep them out of the |
| 1299 | * above iteration for now to save some test execution |
| 1300 | * time. |
| 1301 | */ |
Gavin Shan | 36b77d1 | 2021-09-02 14:52:22 -0700 | [diff] [blame] | 1302 | p4d_basic_tests(&args); |
| 1303 | pgd_basic_tests(&args); |
Anshuman Khandual | 399145f | 2020-06-04 16:47:15 -0700 | [diff] [blame] | 1304 | |
Gavin Shan | 8983d23 | 2021-09-02 14:52:25 -0700 | [diff] [blame] | 1305 | pmd_leaf_tests(&args); |
| 1306 | pud_leaf_tests(&args); |
Anshuman Khandual | a5c3b9f | 2020-08-06 23:19:20 -0700 | [diff] [blame] | 1307 | |
Gavin Shan | 8983d23 | 2021-09-02 14:52:25 -0700 | [diff] [blame] | 1308 | pte_savedwrite_tests(&args); |
| 1309 | pmd_savedwrite_tests(&args); |
Anshuman Khandual | a5c3b9f | 2020-08-06 23:19:20 -0700 | [diff] [blame] | 1310 | |
Gavin Shan | 8cb183f | 2021-09-02 14:52:28 -0700 | [diff] [blame] | 1311 | pte_special_tests(&args); |
| 1312 | pte_protnone_tests(&args); |
| 1313 | pmd_protnone_tests(&args); |
Anshuman Khandual | 0528940 | 2020-08-06 23:19:16 -0700 | [diff] [blame] | 1314 | |
Gavin Shan | 8cb183f | 2021-09-02 14:52:28 -0700 | [diff] [blame] | 1315 | pte_devmap_tests(&args); |
| 1316 | pmd_devmap_tests(&args); |
| 1317 | pud_devmap_tests(&args); |
Anshuman Khandual | 0528940 | 2020-08-06 23:19:16 -0700 | [diff] [blame] | 1318 | |
Gavin Shan | 5f447e8 | 2021-09-02 14:52:32 -0700 | [diff] [blame] | 1319 | pte_soft_dirty_tests(&args); |
| 1320 | pmd_soft_dirty_tests(&args); |
| 1321 | pte_swap_soft_dirty_tests(&args); |
| 1322 | pmd_swap_soft_dirty_tests(&args); |
Anshuman Khandual | 0528940 | 2020-08-06 23:19:16 -0700 | [diff] [blame] | 1323 | |
Gavin Shan | 5f447e8 | 2021-09-02 14:52:32 -0700 | [diff] [blame] | 1324 | pte_swap_tests(&args); |
| 1325 | pmd_swap_tests(&args); |
Anshuman Khandual | 0528940 | 2020-08-06 23:19:16 -0700 | [diff] [blame] | 1326 | |
Gavin Shan | 4878a88 | 2021-09-02 14:52:35 -0700 | [diff] [blame] | 1327 | swap_migration_tests(&args); |
Anshuman Khandual | 0528940 | 2020-08-06 23:19:16 -0700 | [diff] [blame] | 1328 | |
Gavin Shan | 4878a88 | 2021-09-02 14:52:35 -0700 | [diff] [blame] | 1329 | pmd_thp_tests(&args); |
| 1330 | pud_thp_tests(&args); |
Anshuman Khandual | 0528940 | 2020-08-06 23:19:16 -0700 | [diff] [blame] | 1331 | |
Gavin Shan | 36b77d1 | 2021-09-02 14:52:22 -0700 | [diff] [blame] | 1332 | hugetlb_basic_tests(&args); |
Aneesh Kumar K.V | e8edf0a | 2020-10-15 20:04:49 -0700 | [diff] [blame] | 1333 | |
Aneesh Kumar K.V | 6f302e27 | 2020-10-15 20:04:53 -0700 | [diff] [blame] | 1334 | /* |
| 1335 | * Page table modifying tests. They need to hold the |
| 1336 | * proper page table lock. |
| 1337 | */ |
Aneesh Kumar K.V | e8edf0a | 2020-10-15 20:04:49 -0700 | [diff] [blame] | 1338 | |
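/*
 * Lock granularity used below, summarizing the generic rules: the PTE
 * level tests take the split page table lock via pte_offset_map_lock(),
 * the PMD and PUD level tests take pmd_lock() and pud_lock()
 * respectively, and the P4D and PGD level tests serialize on
 * mm->page_table_lock.
 */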
Gavin Shan | 44966c4 | 2021-09-02 14:52:38 -0700 | [diff] [blame] | 1339 | args.ptep = pte_offset_map_lock(args.mm, args.pmdp, args.vaddr, &ptl); |
| 1340 | pte_clear_tests(&args); |
| 1341 | pte_advanced_tests(&args); |
| 1342 | pte_unmap_unlock(args.ptep, ptl); |
Aneesh Kumar K.V | e8edf0a | 2020-10-15 20:04:49 -0700 | [diff] [blame] | 1343 | |
Gavin Shan | c0fe07b | 2021-09-02 14:52:41 -0700 | [diff] [blame^] | 1344 | ptl = pmd_lock(args.mm, args.pmdp); |
| 1345 | pmd_clear_tests(&args); |
| 1346 | pmd_advanced_tests(&args); |
| 1347 | pmd_huge_tests(&args); |
| 1348 | pmd_populate_tests(&args); |
Aneesh Kumar K.V | 6f302e27 | 2020-10-15 20:04:53 -0700 | [diff] [blame] | 1349 | spin_unlock(ptl); |
| 1350 | |
| 1351 | ptl = pud_lock(mm, pudp); |
| 1352 | pud_clear_tests(mm, pudp); |
| 1353 | pud_advanced_tests(mm, vma, pudp, pud_aligned, vaddr, prot); |
| 1354 | pud_huge_tests(pudp, pud_aligned, prot); |
Aneesh Kumar K.V | e8edf0a | 2020-10-15 20:04:49 -0700 | [diff] [blame] | 1355 | pud_populate_tests(mm, pudp, saved_pmdp); |
Aneesh Kumar K.V | 6f302e27 | 2020-10-15 20:04:53 -0700 | [diff] [blame] | 1356 | spin_unlock(ptl); |
| 1357 | |
Aneesh Kumar K.V | 6f302e27 | 2020-10-15 20:04:53 -0700 | [diff] [blame] | 1358 | spin_lock(&mm->page_table_lock); |
| 1359 | p4d_clear_tests(mm, p4dp); |
| 1360 | pgd_clear_tests(mm, pgdp); |
Aneesh Kumar K.V | e8edf0a | 2020-10-15 20:04:49 -0700 | [diff] [blame] | 1361 | p4d_populate_tests(mm, p4dp, saved_pudp); |
| 1362 | pgd_populate_tests(mm, pgdp, saved_p4dp); |
Aneesh Kumar K.V | 6f302e27 | 2020-10-15 20:04:53 -0700 | [diff] [blame] | 1363 | spin_unlock(&mm->page_table_lock); |
Aneesh Kumar K.V | e8edf0a | 2020-10-15 20:04:49 -0700 | [diff] [blame] | 1364 | |
Anshuman Khandual | 399145f | 2020-06-04 16:47:15 -0700 | [diff] [blame] | 1365 | p4d_free(mm, saved_p4dp); |
| 1366 | pud_free(mm, saved_pudp); |
| 1367 | pmd_free(mm, saved_pmdp); |
| 1368 | pte_free(mm, saved_ptep); |
| 1369 | |
Anshuman Khandual | a5c3b9f | 2020-08-06 23:19:20 -0700 | [diff] [blame] | 1370 | vm_area_free(vma); |
Anshuman Khandual | 399145f | 2020-06-04 16:47:15 -0700 | [diff] [blame] | 1371 | mm_dec_nr_puds(mm); |
| 1372 | mm_dec_nr_pmds(mm); |
| 1373 | mm_dec_nr_ptes(mm); |
| 1374 | mmdrop(mm); |
Gavin Shan | 3c9b84f | 2021-09-02 14:52:19 -0700 | [diff] [blame] | 1375 | |
| 1376 | destroy_args(&args); |
Anshuman Khandual | 399145f | 2020-06-04 16:47:15 -0700 | [diff] [blame] | 1377 | return 0; |
| 1378 | } |
| 1379 | late_initcall(debug_vm_pgtable); |
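
/*
 * This test is selected by CONFIG_DEBUG_VM_PGTABLE and runs once during
 * late init. Failures show up as WARN_ON() splats, while per helper
 * progress is reported through pr_debug() with the pr_fmt() prefix,
 * e.g. (illustrative):
 *
 *	debug_vm_pgtable: [pmd_swap_tests           ]: Validating PMD swap
 */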