/*
 * Copyright 2005, Paul Mackerras, IBM Corporation.
 * Copyright 2009, Benjamin Herrenschmidt, IBM Corporation.
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/mm.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/mmu.h>
#include <asm/tlb.h>

#include <mm/mmu_decl.h>

#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>

#if H_PGTABLE_RANGE > (USER_VSID_RANGE * (TASK_SIZE_USER64 / TASK_CONTEXT_SIZE))
#warning Limited user VSID range means pagetable space is wasted
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * vmemmap is the starting address of the virtual address space where
 * struct pages are allocated for all possible PFNs present on the system,
 * including holes and bad memory (hence sparse).  These virtual struct
 * pages are stored in sequence in this virtual address space irrespective
 * of whether the corresponding PFN is valid or not.  This achieves a
 * constant relationship between the address of a struct page and its PFN.
 *
 * During boot or a memory hotplug operation, when a new memory section is
 * added, physical memory allocation (including hash table bolting) is
 * performed for the set of struct pages which are part of the memory
 * section.  This saves memory by not allocating struct pages for PFNs
 * which are not valid.
 *
 *  ----------------------------------------------
 * | PHYSICAL ALLOCATION OF VIRTUAL STRUCT PAGES|
 *  ----------------------------------------------
 *
 *         f000000000000000                  c000000000000000
 * vmemmap +--------------+                  +--------------+
 *  +      |  page struct | +--------------> |  page struct |
 *  |      +--------------+                  +--------------+
 *  |      |  page struct | +--------------> |  page struct |
 *  |      +--------------+ |                +--------------+
 *  |      |  page struct | +       +------> |  page struct |
 *  |      +--------------+ |                +--------------+
 *  |      |  page struct | |       +--> |  page struct |
 *  |      +--------------+ |       |    +--------------+
 *  |      |  page struct | |       |
 *  |      +--------------+ |       |
 *  |      |  page struct | |       |
 *  |      +--------------+ |       |
 *  |      |  page struct | |       |
 *  |      +--------------+ |       |
 *  |      |  page struct | |       |
 *  |      +--------------+ |       |
 *  |      |  page struct | +-------+   |
 *  |      +--------------+             |
 *  |      |  page struct | +-----------+
 *  |      +--------------+
 *  |      |  page struct | No mapping
 *  |      +--------------+
 *  |      |  page struct | No mapping
 *  v      +--------------+
 *
 *  -----------------------------------------
 * | RELATION BETWEEN STRUCT PAGES AND PFNS|
 *  -----------------------------------------
 *
 * vmemmap +--------------+                 +---------------+
 *  +      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |              |
 *  |      +--------------+
 *  |      |              |
 *  |      +--------------+
 *  |      |              |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |              |
 *  |      +--------------+
 *  |      |              |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  v      +--------------+                 +---------------+
 */
/*
 * On hash-based CPUs, the vmemmap is bolted in the hash table.
 */
int __meminit hash__vmemmap_create_mapping(unsigned long start,
                                           unsigned long page_size,
                                           unsigned long phys)
{
        int rc;

        if ((start + page_size) >= H_VMEMMAP_END) {
                pr_warn("Outside the supported range\n");
                return -1;
        }

        rc = htab_bolt_mapping(start, start + page_size, phys,
                               pgprot_val(PAGE_KERNEL),
                               mmu_vmemmap_psize, mmu_kernel_ssize);
        if (rc < 0) {
                int rc2 = htab_remove_mapping(start, start + page_size,
                                              mmu_vmemmap_psize,
                                              mmu_kernel_ssize);
                BUG_ON(rc2 && (rc2 != -ENOENT));
        }
        return rc;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void hash__vmemmap_remove_mapping(unsigned long start,
                                  unsigned long page_size)
{
        int rc = htab_remove_mapping(start, start + page_size,
                                     mmu_vmemmap_psize,
                                     mmu_kernel_ssize);
        BUG_ON((rc < 0) && (rc != -ENOENT));
        WARN_ON(rc == -ENOENT);
}
#endif
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * map_kernel_page() is currently only called by __ioremap(); it adds an
 * entry to the ioremap page table and an entry to the HPT, possibly
 * bolting it.
 */
int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
{
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;

        BUILD_BUG_ON(TASK_SIZE_USER64 > H_PGTABLE_RANGE);
        if (slab_is_available()) {
                pgdp = pgd_offset_k(ea);
                pudp = pud_alloc(&init_mm, pgdp, ea);
                if (!pudp)
                        return -ENOMEM;
                pmdp = pmd_alloc(&init_mm, pudp, ea);
                if (!pmdp)
                        return -ENOMEM;
                ptep = pte_alloc_kernel(pmdp, ea);
                if (!ptep)
                        return -ENOMEM;
                set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot));
        } else {
                /*
                 * If the mm subsystem is not fully up, we cannot create a
                 * Linux page table entry for this mapping.  Simply bolt an
                 * entry in the hardware page table.
                 */
                if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, pgprot_val(prot),
                                      mmu_io_psize, mmu_kernel_ssize)) {
                        printk(KERN_ERR "Failed to create bolted mapping for "
                               "I/O memory at %016lx!\n", pa);
                        return -ENOMEM;
                }
        }

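        /*
         * Make sure the page table (or bolted HPT) update above is
         * visible to other CPUs before the new mapping gets used.
         */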
        smp_wmb();
        return 0;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

unsigned long hash__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
                                        pmd_t *pmdp, unsigned long clr,
                                        unsigned long set)
{
        __be64 old_be, tmp;
        unsigned long old;

#ifdef CONFIG_DEBUG_VM
        WARN_ON(!hash__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
        assert_spin_locked(pmd_lockptr(mm, pmdp));
#endif

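        /*
         * Atomically update the PMD with a ldarx/stdcx. loop: spin while
         * H_PAGE_BUSY is set, then clear the bits in 'clr' and set the
         * bits in 'set', returning the old (big-endian) value.
         */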
        __asm__ __volatile__(
        "1:     ldarx   %0,0,%3\n\
                and.    %1,%0,%6\n\
                bne-    1b \n\
                andc    %1,%0,%4 \n\
                or      %1,%1,%7\n\
                stdcx.  %1,0,%3 \n\
                bne-    1b"
        : "=&r" (old_be), "=&r" (tmp), "=m" (*pmdp)
        : "r" (pmdp), "r" (cpu_to_be64(clr)), "m" (*pmdp),
          "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
        : "cc" );

        old = be64_to_cpu(old_be);

        trace_hugepage_update(addr, old, clr, set);
        if (old & H_PAGE_HASHPTE)
                hpte_do_hugepage_flush(mm, addr, pmdp, old);
        return old;
}

pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
                                pmd_t *pmdp)
{
        pmd_t pmd;

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        VM_BUG_ON(pmd_trans_huge(*pmdp));
        VM_BUG_ON(pmd_devmap(*pmdp));

        pmd = *pmdp;
        pmd_clear(pmdp);
        /*
         * Wait for all pending hash_page to finish.  This is needed in the
         * case of subpage collapse.  When we collapse normal pages to a
         * hugepage, we first clear the pmd, then invalidate all the PTE
         * entries.  The assumption here is that any low level page fault
         * will see a none pmd and take the slow path that will wait on
         * mmap_sem.  But we could very well be in hash_page with a local
         * ptep pointer value.  Such a hash_page can result in adding new
         * HPTE entries for normal subpages.  That means we could be
         * modifying the page content as we copy them to a huge page.  So
         * wait for parallel hash_page to finish before invalidating the
         * HPTE entries.  We can do this by sending an IPI to all the cpus
         * and executing a dummy function there.
         */
        serialize_against_pte_lookup(vma->vm_mm);
        /*
         * Now invalidate the hpte entries in the range covered by pmd.
         * This makes sure we take a fault and will find the pmd as none,
         * which will result in a major fault that takes mmap_sem and
         * hence waits for the collapse to complete.  Without this,
         * __collapse_huge_page_copy can result in copying the old content.
         */
        flush_tlb_pmd_range(vma->vm_mm, &pmd, address);
        return pmd;
}

/*
 * We want to put the pgtable in the pmd and use the pgtable for tracking
 * the base page size hptes.
 */
void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                      pgtable_t pgtable)
{
        pgtable_t *pgtable_slot;

        assert_spin_locked(pmd_lockptr(mm, pmdp));
        /*
         * We store the pgtable in the second half of the PMD.
         */
        pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
        *pgtable_slot = pgtable;
        /*
         * Expose the deposited pgtable to other cpus before we set the
         * hugepage PTE at the pmd level.  The hash fault code looks at
         * the deposited pgtable to store hash index values.
         */
        smp_wmb();
}

pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
        pgtable_t pgtable;
        pgtable_t *pgtable_slot;

        assert_spin_locked(pmd_lockptr(mm, pmdp));

        pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
        pgtable = *pgtable_slot;
        /*
         * Once we withdraw, mark the entry NULL.
         */
        *pgtable_slot = NULL;
        /*
         * We store HPTE information in the deposited PTE fragment.
         * Zero out the contents on withdraw.
         */
        memset(pgtable, 0, PTE_FRAG_SIZE);
        return pgtable;
}

/*
 * A Linux hugepage PMD was changed and the corresponding hash table
 * entries need to be flushed.
 */
void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
                            pmd_t *pmdp, unsigned long old_pmd)
{
        int ssize;
        unsigned int psize;
        unsigned long vsid;
        unsigned long flags = 0;

        /* Get the base page size, VSID and segment size */
#ifdef CONFIG_DEBUG_VM
        psize = get_slice_psize(mm, addr);
        BUG_ON(psize == MMU_PAGE_16M);
#endif
        if (old_pmd & H_PAGE_COMBO)
                psize = MMU_PAGE_4K;
        else
                psize = MMU_PAGE_64K;

        if (!is_kernel_addr(addr)) {
                ssize = user_segment_size(addr);
                vsid = get_user_vsid(&mm->context, addr, ssize);
                WARN_ON(vsid == 0);
        } else {
                vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
                ssize = mmu_kernel_ssize;
        }

        if (mm_is_thread_local(mm))
                flags |= HPTE_LOCAL_UPDATE;

        return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, flags);
}

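/*
 * Clear the hugepage PMD entry and return its old value.  The hash slot
 * information cached in the deposited page table is zeroed out as well,
 * since it no longer describes a valid mapping.
 */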
pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
                                    unsigned long addr, pmd_t *pmdp)
{
        pmd_t old_pmd;
        pgtable_t pgtable;
        unsigned long old;
        pgtable_t *pgtable_slot;

        old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
        old_pmd = __pmd(old);
        /*
         * We have pmd == none and we are holding page_table_lock.
         * So we can safely go and clear the pgtable hash
         * index info.
         */
        pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
        pgtable = *pgtable_slot;
        /*
         * Zero out the old valid and hash index details;
         * the hash fault code looks at them.
         */
        memset(pgtable, 0, PTE_FRAG_SIZE);
        /*
         * Serialize against find_current_mm_pte variants which do lock-less
         * lookup in page tables with local interrupts disabled.  For huge
         * pages it casts pmd_t to pte_t.  Since the format of pte_t is
         * different from pmd_t, we want to prevent a transit from a pmd
         * pointing to a page table to a pmd pointing to a huge page (and
         * back) while interrupts are disabled.  We clear the pmd to
         * possibly replace it with a page table pointer in different code
         * paths, so make sure we wait for the parallel
         * find_current_mm_pte to finish.
         */
        serialize_against_pte_lookup(mm);
        return old_pmd;
}

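/*
 * Report whether the current hash MMU page size configuration can
 * support transparent hugepages, i.e. 16MB mappings at the PMD level.
 */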
int hash__has_transparent_hugepage(void)
{
        if (!mmu_has_feature(MMU_FTR_16M_PAGE))
                return 0;
        /*
         * We support THP only if PMD_SIZE is 16MB.
         */
        if (mmu_psize_defs[MMU_PAGE_16M].shift != PMD_SHIFT)
                return 0;
        /*
         * We need to make sure that we support 16MB hugepages in a segment
         * with base page size 64K or 4K.  We only enable THP with a
         * PAGE_SIZE of 64K.
         */
        /*
         * If we have 64K HPTEs, we will be using that by default.
         */
        if (mmu_psize_defs[MMU_PAGE_64K].shift &&
            (mmu_psize_defs[MMU_PAGE_64K].penc[MMU_PAGE_16M] == -1))
                return 0;
        /*
         * OK, we only have 4K HPTEs.
         */
        if (mmu_psize_defs[MMU_PAGE_4K].penc[MMU_PAGE_16M] == -1)
                return 0;

        return 1;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_STRICT_KERNEL_RWX
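/*
 * Update the protection bits (newpp) on the bolted HPTEs covering the
 * linear-mapping range [start, end), stepping by the linear page size.
 * Returns false if the rounded range is empty.
 */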
static bool hash__change_memory_range(unsigned long start, unsigned long end,
                                      unsigned long newpp)
{
        unsigned long idx;
        unsigned int step, shift;

        shift = mmu_psize_defs[mmu_linear_psize].shift;
        step = 1 << shift;

        start = ALIGN_DOWN(start, step);
        end = ALIGN(end, step); // aligns up

        if (start >= end)
                return false;

        pr_debug("Changing page protection on range 0x%lx-0x%lx, to 0x%lx, step 0x%x\n",
                 start, end, newpp, step);

        for (idx = start; idx < end; idx += step)
                /* Not sure if we can do much with the return value */
                mmu_hash_ops.hpte_updateboltedpp(newpp, idx, mmu_linear_psize,
                                                 mmu_kernel_ssize);

        return true;
}

void hash__mark_rodata_ro(void)
{
        unsigned long start, end;

        start = (unsigned long)_stext;
        end = (unsigned long)__init_begin;

        WARN_ON(!hash__change_memory_range(start, end, PP_RXXX));
}

void hash__mark_initmem_nx(void)
{
        unsigned long start, end, pp;

        start = (unsigned long)__init_begin;
        end = (unsigned long)__init_end;

        pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL));

        WARN_ON(!hash__change_memory_range(start, end, pp));
}
#endif