// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/memblock.h>
#include <linux/debugfs.h>
#include <misc/cxl-base.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/trace.h>
#include <asm/powernv.h>
#include <asm/firmware.h>
#include <asm/ultravisor.h>
#include <asm/kexec.h>

#include <mm/mmu_decl.h>
#include <trace/events/thp.h>

#include "internal.h"

unsigned long __pmd_frag_nr;
EXPORT_SYMBOL(__pmd_frag_nr);
unsigned long __pmd_frag_size_shift;
EXPORT_SYMBOL(__pmd_frag_size_shift);
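/*
 * On book3s64 a PMD page table is smaller than a full page, so each page is
 * carved into __pmd_frag_nr fragments of (1 << __pmd_frag_size_shift) bytes
 * and handed out by the fragment allocator further down in this file.
 */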

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is called when relaxing access to a hugepage. It's also called in the
 * page fault path when we don't hit any of the major fault cases, i.e. a minor
 * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc. The generic code will have
 * handled those two for us; we additionally deal with missing execute
 * permission here on some processors.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp, pmd_t entry, int dirty)
{
	int changed;
#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp));
#endif
	changed = !pmd_same(*(pmdp), entry);
	if (changed) {
		/*
		 * We can use MMU_PAGE_2M here, because only the radix
		 * path looks at the psize.
		 */
		__ptep_set_access_flags(vma, pmdp_ptep(pmdp),
					pmd_pte(entry), address, MMU_PAGE_2M);
	}
	return changed;
}

int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address, pmd_t *pmdp)
{
	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}
/*
 * Set a new huge pmd. We should not be called for updating
 * an existing pmd entry. That should go via pmd_hugepage_update.
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * Make sure the hardware valid bit is not set. We don't do
	 * a tlb flush for this update.
	 */
	WARN_ON(pte_hw_valid(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
	WARN_ON(!(pmd_large(pmd)));
#endif
	trace_hugepage_set_pmd(addr, pmd_val(pmd));
	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}

static void do_serialize(void *arg)
{
	/* We've taken the IPI, so try to trim the mask while here */
	if (radix_enabled()) {
		struct mm_struct *mm = arg;
		exit_lazy_flush_tlb(mm, false);
	}
}

/*
 * Serialize against find_current_mm_pte, which does a lock-less
 * lookup in the page tables with local interrupts disabled. For huge pages
 * it casts pmd_t to pte_t. Since the format of pte_t is different from
 * pmd_t, we want to prevent a transition from a pmd pointing to a page table
 * to a pmd pointing to a huge page (and back) while interrupts are disabled.
 * We clear the pmd to possibly replace it with a page table pointer in
 * different code paths. So make sure we wait for the parallel
 * find_current_mm_pte to finish.
 */
void serialize_against_pte_lookup(struct mm_struct *mm)
{
	smp_mb();
	smp_call_function_many(mm_cpumask(mm), do_serialize, mm, 1);
}
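
/*
 * Callers typically clear or invalidate the PMD first (e.g. via
 * pmd_hugepage_update()), flush the TLB, and only then call
 * serialize_against_pte_lookup() before reusing the PMD as a page table
 * pointer, so that any lockless walker that sampled the old value has
 * finished with it.
 */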

/*
 * We use this to invalidate a pmdp entry before switching it from a
 * huge-page entry back to a regular pmd entry.
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	unsigned long old_pmd;

	old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID);
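	/*
	 * _PAGE_PRESENT is cleared but _PAGE_INVALID is set, so pmd_present()
	 * still returns true for the invalidated entry, which the generic THP
	 * code relies on while the entry is in this transient state.
	 */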
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return __pmd(old_pmd);
}

pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmdp, int full)
{
	pmd_t pmd;
	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
	VM_BUG_ON((pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
		   !pmd_devmap(*pmdp)) || !pmd_present(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
	/*
	 * If this is not a fullmm flush, then a parallel page fault can
	 * possibly convert this PMD entry to a regular level 0 PTE.
	 * Make sure we flush the tlb in that case.
	 */
	if (!full)
		flush_pmd_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
	return pmd;
}

static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

/*
 * At some point we should be able to get rid of
 * pmd_mkhuge() and mk_huge_pmd() when we update all the
 * other archs to mark the pmd huge in pfn_pmd().
 */
pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long pmdv;

	pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;

	return __pmd_mkhuge(pmd_set_protbits(__pmd(pmdv), pgprot));
}

pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
	return pfn_pmd(page_to_pfn(page), pgprot);
}

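/*
 * Change the protection on a huge pmd: keep only the bits that must survive
 * a protection change (the RPN plus the dirty/accessed/software bits covered
 * by _HPAGE_CHG_MASK) and apply the new protection bits on top.
 */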
pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	unsigned long pmdv;

	pmdv = pmd_val(pmd);
	pmdv &= _HPAGE_CHG_MASK;
	return pmd_set_protbits(__pmd(pmdv), newprot);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/* For use by kexec, called with MMU off */
notrace void mmu_cleanup_all(void)
{
	if (radix_enabled())
		radix__mmu_cleanup_all();
	else if (mmu_hash_ops.hpte_clear_all)
		mmu_hash_ops.hpte_clear_all();

	reset_sprs();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int __meminit create_section_mapping(unsigned long start, unsigned long end,
				     int nid, pgprot_t prot)
{
	if (radix_enabled())
		return radix__create_section_mapping(start, end, nid, prot);

	return hash__create_section_mapping(start, end, nid, prot);
}

int __meminit remove_section_mapping(unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__remove_section_mapping(start, end);

	return hash__remove_section_mapping(start, end);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

void __init mmu_partition_table_init(void)
{
	unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
	unsigned long ptcr;

	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
	/* Initialize the Partition Table with no entries */
	partition_tb = memblock_alloc(patb_size, patb_size);
	if (!partition_tb)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, patb_size, patb_size);

	/*
	 * Update the partition table control register (PTCR): the physical
	 * base of the table, with the size encoded in the low bits as
	 * log2(size) - 12, i.e. a 64K table here.
	 */
	ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
	set_ptcr_when_no_uv(ptcr);
	powernv_set_nmmu_ptcr(ptcr);
}

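/*
 * Flush all translations cached under the given LPID. The required sequence
 * differs depending on whether the previous user of the partition ID was
 * running in radix or hash mode, hence the "radix" argument.
 */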
static void flush_partition(unsigned int lpid, bool radix)
{
	if (radix) {
		radix__flush_all_lpid(lpid);
		radix__flush_all_lpid_guest(lpid);
	} else {
		asm volatile("ptesync" : : : "memory");
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		/* do we need fixup here? */
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
	}
}

void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
				   unsigned long dw1, bool flush)
{
	unsigned long old = be64_to_cpu(partition_tb[lpid].patb0);

	/*
	 * When the ultravisor is enabled, the partition table is stored in
	 * secure memory and can only be accessed via an ultravisor call.
	 * However, we maintain a copy of the partition table in normal memory
	 * so that Nest MMU translations can still occur (for normal VMs).
	 *
	 * Therefore, here we always update partition_tb, regardless of whether
	 * we are running under an ultravisor or not.
	 */
	partition_tb[lpid].patb0 = cpu_to_be64(dw0);
	partition_tb[lpid].patb1 = cpu_to_be64(dw1);

	/*
	 * If the ultravisor is enabled, we do an ultravisor call to register
	 * the partition table entry (PATE), which also does a global flush of
	 * the TLBs and partition table caches for the lpid. Otherwise, just do
	 * the flush. The type of flush (hash or radix) depends on what the
	 * previous use of the partition ID was, not the new use.
	 */
	if (firmware_has_feature(FW_FEATURE_ULTRAVISOR)) {
		uv_register_pate(lpid, dw0, dw1);
		pr_info("PATE registered by ultravisor: dw0 = 0x%lx, dw1 = 0x%lx\n",
			dw0, dw1);
	} else if (flush) {
		/*
		 * Boot does not need to flush, because the MMU is off and each
		 * CPU does a tlbiel_all() before being switched on, which
		 * flushes everything.
		 */
		flush_partition(lpid, (old & PATB_HR));
	}
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);

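/*
 * mm->context.pmd_frag caches the next unused PMD fragment of the most
 * recently allocated page table page; get_pmd_from_cache() consumes it and
 * __alloc_for_pmdcache() refills it when a fresh page has to be allocated.
 */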
static pmd_t *get_pmd_from_cache(struct mm_struct *mm)
{
	void *pmd_frag, *ret;

	if (PMD_FRAG_NR == 1)
		return NULL;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pmd_frag;
	if (ret) {
		pmd_frag = ret + PMD_FRAG_SIZE;
		/*
		 * If we have handed out all the fragments in this page,
		 * drop the cached fragment pointer.
		 */
		if (((unsigned long)pmd_frag & ~PAGE_MASK) == 0)
			pmd_frag = NULL;
		mm->context.pmd_frag = pmd_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pmd_t *)ret;
}

static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
{
	void *ret = NULL;
	struct page *page;
	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	page = alloc_page(gfp);
	if (!page)
		return NULL;
	if (!pgtable_pmd_page_ctor(page)) {
		__free_pages(page, 0);
		return NULL;
	}

	atomic_set(&page->pt_frag_refcount, 1);

	ret = page_address(page);
	/*
	 * If we support only one fragment, just return the
	 * allocated page.
	 */
	if (PMD_FRAG_NR == 1)
		return ret;

	spin_lock(&mm->page_table_lock);
	/*
	 * If the per-mm fragment cache is still empty, publish this page as
	 * the cache with a full fragment count. Otherwise another task beat
	 * us to it, and we return the page as a single-fragment allocation.
	 */
	if (likely(!mm->context.pmd_frag)) {
		atomic_set(&page->pt_frag_refcount, PMD_FRAG_NR);
		mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pmd_t *)ret;
}

pmd_t *pmd_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	pmd_t *pmd;

	pmd = get_pmd_from_cache(mm);
	if (pmd)
		return pmd;

	return __alloc_for_pmdcache(mm);
}

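/*
 * pt_frag_refcount tracks the outstanding fragments of a page: it starts at 1
 * for a single-use allocation and is raised to PMD_FRAG_NR when the page is
 * published as the per-mm fragment cache, so the backing page is only freed
 * once every fragment handed out from it has been released.
 */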
void pmd_fragment_free(unsigned long *pmd)
{
	struct page *page = virt_to_page(pmd);

	if (PageReserved(page))
		return free_reserved_page(page);

	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
		pgtable_pmd_page_dtor(page);
		__free_page(page);
	}
}

static inline void pgtable_free(void *table, int index)
{
	switch (index) {
	case PTE_INDEX:
		pte_fragment_free(table, 0);
		break;
	case PMD_INDEX:
		pmd_fragment_free(table);
		break;
	case PUD_INDEX:
		__pud_free(table);
		break;
#if defined(CONFIG_PPC_4K_PAGES) && defined(CONFIG_HUGETLB_PAGE)
	/* 16M hugepd directory at pud level */
	case HTLB_16M_INDEX:
		BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0);
		kmem_cache_free(PGT_CACHE(H_16M_CACHE_INDEX), table);
		break;
	/* 16G hugepd directory at the pgd level */
	case HTLB_16G_INDEX:
		BUILD_BUG_ON(H_16G_CACHE_INDEX <= 0);
		kmem_cache_free(PGT_CACHE(H_16G_CACHE_INDEX), table);
		break;
#endif
	/* We don't free the pgd table via the RCU callback */
	default:
		BUG();
	}
}

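/*
 * The table pointers handed to pgtable_free_tlb() are sufficiently aligned
 * that their low bits are free to carry the table level: the index is OR'ed
 * into the pointer passed to tlb_remove_table() and recovered again in
 * __tlb_remove_table() once it is safe to free the table.
 */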
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(index > MAX_PGTABLE_INDEX_SIZE);
	pgf |= index;
	tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned int index = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	return pgtable_free(table, index);
}

#ifdef CONFIG_PROC_FS
atomic_long_t direct_pages_count[MMU_PAGE_COUNT];

void arch_report_meminfo(struct seq_file *m)
{
	/*
	 * Hash maps all of memory with a single page size (mmu_linear_psize),
	 * so don't bother printing these counters when running on hash.
	 */
	if (!radix_enabled())
		return;
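	/*
	 * Each counter holds the number of direct-map translations of that
	 * size; shifting by log2(size in kB) (2, 6, 11 and 20 respectively)
	 * converts the count to kB for /proc/meminfo.
	 */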
	seq_printf(m, "DirectMap4k: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_4K]) << 2);
	seq_printf(m, "DirectMap64k: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_64K]) << 6);
	seq_printf(m, "DirectMap2M: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_2M]) << 11);
	seq_printf(m, "DirectMap1G: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
}
#endif /* CONFIG_PROC_FS */

pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *ptep)
{
	unsigned long pte_val;

	/*
	 * Clear _PAGE_PRESENT so that no parallel hardware update of the PTE
	 * is possible. Also keep pte_present() true (via _PAGE_INVALID) so
	 * that we don't take a spurious fault in the meantime.
	 */
	pte_val = pte_update(vma->vm_mm, addr, ptep, _PAGE_PRESENT, _PAGE_INVALID, 0);

	return __pte(pte_val);
}

void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *ptep, pte_t old_pte, pte_t pte)
{
	if (radix_enabled())
		return radix__ptep_modify_prot_commit(vma, addr,
						      ptep, old_pte, pte);
	set_pte_at(vma->vm_mm, addr, ptep, pte);
}

/*
 * For hash translation mode, we use the deposited table to store hash slot
 * information, and it is stored at a PTRS_PER_PMD offset from the related pmd
 * location. Hence a pmd move requires a deposit and withdraw.
 *
 * For radix translation with split pmd ptl, we store the deposited table in
 * the pmd page. Hence if we have a different pmd page we need to withdraw
 * during a pmd move.
 *
 * With hash we always use the deposited table, irrespective of anon or not.
 * With radix we use the deposited table only for anonymous mappings.
 */
int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
			   struct spinlock *old_pmd_ptl,
			   struct vm_area_struct *vma)
{
	if (radix_enabled())
		return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);

	return true;
}

/*
 * Does the CPU support tlbie?
 */
bool tlbie_capable __read_mostly = true;
EXPORT_SYMBOL(tlbie_capable);

/*
 * Should tlbie be used for management of CPU TLBs, for kernel and process
 * address spaces? tlbie may still be used for nMMU accelerators, and for KVM
 * guest address spaces.
 */
bool tlbie_enabled __read_mostly = true;

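/*
 * Passing "disable_tlbie" on the kernel command line turns off the use of
 * broadcast tlbie for kernel and process TLB management (falling back to
 * tlbiel plus IPIs). As the check below shows, this is only honoured when
 * running with the radix MMU.
 */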
static int __init setup_disable_tlbie(char *str)
{
	if (!radix_enabled()) {
		pr_err("disable_tlbie: Unable to disable TLBIE with Hash MMU.\n");
		return 1;
	}

	tlbie_capable = false;
	tlbie_enabled = false;

	return 1;
}
__setup("disable_tlbie", setup_disable_tlbie);

static int __init pgtable_debugfs_setup(void)
{
	if (!tlbie_capable)
		return 0;

	/*
	 * There is no locking vs tlb flushing when changing this value.
	 * The tlb flushers will see one value or another, and use either
	 * tlbie or tlbiel with IPIs. In both cases the TLBs will be
	 * invalidated as expected.
	 */
	debugfs_create_bool("tlbie_enabled", 0600,
			    arch_debugfs_dir,
			    &tlbie_enabled);

	return 0;
}
arch_initcall(pgtable_debugfs_setup);