// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains common routines for freeing page tables,
 * along with common page table handling code.
 *
 * Derived from arch/powerpc/mm/tlb_64.c:
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/hugetlb.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/hugetlb.h>
#include <asm/pte-walk.h>

#ifdef CONFIG_PPC64
#define PGD_ALIGN (sizeof(pgd_t) * MAX_PTRS_PER_PGD)
#else
#define PGD_ALIGN PAGE_SIZE
#endif

pgd_t swapper_pg_dir[MAX_PTRS_PER_PGD] __section(".bss..page_aligned") __aligned(PGD_ALIGN);

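/*
 * Illustrative arithmetic, with assumed values: if pgd_t is 8 bytes and
 * MAX_PTRS_PER_PGD were 4096, PGD_ALIGN would be 32K, so on 64-bit the
 * kernel page directory above is aligned to its own full size:
 *
 *	BUILD_BUG_ON(sizeof(swapper_pg_dir) != PGD_ALIGN);	// 64-bit only
 *
 * On 32-bit it only needs PAGE_SIZE alignment.
 */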
static inline int is_exec_fault(void)
{
	/* 0x400 is the instruction storage interrupt (ISI) vector */
	return current->thread.regs && TRAP(current->thread.regs) == 0x400;
}

/* We only try to do i/d cache coherency on stuff that looks like
 * reasonably "normal" PTEs. We currently require a PTE to be present
 * and we avoid _PAGE_SPECIAL and cache-inhibited PTEs. We also only do
 * that on userspace PTEs.
 */
static inline int pte_looks_normal(pte_t pte)
{
	if (pte_present(pte) && !pte_special(pte)) {
		if (pte_ci(pte))
			return 0;
		if (pte_user(pte))
			return 1;
	}
	return 0;
}

static struct page *maybe_pte_to_page(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page;

	if (unlikely(!pfn_valid(pfn)))
		return NULL;
	page = pfn_to_page(pfn);
	if (PageReserved(page))
		return NULL;
	return page;
}

#ifdef CONFIG_PPC_BOOK3S

/* Server-style MMU handles coherency when hashing if HW exec permission
 * is supported per page (currently 64-bit only). If not, then we always
 * flush the cache for valid PTEs in set_pte. Embedded CPUs without HW exec
 * support fall into the same category.
 */

static pte_t set_pte_filter_hash(pte_t pte)
{
	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
	if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
				       cpu_has_feature(CPU_FTR_NOEXECUTE))) {
		struct page *pg = maybe_pte_to_page(pte);

		if (!pg)
			return pte;
		if (!test_bit(PG_dcache_clean, &pg->flags)) {
			flush_dcache_icache_page(pg);
			set_bit(PG_dcache_clean, &pg->flags);
		}
	}
	return pte;
}

#else /* CONFIG_PPC_BOOK3S */

static pte_t set_pte_filter_hash(pte_t pte) { return pte; }

#endif /* CONFIG_PPC_BOOK3S */

/* Embedded type MMU with HW exec support. This is a bit more complicated
 * as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC so
 * instead we "filter out" the exec permission for non-clean pages.
 */
static inline pte_t set_pte_filter(pte_t pte)
{
	struct page *pg;

	if (radix_enabled())
		return pte;

	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return set_pte_filter_hash(pte);

	/* No exec permission in the first place, move on */
	if (!pte_exec(pte) || !pte_looks_normal(pte))
		return pte;

	/* If you set _PAGE_EXEC on weird pages you're on your own */
	pg = maybe_pte_to_page(pte);
	if (unlikely(!pg))
		return pte;

	/* If the page is clean, we move on */
	if (test_bit(PG_dcache_clean, &pg->flags))
		return pte;

	/* If it's an exec fault, we flush the cache and make it clean */
	if (is_exec_fault()) {
		flush_dcache_icache_page(pg);
		set_bit(PG_dcache_clean, &pg->flags);
		return pte;
	}

	/* Else, we filter out _PAGE_EXEC */
	return pte_exprotect(pte);
}
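/*
 * A sketch of the resulting lazy-exec flow (illustrative, not a callable
 * sequence): the first data access installs a PTE with _PAGE_EXEC
 * filtered out, and a later instruction fetch recovers it:
 *
 *	set_pte_at(mm, addr, ptep, pte);	// exec stripped, I$ may be stale
 *	...
 *	// instruction fetch faults at 0x400 (ISI), minor fault path runs:
 *	ptep_set_access_flags(vma, addr, ptep, entry, 0);
 *						// flushes I$/D$, restores exec
 *
 * set_access_flags_filter() below is what does the flush and pte_mkexec().
 */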

static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
				     int dirty)
{
	struct page *pg;

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		return pte;

	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return pte;

	/* So here, we only care about exec faults, as we use them
	 * to recover lost _PAGE_EXEC and perform I$/D$ coherency
	 * if necessary. Also if _PAGE_EXEC is already set, same deal,
	 * we just bail out.
	 */
	if (dirty || pte_exec(pte) || !is_exec_fault())
		return pte;

#ifdef CONFIG_DEBUG_VM
	/* So this is an exec fault, _PAGE_EXEC is not set. If it was
	 * an error we would have bailed out earlier in do_page_fault()
	 * but let's make sure of it.
	 */
	if (WARN_ON(!(vma->vm_flags & VM_EXEC)))
		return pte;
#endif /* CONFIG_DEBUG_VM */

	/* If you set _PAGE_EXEC on weird pages you're on your own */
	pg = maybe_pte_to_page(pte);
	if (unlikely(!pg))
		goto bail;

	/* If the page is already clean, we move on */
	if (test_bit(PG_dcache_clean, &pg->flags))
		goto bail;

	/* Clean the page and set PG_dcache_clean */
	flush_dcache_icache_page(pg);
	set_bit(PG_dcache_clean, &pg->flags);

bail:
	return pte_mkexec(pte);
}

/*
 * set_pte stores a linux PTE into the linux page table.
 */
void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		pte_t pte)
{
	/*
	 * Make sure the hardware valid bit is not set. We don't do
	 * a TLB flush for this update.
	 */
	VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));

	/* Note: mm->context.id might not yet have been assigned as
	 * this context might not have been activated yet when this
	 * is called.
	 */
	pte = set_pte_filter(pte);

	/* Perform the setting of the PTE */
	__set_pte_at(mm, addr, ptep, pte, 0);
}
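/*
 * Minimal usage sketch (illustrative only): a caller that already holds
 * the PTE lock builds a PTE from a page and protection bits, then
 * installs it; the exec filtering happens transparently inside:
 *
 *	pte_t entry = mk_pte(page, vma->vm_page_prot);
 *	entry = pte_mkyoung(entry);
 *	set_pte_at(vma->vm_mm, address, ptep, entry);
 */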

/*
 * This is called when relaxing access to a PTE. It's also called in the page
 * fault path when we don't hit any of the major fault cases, i.e., a minor
 * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... The generic code will have
 * handled those two for us; we additionally deal with missing execute
 * permission here on some processors.
 */
int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pte_t *ptep, pte_t entry, int dirty)
{
	int changed;

	entry = set_access_flags_filter(entry, vma, dirty);
	changed = !pte_same(*(ptep), entry);
	if (changed) {
		assert_pte_locked(vma->vm_mm, address);
		__ptep_set_access_flags(vma, ptep, entry,
					address, mmu_virtual_psize);
	}
	return changed;
}
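/*
 * Sketch of how the generic mm code reaches this (illustrative): on a
 * minor fault, handle_pte_fault() does roughly
 *
 *	entry = pte_mkyoung(entry);
 *	if (ptep_set_access_flags(vma, address, ptep, entry, write))
 *		update_mmu_cache(vma, address, ptep);
 *
 * so a "changed" return is what triggers the MMU/hash update.
 */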

#ifdef CONFIG_HUGETLB_PAGE
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
	/*
	 * The "return 1" forces a call of update_mmu_cache, which will write a
	 * TLB entry. Without this, platforms that don't do a write of the TLB
	 * entry in the TLB miss handler asm will fault ad infinitum.
	 */
	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	return 1;
#else
	int changed, psize;

	pte = set_access_flags_filter(pte, vma, dirty);
	changed = !pte_same(*(ptep), pte);
	if (changed) {

#ifdef CONFIG_PPC_BOOK3S_64
		struct hstate *h = hstate_vma(vma);

		psize = hstate_get_psize(h);
#ifdef CONFIG_DEBUG_VM
		assert_spin_locked(huge_pte_lockptr(h, vma->vm_mm, ptep));
#endif

#else
		/*
		 * Not used on non-book3s64 platforms.
		 * 8xx compares it with mmu_virtual_psize to
		 * know if it is a huge page or not.
		 */
		psize = MMU_PAGE_COUNT;
#endif
		__ptep_set_access_flags(vma, ptep, pte, addr, psize);
	}
	return changed;
#endif
}

#if defined(CONFIG_PPC_8xx)
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
{
	pmd_t *pmd = pmd_off(mm, addr);
	pte_basic_t val;
	pte_basic_t *entry = (pte_basic_t *)ptep;
	int num, i;

	/*
	 * Make sure the hardware valid bit is not set. We don't do
	 * a TLB flush for this update.
	 */
	VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));

	pte = set_pte_filter(pte);

	val = pte_val(pte);

	num = number_of_cells_per_pte(pmd, val, 1);

	for (i = 0; i < num; i++, entry++, val += SZ_4K)
		*entry = val;
}
#endif
#endif /* CONFIG_HUGETLB_PAGE */
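/*
 * Worked example for the 8xx path above, under assumed values: for a 512K
 * huge page built out of 4K PTE cells, number_of_cells_per_pte() would
 * return 128, and the loop replicates the PTE across 128 consecutive
 * cells, bumping the physical address by 4K each time:
 *
 *	entry[0] = val;			// covers addr
 *	entry[1] = val + SZ_4K;		// covers addr + 4K
 *	...
 *	entry[127] = val + 127 * SZ_4K;	// covers addr + 508K
 */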

#ifdef CONFIG_DEBUG_VM
void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (mm == &init_mm)
		return;
	pgd = mm->pgd + pgd_index(addr);
	BUG_ON(pgd_none(*pgd));
	p4d = p4d_offset(pgd, addr);
	BUG_ON(p4d_none(*p4d));
	pud = pud_offset(p4d, addr);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, addr);
	/*
	 * To collapse normal pages into a hugepage, khugepaged first sets
	 * the pmd to none to force page fault/gup to take mmap_lock. After
	 * the pmd is set to none, we do a pte_clear which does this assertion,
	 * so if we find the pmd none, return.
	 */
	if (pmd_none(*pmd))
		return;
	BUG_ON(!pmd_present(*pmd));
	assert_spin_locked(pte_lockptr(mm, pmd));
}
#endif /* CONFIG_DEBUG_VM */

unsigned long vmalloc_to_phys(void *va)
{
	unsigned long pfn = vmalloc_to_pfn(va);

	BUG_ON(!pfn);
	return __pa(pfn_to_kaddr(pfn)) + offset_in_page(va);
}
EXPORT_SYMBOL_GPL(vmalloc_to_phys);
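/*
 * Usage sketch (illustrative): callers that allocated a buffer with
 * vmalloc() and need its physical address, e.g. to hand it to firmware
 * or a device, can do:
 *
 *	void *buf = vmalloc(PAGE_SIZE);
 *	unsigned long phys = vmalloc_to_phys(buf);
 *
 * This is only meaningful one page at a time, since vmalloc memory is
 * virtually but not physically contiguous.
 */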

/*
 * We have 4 cases for pgds and pmds:
 * (1) invalid (all zeroes)
 * (2) pointer to next table, as normal; bottom 6 bits == 0
 * (3) leaf pte for huge page, _PAGE_PTE set
 * (4) hugepd pointer, _PAGE_PTE = 0 and bits [2..6] indicate size of table
 *
 * So long as we atomically load page table pointers we are safe against
 * teardown, and we can follow the address down to the page and take a ref
 * on it. This function needs to be called with interrupts disabled. We use
 * this variant when we have MSR[EE] = 0 but the paca->irq_soft_mask is
 * IRQS_ENABLED.
 */
pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
			bool *is_thp, unsigned *hpage_shift)
{
	pgd_t *pgdp;
	p4d_t p4d, *p4dp;
	pud_t pud, *pudp;
	pmd_t pmd, *pmdp;
	pte_t *ret_pte;
	hugepd_t *hpdp = NULL;
	unsigned pdshift;

	if (hpage_shift)
		*hpage_shift = 0;

	if (is_thp)
		*is_thp = false;

	/*
	 * Always operate on the local stack value. This makes sure the
	 * value doesn't get updated by a parallel THP split/collapse,
	 * page fault or page unmap. The returned pte_t * is still not
	 * stable, so it should be checked there for the above conditions.
	 * The top level is an exception because it is folded into p4d.
	 */
	pgdp = pgdir + pgd_index(ea);
	p4dp = p4d_offset(pgdp, ea);
	p4d = READ_ONCE(*p4dp);
	pdshift = P4D_SHIFT;

	if (p4d_none(p4d))
		return NULL;

	if (p4d_is_leaf(p4d)) {
		ret_pte = (pte_t *)p4dp;
		goto out;
	}

	if (is_hugepd(__hugepd(p4d_val(p4d)))) {
		hpdp = (hugepd_t *)&p4d;
		goto out_huge;
	}

	/*
	 * Even if we end up with an unmap, the pgtable will not
	 * be freed, because we do an RCU free and here we are
	 * irq disabled.
	 */
	pdshift = PUD_SHIFT;
	pudp = pud_offset(&p4d, ea);
	pud = READ_ONCE(*pudp);

	if (pud_none(pud))
		return NULL;

	if (pud_is_leaf(pud)) {
		ret_pte = (pte_t *)pudp;
		goto out;
	}

	if (is_hugepd(__hugepd(pud_val(pud)))) {
		hpdp = (hugepd_t *)&pud;
		goto out_huge;
	}

	pdshift = PMD_SHIFT;
	pmdp = pmd_offset(&pud, ea);
	pmd = READ_ONCE(*pmdp);

	/*
	 * A hugepage collapse is captured by this condition, see
	 * pmdp_collapse_flush.
	 */
	if (pmd_none(pmd))
		return NULL;

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * A hugepage split is captured by this condition, see
	 * pmdp_invalidate.
	 *
	 * Huge page modification can be caught here too.
	 */
	if (pmd_is_serializing(pmd))
		return NULL;
#endif

	if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) {
		if (is_thp)
			*is_thp = true;
		ret_pte = (pte_t *)pmdp;
		goto out;
	}

	if (pmd_is_leaf(pmd)) {
		ret_pte = (pte_t *)pmdp;
		goto out;
	}

	if (is_hugepd(__hugepd(pmd_val(pmd)))) {
		hpdp = (hugepd_t *)&pmd;
		goto out_huge;
	}

	return pte_offset_kernel(&pmd, ea);

out_huge:
	if (!hpdp)
		return NULL;

	ret_pte = hugepte_offset(*hpdp, ea, pdshift);
	pdshift = hugepd_shift(*hpdp);
out:
	if (hpage_shift)
		*hpage_shift = pdshift;
	return ret_pte;
}
EXPORT_SYMBOL_GPL(__find_linux_pte);
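/*
 * Usage sketch (illustrative; the caller shown is hypothetical): the walk
 * above is only safe while interrupts are disabled, since that is what
 * holds off the RCU free of the page tables:
 *
 *	bool is_thp;
 *	unsigned int shift;
 *	pte_t *ptep;
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	ptep = __find_linux_pte(mm->pgd, ea, &is_thp, &shift);
 *	if (ptep)
 *		... use the PTE while irqs stay off, rechecking it ...
 *	local_irq_restore(flags);
 */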