// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU does not use a hash table to store virtual to
 * physical translations (i.e. SW loaded TLBs or Book3E compliant processors;
 * this does -not- include the 603, however, which shares the implementation
 * with hash based processors)
 *
 *  -- BenH
 *
 * Copyright 2008,2009 Ben Herrenschmidt <benh@kernel.crashing.org>
 *                     IBM Corp.
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/hugetlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/code-patching.h>
#include <asm/cputhreads.h>
#include <asm/hugetlb.h>
#include <asm/paca.h>

#include <mm/mmu_decl.h>

/*
 * This struct lists the SW-supported page sizes. The hardware MMU may support
 * other sizes not listed here. The .ind field is only used on MMUs that have
 * indirect page table entries.
 */
#if defined(CONFIG_PPC_BOOK3E_MMU) || defined(CONFIG_PPC_8xx)
#ifdef CONFIG_PPC_FSL_BOOK3E
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.enc	= BOOK3E_PAGESZ_4K,
	},
	[MMU_PAGE_2M] = {
		.shift	= 21,
		.enc	= BOOK3E_PAGESZ_2M,
	},
	[MMU_PAGE_4M] = {
		.shift	= 22,
		.enc	= BOOK3E_PAGESZ_4M,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.enc	= BOOK3E_PAGESZ_16M,
	},
	[MMU_PAGE_64M] = {
		.shift	= 26,
		.enc	= BOOK3E_PAGESZ_64M,
	},
	[MMU_PAGE_256M] = {
		.shift	= 28,
		.enc	= BOOK3E_PAGESZ_256M,
	},
	[MMU_PAGE_1G] = {
		.shift	= 30,
		.enc	= BOOK3E_PAGESZ_1GB,
	},
};
#elif defined(CONFIG_PPC_8xx)
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
	},
	[MMU_PAGE_16K] = {
		.shift	= 14,
	},
	[MMU_PAGE_512K] = {
		.shift	= 19,
	},
	[MMU_PAGE_8M] = {
		.shift	= 23,
	},
};
#else
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.ind	= 20,
		.enc	= BOOK3E_PAGESZ_4K,
	},
	[MMU_PAGE_16K] = {
		.shift	= 14,
		.enc	= BOOK3E_PAGESZ_16K,
	},
	[MMU_PAGE_64K] = {
		.shift	= 16,
		.ind	= 28,
		.enc	= BOOK3E_PAGESZ_64K,
	},
	[MMU_PAGE_1M] = {
		.shift	= 20,
		.enc	= BOOK3E_PAGESZ_1M,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.ind	= 36,
		.enc	= BOOK3E_PAGESZ_16M,
	},
	[MMU_PAGE_256M] = {
		.shift	= 28,
		.enc	= BOOK3E_PAGESZ_256M,
	},
	[MMU_PAGE_1G] = {
		.shift	= 30,
		.enc	= BOOK3E_PAGESZ_1GB,
	},
};
#endif /* CONFIG_PPC_FSL_BOOK3E */

static inline int mmu_get_tsize(int psize)
{
	return mmu_psize_defs[psize].enc;
}
#else
static inline int mmu_get_tsize(int psize)
{
	/* This isn't used on !Book3E for now */
	return 0;
}
#endif /* CONFIG_PPC_BOOK3E_MMU */

/*
 * The variables below are currently only used on 64-bit Book3E
 * though this will probably be made common with other nohash
 * implementations at some point
 */
#ifdef CONFIG_PPC64

int mmu_linear_psize;		/* Page size used for the linear mapping */
int mmu_pte_psize;		/* Page size used for PTE pages */
int mmu_vmemmap_psize;		/* Page size used for the virtual mem map */
int book3e_htw_mode;		/* HW tablewalk?  Value is PPC_HTW_* */
unsigned long linear_map_top;	/* Top of linear mapping */

/*
 * Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug
 * exceptions.  This is used for bolted and e6500 TLB miss handlers which
 * do not modify this SPRG in the TLB miss code; for other TLB miss handlers,
 * this is set to zero.
 */
int extlb_level_exc;

#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_FSL_BOOK3E
/* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
DEFINE_PER_CPU(int, next_tlbcam_idx);
EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx);
#endif

/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */
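/*
 * Illustrative only (a sketch, not code from this file): generic MM code
 * typically pairs a PTE update with one of these flushes, e.g.
 *
 *	pte = ptep_get_and_clear(mm, addr, ptep);
 *	flush_tlb_page(vma, addr);
 *
 * so that a stale translation cannot outlive its PTE.
 */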

#ifndef CONFIG_PPC_8xx
/*
 * These are the base non-SMP variants of page and mm flushing
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_pid(pid);
	preempt_enable();
}
EXPORT_SYMBOL(local_flush_tlb_mm);

void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
			    int tsize, int ind)
{
	unsigned int pid;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_va(vmaddr, pid, tsize, ind);
	preempt_enable();
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	__local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			       mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(local_flush_tlb_page);
#endif

/*
 * And here are the SMP non-local implementations
 */
#ifdef CONFIG_SMP

static DEFINE_RAW_SPINLOCK(tlbivax_lock);

struct tlb_flush_param {
	unsigned long addr;
	unsigned int pid;
	unsigned int tsize;
	unsigned int ind;
};

static void do_flush_tlb_mm_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_pid(p ? p->pid : 0);
}

static void do_flush_tlb_page_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_va(p->addr, p->pid, p->tsize, p->ind);
}

/*
 * Note on invalidations and PID:
 *
 * We snapshot the PID with preempt disabled. At this point, it can still
 * change either because:
 * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
 * - we are invalidating some target that isn't currently running here
 *   and is concurrently acquiring a new PID on another CPU
 * - some other CPU is re-acquiring a lost PID for this mm
 * etc...
 *
 * However, this shouldn't be a problem as we only guarantee
 * invalidation of TLB entries present prior to this call, so we
 * don't care about the PID changing, and invalidating a stale PID
 * is generally harmless.
 */
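/*
 * Illustrative interleaving (a sketch, not a real trace):
 *
 *	CPU0: pid = mm->context.id;	(snapshots, say, PID 5)
 *	CPU1: steals context 5		(mm->context.id = MMU_NO_CONTEXT)
 *	CPU0: _tlbil_pid(5);		(flushes the now-stale PID 5 entries)
 *
 * Entries that existed before the call are still invalidated, which is
 * all we promise; flushing a recycled PID is merely extra work.
 */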

void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;
	if (!mm_is_core_local(mm)) {
		struct tlb_flush_param p = { .pid = pid };
		/* Ignores smp_processor_id() even if set. */
		smp_call_function_many(mm_cpumask(mm),
				       do_flush_tlb_mm_ipi, &p, 1);
	}
	_tlbil_pid(pid);
 no_context:
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
		      int tsize, int ind)
{
	struct cpumask *cpu_mask;
	unsigned int pid;

	/*
	 * This function as well as __local_flush_tlb_page() must only be called
	 * for user contexts.
	 */
	if (WARN_ON(!mm))
		return;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	cpu_mask = mm_cpumask(mm);
	if (!mm_is_core_local(mm)) {
		/* If broadcast tlbivax is supported, use it */
		if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
			int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
			if (lock)
				raw_spin_lock(&tlbivax_lock);
			_tlbivax_bcast(vmaddr, pid, tsize, ind);
			if (lock)
				raw_spin_unlock(&tlbivax_lock);
			goto bail;
		} else {
			struct tlb_flush_param p = {
				.pid = pid,
				.addr = vmaddr,
				.tsize = tsize,
				.ind = ind,
			};
			/* Ignores smp_processor_id() even if set in cpu_mask */
			smp_call_function_many(cpu_mask,
					       do_flush_tlb_page_ipi, &p, 1);
		}
	}
	_tlbil_va(vmaddr, pid, tsize, ind);
 bail:
	preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (vma && is_vm_hugetlb_page(vma))
		flush_hugetlb_page(vma, vmaddr);
#endif

	__flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			 mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(flush_tlb_page);

#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_47x
void __init early_init_mmu_47x(void)
{
#ifdef CONFIG_SMP
	unsigned long root = of_get_flat_dt_root();
	if (of_get_flat_dt_prop(root, "cooperative-partition", NULL))
		mmu_clear_feature(MMU_FTR_USE_TLBIVAX_BCAST);
#endif /* CONFIG_SMP */
}
#endif /* CONFIG_PPC_47x */

/*
 * Flush kernel TLB entries in the given range
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
	preempt_disable();
	smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
	_tlbil_pid(0);
	preempt_enable();
#else
	_tlbil_pid(0);
#endif
}
EXPORT_SYMBOL(flush_tlb_kernel_range);

/*
 * Currently, for range flushing, we just do a full mm flush. This should
 * be optimized based on a threshold on the size of the range, since
 * some implementations can stack multiple tlbivax instructions before a
 * tlbsync, but for now we keep it that way.
 */
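/*
 * A hypothetical sketch of such a threshold (not implemented here;
 * TLB_RANGE_FLUSH_THRESHOLD is a made-up name):
 *
 *	if (((end - start) >> PAGE_SHIFT) <= TLB_RANGE_FLUSH_THRESHOLD)
 *		for (addr = start; addr < end; addr += PAGE_SIZE)
 *			flush_tlb_page(vma, addr);
 *	else
 *		flush_tlb_mm(vma->vm_mm);
 */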
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	if (end - start == PAGE_SIZE && !(start & ~PAGE_MASK))
		flush_tlb_page(vma, start);
	else
		flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);

void tlb_flush(struct mmu_gather *tlb)
{
	flush_tlb_mm(tlb->mm);
}

/*
 * Below are functions specific to the 64-bit variant of Book3E though that
 * may change in the future
 */

#ifdef CONFIG_PPC64

/*
 * Handling of virtual linear page table (or indirect TLB entry)
 * flushing when PTE pages are freed
 */
void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
{
	int tsize = mmu_psize_defs[mmu_pte_psize].enc;

	if (book3e_htw_mode != PPC_HTW_NONE) {
		unsigned long start = address & PMD_MASK;
		unsigned long end = address + PMD_SIZE;
		unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;

		/*
		 * This isn't the most optimal; ideally we would factor out
		 * the preempt & CPU mask mucking around, or even the IPI,
		 * but it will do for now
		 */
		while (start < end) {
			__flush_tlb_page(tlb->mm, start, tsize, 1);
			start += size;
		}
	} else {
		unsigned long rmask = 0xf000000000000000ul;
		unsigned long rid = (address & rmask) | 0x1000000000000000ul;
		unsigned long vpte = address & ~rmask;

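		/*
		 * Worked example, assuming 4K pages (PAGE_SHIFT == 12): each
		 * PTE is 8 bytes, so the PTE for 'address' lives at byte
		 * offset (address >> 12) << 3 == address >> 9 inside the
		 * virtual page table; masking with ~0xffful rounds down to
		 * the 4K page of PTEs being freed, and the region id puts
		 * the result into the virtual page table address space.
		 */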
		vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
		vpte |= rid;
		__flush_tlb_page(tlb->mm, vpte, tsize, 0);
	}
}

static void setup_page_sizes(void)
{
	unsigned int tlb0cfg;
	unsigned int tlb0ps;
	unsigned int eptcfg;
	int i, psize;

#ifdef CONFIG_PPC_FSL_BOOK3E
	unsigned int mmucfg = mfspr(SPRN_MMUCFG);
	int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E);

	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
		unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
		unsigned int min_pg, max_pg;

		min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
		max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;

		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
			struct mmu_psize_def *def;
			unsigned int shift;

			def = &mmu_psize_defs[psize];
			shift = def->shift;

			if (shift == 0 || shift & 1)
				continue;

			/* adjust to be in terms of 4^shift Kb */
			shift = (shift - 10) >> 1;
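			/*
			 * e.g. 4K: shift 12 -> (12 - 10) >> 1 == 1 == 4^1 KB;
			 * 16M: shift 24 -> 7 == 4^7 KB.  Odd shifts (2M etc.)
			 * were skipped above since they aren't a power of 4
			 * in KB.
			 */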

			if ((shift >= min_pg) && (shift <= max_pg))
				def->flags |= MMU_PAGE_SIZE_DIRECT;
		}

		goto out;
	}

	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
		u32 tlb1cfg, tlb1ps;

		tlb0cfg = mfspr(SPRN_TLB0CFG);
		tlb1cfg = mfspr(SPRN_TLB1CFG);
		tlb1ps = mfspr(SPRN_TLB1PS);
		eptcfg = mfspr(SPRN_EPTCFG);

		if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
			book3e_htw_mode = PPC_HTW_E6500;

		/*
		 * We expect 4K subpage size and unrestricted indirect size.
		 * The lack of a restriction on indirect size is a Freescale
		 * extension, indicated by PSn = 0 but SPSn != 0.
		 */
		if (eptcfg != 2)
			book3e_htw_mode = PPC_HTW_NONE;

		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
			struct mmu_psize_def *def = &mmu_psize_defs[psize];

			if (!def->shift)
				continue;

			if (tlb1ps & (1U << (def->shift - 10))) {
				def->flags |= MMU_PAGE_SIZE_DIRECT;

				if (book3e_htw_mode && psize == MMU_PAGE_2M)
					def->flags |= MMU_PAGE_SIZE_INDIRECT;
			}
		}

		goto out;
	}
#endif

	tlb0cfg = mfspr(SPRN_TLB0CFG);
	tlb0ps = mfspr(SPRN_TLB0PS);
	eptcfg = mfspr(SPRN_EPTCFG);

	/* Look for supported direct sizes */
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];

		if (tlb0ps & (1U << (def->shift - 10)))
			def->flags |= MMU_PAGE_SIZE_DIRECT;
	}

	/* Indirect page sizes supported ? */
	if ((tlb0cfg & TLBnCFG_IND) == 0 ||
	    (tlb0cfg & TLBnCFG_PT) == 0)
		goto out;

	book3e_htw_mode = PPC_HTW_IBM;

	/*
	 * Now, we only deal with one IND page size for each
	 * direct size. Hopefully all implementations today are
	 * unambiguous, but we might want to be careful in the
	 * future.
	 */
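	/*
	 * The loop below assumes EPTCFG packs up to three {SPS, PS} pairs
	 * of 5 bits each starting at bit 0 (subpage size first, then the
	 * indirect page size), with a zero field meaning "not implemented".
	 */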
	for (i = 0; i < 3; i++) {
		unsigned int ps, sps;

		sps = eptcfg & 0x1f;
		eptcfg >>= 5;
		ps = eptcfg & 0x1f;
		eptcfg >>= 5;
		if (!ps || !sps)
			continue;
		for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
			struct mmu_psize_def *def = &mmu_psize_defs[psize];

			if (ps == (def->shift - 10))
				def->flags |= MMU_PAGE_SIZE_INDIRECT;
			if (sps == (def->shift - 10))
				def->ind = ps + 10;
		}
	}

out:
	/* Cleanup array and print summary */
	pr_info("MMU: Supported page sizes\n");
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];
		const char *__page_type_names[] = {
			"unsupported",
			"direct",
			"indirect",
			"direct & indirect"
		};
		if (def->flags == 0) {
			def->shift = 0;
			continue;
		}
		pr_info("  %8ld KB as %s\n", 1ul << (def->shift - 10),
			__page_type_names[def->flags & 0x3]);
	}
}

static void setup_mmu_htw(void)
{
	/*
	 * If we want to use HW tablewalk, enable it by patching the TLB miss
	 * handlers to branch to the one dedicated to it.
	 */

	switch (book3e_htw_mode) {
	case PPC_HTW_IBM:
		patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
		break;
#ifdef CONFIG_PPC_FSL_BOOK3E
	case PPC_HTW_E6500:
		extlb_level_exc = EX_TLB_SIZE;
		patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
		break;
#endif
	}
	pr_info("MMU: Book3E HW tablewalk %s\n",
		book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported");
}

/*
 * Early initialization of the MMU TLB code
 */
static void early_init_this_mmu(void)
{
	unsigned int mas4;

	/* Set MAS4 based on page table setting */
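	/*
	 * MAS4 supplies the defaults the hardware loads into the MAS
	 * registers on a TLB miss (WIMGE, TSIZE, IND, TLB select), so it
	 * must agree with how the miss handlers populate the TLB.
	 */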
	mas4 = 0x4 << MAS4_WIMGED_SHIFT;
	switch (book3e_htw_mode) {
	case PPC_HTW_E6500:
		mas4 |= MAS4_INDD;
		mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT;
		mas4 |= MAS4_TLBSELD(1);
		mmu_pte_psize = MMU_PAGE_2M;
		break;

	case PPC_HTW_IBM:
		mas4 |= MAS4_INDD;
		mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = MMU_PAGE_1M;
		break;

	case PPC_HTW_NONE:
		mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = mmu_virtual_psize;
		break;
	}
	mtspr(SPRN_MAS4, mas4);

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned int num_cams;
		bool map = true;

		/* use a quarter of the TLBCAM for bolted linear map */
		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
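		/*
		 * e.g. with a 64-entry TLB1 this reserves 16 CAM entries
		 * for the bolted linear mapping.
		 */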

		/*
		 * Only do the mapping once per core, or else the
		 * transient mapping would cause problems.
		 */
#ifdef CONFIG_SMP
		if (hweight32(get_tensr()) > 1)
			map = false;
#endif

		if (map)
			linear_map_top = map_mem_in_cams(linear_map_top,
							 num_cams, false, true);
	}
#endif

	/*
	 * A sync won't hurt us after mucking around with
	 * the MMU configuration
	 */
	mb();
}

static void __init early_init_mmu_global(void)
{
	/*
	 * XXX This will have to be decided at runtime, but right
	 * now our boot and TLB miss code hard wires it. Ideally
	 * we should find out a suitable page size and patch the
	 * TLB miss code (either that or use the PACA to store
	 * the value we want)
	 */
	mmu_linear_psize = MMU_PAGE_1G;

	/*
	 * XXX This should be decided at runtime based on supported
	 * page sizes in the TLB, but for now let's assume 16M is
	 * always there and a good fit (which it probably is)
	 *
	 * Freescale booke only supports 4K pages in TLB0, so use that.
	 */
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		mmu_vmemmap_psize = MMU_PAGE_4K;
	else
		mmu_vmemmap_psize = MMU_PAGE_16M;

	/*
	 * XXX This code only checks for TLB 0 capabilities and doesn't
	 * check what page size combos are supported by the HW. It
	 * also doesn't handle the case where a separate array holds
	 * the IND entries from the array loaded by the PT.
	 */
	/* Look for supported page sizes */
	setup_page_sizes();

	/* Look for HW tablewalk support */
	setup_mmu_htw();

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		if (book3e_htw_mode == PPC_HTW_NONE) {
			extlb_level_exc = EX_TLB_SIZE;
			patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
			patch_exception(0x1e0,
				exc_instruction_tlb_miss_bolted_book3e);
		}
	}
#endif

	/*
	 * Set the global containing the top of the linear mapping
	 * for use by the TLB miss code
	 */
	linear_map_top = memblock_end_of_DRAM();

	ioremap_bot = IOREMAP_BASE;
}

static void __init early_mmu_set_memory_limit(void)
{
#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		/*
		 * Limit memory so we don't have linear faults.
		 * Unlike memblock_set_current_limit, which limits
		 * memory available during early boot, this permanently
		 * reduces the memory available to Linux. We need to
		 * do this because highmem is not supported on 64-bit.
		 */
		memblock_enforce_memory_limit(linear_map_top);
	}
#endif

	memblock_set_current_limit(linear_map_top);
}

/* boot cpu only */
void __init early_init_mmu(void)
{
	early_init_mmu_global();
	early_init_this_mmu();
	early_mmu_set_memory_limit();
}

void early_init_mmu_secondary(void)
{
	early_init_this_mmu();
}

void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/*
	 * On non-FSL Embedded 64-bit, we adjust the RMA size to match
	 * the bolted TLB entry. We know for now that only 1G
	 * entries are supported though that may eventually
	 * change.
	 *
	 * On FSL Embedded 64-bit, usually all RAM is bolted, but with
	 * unusual memory sizes it's possible for some RAM to not be mapped
	 * (such RAM is not used at all by Linux, since we don't support
	 * highmem on 64-bit).  We limit ppc64_rma_size to what would be
	 * mappable if this memblock is the only one.  Additional memblocks
	 * can only increase, not decrease, the amount that ends up getting
	 * mapped.  We still limit max to 1G even if we'll eventually map
	 * more.  This is due to what the early init code is set up to do.
	 *
	 * We crop it to the size of the first MEMBLOCK to
	 * avoid going over total available memory just in case...
	 */
#ifdef CONFIG_PPC_FSL_BOOK3E
	if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned long linear_sz;
		unsigned int num_cams;

		/* use a quarter of the TLBCAM for bolted linear map */
		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;

		linear_sz = map_mem_in_cams(first_memblock_size, num_cams,
					    true, true);

		ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
	} else
#endif
		ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);

	/* Finally limit subsequent allocations */
	memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
}
#else /* ! CONFIG_PPC64 */
void __init early_init_mmu(void)
{
#ifdef CONFIG_PPC_47x
	early_init_mmu_47x();
#endif

#ifdef CONFIG_PPC_MM_SLICES
	mm_ctx_set_slb_addr_limit(&init_mm.context, SLB_ADDR_LIMIT_DEFAULT);
#endif
}
#endif /* CONFIG_PPC64 */