// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Page table handling routines for radix page table.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 */

#define pr_fmt(fmt) "radix-mmu: " fmt

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/string_helpers.h>
#include <linux/memory.h>

#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/powernv.h>
#include <asm/sections.h>
#include <asm/smp.h>
#include <asm/trace.h>
#include <asm/uaccess.h>
#include <asm/ultravisor.h>

#include <trace/events/thp.h>

unsigned int mmu_pid_bits;
unsigned int mmu_base_pid;
unsigned long radix_mem_block_size __ro_after_init;

static __ref void *early_alloc_pgtable(unsigned long size, int nid,
			unsigned long region_start, unsigned long region_end)
{
	phys_addr_t min_addr = MEMBLOCK_LOW_LIMIT;
	phys_addr_t max_addr = MEMBLOCK_ALLOC_ANYWHERE;
	void *ptr;

	if (region_start)
		min_addr = region_start;
	if (region_end)
		max_addr = region_end;

	ptr = memblock_alloc_try_nid(size, size, min_addr, max_addr, nid);

	if (!ptr)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa max_addr=%pa\n",
		      __func__, size, size, nid, &min_addr, &max_addr);

	return ptr;
}

/*
 * When allocating pud or pmd pointers, we allocate a complete page
 * of PAGE_SIZE rather than PUD_TABLE_SIZE or PMD_TABLE_SIZE. This
 * is to ensure that the page obtained from the memblock allocator
 * can be completely used as a page table page and can be freed
 * correctly when the page table entries are removed.
 */
static int early_map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size,
			  int nid,
			  unsigned long region_start, unsigned long region_end)
{
	unsigned long pfn = pa >> PAGE_SHIFT;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(ea);
	p4dp = p4d_offset(pgdp, ea);
	if (p4d_none(*p4dp)) {
		pudp = early_alloc_pgtable(PAGE_SIZE, nid,
					   region_start, region_end);
		p4d_populate(&init_mm, p4dp, pudp);
	}
	pudp = pud_offset(p4dp, ea);
	if (map_page_size == PUD_SIZE) {
		ptep = (pte_t *)pudp;
		goto set_the_pte;
	}
	if (pud_none(*pudp)) {
		pmdp = early_alloc_pgtable(PAGE_SIZE, nid, region_start,
					   region_end);
		pud_populate(&init_mm, pudp, pmdp);
	}
	pmdp = pmd_offset(pudp, ea);
	if (map_page_size == PMD_SIZE) {
		ptep = pmdp_ptep(pmdp);
		goto set_the_pte;
	}
	if (!pmd_present(*pmdp)) {
		ptep = early_alloc_pgtable(PAGE_SIZE, nid,
					   region_start, region_end);
		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}
	ptep = pte_offset_kernel(pmdp, ea);

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
	asm volatile("ptesync": : :"memory");
	return 0;
}

/*
 * nid, region_start, and region_end are hints to try to place the page
 * table memory in the same node or region.
 */
static int __map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size,
			  int nid,
			  unsigned long region_start, unsigned long region_end)
{
	unsigned long pfn = pa >> PAGE_SHIFT;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	/*
	 * Make sure task size is correct as per the max addr
	 */
	BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);

#ifdef CONFIG_PPC_64K_PAGES
	BUILD_BUG_ON(RADIX_KERN_MAP_SIZE != (1UL << MAX_EA_BITS_PER_CONTEXT));
#endif

	if (unlikely(!slab_is_available()))
		return early_map_kernel_page(ea, pa, flags, map_page_size,
						nid, region_start, region_end);

	/*
	 * Should make page table allocation functions be able to take a
	 * node, so we can place kernel page tables on the right nodes after
	 * boot.
	 */
	pgdp = pgd_offset_k(ea);
	p4dp = p4d_offset(pgdp, ea);
	pudp = pud_alloc(&init_mm, p4dp, ea);
	if (!pudp)
		return -ENOMEM;
	if (map_page_size == PUD_SIZE) {
		ptep = (pte_t *)pudp;
		goto set_the_pte;
	}
	pmdp = pmd_alloc(&init_mm, pudp, ea);
	if (!pmdp)
		return -ENOMEM;
	if (map_page_size == PMD_SIZE) {
		ptep = pmdp_ptep(pmdp);
		goto set_the_pte;
	}
	ptep = pte_alloc_kernel(pmdp, ea);
	if (!ptep)
		return -ENOMEM;

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
	asm volatile("ptesync": : :"memory");
	return 0;
}

int radix__map_kernel_page(unsigned long ea, unsigned long pa,
			 pgprot_t flags,
			 unsigned int map_page_size)
{
	return __map_kernel_page(ea, pa, flags, map_page_size, -1, 0, 0);
}

#ifdef CONFIG_STRICT_KERNEL_RWX
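/*
 * Walk the kernel linear mapping page by page, clear the given permission
 * bits from each leaf PTE (which may sit at the PUD, PMD or PTE level), and
 * flush the range from the TLB. Used below to mark rodata read-only and
 * initmem non-executable.
 */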
static void radix__change_memory_range(unsigned long start, unsigned long end,
				       unsigned long clear)
{
	unsigned long idx;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = PAGE_ALIGN(end); // aligns up

	pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n",
		 start, end, clear);

	for (idx = start; idx < end; idx += PAGE_SIZE) {
		pgdp = pgd_offset_k(idx);
		p4dp = p4d_offset(pgdp, idx);
		pudp = pud_alloc(&init_mm, p4dp, idx);
		if (!pudp)
			continue;
		if (pud_is_leaf(*pudp)) {
			ptep = (pte_t *)pudp;
			goto update_the_pte;
		}
		pmdp = pmd_alloc(&init_mm, pudp, idx);
		if (!pmdp)
			continue;
		if (pmd_is_leaf(*pmdp)) {
			ptep = pmdp_ptep(pmdp);
			goto update_the_pte;
		}
		ptep = pte_alloc_kernel(pmdp, idx);
		if (!ptep)
			continue;
update_the_pte:
		radix__pte_update(&init_mm, idx, ptep, clear, 0, 0);
	}

	radix__flush_tlb_kernel_range(start, end);
}

void radix__mark_rodata_ro(void)
{
	unsigned long start, end;

	start = (unsigned long)_stext;
	end = (unsigned long)__init_begin;

	radix__change_memory_range(start, end, _PAGE_WRITE);
}

void radix__mark_initmem_nx(void)
{
	unsigned long start = (unsigned long)__init_begin;
	unsigned long end = (unsigned long)__init_end;

	radix__change_memory_range(start, end, _PAGE_EXEC);
}
#endif /* CONFIG_STRICT_KERNEL_RWX */

static inline void __meminit
print_mapping(unsigned long start, unsigned long end, unsigned long size, bool exec)
{
	char buf[10];

	if (end <= start)
		return;

	string_get_size(size, 1, STRING_UNITS_2, buf, sizeof(buf));

	pr_info("Mapped 0x%016lx-0x%016lx with %s pages%s\n", start, end, buf,
		exec ? " (exec)" : "");
}

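/*
 * With STRICT_KERNEL_RWX, don't let a large-page mapping cross __init_begin,
 * so that the kernel text/rodata region ending there can later have its
 * permissions changed without affecting the memory that follows it.
 */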
static unsigned long next_boundary(unsigned long addr, unsigned long end)
{
#ifdef CONFIG_STRICT_KERNEL_RWX
	if (addr < __pa_symbol(__init_begin))
		return __pa_symbol(__init_begin);
#endif
	return end;
}

static int __meminit create_physical_mapping(unsigned long start,
					     unsigned long end,
					     unsigned long max_mapping_size,
					     int nid, pgprot_t _prot)
{
	unsigned long vaddr, addr, mapping_size = 0;
	bool prev_exec, exec = false;
	pgprot_t prot;
	int psize;

	start = ALIGN(start, PAGE_SIZE);
	end = ALIGN_DOWN(end, PAGE_SIZE);
	for (addr = start; addr < end; addr += mapping_size) {
		unsigned long gap, previous_size;
		int rc;

		gap = next_boundary(addr, end) - addr;
		if (gap > max_mapping_size)
			gap = max_mapping_size;
		previous_size = mapping_size;
		prev_exec = exec;

		if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
		    mmu_psize_defs[MMU_PAGE_1G].shift) {
			mapping_size = PUD_SIZE;
			psize = MMU_PAGE_1G;
		} else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
			   mmu_psize_defs[MMU_PAGE_2M].shift) {
			mapping_size = PMD_SIZE;
			psize = MMU_PAGE_2M;
		} else {
			mapping_size = PAGE_SIZE;
			psize = mmu_virtual_psize;
		}

		vaddr = (unsigned long)__va(addr);

		if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
		    overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size)) {
			prot = PAGE_KERNEL_X;
			exec = true;
		} else {
			prot = _prot;
			exec = false;
		}

		if (mapping_size != previous_size || exec != prev_exec) {
			print_mapping(start, addr, previous_size, prev_exec);
			start = addr;
		}

		rc = __map_kernel_page(vaddr, addr, prot, mapping_size, nid, start, end);
		if (rc)
			return rc;

		update_page_count(psize, 1);
	}

	print_mapping(start, addr, mapping_size, exec);
	return 0;
}

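/*
 * Set up the radix kernel page tables: create the linear mapping for all
 * memblock memory, size the PID space, and allocate and fill the process
 * table for the host.
 */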
static void __init radix_init_pgtable(void)
{
	unsigned long rts_field;
	phys_addr_t start, end;
	u64 i;

	/* We don't support slb for radix */
	mmu_slb_size = 0;

	/*
	 * Create the linear mapping
	 */
	for_each_mem_range(i, &start, &end) {
		/*
		 * The memblock allocator is up at this point, so the
		 * page tables will be allocated within the range. No
		 * need for a node (which we don't have yet).
		 */

		if (end >= RADIX_VMALLOC_START) {
			pr_warn("Outside the supported range\n");
			continue;
		}

		WARN_ON(create_physical_mapping(start, end,
						radix_mem_block_size,
						-1, PAGE_KERNEL));
	}

	/* Find out how many PID bits are supported */
	if (!cpu_has_feature(CPU_FTR_HVMODE) &&
			cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
		/*
		 * Older versions of KVM on these machines prefer if the
		 * guest only uses the low 19 PID bits.
		 */
		if (!mmu_pid_bits)
			mmu_pid_bits = 19;
	} else {
		if (!mmu_pid_bits)
			mmu_pid_bits = 20;
	}
	mmu_base_pid = 1;

	/*
	 * Allocate Partition table and process table for the
	 * host.
	 */
	BUG_ON(PRTB_SIZE_SHIFT > 36);
	process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT, -1, 0, 0);
	/*
	 * Fill in the process table.
	 */
	rts_field = radix__get_tree_size();
	process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);

	/*
	 * The init_mm context is given the first available (non-zero) PID,
	 * which is the "guard PID" and contains no page table. PIDR should
	 * never be set to zero because that duplicates the kernel address
	 * space at the 0x0... offset (quadrant 0)!
	 *
	 * An arbitrary PID that may later be allocated by the PID allocator
	 * for userspace processes must not be used either, because that
	 * would cause stale user mappings for that PID on CPUs outside of
	 * the TLB invalidation scheme (because it won't be in mm_cpumask).
	 *
	 * So permanently carve out one PID for the purpose of a guard PID.
	 */
	init_mm.context.id = mmu_base_pid;
	mmu_base_pid++;
}

static void __init radix_init_partition_table(void)
{
	unsigned long rts_field, dw0, dw1;

	mmu_partition_table_init();
	rts_field = radix__get_tree_size();
	dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
	dw1 = __pa(process_tb) | (PRTB_SIZE_SHIFT - 12) | PATB_GR;
	mmu_partition_table_set_entry(0, dw0, dw1, false);

	pr_info("Initializing Radix MMU\n");
}

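/* Convert a radix page-size shift from the device tree to an MMU_PAGE_* index. */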
static int __init get_idx_from_shift(unsigned int shift)
{
	int idx = -1;

	switch (shift) {
	case 0xc:
		idx = MMU_PAGE_4K;
		break;
	case 0x10:
		idx = MMU_PAGE_64K;
		break;
	case 0x15:
		idx = MMU_PAGE_2M;
		break;
	case 0x1e:
		idx = MMU_PAGE_1G;
		break;
	}
	return idx;
}

static int __init radix_dt_scan_page_sizes(unsigned long node,
					   const char *uname, int depth,
					   void *data)
{
	int size = 0;
	int shift, idx;
	unsigned int ap;
	const __be32 *prop;
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	/* Find MMU PID size */
	prop = of_get_flat_dt_prop(node, "ibm,mmu-pid-bits", &size);
	if (prop && size == 4)
		mmu_pid_bits = be32_to_cpup(prop);

	/* Grab page size encodings */
	prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
	if (!prop)
		return 0;

	pr_info("Page sizes from device-tree:\n");
	for (; size >= 4; size -= 4, ++prop) {

		struct mmu_psize_def *def;

		/* top 3 bits are the AP encoding */
		shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
		ap = be32_to_cpu(prop[0]) >> 29;
		pr_info("Page size shift = %d AP=0x%x\n", shift, ap);

		idx = get_idx_from_shift(shift);
		if (idx < 0)
			continue;

		def = &mmu_psize_defs[idx];
		def->shift = shift;
		def->ap = ap;
		def->h_rpt_pgsize = psize_to_rpti_pgsize(idx);
	}

	/* needed ? */
	cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
	return 1;
}

#ifdef CONFIG_MEMORY_HOTPLUG
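/*
 * Read the LMB size from the ibm,dynamic-reconfiguration-memory node of the
 * flattened device tree; fall back to MIN_MEMORY_BLOCK_SIZE if it is absent.
 */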
static int __init probe_memory_block_size(unsigned long node, const char *uname, int
					  depth, void *data)
{
	unsigned long *mem_block_size = (unsigned long *)data;
	const __be32 *prop;
	int len;

	if (depth != 1)
		return 0;

	if (strcmp(uname, "ibm,dynamic-reconfiguration-memory"))
		return 0;

	prop = of_get_flat_dt_prop(node, "ibm,lmb-size", &len);

	if (!prop || len < dt_root_size_cells * sizeof(__be32))
		/*
		 * Nothing in the device tree
		 */
		*mem_block_size = MIN_MEMORY_BLOCK_SIZE;
	else
		*mem_block_size = of_read_number(prop, dt_root_size_cells);
	return 1;
}

static unsigned long radix_memory_block_size(void)
{
	unsigned long mem_block_size = MIN_MEMORY_BLOCK_SIZE;

	/*
	 * OPAL firmware feature is set by now. Hence we are ok
	 * to test OPAL feature.
	 */
	if (firmware_has_feature(FW_FEATURE_OPAL))
		mem_block_size = 1UL * 1024 * 1024 * 1024;
	else
		of_scan_flat_dt(probe_memory_block_size, &mem_block_size);

	return mem_block_size;
}

#else /* CONFIG_MEMORY_HOTPLUG */

static unsigned long radix_memory_block_size(void)
{
	return 1UL * 1024 * 1024 * 1024;
}

#endif /* CONFIG_MEMORY_HOTPLUG */

void __init radix__early_init_devtree(void)
{
	int rc;

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
	if (!rc) {
		/*
		 * No page size details found in device tree.
		 * Let's assume we have 4K and 64K page support
		 */
		mmu_psize_defs[MMU_PAGE_4K].shift = 12;
		mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;
		mmu_psize_defs[MMU_PAGE_4K].h_rpt_pgsize =
			psize_to_rpti_pgsize(MMU_PAGE_4K);

		mmu_psize_defs[MMU_PAGE_64K].shift = 16;
		mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
		mmu_psize_defs[MMU_PAGE_64K].h_rpt_pgsize =
			psize_to_rpti_pgsize(MMU_PAGE_64K);
	}

	/*
	 * Max mapping size used when mapping pages. We don't use
	 * ppc_md.memory_block_size() here because this gets called
	 * early and we don't have the machine probe called yet. Also
	 * the pseries implementation only checks for ibm,lmb-size.
	 * All hypervisors supporting radix do expose that device
	 * tree node.
	 */
	radix_mem_block_size = radix_memory_block_size();
	return;
}

static void radix_init_amor(void)
{
	/*
	 * In HV mode, we init AMOR (Authority Mask Override Register) so that
	 * the hypervisor and guest can setup IAMR (Instruction Authority Mask
	 * Register), enable key 0 and set it to 1.
	 *
	 * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
	 */
	mtspr(SPRN_AMOR, (3ul << 62));
}

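/*
 * Boot-time MMU setup for radix: choose the kernel and vmemmap page sizes,
 * set the page table geometry and address-space layout, build the kernel
 * page tables, and initialise the partition/process tables (or register with
 * the hypervisor when running as a guest).
 */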
void __init radix__early_init_mmu(void)
{
	unsigned long lpcr;

#ifdef CONFIG_PPC_64K_PAGES
	/* PAGE_SIZE mappings */
	mmu_virtual_psize = MMU_PAGE_64K;
#else
	mmu_virtual_psize = MMU_PAGE_4K;
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* vmemmap mapping */
	if (mmu_psize_defs[MMU_PAGE_2M].shift) {
		/*
		 * map vmemmap using 2M if available
		 */
		mmu_vmemmap_psize = MMU_PAGE_2M;
	} else
		mmu_vmemmap_psize = mmu_virtual_psize;
#endif
	/*
	 * initialize page table size
	 */
	__pte_index_size = RADIX_PTE_INDEX_SIZE;
	__pmd_index_size = RADIX_PMD_INDEX_SIZE;
	__pud_index_size = RADIX_PUD_INDEX_SIZE;
	__pgd_index_size = RADIX_PGD_INDEX_SIZE;
	__pud_cache_index = RADIX_PUD_INDEX_SIZE;
	__pte_table_size = RADIX_PTE_TABLE_SIZE;
	__pmd_table_size = RADIX_PMD_TABLE_SIZE;
	__pud_table_size = RADIX_PUD_TABLE_SIZE;
	__pgd_table_size = RADIX_PGD_TABLE_SIZE;

	__pmd_val_bits = RADIX_PMD_VAL_BITS;
	__pud_val_bits = RADIX_PUD_VAL_BITS;
	__pgd_val_bits = RADIX_PGD_VAL_BITS;

	__kernel_virt_start = RADIX_KERN_VIRT_START;
	__vmalloc_start = RADIX_VMALLOC_START;
	__vmalloc_end = RADIX_VMALLOC_END;
	__kernel_io_start = RADIX_KERN_IO_START;
	__kernel_io_end = RADIX_KERN_IO_END;
	vmemmap = (struct page *)RADIX_VMEMMAP_START;
	ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PCI
	pci_io_base = ISA_IO_BASE;
#endif
	__pte_frag_nr = RADIX_PTE_FRAG_NR;
	__pte_frag_size_shift = RADIX_PTE_FRAG_SIZE_SHIFT;
	__pmd_frag_nr = RADIX_PMD_FRAG_NR;
	__pmd_frag_size_shift = RADIX_PMD_FRAG_SIZE_SHIFT;

	radix_init_pgtable();

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
		radix_init_partition_table();
		radix_init_amor();
	} else {
		radix_init_pseries();
	}

	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);

	/* Switch to the guard PID before turning on MMU */
	radix__switch_mmu_context(NULL, &init_mm);
	tlbiel_all();
}

void radix__early_init_mmu_secondary(void)
{
	unsigned long lpcr;
	/*
	 * update partition table control register and UPRT
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);

		set_ptcr_when_no_uv(__pa(partition_tb) |
				    (PATB_SIZE_SHIFT - 12));

		radix_init_amor();
	}

	radix__switch_mmu_context(NULL, &init_mm);
	tlbiel_all();

	/* Make sure userspace can't change the AMR */
	mtspr(SPRN_UAMOR, 0);
}

/* Called during kexec sequence with MMU off */
notrace void radix__mmu_cleanup_all(void)
{
	unsigned long lpcr;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
		set_ptcr_when_no_uv(0);
		powernv_set_nmmu_ptcr(0);
		radix__flush_tlb_all();
	}
}

#ifdef CONFIG_MEMORY_HOTPLUG
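/*
 * Helpers for tearing down kernel mappings on memory hot-unplug. A page
 * table page is freed (and the entry above it cleared) only once every
 * entry in it is empty.
 */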
static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	pte_free_kernel(&init_mm, pte_start);
	pmd_clear(pmd);
}

static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	pmd_free(&init_mm, pmd_start);
	pud_clear(pud);
}

static void free_pud_table(pud_t *pud_start, p4d_t *p4d)
{
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		pud = pud_start + i;
		if (!pud_none(*pud))
			return;
	}

	pud_free(&init_mm, pud_start);
	p4d_clear(p4d);
}

static void remove_pte_table(pte_t *pte_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(next)) {
			/*
			 * The vmemmap_free() and remove_section_mapping()
			 * codepaths call us with aligned addresses.
			 */
			WARN_ONCE(1, "%s: unaligned range\n", __func__);
			continue;
		}

		pte_clear(&init_mm, addr, pte);
	}
}

static void __meminit remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
				       unsigned long end)
{
	unsigned long next;
	pte_t *pte_base;
	pmd_t *pmd;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_is_leaf(*pmd)) {
			if (!IS_ALIGNED(addr, PMD_SIZE) ||
			    !IS_ALIGNED(next, PMD_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}
			pte_clear(&init_mm, addr, (pte_t *)pmd);
			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next);
		free_pte_table(pte_base, pmd);
	}
}

static void __meminit remove_pud_table(pud_t *pud_start, unsigned long addr,
				       unsigned long end)
{
	unsigned long next;
	pmd_t *pmd_base;
	pud_t *pud;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_is_leaf(*pud)) {
			if (!IS_ALIGNED(addr, PUD_SIZE) ||
			    !IS_ALIGNED(next, PUD_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}
			pte_clear(&init_mm, addr, (pte_t *)pud);
			continue;
		}

		pmd_base = pud_pgtable(*pud);
		remove_pmd_table(pmd_base, addr, next);
		free_pmd_table(pmd_base, pud);
	}
}

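/*
 * Unmap the kernel mapping for [start, end): clear leaf entries at whatever
 * level they were mapped, free page-table pages that become empty, and flush
 * the range from the TLB.
 */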
static void __meminit remove_pagetable(unsigned long start, unsigned long end)
{
	unsigned long addr, next;
	pud_t *pud_base;
	pgd_t *pgd;
	p4d_t *p4d;

	spin_lock(&init_mm.page_table_lock);

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		p4d = p4d_offset(pgd, addr);
		if (!p4d_present(*p4d))
			continue;

		if (p4d_is_leaf(*p4d)) {
			if (!IS_ALIGNED(addr, P4D_SIZE) ||
			    !IS_ALIGNED(next, P4D_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			pte_clear(&init_mm, addr, (pte_t *)pgd);
			continue;
		}

		pud_base = p4d_pgtable(*p4d);
		remove_pud_table(pud_base, addr, next);
		free_pud_table(pud_base, p4d);
	}

	spin_unlock(&init_mm.page_table_lock);
	radix__flush_tlb_kernel_range(start, end);
}

int __meminit radix__create_section_mapping(unsigned long start,
					    unsigned long end, int nid,
					    pgprot_t prot)
{
	if (end >= RADIX_VMALLOC_START) {
		pr_warn("Outside the supported range\n");
		return -1;
	}

	return create_physical_mapping(__pa(start), __pa(end),
				       radix_mem_block_size, nid, prot);
}

int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
{
	remove_pagetable(start, end);
	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static int __map_kernel_page_nid(unsigned long ea, unsigned long pa,
				 pgprot_t flags, unsigned int map_page_size,
				 int nid)
{
	return __map_kernel_page(ea, pa, flags, map_page_size, nid, 0, 0);
}

int __meminit radix__vmemmap_create_mapping(unsigned long start,
				      unsigned long page_size,
				      unsigned long phys)
{
	/* Create a PTE encoding */
	unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;
	int nid = early_pfn_to_nid(phys >> PAGE_SHIFT);
	int ret;

	if ((start + page_size) >= RADIX_VMEMMAP_END) {
		pr_warn("Outside the supported range\n");
		return -1;
	}

	ret = __map_kernel_page_nid(start, phys, __pgprot(flags), page_size, nid);
	BUG_ON(ret);

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
{
	remove_pagetable(start, start + page_size);
}
#endif
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
void radix__kernel_map_pages(struct page *page, int numpages, int enable)
{
	pr_warn_once("DEBUG_PAGEALLOC not supported in radix mode\n");
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
					 pmd_t *pmdp, unsigned long clr,
					 unsigned long set)
{
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
#endif

	old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
	trace_hugepage_update(addr, old, clr, set);

	return old;
}

pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
				 pmd_t *pmdp)

{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
	VM_BUG_ON(pmd_devmap(*pmdp));
	/*
	 * khugepaged calls this for normal pmd
	 */
	pmd = *pmdp;
	pmd_clear(pmdp);

	/*
	 * pmdp_collapse_flush needs to ensure that there are no parallel GUP
	 * walks after this call. This is needed so that we can have a stable
	 * page ref count when collapsing a page. We don't allow collapsing a
	 * page if GUP has taken a reference on it. We can ensure that by
	 * sending an IPI, because a GUP walk happens with IRQs disabled.
	 */
	serialize_against_pte_lookup(vma->vm_mm);

	radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);

	return pmd;
}

/*
 * For us pgtable_t is pte_t *. In order to save the deposited
 * page table, we consider the allocated page table as a list
 * head. On withdraw we need to make sure we zero out the used
 * list_head memory area.
 */
void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pte_t *ptep;
	pgtable_t pgtable;
	struct list_head *lh;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	*ptep = __pte(0);
	ptep++;
	*ptep = __pte(0);
	return pgtable;
}

pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
				     unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	unsigned long old;

	old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	return old_pmd;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
				  pte_t entry, unsigned long address, int psize)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
					      _PAGE_RW | _PAGE_EXEC);

	unsigned long change = pte_val(entry) ^ pte_val(*ptep);
	/*
	 * To avoid NMMU hang while relaxing access, we need to mark
	 * the pte invalid in between.
	 */
	if ((change & _PAGE_RW) && atomic_read(&mm->context.copros) > 0) {
		unsigned long old_pte, new_pte;

		old_pte = __radix_pte_update(ptep, _PAGE_PRESENT, _PAGE_INVALID);
		/*
		 * new value of pte
		 */
		new_pte = old_pte | set;
		radix__flush_tlb_page_psize(mm, address, psize);
		__radix_pte_update(ptep, _PAGE_INVALID, new_pte);
	} else {
		__radix_pte_update(ptep, 0, set);
		/*
		 * Book3S does not require a TLB flush when relaxing access
		 * restrictions when the address space is not attached to a
		 * NMMU, because the core MMU will reload the pte after taking
		 * an access fault, which is defined by the architecture.
		 */
	}
	/* See ptesync comment in radix__set_pte_at */
}

void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep,
				    pte_t old_pte, pte_t pte)
{
	struct mm_struct *mm = vma->vm_mm;

	/*
	 * To avoid NMMU hang while relaxing access we need to flush the tlb before
	 * we set the new value. We need to do this only for radix, because hash
	 * translation does flush when updating the linux pte.
	 */
	if (is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) &&
	    (atomic_read(&mm->context.copros) > 0))
		radix__flush_tlb_page(vma, addr);

	set_pte_at(mm, addr, ptep, pte);
}

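/*
 * Huge vmap support: arch implementations of the generic pXd_set_huge(),
 * pXd_clear_huge() and pXd_free_Yd_page() hooks, used by the generic
 * vmalloc/ioremap code to create and tear down 2M and 1G mappings when
 * radix is enabled.
 */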
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
	pte_t *ptep = (pte_t *)pud;
	pte_t new_pud = pfn_pte(__phys_to_pfn(addr), prot);

	if (!radix_enabled())
		return 0;

	set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pud);

	return 1;
}

int pud_clear_huge(pud_t *pud)
{
	if (pud_huge(*pud)) {
		pud_clear(pud);
		return 1;
	}

	return 0;
}

int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
	pmd_t *pmd;
	int i;

	pmd = pud_pgtable(*pud);
	pud_clear(pud);

	flush_tlb_kernel_range(addr, addr + PUD_SIZE);

	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (!pmd_none(pmd[i])) {
			pte_t *pte;
			pte = (pte_t *)pmd_page_vaddr(pmd[i]);

			pte_free_kernel(&init_mm, pte);
		}
	}

	pmd_free(&init_mm, pmd);

	return 1;
}

int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
	pte_t *ptep = (pte_t *)pmd;
	pte_t new_pmd = pfn_pte(__phys_to_pfn(addr), prot);

	if (!radix_enabled())
		return 0;

	set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pmd);

	return 1;
}

int pmd_clear_huge(pmd_t *pmd)
{
	if (pmd_huge(*pmd)) {
		pmd_clear(pmd);
		return 1;
	}

	return 0;
}

int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
	pte_t *pte;

	pte = (pte_t *)pmd_page_vaddr(*pmd);
	pmd_clear(pmd);

	flush_tlb_kernel_range(addr, addr + PMD_SIZE);

	pte_free_kernel(&init_mm, pte);

	return 1;
}