// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Page table handling routines for radix page table.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 */

#define pr_fmt(fmt) "radix-mmu: " fmt

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/string_helpers.h>
#include <linux/memory.h>

#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/powernv.h>
#include <asm/sections.h>
#include <asm/smp.h>
#include <asm/trace.h>
#include <asm/uaccess.h>
#include <asm/ultravisor.h>

#include <trace/events/thp.h>

unsigned int mmu_pid_bits;
unsigned int mmu_base_pid;
unsigned long radix_mem_block_size __ro_after_init;

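/*
 * Allocate page table memory from memblock during early boot, before the
 * slab allocator is up. nid and region_start/region_end are only placement
 * hints; the allocation panics on failure because boot cannot continue
 * without page tables.
 */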
static __ref void *early_alloc_pgtable(unsigned long size, int nid,
			unsigned long region_start, unsigned long region_end)
{
	phys_addr_t min_addr = MEMBLOCK_LOW_LIMIT;
	phys_addr_t max_addr = MEMBLOCK_ALLOC_ANYWHERE;
	void *ptr;

	if (region_start)
		min_addr = region_start;
	if (region_end)
		max_addr = region_end;

	ptr = memblock_alloc_try_nid(size, size, min_addr, max_addr, nid);

	if (!ptr)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa max_addr=%pa\n",
		      __func__, size, size, nid, &min_addr, &max_addr);

	return ptr;
}

/*
 * When allocating pud or pmd pointers, we allocate a complete page
 * of PAGE_SIZE rather than PUD_TABLE_SIZE or PMD_TABLE_SIZE. This
 * is to ensure that the page obtained from the memblock allocator
 * can be used completely as a page table page and can be freed
 * correctly when the page table entries are removed.
 */
static int early_map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size,
			  int nid,
			  unsigned long region_start, unsigned long region_end)
{
	unsigned long pfn = pa >> PAGE_SHIFT;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(ea);
	p4dp = p4d_offset(pgdp, ea);
	if (p4d_none(*p4dp)) {
		pudp = early_alloc_pgtable(PAGE_SIZE, nid,
					   region_start, region_end);
		p4d_populate(&init_mm, p4dp, pudp);
	}
	pudp = pud_offset(p4dp, ea);
	if (map_page_size == PUD_SIZE) {
		ptep = (pte_t *)pudp;
		goto set_the_pte;
	}
	if (pud_none(*pudp)) {
		pmdp = early_alloc_pgtable(PAGE_SIZE, nid, region_start,
					   region_end);
		pud_populate(&init_mm, pudp, pmdp);
	}
	pmdp = pmd_offset(pudp, ea);
	if (map_page_size == PMD_SIZE) {
		ptep = pmdp_ptep(pmdp);
		goto set_the_pte;
	}
	if (!pmd_present(*pmdp)) {
		ptep = early_alloc_pgtable(PAGE_SIZE, nid,
					   region_start, region_end);
		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}
	ptep = pte_offset_kernel(pmdp, ea);

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
	asm volatile("ptesync": : :"memory");
	return 0;
}

/*
 * nid, region_start, and region_end are hints to try to place the page
 * table memory in the same node or region.
 */
static int __map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size,
			  int nid,
			  unsigned long region_start, unsigned long region_end)
{
	unsigned long pfn = pa >> PAGE_SHIFT;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	/*
	 * Make sure task size is correct as per the max addr
	 */
	BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);

#ifdef CONFIG_PPC_64K_PAGES
	BUILD_BUG_ON(RADIX_KERN_MAP_SIZE != (1UL << MAX_EA_BITS_PER_CONTEXT));
#endif

	if (unlikely(!slab_is_available()))
		return early_map_kernel_page(ea, pa, flags, map_page_size,
					     nid, region_start, region_end);

	/*
	 * Should make page table allocation functions be able to take a
	 * node, so we can place kernel page tables on the right nodes after
	 * boot.
	 */
	pgdp = pgd_offset_k(ea);
	p4dp = p4d_offset(pgdp, ea);
	pudp = pud_alloc(&init_mm, p4dp, ea);
	if (!pudp)
		return -ENOMEM;
	if (map_page_size == PUD_SIZE) {
		ptep = (pte_t *)pudp;
		goto set_the_pte;
	}
	pmdp = pmd_alloc(&init_mm, pudp, ea);
	if (!pmdp)
		return -ENOMEM;
	if (map_page_size == PMD_SIZE) {
		ptep = pmdp_ptep(pmdp);
		goto set_the_pte;
	}
	ptep = pte_alloc_kernel(pmdp, ea);
	if (!ptep)
		return -ENOMEM;

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
	asm volatile("ptesync": : :"memory");
	return 0;
}

int radix__map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size)
{
	return __map_kernel_page(ea, pa, flags, map_page_size, -1, 0, 0);
}

#ifdef CONFIG_STRICT_KERNEL_RWX
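/*
 * Walk an already-mapped kernel virtual range one page at a time, clear the
 * given PTE bits (e.g. _PAGE_WRITE or _PAGE_EXEC) and flush the TLB for the
 * range. Used below to enforce the kernel RWX protections.
 */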
static void radix__change_memory_range(unsigned long start, unsigned long end,
					unsigned long clear)
{
	unsigned long idx;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = PAGE_ALIGN(end); // aligns up

	pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n",
		 start, end, clear);

	for (idx = start; idx < end; idx += PAGE_SIZE) {
		pgdp = pgd_offset_k(idx);
		p4dp = p4d_offset(pgdp, idx);
		pudp = pud_alloc(&init_mm, p4dp, idx);
		if (!pudp)
			continue;
		if (pud_is_leaf(*pudp)) {
			ptep = (pte_t *)pudp;
			goto update_the_pte;
		}
		pmdp = pmd_alloc(&init_mm, pudp, idx);
		if (!pmdp)
			continue;
		if (pmd_is_leaf(*pmdp)) {
			ptep = pmdp_ptep(pmdp);
			goto update_the_pte;
		}
		ptep = pte_alloc_kernel(pmdp, idx);
		if (!ptep)
			continue;
update_the_pte:
		radix__pte_update(&init_mm, idx, ptep, clear, 0, 0);
	}

	radix__flush_tlb_kernel_range(start, end);
}

void radix__mark_rodata_ro(void)
{
	unsigned long start, end;

	start = (unsigned long)_stext;
	end = (unsigned long)__init_begin;

	radix__change_memory_range(start, end, _PAGE_WRITE);
}

void radix__mark_initmem_nx(void)
{
	unsigned long start = (unsigned long)__init_begin;
	unsigned long end = (unsigned long)__init_end;

	radix__change_memory_range(start, end, _PAGE_EXEC);
}
#endif /* CONFIG_STRICT_KERNEL_RWX */

static inline void __meminit
print_mapping(unsigned long start, unsigned long end, unsigned long size, bool exec)
{
	char buf[10];

	if (end <= start)
		return;

	string_get_size(size, 1, STRING_UNITS_2, buf, sizeof(buf));

	pr_info("Mapped 0x%016lx-0x%016lx with %s pages%s\n", start, end, buf,
		exec ? " (exec)" : "");
}

static unsigned long next_boundary(unsigned long addr, unsigned long end)
{
#ifdef CONFIG_STRICT_KERNEL_RWX
	if (addr < __pa_symbol(__init_begin))
		return __pa_symbol(__init_begin);
#endif
	return end;
}

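/*
 * Map a physical range into the kernel linear mapping, choosing the largest
 * page size (1G, 2M or base page) that alignment, the remaining gap and the
 * supported MMU page sizes allow. Ranges overlapping kernel text or the
 * interrupt vectors are mapped executable; everything else gets the
 * caller-supplied protection.
 */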
static int __meminit create_physical_mapping(unsigned long start,
					     unsigned long end,
					     unsigned long max_mapping_size,
					     int nid, pgprot_t _prot)
{
	unsigned long vaddr, addr, mapping_size = 0;
	bool prev_exec, exec = false;
	pgprot_t prot;
	int psize;

	start = ALIGN(start, PAGE_SIZE);
	end = ALIGN_DOWN(end, PAGE_SIZE);
	for (addr = start; addr < end; addr += mapping_size) {
		unsigned long gap, previous_size;
		int rc;

		gap = next_boundary(addr, end) - addr;
		if (gap > max_mapping_size)
			gap = max_mapping_size;
		previous_size = mapping_size;
		prev_exec = exec;

		if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
		    mmu_psize_defs[MMU_PAGE_1G].shift) {
			mapping_size = PUD_SIZE;
			psize = MMU_PAGE_1G;
		} else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
			   mmu_psize_defs[MMU_PAGE_2M].shift) {
			mapping_size = PMD_SIZE;
			psize = MMU_PAGE_2M;
		} else {
			mapping_size = PAGE_SIZE;
			psize = mmu_virtual_psize;
		}

		vaddr = (unsigned long)__va(addr);

		if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
		    overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size)) {
			prot = PAGE_KERNEL_X;
			exec = true;
		} else {
			prot = _prot;
			exec = false;
		}

		if (mapping_size != previous_size || exec != prev_exec) {
			print_mapping(start, addr, previous_size, prev_exec);
			start = addr;
		}

		rc = __map_kernel_page(vaddr, addr, prot, mapping_size, nid, start, end);
		if (rc)
			return rc;

		update_page_count(psize, 1);
	}

	print_mapping(start, addr, mapping_size, exec);
	return 0;
}

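/*
 * Boot-time setup: create the linear mapping for all memblock memory,
 * work out how many PID bits to use, and allocate the process table the
 * hardware walks to find per-process radix trees.
 */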
static void __init radix_init_pgtable(void)
{
	unsigned long rts_field;
	phys_addr_t start, end;
	u64 i;

	/* We don't support slb for radix */
	mmu_slb_size = 0;

	/*
	 * Create the linear mapping
	 */
	for_each_mem_range(i, &start, &end) {
		/*
		 * The memblock allocator is up at this point, so the
		 * page tables will be allocated within the range. No
		 * need for a node (which we don't have yet).
		 */

		if (end >= RADIX_VMALLOC_START) {
			pr_warn("Outside the supported range\n");
			continue;
		}

		WARN_ON(create_physical_mapping(start, end,
						radix_mem_block_size,
						-1, PAGE_KERNEL));
	}

	/* Find out how many PID bits are supported */
	if (!cpu_has_feature(CPU_FTR_HVMODE) &&
			cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
		/*
		 * Older versions of KVM on these machines prefer that the
		 * guest only uses the low 19 PID bits.
		 */
		if (!mmu_pid_bits)
			mmu_pid_bits = 19;
	} else {
		if (!mmu_pid_bits)
			mmu_pid_bits = 20;
	}
	mmu_base_pid = 1;

	/*
	 * Allocate Partition table and process table for the
	 * host.
	 */
	BUG_ON(PRTB_SIZE_SHIFT > 36);
	process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT, -1, 0, 0);
	/*
	 * Fill in the process table.
	 */
	rts_field = radix__get_tree_size();
	process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);

	/*
	 * The init_mm context is given the first available (non-zero) PID,
	 * which is the "guard PID" and contains no page table. PIDR should
	 * never be set to zero because that duplicates the kernel address
	 * space at the 0x0... offset (quadrant 0)!
	 *
	 * An arbitrary PID that may later be allocated by the PID allocator
	 * for userspace processes must not be used either, because that
	 * would cause stale user mappings for that PID on CPUs outside of
	 * the TLB invalidation scheme (because it won't be in mm_cpumask).
	 *
	 * So permanently carve out one PID for the purpose of a guard PID.
	 */
	init_mm.context.id = mmu_base_pid;
	mmu_base_pid++;
}

static void __init radix_init_partition_table(void)
{
	unsigned long rts_field, dw0, dw1;

	mmu_partition_table_init();
	rts_field = radix__get_tree_size();
	dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
	dw1 = __pa(process_tb) | (PRTB_SIZE_SHIFT - 12) | PATB_GR;
	mmu_partition_table_set_entry(0, dw0, dw1, false);

	pr_info("Initializing Radix MMU\n");
}

static int __init get_idx_from_shift(unsigned int shift)
{
	int idx = -1;

	switch (shift) {
	case 0xc:
		idx = MMU_PAGE_4K;
		break;
	case 0x10:
		idx = MMU_PAGE_64K;
		break;
	case 0x15:
		idx = MMU_PAGE_2M;
		break;
	case 0x1e:
		idx = MMU_PAGE_1G;
		break;
	}
	return idx;
}

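/*
 * Flat device tree scan: read the supported radix page sizes from the
 * "ibm,processor-radix-AP-encodings" property of the cpu nodes. Each cell
 * carries the page size shift in the low bits and the AP field in the
 * top three bits.
 */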
static int __init radix_dt_scan_page_sizes(unsigned long node,
					   const char *uname, int depth,
					   void *data)
{
	int size = 0;
	int shift, idx;
	unsigned int ap;
	const __be32 *prop;
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	/* Find MMU PID size */
	prop = of_get_flat_dt_prop(node, "ibm,mmu-pid-bits", &size);
	if (prop && size == 4)
		mmu_pid_bits = be32_to_cpup(prop);

	/* Grab page size encodings */
	prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
	if (!prop)
		return 0;

	pr_info("Page sizes from device-tree:\n");
	for (; size >= 4; size -= 4, ++prop) {

		struct mmu_psize_def *def;

		/* top 3 bits are the AP encoding */
		shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
		ap = be32_to_cpu(prop[0]) >> 29;
		pr_info("Page size shift = %d AP=0x%x\n", shift, ap);

		idx = get_idx_from_shift(shift);
		if (idx < 0)
			continue;

		def = &mmu_psize_defs[idx];
		def->shift = shift;
		def->ap = ap;
	}

	/* needed ? */
	cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
	return 1;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static int __init probe_memory_block_size(unsigned long node, const char *uname, int
					  depth, void *data)
{
	unsigned long *mem_block_size = (unsigned long *)data;
	const __be32 *prop;
	int len;

	if (depth != 1)
		return 0;

	if (strcmp(uname, "ibm,dynamic-reconfiguration-memory"))
		return 0;

	prop = of_get_flat_dt_prop(node, "ibm,lmb-size", &len);

	if (!prop || len < dt_root_size_cells * sizeof(__be32))
		/*
		 * Nothing in the device tree
		 */
		*mem_block_size = MIN_MEMORY_BLOCK_SIZE;
	else
		*mem_block_size = of_read_number(prop, dt_root_size_cells);
	return 1;
}

static unsigned long radix_memory_block_size(void)
{
	unsigned long mem_block_size = MIN_MEMORY_BLOCK_SIZE;

	/*
	 * OPAL firmware feature is set by now. Hence we are ok
	 * to test OPAL feature.
	 */
	if (firmware_has_feature(FW_FEATURE_OPAL))
		mem_block_size = 1UL * 1024 * 1024 * 1024;
	else
		of_scan_flat_dt(probe_memory_block_size, &mem_block_size);

	return mem_block_size;
}

#else /* CONFIG_MEMORY_HOTPLUG */

static unsigned long radix_memory_block_size(void)
{
	return 1UL * 1024 * 1024 * 1024;
}

#endif /* CONFIG_MEMORY_HOTPLUG */

void __init radix__early_init_devtree(void)
{
	int rc;

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
	if (!rc) {
		/*
		 * No page size details found in device tree.
		 * Let's assume we have page 4k and 64k support
		 */
		mmu_psize_defs[MMU_PAGE_4K].shift = 12;
		mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;

		mmu_psize_defs[MMU_PAGE_64K].shift = 16;
		mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
	}

	/*
	 * Max mapping size used when mapping pages. We don't use
	 * ppc_md.memory_block_size() here because this gets called
	 * early, before the machine probe has run. Also the pseries
	 * implementation only checks for ibm,lmb-size. All hypervisors
	 * supporting radix do expose that device tree node.
	 */
	radix_mem_block_size = radix_memory_block_size();
	return;
}

static void radix_init_amor(void)
{
	/*
	 * In HV mode, we init AMOR (Authority Mask Override Register) so that
	 * the hypervisor and guest can set up IAMR (Instruction Authority Mask
	 * Register), enable key 0 and set it to 1.
	 *
	 * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
	 */
	mtspr(SPRN_AMOR, (3ul << 62));
}

void __init radix__early_init_mmu(void)
{
	unsigned long lpcr;

#ifdef CONFIG_PPC_64K_PAGES
	/* PAGE_SIZE mappings */
	mmu_virtual_psize = MMU_PAGE_64K;
#else
	mmu_virtual_psize = MMU_PAGE_4K;
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* vmemmap mapping */
	if (mmu_psize_defs[MMU_PAGE_2M].shift) {
		/*
		 * map vmemmap using 2M if available
		 */
		mmu_vmemmap_psize = MMU_PAGE_2M;
	} else
		mmu_vmemmap_psize = mmu_virtual_psize;
#endif
	/*
	 * initialize page table size
	 */
	__pte_index_size = RADIX_PTE_INDEX_SIZE;
	__pmd_index_size = RADIX_PMD_INDEX_SIZE;
	__pud_index_size = RADIX_PUD_INDEX_SIZE;
	__pgd_index_size = RADIX_PGD_INDEX_SIZE;
	__pud_cache_index = RADIX_PUD_INDEX_SIZE;
	__pte_table_size = RADIX_PTE_TABLE_SIZE;
	__pmd_table_size = RADIX_PMD_TABLE_SIZE;
	__pud_table_size = RADIX_PUD_TABLE_SIZE;
	__pgd_table_size = RADIX_PGD_TABLE_SIZE;

	__pmd_val_bits = RADIX_PMD_VAL_BITS;
	__pud_val_bits = RADIX_PUD_VAL_BITS;
	__pgd_val_bits = RADIX_PGD_VAL_BITS;

	__kernel_virt_start = RADIX_KERN_VIRT_START;
	__vmalloc_start = RADIX_VMALLOC_START;
	__vmalloc_end = RADIX_VMALLOC_END;
	__kernel_io_start = RADIX_KERN_IO_START;
	__kernel_io_end = RADIX_KERN_IO_END;
	vmemmap = (struct page *)RADIX_VMEMMAP_START;
	ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PCI
	pci_io_base = ISA_IO_BASE;
#endif
	__pte_frag_nr = RADIX_PTE_FRAG_NR;
	__pte_frag_size_shift = RADIX_PTE_FRAG_SIZE_SHIFT;
	__pmd_frag_nr = RADIX_PMD_FRAG_NR;
	__pmd_frag_size_shift = RADIX_PMD_FRAG_SIZE_SHIFT;

	radix_init_pgtable();

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
		radix_init_partition_table();
		radix_init_amor();
	} else {
		radix_init_pseries();
	}

	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);

	/* Switch to the guard PID before turning on MMU */
	radix__switch_mmu_context(NULL, &init_mm);
	tlbiel_all();
}

void radix__early_init_mmu_secondary(void)
{
	unsigned long lpcr;
	/*
	 * update partition table control register and UPRT
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);

		set_ptcr_when_no_uv(__pa(partition_tb) |
				    (PATB_SIZE_SHIFT - 12));

		radix_init_amor();
	}

	radix__switch_mmu_context(NULL, &init_mm);
	tlbiel_all();

	/* Make sure userspace can't change the AMR */
	mtspr(SPRN_UAMOR, 0);
}

void radix__mmu_cleanup_all(void)
{
	unsigned long lpcr;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
		set_ptcr_when_no_uv(0);
		powernv_set_nmmu_ptcr(0);
		radix__flush_tlb_all();
	}
}

#ifdef CONFIG_MEMORY_HOTPLUG
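/*
 * Helpers for memory hot-remove: tear down the kernel mappings for a
 * removed section, clearing leaf entries and freeing page table pages
 * that become empty at each level.
 */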
static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	pte_free_kernel(&init_mm, pte_start);
	pmd_clear(pmd);
}

static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	pmd_free(&init_mm, pmd_start);
	pud_clear(pud);
}

static void free_pud_table(pud_t *pud_start, p4d_t *p4d)
{
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		pud = pud_start + i;
		if (!pud_none(*pud))
			return;
	}

	pud_free(&init_mm, pud_start);
	p4d_clear(p4d);
}

static void remove_pte_table(pte_t *pte_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(next)) {
			/*
			 * The vmemmap_free() and remove_section_mapping()
			 * codepaths call us with aligned addresses.
			 */
			WARN_ONCE(1, "%s: unaligned range\n", __func__);
			continue;
		}

		pte_clear(&init_mm, addr, pte);
	}
}

static void __meminit remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
				       unsigned long end)
{
	unsigned long next;
	pte_t *pte_base;
	pmd_t *pmd;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_is_leaf(*pmd)) {
			if (!IS_ALIGNED(addr, PMD_SIZE) ||
			    !IS_ALIGNED(next, PMD_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}
			pte_clear(&init_mm, addr, (pte_t *)pmd);
			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next);
		free_pte_table(pte_base, pmd);
	}
}

static void __meminit remove_pud_table(pud_t *pud_start, unsigned long addr,
				       unsigned long end)
{
	unsigned long next;
	pmd_t *pmd_base;
	pud_t *pud;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_is_leaf(*pud)) {
			if (!IS_ALIGNED(addr, PUD_SIZE) ||
			    !IS_ALIGNED(next, PUD_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}
			pte_clear(&init_mm, addr, (pte_t *)pud);
			continue;
		}

		pmd_base = (pmd_t *)pud_page_vaddr(*pud);
		remove_pmd_table(pmd_base, addr, next);
		free_pmd_table(pmd_base, pud);
	}
}

static void __meminit remove_pagetable(unsigned long start, unsigned long end)
{
	unsigned long addr, next;
	pud_t *pud_base;
	pgd_t *pgd;
	p4d_t *p4d;

	spin_lock(&init_mm.page_table_lock);

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		p4d = p4d_offset(pgd, addr);
		if (!p4d_present(*p4d))
			continue;

		if (p4d_is_leaf(*p4d)) {
			if (!IS_ALIGNED(addr, P4D_SIZE) ||
			    !IS_ALIGNED(next, P4D_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			pte_clear(&init_mm, addr, (pte_t *)pgd);
			continue;
		}

		pud_base = (pud_t *)p4d_page_vaddr(*p4d);
		remove_pud_table(pud_base, addr, next);
		free_pud_table(pud_base, p4d);
	}

	spin_unlock(&init_mm.page_table_lock);
	radix__flush_tlb_kernel_range(start, end);
}

int __meminit radix__create_section_mapping(unsigned long start,
					    unsigned long end, int nid,
					    pgprot_t prot)
{
	if (end >= RADIX_VMALLOC_START) {
		pr_warn("Outside the supported range\n");
		return -1;
	}

	return create_physical_mapping(__pa(start), __pa(end),
				       radix_mem_block_size, nid, prot);
}

int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
{
	remove_pagetable(start, end);
	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static int __map_kernel_page_nid(unsigned long ea, unsigned long pa,
				 pgprot_t flags, unsigned int map_page_size,
				 int nid)
{
	return __map_kernel_page(ea, pa, flags, map_page_size, nid, 0, 0);
}

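/*
 * Map a page_size chunk of the vmemmap (the struct page array), placing the
 * page table memory on the node of the backing physical memory where
 * possible.
 */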
int __meminit radix__vmemmap_create_mapping(unsigned long start,
				      unsigned long page_size,
				      unsigned long phys)
{
	/* Create a PTE encoding */
	unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;
	int nid = early_pfn_to_nid(phys >> PAGE_SHIFT);
	int ret;

	if ((start + page_size) >= RADIX_VMEMMAP_END) {
		pr_warn("Outside the supported range\n");
		return -1;
	}

	ret = __map_kernel_page_nid(start, phys, __pgprot(flags), page_size, nid);
	BUG_ON(ret);

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
{
	remove_pagetable(start, start + page_size);
}
#endif
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

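/*
 * Transparent hugepage helpers. radix__pmd_hugepage_update() updates a
 * huge-page PMD under the PMD lock and emits a hugepage_update tracepoint;
 * the functions below implement collapse, deposit and withdraw of the
 * backing page table.
 */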
unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
				  pmd_t *pmdp, unsigned long clr,
				  unsigned long set)
{
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
#endif

	old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
	trace_hugepage_update(addr, old, clr, set);

	return old;
}

pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			pmd_t *pmdp)
{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
	VM_BUG_ON(pmd_devmap(*pmdp));
	/*
	 * khugepaged calls this for normal pmd
	 */
	pmd = *pmdp;
	pmd_clear(pmdp);

	/*
	 * pmdp_collapse_flush() needs to ensure that there are no parallel
	 * gup walks after this call. This is needed so that we can have a
	 * stable page ref count when collapsing a page. We don't allow
	 * collapsing a page on which gup has taken a reference. We can
	 * ensure that by sending an IPI, because gup walks happen with
	 * IRQs disabled.
	 */
	serialize_against_pte_lookup(vma->vm_mm);

	radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);

	return pmd;
}

/*
 * For us pgtable_t is pte_t *. In order to save the deposited
 * page table, we consider the allocated page table as a list
 * head. On withdraw we need to make sure we zero out the used
 * list_head memory area.
 */
void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				 pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pte_t *ptep;
	pgtable_t pgtable;
	struct list_head *lh;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	*ptep = __pte(0);
	ptep++;
	*ptep = __pte(0);
	return pgtable;
}

pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
				     unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	unsigned long old;

	old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	return old_pmd;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
				  pte_t entry, unsigned long address, int psize)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
					      _PAGE_RW | _PAGE_EXEC);

	unsigned long change = pte_val(entry) ^ pte_val(*ptep);
	/*
	 * To avoid NMMU hang while relaxing access, we need to mark
	 * the pte invalid in between.
	 */
	if ((change & _PAGE_RW) && atomic_read(&mm->context.copros) > 0) {
		unsigned long old_pte, new_pte;

		old_pte = __radix_pte_update(ptep, _PAGE_PRESENT, _PAGE_INVALID);
		/*
		 * new value of pte
		 */
		new_pte = old_pte | set;
		radix__flush_tlb_page_psize(mm, address, psize);
		__radix_pte_update(ptep, _PAGE_INVALID, new_pte);
	} else {
		__radix_pte_update(ptep, 0, set);
		/*
		 * Book3S does not require a TLB flush when relaxing access
		 * restrictions when the address space is not attached to a
		 * NMMU, because the core MMU will reload the pte after taking
		 * an access fault, which is defined by the architecture.
		 */
	}
	/* See ptesync comment in radix__set_pte_at */
}

void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep,
				    pte_t old_pte, pte_t pte)
{
	struct mm_struct *mm = vma->vm_mm;

	/*
	 * To avoid NMMU hang while relaxing access we need to flush the tlb before
	 * we set the new value. We need to do this only for radix, because hash
	 * translation does flush when updating the linux pte.
	 */
	if (is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) &&
	    (atomic_read(&mm->context.copros) > 0))
		radix__flush_tlb_page(vma, addr);

	set_pte_at(mm, addr, ptep, pte);
}

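/*
 * Huge vmalloc/ioremap support: install and tear down 1G (PUD) and 2M (PMD)
 * leaf mappings in the kernel page tables. The set_huge() helpers refuse to
 * create leaf mappings when the radix MMU is not enabled.
 */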
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
	pte_t *ptep = (pte_t *)pud;
	pte_t new_pud = pfn_pte(__phys_to_pfn(addr), prot);

	if (!radix_enabled())
		return 0;

	set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pud);

	return 1;
}

int pud_clear_huge(pud_t *pud)
{
	if (pud_huge(*pud)) {
		pud_clear(pud);
		return 1;
	}

	return 0;
}

int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
	pmd_t *pmd;
	int i;

	pmd = (pmd_t *)pud_page_vaddr(*pud);
	pud_clear(pud);

	flush_tlb_kernel_range(addr, addr + PUD_SIZE);

	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (!pmd_none(pmd[i])) {
			pte_t *pte;
			pte = (pte_t *)pmd_page_vaddr(pmd[i]);

			pte_free_kernel(&init_mm, pte);
		}
	}

	pmd_free(&init_mm, pmd);

	return 1;
}

int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
	pte_t *ptep = (pte_t *)pmd;
	pte_t new_pmd = pfn_pte(__phys_to_pfn(addr), prot);

	if (!radix_enabled())
		return 0;

	set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pmd);

	return 1;
}

int pmd_clear_huge(pmd_t *pmd)
{
	if (pmd_huge(*pmd)) {
		pmd_clear(pmd);
		return 1;
	}

	return 0;
}

int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
	pte_t *pte;

	pte = (pte_t *)pmd_page_vaddr(*pmd);
	pmd_clear(pmd);

	flush_tlb_kernel_range(addr, addr + PMD_SIZE);

	pte_free_kernel(&init_mm, pte);

	return 1;
}