// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Page table handling routines for radix page table.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 */

#define pr_fmt(fmt) "radix-mmu: " fmt

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/string_helpers.h>
#include <linux/memory.h>

#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/powernv.h>
#include <asm/sections.h>
#include <asm/smp.h>
#include <asm/trace.h>
#include <asm/uaccess.h>
#include <asm/ultravisor.h>

#include <trace/events/thp.h>

unsigned int mmu_pid_bits;
unsigned int mmu_base_pid;
unsigned long radix_mem_block_size __ro_after_init;

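/*
 * Boot-time page table allocator: grab a size-aligned block from memblock,
 * preferring the given NUMA node and, when region_start/region_end are
 * non-zero, constraining the allocation to that physical range. There is
 * no way to recover from failure this early, hence the panic().
 */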
static __ref void *early_alloc_pgtable(unsigned long size, int nid,
			unsigned long region_start, unsigned long region_end)
{
	phys_addr_t min_addr = MEMBLOCK_LOW_LIMIT;
	phys_addr_t max_addr = MEMBLOCK_ALLOC_ANYWHERE;
	void *ptr;

	if (region_start)
		min_addr = region_start;
	if (region_end)
		max_addr = region_end;

	ptr = memblock_alloc_try_nid(size, size, min_addr, max_addr, nid);

	if (!ptr)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa max_addr=%pa\n",
		      __func__, size, size, nid, &min_addr, &max_addr);

	return ptr;
}

/*
 * When allocating pud or pmd pointers, we allocate a complete page
 * of PAGE_SIZE rather than PUD_TABLE_SIZE or PMD_TABLE_SIZE. This
 * is to ensure that the page obtained from the memblock allocator
 * can be completely used as page table page and can be freed
 * correctly when the page table entries are removed.
 */
static int early_map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size,
			  int nid,
			  unsigned long region_start, unsigned long region_end)
{
	unsigned long pfn = pa >> PAGE_SHIFT;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(ea);
	p4dp = p4d_offset(pgdp, ea);
	if (p4d_none(*p4dp)) {
		pudp = early_alloc_pgtable(PAGE_SIZE, nid,
					   region_start, region_end);
		p4d_populate(&init_mm, p4dp, pudp);
	}
	pudp = pud_offset(p4dp, ea);
	if (map_page_size == PUD_SIZE) {
		ptep = (pte_t *)pudp;
		goto set_the_pte;
	}
	if (pud_none(*pudp)) {
		pmdp = early_alloc_pgtable(PAGE_SIZE, nid, region_start,
					   region_end);
		pud_populate(&init_mm, pudp, pmdp);
	}
	pmdp = pmd_offset(pudp, ea);
	if (map_page_size == PMD_SIZE) {
		ptep = pmdp_ptep(pmdp);
		goto set_the_pte;
	}
	if (!pmd_present(*pmdp)) {
		ptep = early_alloc_pgtable(PAGE_SIZE, nid,
					   region_start, region_end);
		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}
	ptep = pte_offset_kernel(pmdp, ea);

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
	smp_wmb();
	return 0;
}

/*
 * nid, region_start, and region_end are hints to try to place the page
 * table memory in the same node or region.
 */
static int __map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size,
			  int nid,
			  unsigned long region_start, unsigned long region_end)
{
	unsigned long pfn = pa >> PAGE_SHIFT;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	/*
	 * Make sure task size is correct as per the max addr
	 */
	BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);

#ifdef CONFIG_PPC_64K_PAGES
	BUILD_BUG_ON(RADIX_KERN_MAP_SIZE != (1UL << MAX_EA_BITS_PER_CONTEXT));
#endif

	if (unlikely(!slab_is_available()))
		return early_map_kernel_page(ea, pa, flags, map_page_size,
						nid, region_start, region_end);

	/*
	 * Should make page table allocation functions be able to take a
	 * node, so we can place kernel page tables on the right nodes after
	 * boot.
	 */
	pgdp = pgd_offset_k(ea);
	p4dp = p4d_offset(pgdp, ea);
	pudp = pud_alloc(&init_mm, p4dp, ea);
	if (!pudp)
		return -ENOMEM;
	if (map_page_size == PUD_SIZE) {
		ptep = (pte_t *)pudp;
		goto set_the_pte;
	}
	pmdp = pmd_alloc(&init_mm, pudp, ea);
	if (!pmdp)
		return -ENOMEM;
	if (map_page_size == PMD_SIZE) {
		ptep = pmdp_ptep(pmdp);
		goto set_the_pte;
	}
	ptep = pte_alloc_kernel(pmdp, ea);
	if (!ptep)
		return -ENOMEM;

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
	smp_wmb();
	return 0;
}

int radix__map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size)
{
	return __map_kernel_page(ea, pa, flags, map_page_size, -1, 0, 0);
}

#ifdef CONFIG_STRICT_KERNEL_RWX
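/*
 * Clear the given protection bits (e.g. _PAGE_WRITE or _PAGE_EXEC) on an
 * already-mapped kernel range. The walk locates the leaf entry backing each
 * page at whatever level it lives (pud, pmd or pte), updates it in place
 * and flushes the kernel TLB range once at the end.
 */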
void radix__change_memory_range(unsigned long start, unsigned long end,
				unsigned long clear)
{
	unsigned long idx;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = PAGE_ALIGN(end); // aligns up

	pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n",
		 start, end, clear);

	for (idx = start; idx < end; idx += PAGE_SIZE) {
		pgdp = pgd_offset_k(idx);
		p4dp = p4d_offset(pgdp, idx);
		pudp = pud_alloc(&init_mm, p4dp, idx);
		if (!pudp)
			continue;
		if (pud_is_leaf(*pudp)) {
			ptep = (pte_t *)pudp;
			goto update_the_pte;
		}
		pmdp = pmd_alloc(&init_mm, pudp, idx);
		if (!pmdp)
			continue;
		if (pmd_is_leaf(*pmdp)) {
			ptep = pmdp_ptep(pmdp);
			goto update_the_pte;
		}
		ptep = pte_alloc_kernel(pmdp, idx);
		if (!ptep)
			continue;
update_the_pte:
		radix__pte_update(&init_mm, idx, ptep, clear, 0, 0);
	}

	radix__flush_tlb_kernel_range(start, end);
}

void radix__mark_rodata_ro(void)
{
	unsigned long start, end;

	start = (unsigned long)_stext;
	end = (unsigned long)__init_begin;

	radix__change_memory_range(start, end, _PAGE_WRITE);
}

void radix__mark_initmem_nx(void)
{
	unsigned long start = (unsigned long)__init_begin;
	unsigned long end = (unsigned long)__init_end;

	radix__change_memory_range(start, end, _PAGE_EXEC);
}
#endif /* CONFIG_STRICT_KERNEL_RWX */

static inline void __meminit
print_mapping(unsigned long start, unsigned long end, unsigned long size, bool exec)
{
	char buf[10];

	if (end <= start)
		return;

	string_get_size(size, 1, STRING_UNITS_2, buf, sizeof(buf));

	pr_info("Mapped 0x%016lx-0x%016lx with %s pages%s\n", start, end, buf,
		exec ? " (exec)" : "");
}

static unsigned long next_boundary(unsigned long addr, unsigned long end)
{
#ifdef CONFIG_STRICT_KERNEL_RWX
	if (addr < __pa_symbol(__init_begin))
		return __pa_symbol(__init_begin);
#endif
	return end;
}

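/*
 * Map the physical range [start, end) into the kernel linear mapping,
 * picking the largest page size that alignment, the remaining gap and the
 * supported MMU page sizes allow (1G, then 2M, then the base page size).
 * Ranges overlapping kernel or interrupt vector text are mapped executable;
 * with STRICT_KERNEL_RWX, next_boundary() keeps a mapping from crossing
 * __init_begin so permissions can be changed independently later.
 */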
static int __meminit create_physical_mapping(unsigned long start,
					     unsigned long end,
					     unsigned long max_mapping_size,
					     int nid, pgprot_t _prot)
{
	unsigned long vaddr, addr, mapping_size = 0;
	bool prev_exec, exec = false;
	pgprot_t prot;
	int psize;

	start = ALIGN(start, PAGE_SIZE);
	end = ALIGN_DOWN(end, PAGE_SIZE);
	for (addr = start; addr < end; addr += mapping_size) {
		unsigned long gap, previous_size;
		int rc;

		gap = next_boundary(addr, end) - addr;
		if (gap > max_mapping_size)
			gap = max_mapping_size;
		previous_size = mapping_size;
		prev_exec = exec;

		if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
		    mmu_psize_defs[MMU_PAGE_1G].shift) {
			mapping_size = PUD_SIZE;
			psize = MMU_PAGE_1G;
		} else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
			   mmu_psize_defs[MMU_PAGE_2M].shift) {
			mapping_size = PMD_SIZE;
			psize = MMU_PAGE_2M;
		} else {
			mapping_size = PAGE_SIZE;
			psize = mmu_virtual_psize;
		}

		vaddr = (unsigned long)__va(addr);

		if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
		    overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size)) {
			prot = PAGE_KERNEL_X;
			exec = true;
		} else {
			prot = _prot;
			exec = false;
		}

		if (mapping_size != previous_size || exec != prev_exec) {
			print_mapping(start, addr, previous_size, prev_exec);
			start = addr;
		}

		rc = __map_kernel_page(vaddr, addr, prot, mapping_size, nid, start, end);
		if (rc)
			return rc;

		update_page_count(psize, 1);
	}

	print_mapping(start, addr, mapping_size, exec);
	return 0;
}

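/*
 * One-time boot setup of the radix page tables: create the linear mapping
 * for every memblock range, work out how many PID bits to use, allocate the
 * process table, and reserve the first non-zero PID as the "guard PID" for
 * init_mm (see the comment above init_mm.context.id below).
 */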
static void __init radix_init_pgtable(void)
{
	unsigned long rts_field;
	phys_addr_t start, end;
	u64 i;

	/* We don't support slb for radix */
	mmu_slb_size = 0;

	/*
	 * Create the linear mapping
	 */
	for_each_mem_range(i, &start, &end) {
		/*
		 * The memblock allocator is up at this point, so the
		 * page tables will be allocated within the range. No
		 * need for a node (which we don't have yet).
		 */

		if (end >= RADIX_VMALLOC_START) {
			pr_warn("Outside the supported range\n");
			continue;
		}

		WARN_ON(create_physical_mapping(start, end,
						radix_mem_block_size,
						-1, PAGE_KERNEL));
	}

	/* Find out how many PID bits are supported */
	if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
		if (!mmu_pid_bits)
			mmu_pid_bits = 20;
		mmu_base_pid = 1;
	} else if (cpu_has_feature(CPU_FTR_HVMODE)) {
		if (!mmu_pid_bits)
			mmu_pid_bits = 20;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		/*
		 * When KVM is possible, we only use the top half of the
		 * PID space to avoid collisions between host and guest PIDs
		 * which can cause problems due to prefetch when exiting the
		 * guest with AIL=3
		 */
		mmu_base_pid = 1 << (mmu_pid_bits - 1);
#else
		mmu_base_pid = 1;
#endif
	} else {
		/* The guest uses the bottom half of the PID space */
		if (!mmu_pid_bits)
			mmu_pid_bits = 19;
		mmu_base_pid = 1;
	}

	/*
	 * Allocate Partition table and process table for the
	 * host.
	 */
	BUG_ON(PRTB_SIZE_SHIFT > 36);
	process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT, -1, 0, 0);
	/*
	 * Fill in the process table.
	 */
	rts_field = radix__get_tree_size();
	process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);

	/*
	 * The init_mm context is given the first available (non-zero) PID,
	 * which is the "guard PID" and contains no page table. PIDR should
	 * never be set to zero because that duplicates the kernel address
	 * space at the 0x0... offset (quadrant 0)!
	 *
	 * An arbitrary PID that may later be allocated by the PID allocator
	 * for userspace processes must not be used either, because that
	 * would cause stale user mappings for that PID on CPUs outside of
	 * the TLB invalidation scheme (because it won't be in mm_cpumask).
	 *
	 * So permanently carve out one PID for the purpose of a guard PID.
	 */
	init_mm.context.id = mmu_base_pid;
	mmu_base_pid++;
}

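/*
 * Install partition table entry 0 for the host: dw0 holds the root of the
 * kernel radix tree (PATB_HR set), dw1 the location and size of the process
 * table (PATB_GR set).
 */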
static void __init radix_init_partition_table(void)
{
	unsigned long rts_field, dw0, dw1;

	mmu_partition_table_init();
	rts_field = radix__get_tree_size();
	dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
	dw1 = __pa(process_tb) | (PRTB_SIZE_SHIFT - 12) | PATB_GR;
	mmu_partition_table_set_entry(0, dw0, dw1, false);

	pr_info("Initializing Radix MMU\n");
}

static int __init get_idx_from_shift(unsigned int shift)
{
	int idx = -1;

	switch (shift) {
	case 0xc:
		idx = MMU_PAGE_4K;
		break;
	case 0x10:
		idx = MMU_PAGE_64K;
		break;
	case 0x15:
		idx = MMU_PAGE_2M;
		break;
	case 0x1e:
		idx = MMU_PAGE_1G;
		break;
	}
	return idx;
}

static int __init radix_dt_scan_page_sizes(unsigned long node,
					   const char *uname, int depth,
					   void *data)
{
	int size = 0;
	int shift, idx;
	unsigned int ap;
	const __be32 *prop;
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	/* Find MMU PID size */
	prop = of_get_flat_dt_prop(node, "ibm,mmu-pid-bits", &size);
	if (prop && size == 4)
		mmu_pid_bits = be32_to_cpup(prop);

	/* Grab page size encodings */
	prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
	if (!prop)
		return 0;

	pr_info("Page sizes from device-tree:\n");
	for (; size >= 4; size -= 4, ++prop) {

		struct mmu_psize_def *def;

		/* top 3 bits are the AP encoding */
		shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
		ap = be32_to_cpu(prop[0]) >> 29;
		pr_info("Page size shift = %d AP=0x%x\n", shift, ap);

		idx = get_idx_from_shift(shift);
		if (idx < 0)
			continue;

		def = &mmu_psize_defs[idx];
		def->shift = shift;
		def->ap = ap;
	}

	/* needed ? */
	cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
	return 1;
}

#ifdef CONFIG_MEMORY_HOTPLUG
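/*
 * Flat device-tree scan callback: read the LMB size from the
 * ibm,dynamic-reconfiguration-memory node's "ibm,lmb-size" property,
 * falling back to MIN_MEMORY_BLOCK_SIZE when it is absent or too short.
 */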
static int __init probe_memory_block_size(unsigned long node, const char *uname, int
					  depth, void *data)
{
	unsigned long *mem_block_size = (unsigned long *)data;
	const __be32 *prop;
	int len;

	if (depth != 1)
		return 0;

	if (strcmp(uname, "ibm,dynamic-reconfiguration-memory"))
		return 0;

	prop = of_get_flat_dt_prop(node, "ibm,lmb-size", &len);

	if (!prop || len < dt_root_size_cells * sizeof(__be32))
		/*
		 * Nothing in the device tree
		 */
		*mem_block_size = MIN_MEMORY_BLOCK_SIZE;
	else
		*mem_block_size = of_read_number(prop, dt_root_size_cells);
	return 1;
}

static unsigned long radix_memory_block_size(void)
{
	unsigned long mem_block_size = MIN_MEMORY_BLOCK_SIZE;

	/*
	 * OPAL firmware feature is set by now. Hence we are ok
	 * to test OPAL feature.
	 */
	if (firmware_has_feature(FW_FEATURE_OPAL))
		mem_block_size = 1UL * 1024 * 1024 * 1024;
	else
		of_scan_flat_dt(probe_memory_block_size, &mem_block_size);

	return mem_block_size;
}

#else /* CONFIG_MEMORY_HOTPLUG */

static unsigned long radix_memory_block_size(void)
{
	return 1UL * 1024 * 1024 * 1024;
}

#endif /* CONFIG_MEMORY_HOTPLUG */

void __init radix__early_init_devtree(void)
{
	int rc;

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
	if (!rc) {
		/*
		 * No page size details found in device tree.
		 * Let's assume we have page 4k and 64k support
		 */
		mmu_psize_defs[MMU_PAGE_4K].shift = 12;
		mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;

		mmu_psize_defs[MMU_PAGE_64K].shift = 16;
		mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
	}

	/*
	 * Max mapping size used when mapping pages. We don't use
	 * ppc_md.memory_block_size() here because this gets called
	 * early, before the machine probe has run. Also, the pseries
	 * implementation only checks for ibm,lmb-size. All hypervisors
	 * supporting radix do expose that device tree node.
	 */
	radix_mem_block_size = radix_memory_block_size();
	return;
}

static void radix_init_amor(void)
{
	/*
	 * In HV mode, we init AMOR (Authority Mask Override Register) so that
	 * the hypervisor and guest can setup IAMR (Instruction Authority Mask
	 * Register), enable key 0 and set it to 1.
	 *
	 * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
	 */
	mtspr(SPRN_AMOR, (3ul << 62));
}

void __init radix__early_init_mmu(void)
{
	unsigned long lpcr;

#ifdef CONFIG_PPC_64K_PAGES
	/* PAGE_SIZE mappings */
	mmu_virtual_psize = MMU_PAGE_64K;
#else
	mmu_virtual_psize = MMU_PAGE_4K;
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* vmemmap mapping */
	if (mmu_psize_defs[MMU_PAGE_2M].shift) {
		/*
		 * map vmemmap using 2M if available
		 */
		mmu_vmemmap_psize = MMU_PAGE_2M;
	} else
		mmu_vmemmap_psize = mmu_virtual_psize;
#endif
	/*
	 * initialize page table size
	 */
	__pte_index_size = RADIX_PTE_INDEX_SIZE;
	__pmd_index_size = RADIX_PMD_INDEX_SIZE;
	__pud_index_size = RADIX_PUD_INDEX_SIZE;
	__pgd_index_size = RADIX_PGD_INDEX_SIZE;
	__pud_cache_index = RADIX_PUD_INDEX_SIZE;
	__pte_table_size = RADIX_PTE_TABLE_SIZE;
	__pmd_table_size = RADIX_PMD_TABLE_SIZE;
	__pud_table_size = RADIX_PUD_TABLE_SIZE;
	__pgd_table_size = RADIX_PGD_TABLE_SIZE;

	__pmd_val_bits = RADIX_PMD_VAL_BITS;
	__pud_val_bits = RADIX_PUD_VAL_BITS;
	__pgd_val_bits = RADIX_PGD_VAL_BITS;

	__kernel_virt_start = RADIX_KERN_VIRT_START;
	__vmalloc_start = RADIX_VMALLOC_START;
	__vmalloc_end = RADIX_VMALLOC_END;
	__kernel_io_start = RADIX_KERN_IO_START;
	__kernel_io_end = RADIX_KERN_IO_END;
	vmemmap = (struct page *)RADIX_VMEMMAP_START;
	ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PCI
	pci_io_base = ISA_IO_BASE;
#endif
	__pte_frag_nr = RADIX_PTE_FRAG_NR;
	__pte_frag_size_shift = RADIX_PTE_FRAG_SIZE_SHIFT;
	__pmd_frag_nr = RADIX_PMD_FRAG_NR;
	__pmd_frag_size_shift = RADIX_PMD_FRAG_SIZE_SHIFT;

	radix_init_pgtable();

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
		radix_init_partition_table();
		radix_init_amor();
	} else {
		radix_init_pseries();
	}

	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);

	/* Switch to the guard PID before turning on MMU */
	radix__switch_mmu_context(NULL, &init_mm);
	tlbiel_all();
}

void radix__early_init_mmu_secondary(void)
{
	unsigned long lpcr;
	/*
	 * update partition table control register and UPRT
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);

		set_ptcr_when_no_uv(__pa(partition_tb) |
				    (PATB_SIZE_SHIFT - 12));

		radix_init_amor();
	}

	radix__switch_mmu_context(NULL, &init_mm);
	tlbiel_all();

	/* Make sure userspace can't change the AMR */
	mtspr(SPRN_UAMOR, 0);
}

void radix__mmu_cleanup_all(void)
{
	unsigned long lpcr;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
		set_ptcr_when_no_uv(0);
		powernv_set_nmmu_ptcr(0);
		radix__flush_tlb_all();
	}
}

#ifdef CONFIG_MEMORY_HOTPLUG
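/*
 * Helpers for memory hot-remove: each free_*_table() frees a page table
 * page once every entry in it is none, then clears the entry that pointed
 * to it at the level above. The remove_*_table() walkers below clear leaf
 * entries of any size and recurse into lower levels.
 */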
static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	pte_free_kernel(&init_mm, pte_start);
	pmd_clear(pmd);
}

static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	pmd_free(&init_mm, pmd_start);
	pud_clear(pud);
}

static void free_pud_table(pud_t *pud_start, p4d_t *p4d)
{
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		pud = pud_start + i;
		if (!pud_none(*pud))
			return;
	}

	pud_free(&init_mm, pud_start);
	p4d_clear(p4d);
}

static void remove_pte_table(pte_t *pte_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(next)) {
			/*
			 * The vmemmap_free() and remove_section_mapping()
			 * codepaths call us with aligned addresses.
			 */
			WARN_ONCE(1, "%s: unaligned range\n", __func__);
			continue;
		}

		pte_clear(&init_mm, addr, pte);
	}
}

static void __meminit remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
				       unsigned long end)
{
	unsigned long next;
	pte_t *pte_base;
	pmd_t *pmd;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_is_leaf(*pmd)) {
			if (!IS_ALIGNED(addr, PMD_SIZE) ||
			    !IS_ALIGNED(next, PMD_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}
			pte_clear(&init_mm, addr, (pte_t *)pmd);
			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next);
		free_pte_table(pte_base, pmd);
	}
}

static void __meminit remove_pud_table(pud_t *pud_start, unsigned long addr,
				       unsigned long end)
{
	unsigned long next;
	pmd_t *pmd_base;
	pud_t *pud;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_is_leaf(*pud)) {
			if (!IS_ALIGNED(addr, PUD_SIZE) ||
			    !IS_ALIGNED(next, PUD_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}
			pte_clear(&init_mm, addr, (pte_t *)pud);
			continue;
		}

		pmd_base = (pmd_t *)pud_page_vaddr(*pud);
		remove_pmd_table(pmd_base, addr, next);
		free_pmd_table(pmd_base, pud);
	}
}

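/*
 * Tear down the kernel mapping for [start, end): walk from the P4D level
 * down, clear leaf entries of any size and free page table pages that end
 * up empty, then flush the kernel TLB range. Used by memory hot-remove and
 * the vmemmap teardown path.
 */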
static void __meminit remove_pagetable(unsigned long start, unsigned long end)
{
	unsigned long addr, next;
	pud_t *pud_base;
	pgd_t *pgd;
	p4d_t *p4d;

	spin_lock(&init_mm.page_table_lock);

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		p4d = p4d_offset(pgd, addr);
		if (!p4d_present(*p4d))
			continue;

		if (p4d_is_leaf(*p4d)) {
			if (!IS_ALIGNED(addr, P4D_SIZE) ||
			    !IS_ALIGNED(next, P4D_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			pte_clear(&init_mm, addr, (pte_t *)pgd);
			continue;
		}

		pud_base = (pud_t *)p4d_page_vaddr(*p4d);
		remove_pud_table(pud_base, addr, next);
		free_pud_table(pud_base, p4d);
	}

	spin_unlock(&init_mm.page_table_lock);
	radix__flush_tlb_kernel_range(start, end);
}

int __meminit radix__create_section_mapping(unsigned long start,
					    unsigned long end, int nid,
					    pgprot_t prot)
{
	if (end >= RADIX_VMALLOC_START) {
		pr_warn("Outside the supported range\n");
		return -1;
	}

	return create_physical_mapping(__pa(start), __pa(end),
				       radix_mem_block_size, nid, prot);
}

int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
{
	remove_pagetable(start, end);
	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static int __map_kernel_page_nid(unsigned long ea, unsigned long pa,
				 pgprot_t flags, unsigned int map_page_size,
				 int nid)
{
	return __map_kernel_page(ea, pa, flags, map_page_size, nid, 0, 0);
}

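/*
 * Map one chunk of the vmemmap (struct page array) at the given page size,
 * placing the backing page tables on the node that owns the physical
 * memory. There is no fallback path, hence the BUG_ON() on failure.
 */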
int __meminit radix__vmemmap_create_mapping(unsigned long start,
				      unsigned long page_size,
				      unsigned long phys)
{
	/* Create a PTE encoding */
	unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;
	int nid = early_pfn_to_nid(phys >> PAGE_SHIFT);
	int ret;

	if ((start + page_size) >= RADIX_VMEMMAP_END) {
		pr_warn("Outside the supported range\n");
		return -1;
	}

	ret = __map_kernel_page_nid(start, phys, __pgprot(flags), page_size, nid);
	BUG_ON(ret);

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
{
	remove_pagetable(start, start + page_size);
}
#endif
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
					 pmd_t *pmdp, unsigned long clr,
					 unsigned long set)
{
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
#endif

	old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
	trace_hugepage_update(addr, old, clr, set);

	return old;
}

pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			pmd_t *pmdp)

{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
	VM_BUG_ON(pmd_devmap(*pmdp));
	/*
	 * khugepaged calls this for normal pmd
	 */
	pmd = *pmdp;
	pmd_clear(pmdp);

	/*
	 * pmdp_collapse_flush needs to ensure that there are no parallel GUP
	 * walks after this call, so that we have a stable page refcount when
	 * collapsing a page. We don't allow collapsing a page on which GUP
	 * has taken a reference. We can ensure that by sending an IPI,
	 * because GUP walks happen with IRQs disabled.
	 */
	serialize_against_pte_lookup(vma->vm_mm);

	radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);

	return pmd;
}

/*
 * For us pgtable_t is pte_t *. In order to save the deposited
 * page table, we consider the allocated page table as a list
 * head. On withdraw we need to make sure we zero out the used
 * list_head memory area.
 */
void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				 pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pte_t *ptep;
	pgtable_t pgtable;
	struct list_head *lh;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	*ptep = __pte(0);
	ptep++;
	*ptep = __pte(0);
	return pgtable;
}

pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
				     unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	unsigned long old;

	old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	return old_pmd;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
				  pte_t entry, unsigned long address, int psize)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
					      _PAGE_RW | _PAGE_EXEC);

	unsigned long change = pte_val(entry) ^ pte_val(*ptep);
	/*
	 * To avoid an NMMU hang while relaxing access, we need to mark
	 * the pte invalid in between.
	 */
	if ((change & _PAGE_RW) && atomic_read(&mm->context.copros) > 0) {
		unsigned long old_pte, new_pte;

		old_pte = __radix_pte_update(ptep, _PAGE_PRESENT, _PAGE_INVALID);
		/*
		 * new value of pte
		 */
		new_pte = old_pte | set;
		radix__flush_tlb_page_psize(mm, address, psize);
		__radix_pte_update(ptep, _PAGE_INVALID, new_pte);
	} else {
		__radix_pte_update(ptep, 0, set);
		/*
		 * Book3S does not require a TLB flush when relaxing access
		 * restrictions when the address space is not attached to a
		 * NMMU, because the core MMU will reload the pte after taking
		 * an access fault, which is defined by the architecture.
		 */
	}
	/* See ptesync comment in radix__set_pte_at */
}

void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep,
				    pte_t old_pte, pte_t pte)
{
	struct mm_struct *mm = vma->vm_mm;

	/*
	 * To avoid NMMU hang while relaxing access we need to flush the tlb before
	 * we set the new value. We need to do this only for radix, because hash
	 * translation does flush when updating the linux pte.
	 */
	if (is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) &&
	    (atomic_read(&mm->context.copros) > 0))
		radix__flush_tlb_page(vma, addr);

	set_pte_at(mm, addr, ptep, pte);
}

int __init arch_ioremap_pud_supported(void)
{
	/* HPT does not cope with large pages in the vmalloc area */
	return radix_enabled();
}

int __init arch_ioremap_pmd_supported(void)
{
	return radix_enabled();
}

int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
{
	return 0;
}

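/*
 * Huge-vmap/ioremap helpers: pud_set_huge() and pmd_set_huge() install a
 * leaf entry directly at the PUD/PMD level (radix only); the matching
 * *_clear_huge() and *_free_*_page() helpers tear down a huge entry or a
 * previously populated lower-level table.
 */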
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
	pte_t *ptep = (pte_t *)pud;
	pte_t new_pud = pfn_pte(__phys_to_pfn(addr), prot);

	if (!radix_enabled())
		return 0;

	set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pud);

	return 1;
}

int pud_clear_huge(pud_t *pud)
{
	if (pud_huge(*pud)) {
		pud_clear(pud);
		return 1;
	}

	return 0;
}

int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
	pmd_t *pmd;
	int i;

	pmd = (pmd_t *)pud_page_vaddr(*pud);
	pud_clear(pud);

	flush_tlb_kernel_range(addr, addr + PUD_SIZE);

	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (!pmd_none(pmd[i])) {
			pte_t *pte;
			pte = (pte_t *)pmd_page_vaddr(pmd[i]);

			pte_free_kernel(&init_mm, pte);
		}
	}

	pmd_free(&init_mm, pmd);

	return 1;
}

int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
	pte_t *ptep = (pte_t *)pmd;
	pte_t new_pmd = pfn_pte(__phys_to_pfn(addr), prot);

	if (!radix_enabled())
		return 0;

	set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pmd);

	return 1;
}

int pmd_clear_huge(pmd_t *pmd)
{
	if (pmd_huge(*pmd)) {
		pmd_clear(pmd);
		return 1;
	}

	return 0;
}

int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
	pte_t *pte;

	pte = (pte_t *)pmd_page_vaddr(*pmd);
	pmd_clear(pmd);

	flush_tlb_kernel_range(addr, addr + PMD_SIZE);

	pte_free_kernel(&init_mm, pte);

	return 1;
}

int __init arch_ioremap_p4d_supported(void)
{
	return 0;
}