Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +10001/*
2 * Page table handling routines for radix page table.
3 *
4 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
Michael Ellermanbd350f72017-08-30 17:41:29 +100011
12#define pr_fmt(fmt) "radix-mmu: " fmt
13
Nicholas Piggind38153f2019-06-10 13:08:17 +100014#include <linux/io.h>
Michael Ellermanbd350f72017-08-30 17:41:29 +100015#include <linux/kernel.h>
Ingo Molnar589ee622017-02-04 00:16:44 +010016#include <linux/sched/mm.h>
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +100017#include <linux/memblock.h>
18#include <linux/of_fdt.h>
Balbir Singh7614ff32017-06-29 03:04:09 +100019#include <linux/mm.h>
Michael Ellerman6deb6b42017-08-30 17:41:17 +100020#include <linux/string_helpers.h>
Balbir Singh4dd5f8a92018-02-07 17:35:51 +110021#include <linux/stop_machine.h>
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +100022
23#include <asm/pgtable.h>
24#include <asm/pgalloc.h>
Nicholas Piggineeb715c2018-02-07 11:20:02 +100025#include <asm/mmu_context.h>
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +100026#include <asm/dma.h>
27#include <asm/machdep.h>
28#include <asm/mmu.h>
29#include <asm/firmware.h>
Alistair Popple1d0761d2016-12-14 13:36:51 +110030#include <asm/powernv.h>
Michael Ellerman9abcc982017-06-06 15:48:57 +100031#include <asm/sections.h>
Balbir Singh04284912017-04-11 15:23:25 +100032#include <asm/trace.h>
Michael Ellerman890274c2019-04-18 16:51:24 +100033#include <asm/uaccess.h>
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +100034
Aneesh Kumar K.Vbde3eb62016-04-29 23:26:30 +100035#include <trace/events/thp.h>
36
Benjamin Herrenschmidta25bd722017-07-24 14:26:06 +100037unsigned int mmu_pid_bits;
38unsigned int mmu_base_pid;
39
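/*
 * A descriptive note (editorial addition): bare-metal hook for
 * register_process_table. It points entry 0 of the partition table at the
 * host process table; the existing radix root word (patb0) is preserved and
 * only the process-table word (patb1) is rewritten.
 */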
Aneesh Kumar K.V83209bc2016-07-13 15:05:28 +053040static int native_register_process_table(unsigned long base, unsigned long pg_sz,
41 unsigned long table_size)
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +100042{
Suraj Jitindar Singh7cd2a862017-08-03 14:15:51 +100043 unsigned long patb0, patb1;
Aneesh Kumar K.V83209bc2016-07-13 15:05:28 +053044
Suraj Jitindar Singh7cd2a862017-08-03 14:15:51 +100045 patb0 = be64_to_cpu(partition_tb[0].patb0);
46 patb1 = base | table_size | PATB_GR;
47
48 mmu_partition_table_set_entry(0, patb0, patb1);
49
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +100050 return 0;
51}
52
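/*
 * Descriptive note: boot-time page table allocator, used before the slab
 * allocator is available. It grabs a naturally aligned block from memblock,
 * preferring the given node and address range when hints are provided, and
 * panics if the allocation fails.
 */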
Nicholas Piggin2ad452f2018-02-14 01:08:24 +100053static __ref void *early_alloc_pgtable(unsigned long size, int nid,
54 unsigned long region_start, unsigned long region_end)
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +100055{
Mike Rapoportf8067142019-03-07 16:30:48 -080056 phys_addr_t min_addr = MEMBLOCK_LOW_LIMIT;
57 phys_addr_t max_addr = MEMBLOCK_ALLOC_ANYWHERE;
Mike Rapoport8a7f97b2019-03-11 23:30:31 -070058 void *ptr;
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +100059
Mike Rapoportf8067142019-03-07 16:30:48 -080060 if (region_start)
61 min_addr = region_start;
62 if (region_end)
63 max_addr = region_end;
Nicholas Piggin2ad452f2018-02-14 01:08:24 +100064
Mike Rapoport8a7f97b2019-03-11 23:30:31 -070065 ptr = memblock_alloc_try_nid(size, size, min_addr, max_addr, nid);
66
67 if (!ptr)
68 panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa max_addr=%pa\n",
69 __func__, size, size, nid, &min_addr, &max_addr);
70
71 return ptr;
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +100072}
73
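/*
 * Descriptive note: early variant of __map_kernel_page. Walks (and, where
 * levels are missing, allocates from memblock) the kernel page tables and
 * installs a leaf entry for ea -> pa at the requested page size (PUD, PMD
 * or base page).
 */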
Nicholas Piggin0633daf2018-02-14 01:08:23 +100074static int early_map_kernel_page(unsigned long ea, unsigned long pa,
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +100075 pgprot_t flags,
Nicholas Piggin2ad452f2018-02-14 01:08:24 +100076 unsigned int map_page_size,
77 int nid,
78 unsigned long region_start, unsigned long region_end)
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +100079{
Nicholas Piggin2ad452f2018-02-14 01:08:24 +100080 unsigned long pfn = pa >> PAGE_SHIFT;
Nicholas Piggin0633daf2018-02-14 01:08:23 +100081 pgd_t *pgdp;
82 pud_t *pudp;
83 pmd_t *pmdp;
84 pte_t *ptep;
85
86 pgdp = pgd_offset_k(ea);
87 if (pgd_none(*pgdp)) {
Nicholas Piggin2ad452f2018-02-14 01:08:24 +100088 pudp = early_alloc_pgtable(PUD_TABLE_SIZE, nid,
89 region_start, region_end);
Nicholas Piggin0633daf2018-02-14 01:08:23 +100090 pgd_populate(&init_mm, pgdp, pudp);
91 }
92 pudp = pud_offset(pgdp, ea);
93 if (map_page_size == PUD_SIZE) {
94 ptep = (pte_t *)pudp;
95 goto set_the_pte;
96 }
97 if (pud_none(*pudp)) {
Nicholas Piggin2ad452f2018-02-14 01:08:24 +100098 pmdp = early_alloc_pgtable(PMD_TABLE_SIZE, nid,
99 region_start, region_end);
Nicholas Piggin0633daf2018-02-14 01:08:23 +1000100 pud_populate(&init_mm, pudp, pmdp);
101 }
102 pmdp = pmd_offset(pudp, ea);
103 if (map_page_size == PMD_SIZE) {
104 ptep = pmdp_ptep(pmdp);
105 goto set_the_pte;
106 }
107 if (!pmd_present(*pmdp)) {
Nicholas Piggin2ad452f2018-02-14 01:08:24 +1000108 ptep = early_alloc_pgtable(PAGE_SIZE, nid,
109 region_start, region_end);
Nicholas Piggin0633daf2018-02-14 01:08:23 +1000110 pmd_populate_kernel(&init_mm, pmdp, ptep);
111 }
112 ptep = pte_offset_kernel(pmdp, ea);
113
114set_the_pte:
Nicholas Piggin2ad452f2018-02-14 01:08:24 +1000115 set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
Nicholas Piggin0633daf2018-02-14 01:08:23 +1000116 smp_wmb();
117 return 0;
118}
119
Nicholas Piggin2ad452f2018-02-14 01:08:24 +1000120/*
121 * nid, region_start, and region_end are hints to try to place the page
122 * table memory in the same node or region.
123 */
124static int __map_kernel_page(unsigned long ea, unsigned long pa,
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +1000125 pgprot_t flags,
Nicholas Piggin2ad452f2018-02-14 01:08:24 +1000126 unsigned int map_page_size,
127 int nid,
128 unsigned long region_start, unsigned long region_end)
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +1000129{
Nicholas Piggin2ad452f2018-02-14 01:08:24 +1000130 unsigned long pfn = pa >> PAGE_SHIFT;
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +1000131 pgd_t *pgdp;
132 pud_t *pudp;
133 pmd_t *pmdp;
134 pte_t *ptep;
135 /*
 136 * Make sure task size is correct as per the max addr
137 */
138 BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);
Nicholas Piggin0633daf2018-02-14 01:08:23 +1000139
Aneesh Kumar K.V0034d392019-04-17 18:29:14 +0530140#ifdef CONFIG_PPC_64K_PAGES
141 BUILD_BUG_ON(RADIX_KERN_MAP_SIZE != (1UL << MAX_EA_BITS_PER_CONTEXT));
142#endif
143
Nicholas Piggin2ad452f2018-02-14 01:08:24 +1000144 if (unlikely(!slab_is_available()))
145 return early_map_kernel_page(ea, pa, flags, map_page_size,
146 nid, region_start, region_end);
Nicholas Piggin0633daf2018-02-14 01:08:23 +1000147
Nicholas Piggin2ad452f2018-02-14 01:08:24 +1000148 /*
149 * Should make page table allocation functions be able to take a
150 * node, so we can place kernel page tables on the right nodes after
151 * boot.
152 */
Nicholas Piggin0633daf2018-02-14 01:08:23 +1000153 pgdp = pgd_offset_k(ea);
154 pudp = pud_alloc(&init_mm, pgdp, ea);
155 if (!pudp)
156 return -ENOMEM;
157 if (map_page_size == PUD_SIZE) {
158 ptep = (pte_t *)pudp;
159 goto set_the_pte;
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +1000160 }
Nicholas Piggin0633daf2018-02-14 01:08:23 +1000161 pmdp = pmd_alloc(&init_mm, pudp, ea);
162 if (!pmdp)
163 return -ENOMEM;
164 if (map_page_size == PMD_SIZE) {
165 ptep = pmdp_ptep(pmdp);
166 goto set_the_pte;
167 }
168 ptep = pte_alloc_kernel(pmdp, ea);
169 if (!ptep)
170 return -ENOMEM;
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +1000171
172set_the_pte:
Nicholas Piggin2ad452f2018-02-14 01:08:24 +1000173 set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +1000174 smp_wmb();
175 return 0;
176}
177
Nicholas Piggin2ad452f2018-02-14 01:08:24 +1000178int radix__map_kernel_page(unsigned long ea, unsigned long pa,
179 pgprot_t flags,
180 unsigned int map_page_size)
181{
182 return __map_kernel_page(ea, pa, flags, map_page_size, -1, 0, 0);
183}
184
Balbir Singh7614ff32017-06-29 03:04:09 +1000185#ifdef CONFIG_STRICT_KERNEL_RWX
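/*
 * Descriptive note: walk the kernel linear mapping for [start, end) and
 * clear the given permission bits (e.g. _PAGE_WRITE or _PAGE_EXEC) from each
 * leaf entry, then flush the kernel TLB range. Used to enforce
 * STRICT_KERNEL_RWX.
 */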
Michael Ellermanb134bd92017-07-14 16:51:21 +1000186void radix__change_memory_range(unsigned long start, unsigned long end,
187 unsigned long clear)
Balbir Singh7614ff32017-06-29 03:04:09 +1000188{
Balbir Singh7614ff32017-06-29 03:04:09 +1000189 unsigned long idx;
190 pgd_t *pgdp;
191 pud_t *pudp;
192 pmd_t *pmdp;
193 pte_t *ptep;
194
195 start = ALIGN_DOWN(start, PAGE_SIZE);
196 end = PAGE_ALIGN(end); // aligns up
197
Michael Ellermanb134bd92017-07-14 16:51:21 +1000198 pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n",
199 start, end, clear);
Balbir Singh7614ff32017-06-29 03:04:09 +1000200
201 for (idx = start; idx < end; idx += PAGE_SIZE) {
202 pgdp = pgd_offset_k(idx);
203 pudp = pud_alloc(&init_mm, pgdp, idx);
204 if (!pudp)
205 continue;
206 if (pud_huge(*pudp)) {
207 ptep = (pte_t *)pudp;
208 goto update_the_pte;
209 }
210 pmdp = pmd_alloc(&init_mm, pudp, idx);
211 if (!pmdp)
212 continue;
213 if (pmd_huge(*pmdp)) {
214 ptep = pmdp_ptep(pmdp);
215 goto update_the_pte;
216 }
217 ptep = pte_alloc_kernel(pmdp, idx);
218 if (!ptep)
219 continue;
220update_the_pte:
Michael Ellermanb134bd92017-07-14 16:51:21 +1000221 radix__pte_update(&init_mm, idx, ptep, clear, 0, 0);
Balbir Singh7614ff32017-06-29 03:04:09 +1000222 }
223
224 radix__flush_tlb_kernel_range(start, end);
225}
Michael Ellermanb134bd92017-07-14 16:51:21 +1000226
227void radix__mark_rodata_ro(void)
228{
229 unsigned long start, end;
230
231 start = (unsigned long)_stext;
232 end = (unsigned long)__init_begin;
233
234 radix__change_memory_range(start, end, _PAGE_WRITE);
235}
Michael Ellerman029d9252017-07-14 16:51:23 +1000236
237void radix__mark_initmem_nx(void)
238{
239 unsigned long start = (unsigned long)__init_begin;
240 unsigned long end = (unsigned long)__init_end;
241
242 radix__change_memory_range(start, end, _PAGE_EXEC);
243}
Balbir Singh7614ff32017-06-29 03:04:09 +1000244#endif /* CONFIG_STRICT_KERNEL_RWX */
245
Michael Ellermanafb6d062018-10-17 23:53:38 +1100246static inline void __meminit
247print_mapping(unsigned long start, unsigned long end, unsigned long size, bool exec)
Reza Arbabb5200ec2017-01-16 13:07:43 -0600248{
Michael Ellerman6deb6b42017-08-30 17:41:17 +1000249 char buf[10];
250
Reza Arbabb5200ec2017-01-16 13:07:43 -0600251 if (end <= start)
252 return;
253
Michael Ellerman6deb6b42017-08-30 17:41:17 +1000254 string_get_size(size, 1, STRING_UNITS_2, buf, sizeof(buf));
255
Michael Ellermanafb6d062018-10-17 23:53:38 +1100256 pr_info("Mapped 0x%016lx-0x%016lx with %s pages%s\n", start, end, buf,
257 exec ? " (exec)" : "");
Reza Arbabb5200ec2017-01-16 13:07:43 -0600258}
259
Michael Ellerman232aa402018-08-14 22:37:32 +1000260static unsigned long next_boundary(unsigned long addr, unsigned long end)
261{
262#ifdef CONFIG_STRICT_KERNEL_RWX
263 if (addr < __pa_symbol(__init_begin))
264 return __pa_symbol(__init_begin);
265#endif
266 return end;
267}
268
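/*
 * Descriptive note: map a physical range into the kernel linear mapping,
 * stepping with the largest page size that alignment, remaining length and
 * MMU support allow (1G, 2M or the base page size). With STRICT_KERNEL_RWX,
 * next_boundary() keeps mappings from crossing __init_begin so that the
 * text/rodata region can have its permissions changed later without having
 * to split huge mappings.
 */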
Reza Arbabb5200ec2017-01-16 13:07:43 -0600269static int __meminit create_physical_mapping(unsigned long start,
Nicholas Piggin2ad452f2018-02-14 01:08:24 +1000270 unsigned long end,
271 int nid)
Reza Arbabb5200ec2017-01-16 13:07:43 -0600272{
Michael Ellerman9abcc982017-06-06 15:48:57 +1000273 unsigned long vaddr, addr, mapping_size = 0;
Michael Ellermanafb6d062018-10-17 23:53:38 +1100274 bool prev_exec, exec = false;
Michael Ellerman9abcc982017-06-06 15:48:57 +1000275 pgprot_t prot;
Aneesh Kumar K.Va2dc0092018-08-13 11:14:57 +0530276 int psize;
Reza Arbabb5200ec2017-01-16 13:07:43 -0600277
278 start = _ALIGN_UP(start, PAGE_SIZE);
279 for (addr = start; addr < end; addr += mapping_size) {
280 unsigned long gap, previous_size;
281 int rc;
282
Michael Ellerman232aa402018-08-14 22:37:32 +1000283 gap = next_boundary(addr, end) - addr;
Reza Arbabb5200ec2017-01-16 13:07:43 -0600284 previous_size = mapping_size;
Michael Ellermanafb6d062018-10-17 23:53:38 +1100285 prev_exec = exec;
Reza Arbabb5200ec2017-01-16 13:07:43 -0600286
287 if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
Michael Ellerman57306c62018-08-14 22:01:44 +1000288 mmu_psize_defs[MMU_PAGE_1G].shift) {
Reza Arbabb5200ec2017-01-16 13:07:43 -0600289 mapping_size = PUD_SIZE;
Aneesh Kumar K.Va2dc0092018-08-13 11:14:57 +0530290 psize = MMU_PAGE_1G;
291 } else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
292 mmu_psize_defs[MMU_PAGE_2M].shift) {
Reza Arbabb5200ec2017-01-16 13:07:43 -0600293 mapping_size = PMD_SIZE;
Aneesh Kumar K.Va2dc0092018-08-13 11:14:57 +0530294 psize = MMU_PAGE_2M;
295 } else {
Reza Arbabb5200ec2017-01-16 13:07:43 -0600296 mapping_size = PAGE_SIZE;
Aneesh Kumar K.Va2dc0092018-08-13 11:14:57 +0530297 psize = mmu_virtual_psize;
298 }
Reza Arbabb5200ec2017-01-16 13:07:43 -0600299
Michael Ellerman9abcc982017-06-06 15:48:57 +1000300 vaddr = (unsigned long)__va(addr);
301
Balbir Singh7f6d4982017-06-29 03:04:10 +1000302 if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
Michael Ellermanafb6d062018-10-17 23:53:38 +1100303 overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size)) {
Michael Ellerman9abcc982017-06-06 15:48:57 +1000304 prot = PAGE_KERNEL_X;
Michael Ellermanafb6d062018-10-17 23:53:38 +1100305 exec = true;
306 } else {
Michael Ellerman9abcc982017-06-06 15:48:57 +1000307 prot = PAGE_KERNEL;
Michael Ellermanafb6d062018-10-17 23:53:38 +1100308 exec = false;
309 }
310
311 if (mapping_size != previous_size || exec != prev_exec) {
312 print_mapping(start, addr, previous_size, prev_exec);
313 start = addr;
314 }
Michael Ellerman9abcc982017-06-06 15:48:57 +1000315
Nicholas Piggin2ad452f2018-02-14 01:08:24 +1000316 rc = __map_kernel_page(vaddr, addr, prot, mapping_size, nid, start, end);
Reza Arbabb5200ec2017-01-16 13:07:43 -0600317 if (rc)
318 return rc;
Aneesh Kumar K.Va2dc0092018-08-13 11:14:57 +0530319
320 update_page_count(psize, 1);
Reza Arbabb5200ec2017-01-16 13:07:43 -0600321 }
322
Michael Ellermanafb6d062018-10-17 23:53:38 +1100323 print_mapping(start, addr, mapping_size, exec);
Reza Arbabb5200ec2017-01-16 13:07:43 -0600324 return 0;
325}
326
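/*
 * Descriptive note: boot-time radix setup. Creates the linear mapping for
 * each memblock region, sizes the PID space, allocates and fills the host
 * process table, registers it via register_process_table(), and gives
 * init_mm the guard PID described in the comment at the end of this
 * function.
 */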
YueHaibingd667edc2019-05-04 18:24:27 +0800327static void __init radix_init_pgtable(void)
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +1000328{
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +1000329 unsigned long rts_field;
330 struct memblock_region *reg;
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +1000331
332 /* We don't support slb for radix */
333 mmu_slb_size = 0;
334 /*
335 * Create the linear mapping, using standard page size for now
336 */
Nicholas Piggin2ad452f2018-02-14 01:08:24 +1000337 for_each_memblock(memory, reg) {
338 /*
339 * The memblock allocator is up at this point, so the
340 * page tables will be allocated within the range. No
 341 * need for a node (which we don't have yet).
342 */
Aneesh Kumar K.Ve0909392019-04-17 18:29:15 +0530343
344 if ((reg->base + reg->size) >= RADIX_VMALLOC_START) {
Colin Ian Kingf341d892019-04-23 16:10:17 +0100345 pr_warn("Outside the supported range\n");
Aneesh Kumar K.Ve0909392019-04-17 18:29:15 +0530346 continue;
347 }
348
Reza Arbabb5200ec2017-01-16 13:07:43 -0600349 WARN_ON(create_physical_mapping(reg->base,
Nicholas Piggin2ad452f2018-02-14 01:08:24 +1000350 reg->base + reg->size,
351 -1));
352 }
Benjamin Herrenschmidta25bd722017-07-24 14:26:06 +1000353
354 /* Find out how many PID bits are supported */
355 if (cpu_has_feature(CPU_FTR_HVMODE)) {
356 if (!mmu_pid_bits)
357 mmu_pid_bits = 20;
358#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
359 /*
360 * When KVM is possible, we only use the top half of the
361 * PID space to avoid collisions between host and guest PIDs
362 * which can cause problems due to prefetch when exiting the
363 * guest with AIL=3
364 */
365 mmu_base_pid = 1 << (mmu_pid_bits - 1);
366#else
367 mmu_base_pid = 1;
368#endif
369 } else {
370 /* The guest uses the bottom half of the PID space */
371 if (!mmu_pid_bits)
372 mmu_pid_bits = 19;
373 mmu_base_pid = 1;
374 }
375
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +1000376 /*
 377 * Allocate the process table for the host (the partition table is
 378 * set up separately for bare metal in radix_init_partition_table()).
379 */
Benjamin Herrenschmidta25bd722017-07-24 14:26:06 +1000380 BUG_ON(PRTB_SIZE_SHIFT > 36);
Nicholas Piggin2ad452f2018-02-14 01:08:24 +1000381 process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT, -1, 0, 0);
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +1000382 /*
383 * Fill in the process table.
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +1000384 */
Aneesh Kumar K.Vb23d9c52016-06-17 11:40:36 +0530385 rts_field = radix__get_tree_size();
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +1000386 process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
387 /*
 388 * Fill in the partition table. We are supposed to use the effective address
 389 * of the process table here, but our linear mapping also enables us to use the
390 * physical address here.
391 */
Michael Ellermaneea81482016-08-04 15:32:06 +1000392 register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +1000393 pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
Paul Mackerras7a70d722017-02-27 14:32:41 +1100394 asm volatile("ptesync" : : : "memory");
395 asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
396 "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
397 asm volatile("eieio; tlbsync; ptesync" : : : "memory");
Balbir Singh04284912017-04-11 15:23:25 +1000398 trace_tlbie(0, 0, TLBIEL_INVAL_SET_LPID, 0, 2, 1, 1);
Nicholas Piggineeb715c2018-02-07 11:20:02 +1000399
400 /*
401 * The init_mm context is given the first available (non-zero) PID,
402 * which is the "guard PID" and contains no page table. PIDR should
403 * never be set to zero because that duplicates the kernel address
404 * space at the 0x0... offset (quadrant 0)!
405 *
406 * An arbitrary PID that may later be allocated by the PID allocator
407 * for userspace processes must not be used either, because that
408 * would cause stale user mappings for that PID on CPUs outside of
409 * the TLB invalidation scheme (because it won't be in mm_cpumask).
410 *
411 * So permanently carve out one PID for the purpose of a guard PID.
412 */
413 init_mm.context.id = mmu_base_pid;
414 mmu_base_pid++;
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +1000415}
416
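/*
 * Descriptive note: bare-metal only. Allocates the partition table and
 * points entry 0 at the kernel radix tree; PATB_HR selects radix
 * translation for this partition.
 */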
417static void __init radix_init_partition_table(void)
418{
Paul Mackerras9d661952016-11-21 16:00:58 +1100419 unsigned long rts_field, dw0;
Aneesh Kumar K.Vb23d9c52016-06-17 11:40:36 +0530420
Paul Mackerras9d661952016-11-21 16:00:58 +1100421 mmu_partition_table_init();
Aneesh Kumar K.Vb23d9c52016-06-17 11:40:36 +0530422 rts_field = radix__get_tree_size();
Paul Mackerras9d661952016-11-21 16:00:58 +1100423 dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
424 mmu_partition_table_set_entry(0, dw0, 0);
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +1000425
Aneesh Kumar K.V56547412016-07-13 15:05:25 +0530426 pr_info("Initializing Radix MMU\n");
427 pr_info("Partition table %p\n", partition_tb);
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +1000428}
429
430void __init radix_init_native(void)
431{
Michael Ellermaneea81482016-08-04 15:32:06 +1000432 register_process_table = native_register_process_table;
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +1000433}
434
435static int __init get_idx_from_shift(unsigned int shift)
436{
437 int idx = -1;
438
439 switch (shift) {
440 case 0xc:
441 idx = MMU_PAGE_4K;
442 break;
443 case 0x10:
444 idx = MMU_PAGE_64K;
445 break;
446 case 0x15:
447 idx = MMU_PAGE_2M;
448 break;
449 case 0x1e:
450 idx = MMU_PAGE_1G;
451 break;
452 }
453 return idx;
454}
455
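/*
 * Descriptive note: flat device-tree scan callback. On the "cpu" node it
 * picks up the supported MMU PID width ("ibm,mmu-pid-bits") and decodes the
 * radix AP encodings ("ibm,processor-radix-AP-encodings") into
 * mmu_psize_defs[].
 */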
456static int __init radix_dt_scan_page_sizes(unsigned long node,
457 const char *uname, int depth,
458 void *data)
459{
460 int size = 0;
461 int shift, idx;
462 unsigned int ap;
463 const __be32 *prop;
464 const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
465
466 /* We are scanning "cpu" nodes only */
467 if (type == NULL || strcmp(type, "cpu") != 0)
468 return 0;
469
Benjamin Herrenschmidta25bd722017-07-24 14:26:06 +1000470 /* Find MMU PID size */
471 prop = of_get_flat_dt_prop(node, "ibm,mmu-pid-bits", &size);
472 if (prop && size == 4)
473 mmu_pid_bits = be32_to_cpup(prop);
474
475 /* Grab page size encodings */
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +1000476 prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
477 if (!prop)
478 return 0;
479
480 pr_info("Page sizes from device-tree:\n");
481 for (; size >= 4; size -= 4, ++prop) {
482
483 struct mmu_psize_def *def;
484
 485 /* top 3 bits are the AP encoding */
486 shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
487 ap = be32_to_cpu(prop[0]) >> 29;
Balbir Singhac8d3812016-11-05 15:24:22 +1100488 pr_info("Page size shift = %d AP=0x%x\n", shift, ap);
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +1000489
490 idx = get_idx_from_shift(shift);
491 if (idx < 0)
492 continue;
493
494 def = &mmu_psize_defs[idx];
495 def->shift = shift;
496 def->ap = ap;
497 }
498
499 /* needed ? */
500 cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
501 return 1;
502}
503
Michael Ellerman2537b092016-07-26 21:55:27 +1000504void __init radix__early_init_devtree(void)
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +1000505{
506 int rc;
507
508 /*
509 * Try to find the available page sizes in the device-tree
510 */
511 rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
512 if (rc != 0) /* Found */
513 goto found;
514 /*
 515 * let's assume we have 4K and 64K page support
516 */
517 mmu_psize_defs[MMU_PAGE_4K].shift = 12;
518 mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;
519
520 mmu_psize_defs[MMU_PAGE_64K].shift = 16;
521 mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
522found:
523#ifdef CONFIG_SPARSEMEM_VMEMMAP
524 if (mmu_psize_defs[MMU_PAGE_2M].shift) {
525 /*
526 * map vmemmap using 2M if available
527 */
528 mmu_vmemmap_psize = MMU_PAGE_2M;
529 }
530#endif /* CONFIG_SPARSEMEM_VMEMMAP */
531 return;
532}
533
Balbir Singhee97b6b2016-11-15 17:56:14 +1100534static void radix_init_amor(void)
535{
536 /*
537 * In HV mode, we init AMOR (Authority Mask Override Register) so that
 538 * the hypervisor and guest can set up IAMR (Instruction Authority Mask
539 * Register), enable key 0 and set it to 1.
540 *
541 * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
542 */
543 mtspr(SPRN_AMOR, (3ul << 62));
544}
545
Russell Currey1bb2bae2019-04-18 16:51:22 +1000546#ifdef CONFIG_PPC_KUEP
547void setup_kuep(bool disabled)
Balbir Singh3b10d002016-11-15 17:56:16 +1100548{
Russell Currey1bb2bae2019-04-18 16:51:22 +1000549 if (disabled || !early_radix_enabled())
550 return;
551
552 if (smp_processor_id() == boot_cpuid)
553 pr_info("Activating Kernel Userspace Execution Prevention\n");
554
Balbir Singh3b10d002016-11-15 17:56:16 +1100555 /*
556 * Radix always uses key0 of the IAMR to determine if an access is
557 * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
558 * fetch.
559 */
Nicholas Piggin2bf10712018-07-05 18:47:00 +1000560 mtspr(SPRN_IAMR, (1ul << 62));
Balbir Singh3b10d002016-11-15 17:56:16 +1100561}
Russell Currey1bb2bae2019-04-18 16:51:22 +1000562#endif
Balbir Singh3b10d002016-11-15 17:56:16 +1100563
Michael Ellerman890274c2019-04-18 16:51:24 +1000564#ifdef CONFIG_PPC_KUAP
565void setup_kuap(bool disabled)
566{
567 if (disabled || !early_radix_enabled())
568 return;
569
570 if (smp_processor_id() == boot_cpuid) {
571 pr_info("Activating Kernel Userspace Access Prevention\n");
572 cur_cpu_spec->mmu_features |= MMU_FTR_RADIX_KUAP;
573 }
574
575 /* Make sure userspace can't change the AMR */
576 mtspr(SPRN_UAMOR, 0);
577 mtspr(SPRN_AMR, AMR_KUAP_BLOCKED);
578 isync();
579}
580#endif
581
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +1000582void __init radix__early_init_mmu(void)
583{
584 unsigned long lpcr;
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +1000585
586#ifdef CONFIG_PPC_64K_PAGES
587 /* PAGE_SIZE mappings */
588 mmu_virtual_psize = MMU_PAGE_64K;
589#else
590 mmu_virtual_psize = MMU_PAGE_4K;
591#endif
592
593#ifdef CONFIG_SPARSEMEM_VMEMMAP
594 /* vmemmap mapping */
595 mmu_vmemmap_psize = mmu_virtual_psize;
596#endif
597 /*
598 * initialize page table size
599 */
600 __pte_index_size = RADIX_PTE_INDEX_SIZE;
601 __pmd_index_size = RADIX_PMD_INDEX_SIZE;
602 __pud_index_size = RADIX_PUD_INDEX_SIZE;
603 __pgd_index_size = RADIX_PGD_INDEX_SIZE;
Aneesh Kumar K.Vfae22112018-02-11 20:30:06 +0530604 __pud_cache_index = RADIX_PUD_INDEX_SIZE;
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +1000605 __pte_table_size = RADIX_PTE_TABLE_SIZE;
606 __pmd_table_size = RADIX_PMD_TABLE_SIZE;
607 __pud_table_size = RADIX_PUD_TABLE_SIZE;
608 __pgd_table_size = RADIX_PGD_TABLE_SIZE;
609
Aneesh Kumar K.Va2f41eb2016-04-29 23:26:19 +1000610 __pmd_val_bits = RADIX_PMD_VAL_BITS;
611 __pud_val_bits = RADIX_PUD_VAL_BITS;
612 __pgd_val_bits = RADIX_PGD_VAL_BITS;
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +1000613
Aneesh Kumar K.Vd6a99962016-04-29 23:26:21 +1000614 __kernel_virt_start = RADIX_KERN_VIRT_START;
Aneesh Kumar K.Vd6a99962016-04-29 23:26:21 +1000615 __vmalloc_start = RADIX_VMALLOC_START;
616 __vmalloc_end = RADIX_VMALLOC_END;
Michael Ellerman63ee9b22017-08-01 20:29:22 +1000617 __kernel_io_start = RADIX_KERN_IO_START;
Aneesh Kumar K.Va35a3c62019-04-17 18:29:13 +0530618 __kernel_io_end = RADIX_KERN_IO_END;
Aneesh Kumar K.V0034d392019-04-17 18:29:14 +0530619 vmemmap = (struct page *)RADIX_VMEMMAP_START;
Aneesh Kumar K.Vd6a99962016-04-29 23:26:21 +1000620 ioremap_bot = IOREMAP_BASE;
Darren Stevensbfa37082016-06-29 21:06:28 +0100621
622#ifdef CONFIG_PCI
623 pci_io_base = ISA_IO_BASE;
624#endif
Aneesh Kumar K.Vfb4e5db2018-03-22 14:13:50 +0530625 __pte_frag_nr = RADIX_PTE_FRAG_NR;
626 __pte_frag_size_shift = RADIX_PTE_FRAG_SIZE_SHIFT;
Aneesh Kumar K.V8a6c6972018-04-16 16:57:22 +0530627 __pmd_frag_nr = RADIX_PMD_FRAG_NR;
628 __pmd_frag_size_shift = RADIX_PMD_FRAG_SIZE_SHIFT;
Aneesh Kumar K.Vd6a99962016-04-29 23:26:21 +1000629
Aneesh Kumar K.Vd6c88602016-05-31 11:56:29 +0530630 if (!firmware_has_feature(FW_FEATURE_LPAR)) {
Benjamin Herrenschmidt166dd7d2016-07-05 15:03:51 +1000631 radix_init_native();
Aneesh Kumar K.Vd6c88602016-05-31 11:56:29 +0530632 lpcr = mfspr(SPRN_LPCR);
Aneesh Kumar K.Vbf16cdf2016-07-13 15:05:21 +0530633 mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +1000634 radix_init_partition_table();
Balbir Singhee97b6b2016-11-15 17:56:14 +1100635 radix_init_amor();
Paul Mackerrascc3d2942017-01-30 21:21:36 +1100636 } else {
637 radix_init_pseries();
Aneesh Kumar K.Vd6c88602016-05-31 11:56:29 +0530638 }
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +1000639
Paul Mackerras9d661952016-11-21 16:00:58 +1100640 memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
641
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +1000642 radix_init_pgtable();
Nicholas Piggineeb715c2018-02-07 11:20:02 +1000643 /* Switch to the guard PID before turning on MMU */
644 radix__switch_mmu_context(NULL, &init_mm);
Nicholas Piggind4748272017-12-24 01:15:50 +1000645 if (cpu_has_feature(CPU_FTR_HVMODE))
646 tlbiel_all();
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +1000647}
648
649void radix__early_init_mmu_secondary(void)
650{
651 unsigned long lpcr;
652 /*
Aneesh Kumar K.Vd6c88602016-05-31 11:56:29 +0530653 * update partition table control register and UPRT
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +1000654 */
Aneesh Kumar K.Vd6c88602016-05-31 11:56:29 +0530655 if (!firmware_has_feature(FW_FEATURE_LPAR)) {
656 lpcr = mfspr(SPRN_LPCR);
Aneesh Kumar K.Vbf16cdf2016-07-13 15:05:21 +0530657 mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
Aneesh Kumar K.Vd6c88602016-05-31 11:56:29 +0530658
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +1000659 mtspr(SPRN_PTCR,
660 __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
Balbir Singhee97b6b2016-11-15 17:56:14 +1100661 radix_init_amor();
Aneesh Kumar K.Vd6c88602016-05-31 11:56:29 +0530662 }
Nicholas Piggind4748272017-12-24 01:15:50 +1000663
Nicholas Piggineeb715c2018-02-07 11:20:02 +1000664 radix__switch_mmu_context(NULL, &init_mm);
Nicholas Piggind4748272017-12-24 01:15:50 +1000665 if (cpu_has_feature(CPU_FTR_HVMODE))
666 tlbiel_all();
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +1000667}
668
Benjamin Herrenschmidtfe036a02016-08-19 14:22:37 +0530669void radix__mmu_cleanup_all(void)
670{
671 unsigned long lpcr;
672
673 if (!firmware_has_feature(FW_FEATURE_LPAR)) {
674 lpcr = mfspr(SPRN_LPCR);
675 mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
676 mtspr(SPRN_PTCR, 0);
Alistair Popple1d0761d2016-12-14 13:36:51 +1100677 powernv_set_nmmu_ptcr(0);
Benjamin Herrenschmidtfe036a02016-08-19 14:22:37 +0530678 radix__flush_tlb_all();
679 }
680}
681
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +1000682void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
683 phys_addr_t first_memblock_size)
684{
Christophe Leroy47d99942019-03-29 10:00:00 +0000685 /*
686 * We don't currently support the first MEMBLOCK not mapping 0
Aneesh Kumar K.V177ba7c2016-04-29 23:26:10 +1000687 * physical on those processors
688 */
689 BUG_ON(first_memblock_base != 0);
Nicholas Piggin1513c332017-12-22 21:17:08 +1000690
Nicholas Piggin5eae82c2017-12-22 21:17:11 +1000691 /*
692 * Radix mode is not limited by RMA / VRMA addressing.
693 */
694 ppc64_rma_size = ULONG_MAX;
Aneesh Kumar K.V2bfd65e2016-04-29 23:25:58 +1000695}
Aneesh Kumar K.Vd9225ad2016-04-29 23:26:00 +1000696
Reza Arbab6cc27342017-01-16 13:07:44 -0600697#ifdef CONFIG_MEMORY_HOTPLUG
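/*
 * Descriptive note: teardown helpers for memory hot-remove. A page table
 * page is freed once all of its entries are none, and the upper-level entry
 * that pointed to it is cleared.
 */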
Reza Arbab4b5d62c2017-01-16 13:07:45 -0600698static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
699{
700 pte_t *pte;
701 int i;
702
703 for (i = 0; i < PTRS_PER_PTE; i++) {
704 pte = pte_start + i;
705 if (!pte_none(*pte))
706 return;
707 }
708
709 pte_free_kernel(&init_mm, pte_start);
710 pmd_clear(pmd);
711}
712
713static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
714{
715 pmd_t *pmd;
716 int i;
717
718 for (i = 0; i < PTRS_PER_PMD; i++) {
719 pmd = pmd_start + i;
720 if (!pmd_none(*pmd))
721 return;
722 }
723
724 pmd_free(&init_mm, pmd_start);
725 pud_clear(pud);
726}
727
Balbir Singh4dd5f8a92018-02-07 17:35:51 +1100728struct change_mapping_params {
729 pte_t *pte;
730 unsigned long start;
731 unsigned long end;
732 unsigned long aligned_start;
733 unsigned long aligned_end;
734};
735
Mauricio Faria de Oliveirabde709a2018-03-09 17:45:58 -0300736static int __meminit stop_machine_change_mapping(void *data)
Balbir Singh4dd5f8a92018-02-07 17:35:51 +1100737{
738 struct change_mapping_params *params =
739 (struct change_mapping_params *)data;
740
741 if (!data)
742 return -1;
743
744 spin_unlock(&init_mm.page_table_lock);
745 pte_clear(&init_mm, params->aligned_start, params->pte);
Michael Ellermanf437c512018-03-31 00:11:24 +1100746 create_physical_mapping(params->aligned_start, params->start, -1);
747 create_physical_mapping(params->end, params->aligned_end, -1);
Balbir Singh4dd5f8a92018-02-07 17:35:51 +1100748 spin_lock(&init_mm.page_table_lock);
749 return 0;
750}
751
Reza Arbab4b5d62c2017-01-16 13:07:45 -0600752static void remove_pte_table(pte_t *pte_start, unsigned long addr,
753 unsigned long end)
754{
755 unsigned long next;
756 pte_t *pte;
757
758 pte = pte_start + pte_index(addr);
759 for (; addr < end; addr = next, pte++) {
760 next = (addr + PAGE_SIZE) & PAGE_MASK;
761 if (next > end)
762 next = end;
763
764 if (!pte_present(*pte))
765 continue;
766
Reza Arbab0d0a4bc2017-01-16 13:07:46 -0600767 if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(next)) {
768 /*
769 * The vmemmap_free() and remove_section_mapping()
770 * codepaths call us with aligned addresses.
771 */
772 WARN_ONCE(1, "%s: unaligned range\n", __func__);
773 continue;
774 }
775
Reza Arbab4b5d62c2017-01-16 13:07:45 -0600776 pte_clear(&init_mm, addr, pte);
777 }
778}
779
Balbir Singh4dd5f8a92018-02-07 17:35:51 +1100780/*
 781 * Helper to clear the pte and potentially split the mapping
782 */
Mauricio Faria de Oliveirabde709a2018-03-09 17:45:58 -0300783static void __meminit split_kernel_mapping(unsigned long addr, unsigned long end,
Balbir Singh4dd5f8a92018-02-07 17:35:51 +1100784 unsigned long size, pte_t *pte)
785{
786 unsigned long mask = ~(size - 1);
787 unsigned long aligned_start = addr & mask;
788 unsigned long aligned_end = addr + size;
789 struct change_mapping_params params;
790 bool split_region = false;
791
792 if ((end - addr) < size) {
793 /*
 794 * We're going to clear the PTE, but have not flushed
 795 * the mapping, so it is time to remap and flush. If the
 796 * effects are visible outside the processor, or if we are
 797 * running in code close to the mapping we cleared, we are
 798 * in trouble.
799 */
800 if (overlaps_kernel_text(aligned_start, addr) ||
801 overlaps_kernel_text(end, aligned_end)) {
802 /*
803 * Hack, just return, don't pte_clear
804 */
805 WARN_ONCE(1, "Linear mapping %lx->%lx overlaps kernel "
806 "text, not splitting\n", addr, end);
807 return;
808 }
809 split_region = true;
810 }
811
812 if (split_region) {
813 params.pte = pte;
814 params.start = addr;
815 params.end = end;
816 params.aligned_start = addr & ~(size - 1);
817 params.aligned_end = min_t(unsigned long, aligned_end,
818 (unsigned long)__va(memblock_end_of_DRAM()));
819 stop_machine(stop_machine_change_mapping, &params, NULL);
820 return;
821 }
822
823 pte_clear(&init_mm, addr, pte);
824}
825
Reza Arbab4b5d62c2017-01-16 13:07:45 -0600826static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
827 unsigned long end)
828{
829 unsigned long next;
830 pte_t *pte_base;
831 pmd_t *pmd;
832
833 pmd = pmd_start + pmd_index(addr);
834 for (; addr < end; addr = next, pmd++) {
835 next = pmd_addr_end(addr, end);
836
837 if (!pmd_present(*pmd))
838 continue;
839
840 if (pmd_huge(*pmd)) {
Balbir Singh4dd5f8a92018-02-07 17:35:51 +1100841 split_kernel_mapping(addr, end, PMD_SIZE, (pte_t *)pmd);
Reza Arbab4b5d62c2017-01-16 13:07:45 -0600842 continue;
843 }
844
845 pte_base = (pte_t *)pmd_page_vaddr(*pmd);
846 remove_pte_table(pte_base, addr, next);
847 free_pte_table(pte_base, pmd);
848 }
849}
850
851static void remove_pud_table(pud_t *pud_start, unsigned long addr,
852 unsigned long end)
853{
854 unsigned long next;
855 pmd_t *pmd_base;
856 pud_t *pud;
857
858 pud = pud_start + pud_index(addr);
859 for (; addr < end; addr = next, pud++) {
860 next = pud_addr_end(addr, end);
861
862 if (!pud_present(*pud))
863 continue;
864
865 if (pud_huge(*pud)) {
Balbir Singh4dd5f8a92018-02-07 17:35:51 +1100866 split_kernel_mapping(addr, end, PUD_SIZE, (pte_t *)pud);
Reza Arbab4b5d62c2017-01-16 13:07:45 -0600867 continue;
868 }
869
870 pmd_base = (pmd_t *)pud_page_vaddr(*pud);
871 remove_pmd_table(pmd_base, addr, next);
872 free_pmd_table(pmd_base, pud);
873 }
874}
875
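/*
 * Descriptive note: tear down the kernel mapping for [start, end). Walks
 * the page tables, clearing leaf entries and freeing empty table pages,
 * splitting any huge mapping that only partially overlaps the range, then
 * flushes the TLB for the range.
 */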
Mauricio Faria de Oliveirabde709a2018-03-09 17:45:58 -0300876static void __meminit remove_pagetable(unsigned long start, unsigned long end)
Reza Arbab4b5d62c2017-01-16 13:07:45 -0600877{
878 unsigned long addr, next;
879 pud_t *pud_base;
880 pgd_t *pgd;
881
882 spin_lock(&init_mm.page_table_lock);
883
884 for (addr = start; addr < end; addr = next) {
885 next = pgd_addr_end(addr, end);
886
887 pgd = pgd_offset_k(addr);
888 if (!pgd_present(*pgd))
889 continue;
890
891 if (pgd_huge(*pgd)) {
Balbir Singh4dd5f8a92018-02-07 17:35:51 +1100892 split_kernel_mapping(addr, end, PGDIR_SIZE, (pte_t *)pgd);
Reza Arbab4b5d62c2017-01-16 13:07:45 -0600893 continue;
894 }
895
896 pud_base = (pud_t *)pgd_page_vaddr(*pgd);
897 remove_pud_table(pud_base, addr, next);
898 }
899
900 spin_unlock(&init_mm.page_table_lock);
901 radix__flush_tlb_kernel_range(start, end);
902}
903
Michael Ellermanf437c512018-03-31 00:11:24 +1100904int __meminit radix__create_section_mapping(unsigned long start, unsigned long end, int nid)
Reza Arbab6cc27342017-01-16 13:07:44 -0600905{
Aneesh Kumar K.Ve0909392019-04-17 18:29:15 +0530906 if (end >= RADIX_VMALLOC_START) {
Colin Ian Kingf341d892019-04-23 16:10:17 +0100907 pr_warn("Outside the supported range\n");
Aneesh Kumar K.Ve0909392019-04-17 18:29:15 +0530908 return -1;
909 }
910
Nicholas Piggin29ab6c42018-02-14 01:08:22 +1000911 return create_physical_mapping(start, end, nid);
Reza Arbab6cc27342017-01-16 13:07:44 -0600912}
Reza Arbab4b5d62c2017-01-16 13:07:45 -0600913
Mauricio Faria de Oliveirabde709a2018-03-09 17:45:58 -0300914int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
Reza Arbab4b5d62c2017-01-16 13:07:45 -0600915{
916 remove_pagetable(start, end);
917 return 0;
918}
Reza Arbab6cc27342017-01-16 13:07:44 -0600919#endif /* CONFIG_MEMORY_HOTPLUG */
920
Aneesh Kumar K.Vd9225ad2016-04-29 23:26:00 +1000921#ifdef CONFIG_SPARSEMEM_VMEMMAP
Nicholas Piggin29ab6c42018-02-14 01:08:22 +1000922static int __map_kernel_page_nid(unsigned long ea, unsigned long pa,
923 pgprot_t flags, unsigned int map_page_size,
924 int nid)
925{
926 return __map_kernel_page(ea, pa, flags, map_page_size, nid, 0, 0);
927}
928
Aneesh Kumar K.Vd9225ad2016-04-29 23:26:00 +1000929int __meminit radix__vmemmap_create_mapping(unsigned long start,
930 unsigned long page_size,
931 unsigned long phys)
932{
933 /* Create a PTE encoding */
934 unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;
Nicholas Piggin2ad452f2018-02-14 01:08:24 +1000935 int nid = early_pfn_to_nid(phys >> PAGE_SHIFT);
936 int ret;
Aneesh Kumar K.Vd9225ad2016-04-29 23:26:00 +1000937
Aneesh Kumar K.Ve0909392019-04-17 18:29:15 +0530938 if ((start + page_size) >= RADIX_VMEMMAP_END) {
Colin Ian Kingf341d892019-04-23 16:10:17 +0100939 pr_warn("Outside the supported range\n");
Aneesh Kumar K.Ve0909392019-04-17 18:29:15 +0530940 return -1;
941 }
942
Nicholas Piggin2ad452f2018-02-14 01:08:24 +1000943 ret = __map_kernel_page_nid(start, phys, __pgprot(flags), page_size, nid);
944 BUG_ON(ret);
945
Aneesh Kumar K.Vd9225ad2016-04-29 23:26:00 +1000946 return 0;
947}
948
949#ifdef CONFIG_MEMORY_HOTPLUG
Mauricio Faria de Oliveirabde709a2018-03-09 17:45:58 -0300950void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
Aneesh Kumar K.Vd9225ad2016-04-29 23:26:00 +1000951{
Reza Arbab0d0a4bc2017-01-16 13:07:46 -0600952 remove_pagetable(start, start + page_size);
Aneesh Kumar K.Vd9225ad2016-04-29 23:26:00 +1000953}
954#endif
955#endif
Aneesh Kumar K.Vbde3eb62016-04-29 23:26:30 +1000956
957#ifdef CONFIG_TRANSPARENT_HUGEPAGE
958
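/*
 * Descriptive note: clear and/or set bits in a huge (THP) PMD. Expected to
 * be called with the PMD lock held; returns the old PMD value and emits a
 * hugepage_update trace event.
 */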
959unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
960 pmd_t *pmdp, unsigned long clr,
961 unsigned long set)
962{
963 unsigned long old;
964
965#ifdef CONFIG_DEBUG_VM
Oliver O'Halloranebd31192017-06-28 11:32:34 +1000966 WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
Aneesh Kumar K.Vaf60a4c2018-04-16 16:57:16 +0530967 assert_spin_locked(pmd_lockptr(mm, pmdp));
Aneesh Kumar K.Vbde3eb62016-04-29 23:26:30 +1000968#endif
969
970 old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
971 trace_hugepage_update(addr, old, clr, set);
972
973 return old;
974}
975
976pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
977 pmd_t *pmdp)
978
979{
980 pmd_t pmd;
981
982 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
983 VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
Oliver O'Halloranebd31192017-06-28 11:32:34 +1000984 VM_BUG_ON(pmd_devmap(*pmdp));
Aneesh Kumar K.Vbde3eb62016-04-29 23:26:30 +1000985 /*
986 * khugepaged calls this for normal pmd
987 */
988 pmd = *pmdp;
989 pmd_clear(pmdp);
Benjamin Herrenschmidt424de9c2017-07-19 14:49:06 +1000990
Aneesh Kumar K.Vbde3eb62016-04-29 23:26:30 +1000991 /*FIXME!! Verify whether we need this kick below */
Aneesh Kumar K.Vfa4531f2017-07-27 11:54:54 +0530992 serialize_against_pte_lookup(vma->vm_mm);
Benjamin Herrenschmidt424de9c2017-07-19 14:49:06 +1000993
994 radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);
995
Aneesh Kumar K.Vbde3eb62016-04-29 23:26:30 +1000996 return pmd;
997}
998
999/*
 1000 * For us pgtable_t is pte_t *. In order to save the deposited
1001 * page table, we consider the allocated page table as a list
1002 * head. On withdraw we need to make sure we zero out the used
1003 * list_head memory area.
1004 */
1005void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
1006 pgtable_t pgtable)
1007{
Christophe Leroy47d99942019-03-29 10:00:00 +00001008 struct list_head *lh = (struct list_head *) pgtable;
Aneesh Kumar K.Vbde3eb62016-04-29 23:26:30 +10001009
Christophe Leroy47d99942019-03-29 10:00:00 +00001010 assert_spin_locked(pmd_lockptr(mm, pmdp));
Aneesh Kumar K.Vbde3eb62016-04-29 23:26:30 +10001011
Christophe Leroy47d99942019-03-29 10:00:00 +00001012 /* FIFO */
1013 if (!pmd_huge_pte(mm, pmdp))
1014 INIT_LIST_HEAD(lh);
1015 else
1016 list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
1017 pmd_huge_pte(mm, pmdp) = pgtable;
Aneesh Kumar K.Vbde3eb62016-04-29 23:26:30 +10001018}
1019
1020pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
1021{
Christophe Leroy47d99942019-03-29 10:00:00 +00001022 pte_t *ptep;
1023 pgtable_t pgtable;
1024 struct list_head *lh;
Aneesh Kumar K.Vbde3eb62016-04-29 23:26:30 +10001025
Christophe Leroy47d99942019-03-29 10:00:00 +00001026 assert_spin_locked(pmd_lockptr(mm, pmdp));
Aneesh Kumar K.Vbde3eb62016-04-29 23:26:30 +10001027
Christophe Leroy47d99942019-03-29 10:00:00 +00001028 /* FIFO */
1029 pgtable = pmd_huge_pte(mm, pmdp);
1030 lh = (struct list_head *) pgtable;
1031 if (list_empty(lh))
1032 pmd_huge_pte(mm, pmdp) = NULL;
1033 else {
1034 pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
1035 list_del(lh);
1036 }
1037 ptep = (pte_t *) pgtable;
1038 *ptep = __pte(0);
1039 ptep++;
1040 *ptep = __pte(0);
1041 return pgtable;
Aneesh Kumar K.Vbde3eb62016-04-29 23:26:30 +10001042}
1043
Aneesh Kumar K.Vbde3eb62016-04-29 23:26:30 +10001044pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
Christophe Leroy47d99942019-03-29 10:00:00 +00001045 unsigned long addr, pmd_t *pmdp)
Aneesh Kumar K.Vbde3eb62016-04-29 23:26:30 +10001046{
1047 pmd_t old_pmd;
1048 unsigned long old;
1049
1050 old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
1051 old_pmd = __pmd(old);
1052 /*
Aneesh Kumar K.Vfa4531f2017-07-27 11:54:54 +05301053 * Serialize against find_current_mm_pte which does lock-less
Aneesh Kumar K.Vbde3eb62016-04-29 23:26:30 +10001054 * lookup in page tables with local interrupts disabled. For huge pages
1055 * it casts pmd_t to pte_t. Since format of pte_t is different from
1056 * pmd_t we want to prevent transit from pmd pointing to page table
1057 * to pmd pointing to huge page (and back) while interrupts are disabled.
1058 * We clear pmd to possibly replace it with page table pointer in
1059 * different code paths. So make sure we wait for the parallel
Aneesh Kumar K.Vfa4531f2017-07-27 11:54:54 +05301060 * find_current_mm_pte to finish.
Aneesh Kumar K.Vbde3eb62016-04-29 23:26:30 +10001061 */
Aneesh Kumar K.Vfa4531f2017-07-27 11:54:54 +05301062 serialize_against_pte_lookup(mm);
Aneesh Kumar K.Vbde3eb62016-04-29 23:26:30 +10001063 return old_pmd;
1064}
1065
1066int radix__has_transparent_hugepage(void)
1067{
1068 /* For radix 2M at PMD level means thp */
1069 if (mmu_psize_defs[MMU_PAGE_2M].shift == PMD_SHIFT)
1070 return 1;
1071 return 0;
1072}
1073#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
Aneesh Kumar K.V044003b2018-05-29 19:58:39 +05301074
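/*
 * Descriptive note: relax the access rights of a PTE (e.g. on a write or
 * access fault). When the mm has coprocessor (NMMU) contexts attached and
 * the update grants write permission, the code goes through an invalid PTE
 * plus a TLB flush rather than updating in place, as explained below.
 */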
Aneesh Kumar K.Ve4c11122018-05-29 19:58:40 +05301075void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
1076 pte_t entry, unsigned long address, int psize)
Aneesh Kumar K.V044003b2018-05-29 19:58:39 +05301077{
Aneesh Kumar K.Ve4c11122018-05-29 19:58:40 +05301078 struct mm_struct *mm = vma->vm_mm;
Aneesh Kumar K.V044003b2018-05-29 19:58:39 +05301079 unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
1080 _PAGE_RW | _PAGE_EXEC);
Aneesh Kumar K.Vf08d08f2018-08-22 22:46:05 +05301081
1082 unsigned long change = pte_val(entry) ^ pte_val(*ptep);
Aneesh Kumar K.Vbd5050e2018-05-29 19:58:41 +05301083 /*
 1084 * To avoid NMMU hang while relaxing access, we need to mark
1085 * the pte invalid in between.
1086 */
Aneesh Kumar K.Vf08d08f2018-08-22 22:46:05 +05301087 if ((change & _PAGE_RW) && atomic_read(&mm->context.copros) > 0) {
Aneesh Kumar K.V044003b2018-05-29 19:58:39 +05301088 unsigned long old_pte, new_pte;
1089
Aneesh Kumar K.Vf08d08f2018-08-22 22:46:05 +05301090 old_pte = __radix_pte_update(ptep, _PAGE_PRESENT, _PAGE_INVALID);
Aneesh Kumar K.V044003b2018-05-29 19:58:39 +05301091 /*
1092 * new value of pte
1093 */
1094 new_pte = old_pte | set;
Aneesh Kumar K.Vbd5050e2018-05-29 19:58:41 +05301095 radix__flush_tlb_page_psize(mm, address, psize);
Aneesh Kumar K.Vf08d08f2018-08-22 22:46:05 +05301096 __radix_pte_update(ptep, _PAGE_INVALID, new_pte);
Aneesh Kumar K.Vbd5050e2018-05-29 19:58:41 +05301097 } else {
Aneesh Kumar K.V044003b2018-05-29 19:58:39 +05301098 __radix_pte_update(ptep, 0, set);
Nicholas Piggine5f7cb52018-06-01 20:01:15 +10001099 /*
1100 * Book3S does not require a TLB flush when relaxing access
1101 * restrictions when the address space is not attached to a
1102 * NMMU, because the core MMU will reload the pte after taking
 1103 * an access fault, which is defined by the architecture.
1104 */
Aneesh Kumar K.Vbd5050e2018-05-29 19:58:41 +05301105 }
Nicholas Pigginf1cb8f92018-06-01 20:01:19 +10001106 /* See ptesync comment in radix__set_pte_at */
Aneesh Kumar K.V044003b2018-05-29 19:58:39 +05301107}
Aneesh Kumar K.V5b323362019-03-05 15:46:33 -08001108
1109void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
1110 unsigned long addr, pte_t *ptep,
1111 pte_t old_pte, pte_t pte)
1112{
1113 struct mm_struct *mm = vma->vm_mm;
1114
1115 /*
1116 * To avoid NMMU hang while relaxing access we need to flush the tlb before
1117 * we set the new value. We need to do this only for radix, because hash
1118 * translation does flush when updating the linux pte.
1119 */
1120 if (is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) &&
1121 (atomic_read(&mm->context.copros) > 0))
1122 radix__flush_tlb_page(vma, addr);
1123
1124 set_pte_at(mm, addr, ptep, pte);
1125}
Nicholas Piggind38153f2019-06-10 13:08:17 +10001126
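/*
 * Descriptive note: huge-vmap support. With radix, ioremap ranges may be
 * mapped with PUD or PMD leaf entries; the pud/pmd_set_huge(),
 * *_clear_huge() and *_free_*_page() helpers below install and tear down
 * those leaves.
 */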
Nicholas Piggind909f912019-06-10 13:08:18 +10001127int __init arch_ioremap_pud_supported(void)
1128{
1129 /* HPT does not cope with large pages in the vmalloc area */
1130 return radix_enabled();
1131}
1132
1133int __init arch_ioremap_pmd_supported(void)
1134{
1135 return radix_enabled();
1136}
1137
1138int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
1139{
1140 return 0;
1141}
1142
1143int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
1144{
1145 pte_t *ptep = (pte_t *)pud;
1146 pte_t new_pud = pfn_pte(__phys_to_pfn(addr), prot);
1147
1148 if (!radix_enabled())
1149 return 0;
1150
1151 set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pud);
1152
1153 return 1;
1154}
1155
1156int pud_clear_huge(pud_t *pud)
1157{
1158 if (pud_huge(*pud)) {
1159 pud_clear(pud);
1160 return 1;
1161 }
1162
1163 return 0;
1164}
1165
1166int pud_free_pmd_page(pud_t *pud, unsigned long addr)
1167{
1168 pmd_t *pmd;
1169 int i;
1170
1171 pmd = (pmd_t *)pud_page_vaddr(*pud);
1172 pud_clear(pud);
1173
1174 flush_tlb_kernel_range(addr, addr + PUD_SIZE);
1175
1176 for (i = 0; i < PTRS_PER_PMD; i++) {
1177 if (!pmd_none(pmd[i])) {
1178 pte_t *pte;
1179 pte = (pte_t *)pmd_page_vaddr(pmd[i]);
1180
1181 pte_free_kernel(&init_mm, pte);
1182 }
1183 }
1184
1185 pmd_free(&init_mm, pmd);
1186
1187 return 1;
1188}
1189
1190int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
1191{
1192 pte_t *ptep = (pte_t *)pmd;
1193 pte_t new_pmd = pfn_pte(__phys_to_pfn(addr), prot);
1194
1195 if (!radix_enabled())
1196 return 0;
1197
1198 set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pmd);
1199
1200 return 1;
1201}
1202
1203int pmd_clear_huge(pmd_t *pmd)
1204{
1205 if (pmd_huge(*pmd)) {
1206 pmd_clear(pmd);
1207 return 1;
1208 }
1209
1210 return 0;
1211}
1212
1213int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
1214{
1215 pte_t *pte;
1216
1217 pte = (pte_t *)pmd_page_vaddr(*pmd);
1218 pmd_clear(pmd);
1219
1220 flush_tlb_kernel_range(addr, addr + PMD_SIZE);
1221
1222 pte_free_kernel(&init_mm, pte);
1223
1224 return 1;
1225}
1226
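/*
 * Descriptive note: map an ioremap range. Once the slab allocator (and with
 * it vmalloc) is up, use ioremap_page_range(); during early boot fall back
 * to mapping the range page by page with map_kernel_page().
 */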
Nicholas Piggind38153f2019-06-10 13:08:17 +10001227int radix__ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size,
1228 pgprot_t prot, int nid)
1229{
1230 if (likely(slab_is_available())) {
1231 int err = ioremap_page_range(ea, ea + size, pa, prot);
1232 if (err)
1233 unmap_kernel_range(ea, size);
1234 return err;
1235 } else {
1236 unsigned long i;
1237
1238 for (i = 0; i < size; i += PAGE_SIZE) {
1239 int err = map_kernel_page(ea + i, pa + i, prot);
1240 if (WARN_ON_ONCE(err)) /* Should clean up */
1241 return err;
1242 }
1243 return 0;
1244 }
1245}