/*
 * Page table handling routines for radix page table.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "radix-mmu: " fmt

#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/mm.h>
#include <linux/string_helpers.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/powernv.h>
#include <asm/sections.h>
#include <asm/trace.h>

#include <trace/events/thp.h>

unsigned int mmu_pid_bits;
unsigned int mmu_base_pid;

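/*
 * Bare-metal hook for registering the process table: point the second
 * doubleword of partition-table entry 0 at the table (base, size and
 * the PATB_GR "guest radix" bit), preserving the first doubleword.
 */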
static int native_register_process_table(unsigned long base, unsigned long pg_sz,
					  unsigned long table_size)
{
	unsigned long patb0, patb1;

	patb0 = be64_to_cpu(partition_tb[0].patb0);
	patb1 = base | table_size | PATB_GR;

	mmu_partition_table_set_entry(0, patb0, patb1);

	return 0;
}

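/*
 * Boot-time page table allocator, for use before the slab is up: grab a
 * naturally aligned, zeroed block straight from memblock.
 */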
static __ref void *early_alloc_pgtable(unsigned long size)
{
	void *pt;

	pt = __va(memblock_alloc_base(size, size, MEMBLOCK_ALLOC_ANYWHERE));
	memset(pt, 0, size);

	return pt;
}

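/*
 * Map one kernel page of the given size (base page, 2M or 1G) at effective
 * address ea. Uses the normal pud/pmd/pte allocators once the slab is
 * available, and falls back to memblock-backed early allocations before that.
 */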
int radix__map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	/*
	 * Make sure task size is correct as per the max addr
	 */
	BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);
	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		if (map_page_size == PUD_SIZE) {
			ptep = (pte_t *)pudp;
			goto set_the_pte;
		}
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		if (map_page_size == PMD_SIZE) {
			ptep = pmdp_ptep(pmdp);
			goto set_the_pte;
		}
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
	} else {
		pgdp = pgd_offset_k(ea);
		if (pgd_none(*pgdp)) {
			pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
			BUG_ON(pudp == NULL);
			pgd_populate(&init_mm, pgdp, pudp);
		}
		pudp = pud_offset(pgdp, ea);
		if (map_page_size == PUD_SIZE) {
			ptep = (pte_t *)pudp;
			goto set_the_pte;
		}
		if (pud_none(*pudp)) {
			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
			BUG_ON(pmdp == NULL);
			pud_populate(&init_mm, pudp, pmdp);
		}
		pmdp = pmd_offset(pudp, ea);
		if (map_page_size == PMD_SIZE) {
			ptep = pmdp_ptep(pmdp);
			goto set_the_pte;
		}
		if (!pmd_present(*pmdp)) {
			ptep = early_alloc_pgtable(PAGE_SIZE);
			BUG_ON(ptep == NULL);
			pmd_populate_kernel(&init_mm, pmdp, ptep);
		}
		ptep = pte_offset_kernel(pmdp, ea);
	}

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, flags));
	smp_wmb();
	return 0;
}

#ifdef CONFIG_STRICT_KERNEL_RWX
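/*
 * Walk the kernel linear mapping for [start, end) and clear the given PTE
 * bits (e.g. _PAGE_WRITE or _PAGE_EXEC) on every leaf entry, then flush
 * the range from the TLB.
 */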
void radix__change_memory_range(unsigned long start, unsigned long end,
				unsigned long clear)
{
	unsigned long idx;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = PAGE_ALIGN(end); // aligns up

	pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n",
		 start, end, clear);

	for (idx = start; idx < end; idx += PAGE_SIZE) {
		pgdp = pgd_offset_k(idx);
		pudp = pud_alloc(&init_mm, pgdp, idx);
		if (!pudp)
			continue;
		if (pud_huge(*pudp)) {
			ptep = (pte_t *)pudp;
			goto update_the_pte;
		}
		pmdp = pmd_alloc(&init_mm, pudp, idx);
		if (!pmdp)
			continue;
		if (pmd_huge(*pmdp)) {
			ptep = pmdp_ptep(pmdp);
			goto update_the_pte;
		}
		ptep = pte_alloc_kernel(pmdp, idx);
		if (!ptep)
			continue;
update_the_pte:
		radix__pte_update(&init_mm, idx, ptep, clear, 0, 0);
	}

	radix__flush_tlb_kernel_range(start, end);
}

void radix__mark_rodata_ro(void)
{
	unsigned long start, end;

	/*
	 * mark_rodata_ro() will mark itself as !writable at some point.
	 * Due to the DD1 workaround in radix__pte_update(), we'll end up
	 * with an invalid pte and the system will crash quite severely.
	 */
	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		pr_warn("Warning: Unable to mark rodata read only on P9 DD1\n");
		return;
	}

	start = (unsigned long)_stext;
	end = (unsigned long)__init_begin;

	radix__change_memory_range(start, end, _PAGE_WRITE);
}

void radix__mark_initmem_nx(void)
{
	unsigned long start = (unsigned long)__init_begin;
	unsigned long end = (unsigned long)__init_end;

	radix__change_memory_range(start, end, _PAGE_EXEC);
}
#endif /* CONFIG_STRICT_KERNEL_RWX */

static inline void __meminit print_mapping(unsigned long start,
					   unsigned long end,
					   unsigned long size)
{
	char buf[10];

	if (end <= start)
		return;

	string_get_size(size, 1, STRING_UNITS_2, buf, sizeof(buf));

	pr_info("Mapped 0x%016lx-0x%016lx with %s pages\n", start, end, buf);
}

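/*
 * Map a physical range into the linear mapping, preferring 1G, then 2M,
 * then base pages as alignment and remaining length allow. With
 * STRICT_KERNEL_RWX, mappings that would straddle the kernel text are
 * forced down to smaller sizes so text and data never share a large page.
 */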
static int __meminit create_physical_mapping(unsigned long start,
					     unsigned long end)
{
	unsigned long vaddr, addr, mapping_size = 0;
	pgprot_t prot;
	unsigned long max_mapping_size;
#ifdef CONFIG_STRICT_KERNEL_RWX
	int split_text_mapping = 1;
#else
	int split_text_mapping = 0;
#endif

	start = _ALIGN_UP(start, PAGE_SIZE);
	for (addr = start; addr < end; addr += mapping_size) {
		unsigned long gap, previous_size;
		int rc;

		gap = end - addr;
		previous_size = mapping_size;
		max_mapping_size = PUD_SIZE;

retry:
		if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
		    mmu_psize_defs[MMU_PAGE_1G].shift &&
		    PUD_SIZE <= max_mapping_size)
			mapping_size = PUD_SIZE;
		else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
			 mmu_psize_defs[MMU_PAGE_2M].shift)
			mapping_size = PMD_SIZE;
		else
			mapping_size = PAGE_SIZE;

		if (split_text_mapping && (mapping_size == PUD_SIZE) &&
			(addr <= __pa_symbol(__init_begin)) &&
			(addr + mapping_size) >= __pa_symbol(_stext)) {
			max_mapping_size = PMD_SIZE;
			goto retry;
		}

		if (split_text_mapping && (mapping_size == PMD_SIZE) &&
			(addr <= __pa_symbol(__init_begin)) &&
			(addr + mapping_size) >= __pa_symbol(_stext))
			mapping_size = PAGE_SIZE;

		if (mapping_size != previous_size) {
			print_mapping(start, addr, previous_size);
			start = addr;
		}

		vaddr = (unsigned long)__va(addr);

		if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
		    overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size))
			prot = PAGE_KERNEL_X;
		else
			prot = PAGE_KERNEL;

		rc = radix__map_kernel_page(vaddr, addr, prot, mapping_size);
		if (rc)
			return rc;
	}

	print_mapping(start, addr, mapping_size);
	return 0;
}

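/*
 * Boot-time setup of the host radix environment: create the linear
 * mapping, size the PID space, allocate and fill the process table, and
 * carve out the guard PID used by init_mm.
 */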
static void __init radix_init_pgtable(void)
{
	unsigned long rts_field;
	struct memblock_region *reg;

	/* We don't support slb for radix */
	mmu_slb_size = 0;
	/*
	 * Create the linear mapping, using standard page size for now
	 */
	for_each_memblock(memory, reg)
		WARN_ON(create_physical_mapping(reg->base,
						reg->base + reg->size));

	/* Find out how many PID bits are supported */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		if (!mmu_pid_bits)
			mmu_pid_bits = 20;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		/*
		 * When KVM is possible, we only use the top half of the
		 * PID space to avoid collisions between host and guest PIDs
		 * which can cause problems due to prefetch when exiting the
		 * guest with AIL=3
		 */
		mmu_base_pid = 1 << (mmu_pid_bits - 1);
#else
		mmu_base_pid = 1;
#endif
	} else {
		/* The guest uses the bottom half of the PID space */
		if (!mmu_pid_bits)
			mmu_pid_bits = 19;
		mmu_base_pid = 1;
	}

	/*
	 * Allocate Partition table and process table for the
	 * host.
	 */
	BUG_ON(PRTB_SIZE_SHIFT > 36);
	process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT);
	/*
	 * Fill in the process table.
	 */
	rts_field = radix__get_tree_size();
	process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
	/*
	 * Fill in the partition table. We are supposed to use the effective
	 * address of the process table here, but our linear mapping also
	 * enables us to use the physical address.
	 */
	register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
	pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
	asm volatile("ptesync" : : : "memory");
	asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
		     "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
	trace_tlbie(0, 0, TLBIEL_INVAL_SET_LPID, 0, 2, 1, 1);

	/*
	 * The init_mm context is given the first available (non-zero) PID,
	 * which is the "guard PID" and contains no page table. PIDR should
	 * never be set to zero because that duplicates the kernel address
	 * space at the 0x0... offset (quadrant 0)!
	 *
	 * An arbitrary PID that may later be allocated by the PID allocator
	 * for userspace processes must not be used either, because that
	 * would cause stale user mappings for that PID on CPUs outside of
	 * the TLB invalidation scheme (because it won't be in mm_cpumask).
	 *
	 * So permanently carve out one PID for the purpose of a guard PID.
	 */
	init_mm.context.id = mmu_base_pid;
	mmu_base_pid++;
}

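/*
 * Native partition table setup: point entry 0 at the kernel's radix tree
 * with the host-radix (PATB_HR) bit set.
 */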
static void __init radix_init_partition_table(void)
{
	unsigned long rts_field, dw0;

	mmu_partition_table_init();
	rts_field = radix__get_tree_size();
	dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
	mmu_partition_table_set_entry(0, dw0, 0);

	pr_info("Initializing Radix MMU\n");
	pr_info("Partition table %p\n", partition_tb);
}

void __init radix_init_native(void)
{
	register_process_table = native_register_process_table;
}

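/* Translate a page-size shift from the device tree into an MMU_PAGE_* index. */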
static int __init get_idx_from_shift(unsigned int shift)
{
	int idx = -1;

	switch (shift) {
	case 0xc:
		idx = MMU_PAGE_4K;
		break;
	case 0x10:
		idx = MMU_PAGE_64K;
		break;
	case 0x15:
		idx = MMU_PAGE_2M;
		break;
	case 0x1e:
		idx = MMU_PAGE_1G;
		break;
	}
	return idx;
}

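/*
 * Flat device-tree scanner: on "cpu" nodes, pick up the supported PID
 * width and the radix AP (actual page size) encodings, filling in
 * mmu_psize_defs for each recognised page size.
 */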
static int __init radix_dt_scan_page_sizes(unsigned long node,
					   const char *uname, int depth,
					   void *data)
{
	int size = 0;
	int shift, idx;
	unsigned int ap;
	const __be32 *prop;
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	/* Find MMU PID size */
	prop = of_get_flat_dt_prop(node, "ibm,mmu-pid-bits", &size);
	if (prop && size == 4)
		mmu_pid_bits = be32_to_cpup(prop);

	/* Grab page size encodings */
	prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
	if (!prop)
		return 0;

	pr_info("Page sizes from device-tree:\n");
	for (; size >= 4; size -= 4, ++prop) {

		struct mmu_psize_def *def;

		/* top 3 bits are the AP encoding */
		shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
		ap = be32_to_cpu(prop[0]) >> 29;
		pr_info("Page size shift = %d AP=0x%x\n", shift, ap);

		idx = get_idx_from_shift(shift);
		if (idx < 0)
			continue;

		def = &mmu_psize_defs[idx];
		def->shift = shift;
		def->ap = ap;
	}

	/* needed ? */
	cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
	return 1;
}

void __init radix__early_init_devtree(void)
{
	int rc;

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
	if (rc != 0) /* Found */
		goto found;
	/*
	 * Let's assume we have 4K and 64K page support
	 */
	mmu_psize_defs[MMU_PAGE_4K].shift = 12;
	mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;

	mmu_psize_defs[MMU_PAGE_64K].shift = 16;
	mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
found:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	if (mmu_psize_defs[MMU_PAGE_2M].shift) {
		/*
		 * map vmemmap using 2M if available
		 */
		mmu_vmemmap_psize = MMU_PAGE_2M;
	}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
	return;
}

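/*
 * POWER9 DD1 workaround: flush both host and guest translations from the
 * TLB, then flip HID0 into radix mode and spin until the change takes
 * effect.
 */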
static void update_hid_for_radix(void)
{
	unsigned long hid0;
	unsigned long rb = 3UL << PPC_BITLSHIFT(53); /* IS = 3 */

	asm volatile("ptesync": : :"memory");
	/* prs = 0, ric = 2, rs = 0, r = 1 is = 3 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(1), "i"(0), "i"(2), "r"(0) : "memory");
	/* prs = 1, ric = 2, rs = 0, r = 1 is = 3 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(1), "i"(1), "i"(2), "r"(0) : "memory");
	asm volatile("eieio; tlbsync; ptesync; isync; slbia": : :"memory");
	trace_tlbie(0, 0, rb, 0, 2, 0, 1);
	trace_tlbie(0, 0, rb, 0, 2, 1, 1);

	/*
	 * Now switch the HID
	 */
	hid0 = mfspr(SPRN_HID0);
	hid0 |= HID0_POWER9_RADIX;
	mtspr(SPRN_HID0, hid0);
	asm volatile("isync": : :"memory");

	/* Wait for it to happen */
	while (!(mfspr(SPRN_HID0) & HID0_POWER9_RADIX))
		cpu_relax();
}

static void radix_init_amor(void)
{
	/*
	 * In HV mode, we init AMOR (Authority Mask Override Register) so that
	 * the hypervisor and guest can set up IAMR (Instruction Authority Mask
	 * Register), enable key 0 and set it to 1.
	 *
	 * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
	 */
	mtspr(SPRN_AMOR, (3ul << 62));
}

static void radix_init_iamr(void)
{
	unsigned long iamr;

	/*
	 * The IAMR should be set to 0 on DD1.
	 */
	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
		iamr = 0;
	else
		iamr = (1ul << 62);

	/*
	 * Radix always uses key0 of the IAMR to determine if an access is
	 * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
	 * fetch.
	 */
	mtspr(SPRN_IAMR, iamr);
}

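/*
 * Primary-CPU MMU bring-up: publish the radix geometry through the generic
 * page-table size/index variables, set up the partition and process tables
 * (natively, or via the hypervisor under an LPAR), and switch init_mm onto
 * the guard PID.
 */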
void __init radix__early_init_mmu(void)
{
	unsigned long lpcr;

#ifdef CONFIG_PPC_64K_PAGES
	/* PAGE_SIZE mappings */
	mmu_virtual_psize = MMU_PAGE_64K;
#else
	mmu_virtual_psize = MMU_PAGE_4K;
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* vmemmap mapping */
	mmu_vmemmap_psize = mmu_virtual_psize;
#endif
	/*
	 * initialize page table size
	 */
	__pte_index_size = RADIX_PTE_INDEX_SIZE;
	__pmd_index_size = RADIX_PMD_INDEX_SIZE;
	__pud_index_size = RADIX_PUD_INDEX_SIZE;
	__pgd_index_size = RADIX_PGD_INDEX_SIZE;
	__pmd_cache_index = RADIX_PMD_INDEX_SIZE;
	__pte_table_size = RADIX_PTE_TABLE_SIZE;
	__pmd_table_size = RADIX_PMD_TABLE_SIZE;
	__pud_table_size = RADIX_PUD_TABLE_SIZE;
	__pgd_table_size = RADIX_PGD_TABLE_SIZE;

	__pmd_val_bits = RADIX_PMD_VAL_BITS;
	__pud_val_bits = RADIX_PUD_VAL_BITS;
	__pgd_val_bits = RADIX_PGD_VAL_BITS;

	__kernel_virt_start = RADIX_KERN_VIRT_START;
	__kernel_virt_size = RADIX_KERN_VIRT_SIZE;
	__vmalloc_start = RADIX_VMALLOC_START;
	__vmalloc_end = RADIX_VMALLOC_END;
	__kernel_io_start = RADIX_KERN_IO_START;
	vmemmap = (struct page *)RADIX_VMEMMAP_BASE;
	ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PCI
	pci_io_base = ISA_IO_BASE;
#endif

	/*
	 * For now radix also uses the same frag size
	 */
	__pte_frag_nr = H_PTE_FRAG_NR;
	__pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		radix_init_native();
		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
			update_hid_for_radix();
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
		radix_init_partition_table();
		radix_init_amor();
	} else {
		radix_init_pseries();
	}

	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);

	radix_init_iamr();
	radix_init_pgtable();
	/* Switch to the guard PID before turning on MMU */
	radix__switch_mmu_context(NULL, &init_mm);
	if (cpu_has_feature(CPU_FTR_HVMODE))
		tlbiel_all();
}

void radix__early_init_mmu_secondary(void)
{
	unsigned long lpcr;
	/*
	 * update partition table control register and UPRT
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR)) {

		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
			update_hid_for_radix();

		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);

		mtspr(SPRN_PTCR,
		      __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
		radix_init_amor();
	}
	radix_init_iamr();

	radix__switch_mmu_context(NULL, &init_mm);
	if (cpu_has_feature(CPU_FTR_HVMODE))
		tlbiel_all();
}

void radix__mmu_cleanup_all(void)
{
	unsigned long lpcr;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
		mtspr(SPRN_PTCR, 0);
		powernv_set_nmmu_ptcr(0);
		radix__flush_tlb_all();
	}
}

void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/*
	 * We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors.
	 */
	BUG_ON(first_memblock_base != 0);

	/*
	 * Radix mode is not limited by RMA / VRMA addressing.
	 */
	ppc64_rma_size = ULONG_MAX;
}

#ifdef CONFIG_MEMORY_HOTPLUG
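/*
 * Memory-unplug helpers: free_pte_table()/free_pmd_table() release a
 * page-table page once every entry in it has been cleared.
 */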
static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	pte_free_kernel(&init_mm, pte_start);
	pmd_clear(pmd);
}

static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	pmd_free(&init_mm, pmd_start);
	pud_clear(pud);
}

static void remove_pte_table(pte_t *pte_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(next)) {
			/*
			 * The vmemmap_free() and remove_section_mapping()
			 * codepaths call us with aligned addresses.
			 */
			WARN_ONCE(1, "%s: unaligned range\n", __func__);
			continue;
		}

		pte_clear(&init_mm, addr, pte);
	}
}

static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte_base;
	pmd_t *pmd;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_huge(*pmd)) {
			if (!IS_ALIGNED(addr, PMD_SIZE) ||
			    !IS_ALIGNED(next, PMD_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			pte_clear(&init_mm, addr, (pte_t *)pmd);
			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next);
		free_pte_table(pte_base, pmd);
	}
}

static void remove_pud_table(pud_t *pud_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pmd_t *pmd_base;
	pud_t *pud;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_huge(*pud)) {
			if (!IS_ALIGNED(addr, PUD_SIZE) ||
			    !IS_ALIGNED(next, PUD_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			pte_clear(&init_mm, addr, (pte_t *)pud);
			continue;
		}

		pmd_base = (pmd_t *)pud_page_vaddr(*pud);
		remove_pmd_table(pmd_base, addr, next);
		free_pmd_table(pmd_base, pud);
	}
}

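/*
 * Tear down the kernel mappings for [start, end): walk the tree top down,
 * clearing leaf entries (huge or small) and freeing now-empty intermediate
 * tables, then do a ranged TLB flush.
 */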
static void remove_pagetable(unsigned long start, unsigned long end)
{
	unsigned long addr, next;
	pud_t *pud_base;
	pgd_t *pgd;

	spin_lock(&init_mm.page_table_lock);

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		if (pgd_huge(*pgd)) {
			if (!IS_ALIGNED(addr, PGDIR_SIZE) ||
			    !IS_ALIGNED(next, PGDIR_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			pte_clear(&init_mm, addr, (pte_t *)pgd);
			continue;
		}

		pud_base = (pud_t *)pgd_page_vaddr(*pgd);
		remove_pud_table(pud_base, addr, next);
	}

	spin_unlock(&init_mm.page_table_lock);
	radix__flush_tlb_kernel_range(start, end);
}

int __ref radix__create_section_mapping(unsigned long start, unsigned long end)
{
	return create_physical_mapping(start, end);
}

int radix__remove_section_mapping(unsigned long start, unsigned long end)
{
	remove_pagetable(start, end);
	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit radix__vmemmap_create_mapping(unsigned long start,
				      unsigned long page_size,
				      unsigned long phys)
{
	/* Create a PTE encoding */
	unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;

	BUG_ON(radix__map_kernel_page(start, phys, __pgprot(flags), page_size));
	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
{
	remove_pagetable(start, start + page_size);
}
#endif
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

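/*
 * Clear and/or set bits in a huge-page PMD and return the old value; the
 * THP counterpart of radix__pte_update().
 */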
unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
					 pmd_t *pmdp, unsigned long clr,
					 unsigned long set)
{
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(&mm->page_table_lock);
#endif

	old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
	trace_hugepage_update(addr, old, clr, set);

	return old;
}

pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			pmd_t *pmdp)
{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
	VM_BUG_ON(pmd_devmap(*pmdp));
	/*
	 * khugepaged calls this for a normal pmd
	 */
	pmd = *pmdp;
	pmd_clear(pmdp);

	/* FIXME!! Verify whether we need this kick below */
	serialize_against_pte_lookup(vma->vm_mm);

	radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);

	return pmd;
}

/*
 * For us pgtable_t is pte_t *. In order to save the deposited
 * page table, we consider the allocated page table as a list
 * head. On withdraw we need to make sure we zero out the used
 * list_head memory area.
 */
void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				 pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pte_t *ptep;
	pgtable_t pgtable;
	struct list_head *lh;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	*ptep = __pte(0);
	ptep++;
	*ptep = __pte(0);
	return pgtable;
}

pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
				     unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	unsigned long old;

	old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	/*
	 * Serialize against find_current_mm_pte, which does a lock-less
	 * lookup in page tables with local interrupts disabled. For huge pages
	 * it casts pmd_t to pte_t. Since the format of pte_t is different from
	 * pmd_t, we want to prevent transit from a pmd pointing to a page table
	 * to a pmd pointing to a huge page (and back) while interrupts are
	 * disabled. We clear the pmd to possibly replace it with a page table
	 * pointer in different code paths, so make sure we wait for the
	 * parallel find_current_mm_pte to finish.
	 */
	serialize_against_pte_lookup(mm);
	return old_pmd;
}

int radix__has_transparent_hugepage(void)
{
	/* For radix, 2M at the PMD level means THP */
	if (mmu_psize_defs[MMU_PAGE_2M].shift == PMD_SHIFT)
		return 1;
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */