/*
 * Page table handling routines for radix page table.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/sched/mm.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/mm.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/powernv.h>
#include <asm/sections.h>
#include <asm/trace.h>

#include <trace/events/thp.h>

static int native_register_process_table(unsigned long base, unsigned long pg_sz,
					  unsigned long table_size)
{
	unsigned long patb0, patb1;

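	/*
	 * Update the partition table entry for LPID 0 in place: dw0 is
	 * preserved, dw1 is pointed at the new process table. PATB_GR
	 * marks the process-scoped translation as radix.
	 */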
	patb0 = be64_to_cpu(partition_tb[0].patb0);
	patb1 = base | table_size | PATB_GR;

	mmu_partition_table_set_entry(0, patb0, patb1);

	return 0;
}

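/*
 * Note: __ref because this calls memblock_alloc_base(), an init-time
 * allocator; it is only used before the regular allocators are up.
 */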
static __ref void *early_alloc_pgtable(unsigned long size)
{
	void *pt;

	pt = __va(memblock_alloc_base(size, size, MEMBLOCK_ALLOC_ANYWHERE));
	memset(pt, 0, size);

	return pt;
}

int radix__map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	/*
	 * Make sure the task size is correct as per the max addr
	 */
	BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);
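	/*
	 * Two paths below: once slab is up we use the regular page table
	 * allocators, otherwise we fall back to memblock via
	 * early_alloc_pgtable(). A PUD_SIZE/PMD_SIZE request installs a
	 * leaf entry at that level instead of descending further.
	 */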
	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		if (map_page_size == PUD_SIZE) {
			ptep = (pte_t *)pudp;
			goto set_the_pte;
		}
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		if (map_page_size == PMD_SIZE) {
			ptep = pmdp_ptep(pmdp);
			goto set_the_pte;
		}
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
	} else {
		pgdp = pgd_offset_k(ea);
		if (pgd_none(*pgdp)) {
			pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
			BUG_ON(pudp == NULL);
			pgd_populate(&init_mm, pgdp, pudp);
		}
		pudp = pud_offset(pgdp, ea);
		if (map_page_size == PUD_SIZE) {
			ptep = (pte_t *)pudp;
			goto set_the_pte;
		}
		if (pud_none(*pudp)) {
			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
			BUG_ON(pmdp == NULL);
			pud_populate(&init_mm, pudp, pmdp);
		}
		pmdp = pmd_offset(pudp, ea);
		if (map_page_size == PMD_SIZE) {
			ptep = pmdp_ptep(pmdp);
			goto set_the_pte;
		}
		if (!pmd_present(*pmdp)) {
			ptep = early_alloc_pgtable(PAGE_SIZE);
			BUG_ON(ptep == NULL);
			pmd_populate_kernel(&init_mm, pmdp, ptep);
		}
		ptep = pte_offset_kernel(pmdp, ea);
	}

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, flags));
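	/* Order the PTE store before any subsequent use of the mapping. */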
	smp_wmb();
	return 0;
}

#ifdef CONFIG_STRICT_KERNEL_RWX
void radix__change_memory_range(unsigned long start, unsigned long end,
				unsigned long clear)
{
	unsigned long idx;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = PAGE_ALIGN(end); // aligns up

	pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n",
		 start, end, clear);

	for (idx = start; idx < end; idx += PAGE_SIZE) {
		pgdp = pgd_offset_k(idx);
		pudp = pud_alloc(&init_mm, pgdp, idx);
		if (!pudp)
			continue;
		if (pud_huge(*pudp)) {
			ptep = (pte_t *)pudp;
			goto update_the_pte;
		}
		pmdp = pmd_alloc(&init_mm, pudp, idx);
		if (!pmdp)
			continue;
		if (pmd_huge(*pmdp)) {
			ptep = pmdp_ptep(pmdp);
			goto update_the_pte;
		}
		ptep = pte_alloc_kernel(pmdp, idx);
		if (!ptep)
			continue;
update_the_pte:
		radix__pte_update(&init_mm, idx, ptep, clear, 0, 0);
	}

	radix__flush_tlb_kernel_range(start, end);
}

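/*
 * Make the kernel text and rodata, i.e. _stext..__init_begin, read-only
 * by clearing _PAGE_WRITE on every PTE in the range.
 */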
void radix__mark_rodata_ro(void)
{
	unsigned long start, end;

	start = (unsigned long)_stext;
	end = (unsigned long)__init_begin;

	radix__change_memory_range(start, end, _PAGE_WRITE);
}

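/*
 * Clear _PAGE_EXEC on the init sections (__init_begin..__init_end) once
 * they are no longer needed for execution.
 */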
void radix__mark_initmem_nx(void)
{
	unsigned long start = (unsigned long)__init_begin;
	unsigned long end = (unsigned long)__init_end;

	radix__change_memory_range(start, end, _PAGE_EXEC);
}
#endif /* CONFIG_STRICT_KERNEL_RWX */

static inline void __meminit print_mapping(unsigned long start,
					   unsigned long end,
					   unsigned long size)
{
	if (end <= start)
		return;

	pr_info("Mapped range 0x%lx - 0x%lx with 0x%lx\n", start, end, size);
}

static int __meminit create_physical_mapping(unsigned long start,
					     unsigned long end)
{
	unsigned long vaddr, addr, mapping_size = 0;
	pgprot_t prot;
	unsigned long max_mapping_size;
#ifdef CONFIG_STRICT_KERNEL_RWX
	int split_text_mapping = 1;
#else
	int split_text_mapping = 0;
#endif

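	/*
	 * Map with the largest page size that the alignment of addr and
	 * the remaining gap allow: 1G, then 2M, then base pages. With
	 * STRICT_KERNEL_RWX, large pages that would straddle the
	 * _stext..__init_begin boundary are split, so the kernel text can
	 * later get its own protection.
	 */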
	start = _ALIGN_UP(start, PAGE_SIZE);
	for (addr = start; addr < end; addr += mapping_size) {
		unsigned long gap, previous_size;
		int rc;

		gap = end - addr;
		previous_size = mapping_size;
		max_mapping_size = PUD_SIZE;

retry:
		if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
		    mmu_psize_defs[MMU_PAGE_1G].shift &&
		    PUD_SIZE <= max_mapping_size)
			mapping_size = PUD_SIZE;
		else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
			 mmu_psize_defs[MMU_PAGE_2M].shift)
			mapping_size = PMD_SIZE;
		else
			mapping_size = PAGE_SIZE;

		if (split_text_mapping && (mapping_size == PUD_SIZE) &&
		    (addr <= __pa_symbol(__init_begin)) &&
		    (addr + mapping_size) >= __pa_symbol(_stext)) {
			max_mapping_size = PMD_SIZE;
			goto retry;
		}

		if (split_text_mapping && (mapping_size == PMD_SIZE) &&
		    (addr <= __pa_symbol(__init_begin)) &&
		    (addr + mapping_size) >= __pa_symbol(_stext))
			mapping_size = PAGE_SIZE;

		if (mapping_size != previous_size) {
			print_mapping(start, addr, previous_size);
			start = addr;
		}

		vaddr = (unsigned long)__va(addr);

		if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
		    overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size))
			prot = PAGE_KERNEL_X;
		else
			prot = PAGE_KERNEL;

		rc = radix__map_kernel_page(vaddr, addr, prot, mapping_size);
		if (rc)
			return rc;
	}

	print_mapping(start, addr, mapping_size);
	return 0;
}

static void __init radix_init_pgtable(void)
{
	unsigned long rts_field;
	struct memblock_region *reg;

	/* We don't support slb for radix */
	mmu_slb_size = 0;
	/*
	 * Create the linear mapping, using standard page size for now
	 */
	for_each_memblock(memory, reg)
		WARN_ON(create_physical_mapping(reg->base,
						reg->base + reg->size));
	/*
	 * Allocate Partition table and process table for the
	 * host.
	 */
	BUILD_BUG_ON_MSG((PRTB_SIZE_SHIFT > 36), "Process table size too large.");
	process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT);
	/*
	 * Fill in the process table.
	 */
	rts_field = radix__get_tree_size();
	process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
	/*
	 * Fill in the partition table. We are supposed to use the effective
	 * address of the process table here, but our linear mapping also
	 * enables us to use the physical address.
	 */
	register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
	pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
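	/*
	 * Flush the process-scoped translations for LPID 0 (RIC=2: all
	 * caches, PRS=1: process-scoped, R=1: radix) now that the process
	 * table is in place.
	 */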
	asm volatile("ptesync" : : : "memory");
	asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
		     "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
	trace_tlbie(0, 0, TLBIEL_INVAL_SET_LPID, 0, 2, 1, 1);
}

static void __init radix_init_partition_table(void)
{
	unsigned long rts_field, dw0;

	mmu_partition_table_init();
	rts_field = radix__get_tree_size();
	dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
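	/* PATB_HR: the host uses radix translation for this partition. */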
	mmu_partition_table_set_entry(0, dw0, 0);

	pr_info("Initializing Radix MMU\n");
	pr_info("Partition table %p\n", partition_tb);
}

void __init radix_init_native(void)
{
	register_process_table = native_register_process_table;
}

static int __init get_idx_from_shift(unsigned int shift)
{
	int idx = -1;

	switch (shift) {
	case 0xc:
		idx = MMU_PAGE_4K;
		break;
	case 0x10:
		idx = MMU_PAGE_64K;
		break;
	case 0x15:
		idx = MMU_PAGE_2M;
		break;
	case 0x1e:
		idx = MMU_PAGE_1G;
		break;
	}
	return idx;
}

static int __init radix_dt_scan_page_sizes(unsigned long node,
					   const char *uname, int depth,
					   void *data)
{
	int size = 0;
	int shift, idx;
	unsigned int ap;
	const __be32 *prop;
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
	if (!prop)
		return 0;

	pr_info("Page sizes from device-tree:\n");
	for (; size >= 4; size -= 4, ++prop) {

		struct mmu_psize_def *def;

		/* the top 3 bits are the AP encoding */
		shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
		ap = be32_to_cpu(prop[0]) >> 29;
		pr_info("Page size shift = %d AP=0x%x\n", shift, ap);

		idx = get_idx_from_shift(shift);
		if (idx < 0)
			continue;

		def = &mmu_psize_defs[idx];
		def->shift = shift;
		def->ap = ap;
	}

	/* needed ? */
	cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
	return 1;
}

void __init radix__early_init_devtree(void)
{
	int rc;

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
	if (rc != 0) /* Found */
		goto found;
	/*
	 * Nothing found; assume we have 4K and 64K page support.
	 */
	mmu_psize_defs[MMU_PAGE_4K].shift = 12;
	mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;

	mmu_psize_defs[MMU_PAGE_64K].shift = 16;
	mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
found:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	if (mmu_psize_defs[MMU_PAGE_2M].shift) {
		/*
		 * map vmemmap using 2M if available
		 */
		mmu_vmemmap_psize = MMU_PAGE_2M;
	}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
	return;
}

static void update_hid_for_radix(void)
{
	unsigned long hid0;
	unsigned long rb = 3UL << PPC_BITLSHIFT(53); /* IS = 3 */

	asm volatile("ptesync": : :"memory");
	/* prs = 0, ric = 2, rs = 0, r = 1, is = 3 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(1), "i"(0), "i"(2), "r"(0) : "memory");
	/* prs = 1, ric = 2, rs = 0, r = 1, is = 3 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(1), "i"(1), "i"(2), "r"(0) : "memory");
	asm volatile("eieio; tlbsync; ptesync; isync; slbia": : :"memory");
	trace_tlbie(0, 0, rb, 0, 2, 0, 1);
	trace_tlbie(0, 0, rb, 0, 2, 1, 1);

	/*
	 * Now switch the HID
	 */
	hid0 = mfspr(SPRN_HID0);
	hid0 |= HID0_POWER9_RADIX;
	mtspr(SPRN_HID0, hid0);
	asm volatile("isync": : :"memory");

	/* Wait for it to happen */
	while (!(mfspr(SPRN_HID0) & HID0_POWER9_RADIX))
		cpu_relax();
}

static void radix_init_amor(void)
{
	/*
	 * In HV mode, we init AMOR (Authority Mask Override Register) so
	 * that the hypervisor and guest can set up IAMR (Instruction
	 * Authority Mask Register), enable key 0 and set it to 1.
	 *
	 * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
	 */
	mtspr(SPRN_AMOR, (3ul << 62));
}

static void radix_init_iamr(void)
{
	unsigned long iamr;

	/*
	 * The IAMR should be set to 0 on DD1.
	 */
	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
		iamr = 0;
	else
		iamr = (1ul << 62);

	/*
	 * Radix always uses key0 of the IAMR to determine if an access is
	 * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
	 * fetch.
	 */
	mtspr(SPRN_IAMR, iamr);
}

void __init radix__early_init_mmu(void)
{
	unsigned long lpcr;

#ifdef CONFIG_PPC_64K_PAGES
	/* PAGE_SIZE mappings */
	mmu_virtual_psize = MMU_PAGE_64K;
#else
	mmu_virtual_psize = MMU_PAGE_4K;
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* vmemmap mapping */
	mmu_vmemmap_psize = mmu_virtual_psize;
#endif
	/*
	 * initialize page table size
	 */
	__pte_index_size = RADIX_PTE_INDEX_SIZE;
	__pmd_index_size = RADIX_PMD_INDEX_SIZE;
	__pud_index_size = RADIX_PUD_INDEX_SIZE;
	__pgd_index_size = RADIX_PGD_INDEX_SIZE;
	__pmd_cache_index = RADIX_PMD_INDEX_SIZE;
	__pte_table_size = RADIX_PTE_TABLE_SIZE;
	__pmd_table_size = RADIX_PMD_TABLE_SIZE;
	__pud_table_size = RADIX_PUD_TABLE_SIZE;
	__pgd_table_size = RADIX_PGD_TABLE_SIZE;

	__pmd_val_bits = RADIX_PMD_VAL_BITS;
	__pud_val_bits = RADIX_PUD_VAL_BITS;
	__pgd_val_bits = RADIX_PGD_VAL_BITS;

	__kernel_virt_start = RADIX_KERN_VIRT_START;
	__kernel_virt_size = RADIX_KERN_VIRT_SIZE;
	__vmalloc_start = RADIX_VMALLOC_START;
	__vmalloc_end = RADIX_VMALLOC_END;
	__kernel_io_start = RADIX_KERN_IO_START;
	vmemmap = (struct page *)RADIX_VMEMMAP_BASE;
	ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PCI
	pci_io_base = ISA_IO_BASE;
#endif

	/*
	 * For now radix also uses the same frag size
	 */
	__pte_frag_nr = H_PTE_FRAG_NR;
	__pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		radix_init_native();
		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
			update_hid_for_radix();
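		/*
		 * LPCR_UPRT enables use of the process table, LPCR_HR
		 * selects host radix translation.
		 */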
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
		radix_init_partition_table();
		radix_init_amor();
	} else {
		radix_init_pseries();
	}

	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);

	radix_init_iamr();
	radix_init_pgtable();
}

void radix__early_init_mmu_secondary(void)
{
	unsigned long lpcr;
	/*
	 * Update the partition table control register and UPRT
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR)) {

		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
			update_hid_for_radix();

		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);

		mtspr(SPRN_PTCR,
		      __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
		radix_init_amor();
	}
	radix_init_iamr();
}

void radix__mmu_cleanup_all(void)
{
	unsigned long lpcr;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
		mtspr(SPRN_PTCR, 0);
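		/* Also clear the PTCR copy used by the nest MMU. */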
		powernv_set_nmmu_ptcr(0);
		radix__flush_tlb_all();
	}
}

void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
				       phys_addr_t first_memblock_size)
{
	/*
	 * We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors.
	 */
	BUG_ON(first_memblock_base != 0);
	/*
	 * We limit the allocations that depend on ppc64_rma_size
	 * to first_memblock_size. We also clamp it to 1GB to
	 * avoid some funky things such as RTAS bugs.
	 *
	 * On a radix config we really don't have a limitation
	 * on real mode access. But keeping it as above works
	 * well enough.
	 */
	ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
	/*
	 * Finally limit subsequent allocations. We really don't want
	 * to limit the memblock allocations to rma_size. FIXME!! should
	 * we even limit at all ?
	 */
	memblock_set_current_limit(first_memblock_base + first_memblock_size);
}

#ifdef CONFIG_MEMORY_HOTPLUG
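/*
 * Teardown helpers for memory hot-remove: walk the kernel page tables
 * top-down, clear leaf entries, and free a page-table page only once
 * every entry in it is none.
 */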
static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	pte_free_kernel(&init_mm, pte_start);
	pmd_clear(pmd);
}

static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	pmd_free(&init_mm, pmd_start);
	pud_clear(pud);
}

static void remove_pte_table(pte_t *pte_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(next)) {
			/*
			 * The vmemmap_free() and remove_section_mapping()
			 * codepaths call us with aligned addresses.
			 */
			WARN_ONCE(1, "%s: unaligned range\n", __func__);
			continue;
		}

		pte_clear(&init_mm, addr, pte);
	}
}

static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte_base;
	pmd_t *pmd;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_huge(*pmd)) {
			if (!IS_ALIGNED(addr, PMD_SIZE) ||
			    !IS_ALIGNED(next, PMD_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			pte_clear(&init_mm, addr, (pte_t *)pmd);
			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next);
		free_pte_table(pte_base, pmd);
	}
}

static void remove_pud_table(pud_t *pud_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pmd_t *pmd_base;
	pud_t *pud;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_huge(*pud)) {
			if (!IS_ALIGNED(addr, PUD_SIZE) ||
			    !IS_ALIGNED(next, PUD_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			pte_clear(&init_mm, addr, (pte_t *)pud);
			continue;
		}

		pmd_base = (pmd_t *)pud_page_vaddr(*pud);
		remove_pmd_table(pmd_base, addr, next);
		free_pmd_table(pmd_base, pud);
	}
}

static void remove_pagetable(unsigned long start, unsigned long end)
{
	unsigned long addr, next;
	pud_t *pud_base;
	pgd_t *pgd;

	spin_lock(&init_mm.page_table_lock);

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		if (pgd_huge(*pgd)) {
			if (!IS_ALIGNED(addr, PGDIR_SIZE) ||
			    !IS_ALIGNED(next, PGDIR_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			pte_clear(&init_mm, addr, (pte_t *)pgd);
			continue;
		}

		pud_base = (pud_t *)pgd_page_vaddr(*pgd);
		remove_pud_table(pud_base, addr, next);
	}

	spin_unlock(&init_mm.page_table_lock);
	radix__flush_tlb_kernel_range(start, end);
}

int __ref radix__create_section_mapping(unsigned long start, unsigned long end)
{
	return create_physical_mapping(start, end);
}

int radix__remove_section_mapping(unsigned long start, unsigned long end)
{
	remove_pagetable(start, end);
	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit radix__vmemmap_create_mapping(unsigned long start,
					    unsigned long page_size,
					    unsigned long phys)
{
	/* Create a PTE encoding */
	unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;

	BUG_ON(radix__map_kernel_page(start, phys, __pgprot(flags), page_size));
	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
{
	remove_pagetable(start, start + page_size);
}
#endif
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
					 pmd_t *pmdp, unsigned long clr,
					 unsigned long set)
{
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(&mm->page_table_lock);
#endif

	old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
	trace_hugepage_update(addr, old, clr, set);

	return old;
}

pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
				 pmd_t *pmdp)

{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
	VM_BUG_ON(pmd_devmap(*pmdp));
	/*
	 * khugepaged calls this for normal pmd
	 */
	pmd = *pmdp;
	pmd_clear(pmdp);

	/*FIXME!! Verify whether we need this kick below */
	serialize_against_pte_lookup(vma->vm_mm);

	radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);

	return pmd;
}

/*
 * For us pgtable_t is pte_t *. In order to save the deposited
 * page table, we consider the allocated page table as a list
 * head. On withdraw we need to make sure we zero out the used
 * list_head memory area.
 */
void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pte_t *ptep;
	pgtable_t pgtable;
	struct list_head *lh;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
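	/* Zero the two PTE slots that were used as the list_head. */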
	*ptep = __pte(0);
	ptep++;
	*ptep = __pte(0);
	return pgtable;
}


pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
				     unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	unsigned long old;

	old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	/*
	 * Serialize against find_current_mm_pte, which does a lock-less
	 * lookup in the page tables with local interrupts disabled. For huge
	 * pages it casts pmd_t to pte_t. Since the format of pte_t differs
	 * from pmd_t, we want to prevent a transit from a pmd pointing to a
	 * page table to a pmd pointing to a huge page (and back) while
	 * interrupts are disabled. We clear the pmd so that it may be
	 * replaced with a page table pointer in different code paths, so
	 * make sure we wait for the parallel find_current_mm_pte to finish.
	 */
	serialize_against_pte_lookup(mm);
	return old_pmd;
}

int radix__has_transparent_hugepage(void)
{
	/* For radix 2M at PMD level means thp */
	if (mmu_psize_defs[MMU_PAGE_2M].shift == PMD_SHIFT)
		return 1;
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */