/*
 * Page table handling routines for radix page table.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/sched/mm.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/mm.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/powernv.h>
#include <asm/sections.h>
#include <asm/trace.h>

#include <trace/events/thp.h>

static int native_register_process_table(unsigned long base, unsigned long pg_sz,
					  unsigned long table_size)
{
	unsigned long patb1 = base | table_size | PATB_GR;

	partition_tb->patb1 = cpu_to_be64(patb1);
	return 0;
}

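/*
 * Boot-time allocator for page table pages: hands back a size-aligned,
 * zeroed block taken straight from memblock. Only used before the slab
 * allocator is available; __ref lets it call the init-time memblock code.
 */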
static __ref void *early_alloc_pgtable(unsigned long size)
{
	void *pt;

	pt = __va(memblock_alloc_base(size, size, MEMBLOCK_ALLOC_ANYWHERE));
	memset(pt, 0, size);

	return pt;
}

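/*
 * Install a kernel mapping of ea -> pa. map_page_size selects the level:
 * PUD_SIZE (1G) and PMD_SIZE (2M) set a huge entry at that level, any
 * other size maps a base page. Page table pages come from the normal
 * allocators once slab is up, and from early_alloc_pgtable() before that.
 */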
int radix__map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	/*
	 * Make sure task size is correct as per the max addr
	 */
	BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);
	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		if (map_page_size == PUD_SIZE) {
			ptep = (pte_t *)pudp;
			goto set_the_pte;
		}
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		if (map_page_size == PMD_SIZE) {
			ptep = pmdp_ptep(pmdp);
			goto set_the_pte;
		}
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
	} else {
		pgdp = pgd_offset_k(ea);
		if (pgd_none(*pgdp)) {
			pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
			BUG_ON(pudp == NULL);
			pgd_populate(&init_mm, pgdp, pudp);
		}
		pudp = pud_offset(pgdp, ea);
		if (map_page_size == PUD_SIZE) {
			ptep = (pte_t *)pudp;
			goto set_the_pte;
		}
		if (pud_none(*pudp)) {
			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
			BUG_ON(pmdp == NULL);
			pud_populate(&init_mm, pudp, pmdp);
		}
		pmdp = pmd_offset(pudp, ea);
		if (map_page_size == PMD_SIZE) {
			ptep = pmdp_ptep(pmdp);
			goto set_the_pte;
		}
		if (!pmd_present(*pmdp)) {
			ptep = early_alloc_pgtable(PAGE_SIZE);
			BUG_ON(ptep == NULL);
			pmd_populate_kernel(&init_mm, pmdp, ptep);
		}
		ptep = pte_offset_kernel(pmdp, ea);
	}

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, flags));
	smp_wmb();
	return 0;
}

#ifdef CONFIG_STRICT_KERNEL_RWX
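/*
 * Clear the given permission bits on every PTE mapping [start, end) of
 * the kernel linear map, then flush the range from the TLB. Huge PUD/PMD
 * entries are updated in place rather than split.
 */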
void radix__change_memory_range(unsigned long start, unsigned long end,
				unsigned long clear)
{
	unsigned long idx;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = PAGE_ALIGN(end); // aligns up

	pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n",
		 start, end, clear);

	for (idx = start; idx < end; idx += PAGE_SIZE) {
		pgdp = pgd_offset_k(idx);
		pudp = pud_alloc(&init_mm, pgdp, idx);
		if (!pudp)
			continue;
		if (pud_huge(*pudp)) {
			ptep = (pte_t *)pudp;
			goto update_the_pte;
		}
		pmdp = pmd_alloc(&init_mm, pudp, idx);
		if (!pmdp)
			continue;
		if (pmd_huge(*pmdp)) {
			ptep = pmdp_ptep(pmdp);
			goto update_the_pte;
		}
		ptep = pte_alloc_kernel(pmdp, idx);
		if (!ptep)
			continue;
update_the_pte:
		radix__pte_update(&init_mm, idx, ptep, clear, 0, 0);
	}

	radix__flush_tlb_kernel_range(start, end);
}

void radix__mark_rodata_ro(void)
{
	unsigned long start, end;

	start = (unsigned long)_stext;
	end = (unsigned long)__init_begin;

	radix__change_memory_range(start, end, _PAGE_WRITE);
}
#endif /* CONFIG_STRICT_KERNEL_RWX */

static inline void __meminit print_mapping(unsigned long start,
					   unsigned long end,
					   unsigned long size)
{
	if (end <= start)
		return;

	pr_info("Mapped range 0x%lx - 0x%lx with 0x%lx\n", start, end, size);
}

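/*
 * Map a physical range with the largest page size that alignment and the
 * hardware allow: 1G, then 2M, then the base page size. When
 * CONFIG_STRICT_KERNEL_RWX is enabled, mappings that would span the
 * kernel text boundaries are retried with smaller sizes so the text can
 * later get its own protection bits.
 */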
static int __meminit create_physical_mapping(unsigned long start,
					     unsigned long end)
{
	unsigned long vaddr, addr, mapping_size = 0;
	pgprot_t prot;
	unsigned long max_mapping_size;
#ifdef CONFIG_STRICT_KERNEL_RWX
	int split_text_mapping = 1;
#else
	int split_text_mapping = 0;
#endif

	start = _ALIGN_UP(start, PAGE_SIZE);
	for (addr = start; addr < end; addr += mapping_size) {
		unsigned long gap, previous_size;
		int rc;

		gap = end - addr;
		previous_size = mapping_size;
		max_mapping_size = PUD_SIZE;

retry:
		if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
		    mmu_psize_defs[MMU_PAGE_1G].shift &&
		    PUD_SIZE <= max_mapping_size)
			mapping_size = PUD_SIZE;
		else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
			 mmu_psize_defs[MMU_PAGE_2M].shift)
			mapping_size = PMD_SIZE;
		else
			mapping_size = PAGE_SIZE;

		if (split_text_mapping && (mapping_size == PUD_SIZE) &&
		    (addr <= __pa_symbol(__init_begin)) &&
		    (addr + mapping_size) >= __pa_symbol(_stext)) {
			max_mapping_size = PMD_SIZE;
			goto retry;
		}

		if (split_text_mapping && (mapping_size == PMD_SIZE) &&
		    (addr <= __pa_symbol(__init_begin)) &&
		    (addr + mapping_size) >= __pa_symbol(_stext))
			mapping_size = PAGE_SIZE;

		if (mapping_size != previous_size) {
			print_mapping(start, addr, previous_size);
			start = addr;
		}

		vaddr = (unsigned long)__va(addr);

		if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
		    overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size))
			prot = PAGE_KERNEL_X;
		else
			prot = PAGE_KERNEL;

		rc = radix__map_kernel_page(vaddr, addr, prot, mapping_size);
		if (rc)
			return rc;
	}

	print_mapping(start, addr, mapping_size);
	return 0;
}

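/*
 * Set up the kernel radix page tables: create the linear mapping for
 * every memblock region, then allocate the process table and point its
 * entry 0 at the kernel's radix tree.
 */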
static void __init radix_init_pgtable(void)
{
	unsigned long rts_field;
	struct memblock_region *reg;

	/* We don't support slb for radix */
	mmu_slb_size = 0;
	/*
	 * Create the linear mapping, using standard page size for now
	 */
	for_each_memblock(memory, reg)
		WARN_ON(create_physical_mapping(reg->base,
						reg->base + reg->size));
	/*
	 * Allocate Partition table and process table for the
	 * host.
	 */
	BUILD_BUG_ON_MSG((PRTB_SIZE_SHIFT > 36), "Process table size too large.");
	process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT);
	/*
	 * Fill in the process table.
	 */
	rts_field = radix__get_tree_size();
	process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
	/*
	 * Fill in the partition table. We are supposed to use the effective
	 * address of the process table here, but our linear mapping also
	 * allows us to use the physical address.
	 */
	register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
	pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
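	/*
	 * Flush the process-scoped translations for LPID 0 now that the
	 * process table is in place: ric = 2 invalidates all caching,
	 * prs = 1 selects process-scoped entries, r = 1 for radix.
	 */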
	asm volatile("ptesync" : : : "memory");
	asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
		     "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
	trace_tlbie(0, 0, TLBIEL_INVAL_SET_LPID, 0, 2, 1, 1);
}

static void __init radix_init_partition_table(void)
{
	unsigned long rts_field, dw0;

	mmu_partition_table_init();
	rts_field = radix__get_tree_size();
	dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
	mmu_partition_table_set_entry(0, dw0, 0);

	pr_info("Initializing Radix MMU\n");
	pr_info("Partition table %p\n", partition_tb);
}

void __init radix_init_native(void)
{
	register_process_table = native_register_process_table;
}

static int __init get_idx_from_shift(unsigned int shift)
{
	int idx = -1;

	switch (shift) {
	case 0xc:
		idx = MMU_PAGE_4K;
		break;
	case 0x10:
		idx = MMU_PAGE_64K;
		break;
	case 0x15:
		idx = MMU_PAGE_2M;
		break;
	case 0x1e:
		idx = MMU_PAGE_1G;
		break;
	}
	return idx;
}

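/*
 * Scan the flattened device tree for "ibm,processor-radix-AP-encodings"
 * on the cpu nodes. Each 32-bit cell carries the AP encoding in its top
 * 3 bits and the page size shift in the remaining bits.
 */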
static int __init radix_dt_scan_page_sizes(unsigned long node,
					   const char *uname, int depth,
					   void *data)
{
	int size = 0;
	int shift, idx;
	unsigned int ap;
	const __be32 *prop;
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
	if (!prop)
		return 0;

	pr_info("Page sizes from device-tree:\n");
	for (; size >= 4; size -= 4, ++prop) {

		struct mmu_psize_def *def;

		/* top 3 bits are the AP encoding */
		shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
		ap = be32_to_cpu(prop[0]) >> 29;
		pr_info("Page size shift = %d AP=0x%x\n", shift, ap);

		idx = get_idx_from_shift(shift);
		if (idx < 0)
			continue;

		def = &mmu_psize_defs[idx];
		def->shift = shift;
		def->ap = ap;
	}

	/* needed ? */
	cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
	return 1;
}

void __init radix__early_init_devtree(void)
{
	int rc;

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
	if (rc != 0) /* Found */
		goto found;
	/*
	 * Let's assume we have page 4k and 64k support
	 */
	mmu_psize_defs[MMU_PAGE_4K].shift = 12;
	mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;

	mmu_psize_defs[MMU_PAGE_64K].shift = 16;
	mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
found:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	if (mmu_psize_defs[MMU_PAGE_2M].shift) {
		/*
		 * map vmemmap using 2M if available
		 */
		mmu_vmemmap_psize = MMU_PAGE_2M;
	}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
	return;
}

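/*
 * POWER9 DD1 workaround: on DD1 the MMU mode is also controlled through
 * HID0. Invalidate all cached translations (both hypervisor- and
 * process-scoped) before setting HID0_POWER9_RADIX, then wait until the
 * SPR reads back the new mode.
 */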
static void update_hid_for_radix(void)
{
	unsigned long hid0;
	unsigned long rb = 3UL << PPC_BITLSHIFT(53); /* IS = 3 */

	asm volatile("ptesync": : :"memory");
	/* prs = 0, ric = 2, rs = 0, r = 1 is = 3 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(1), "i"(0), "i"(2), "r"(0) : "memory");
	/* prs = 1, ric = 2, rs = 0, r = 1 is = 3 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(1), "i"(1), "i"(2), "r"(0) : "memory");
	asm volatile("eieio; tlbsync; ptesync; isync; slbia": : :"memory");
	trace_tlbie(0, 0, rb, 0, 2, 0, 1);
	trace_tlbie(0, 0, rb, 0, 2, 1, 1);

	/*
	 * Now switch the HID
	 */
	hid0 = mfspr(SPRN_HID0);
	hid0 |= HID0_POWER9_RADIX;
	mtspr(SPRN_HID0, hid0);
	asm volatile("isync": : :"memory");

	/* Wait for it to happen */
	while (!(mfspr(SPRN_HID0) & HID0_POWER9_RADIX))
		cpu_relax();
}

static void radix_init_amor(void)
{
	/*
	 * In HV mode, we init AMOR (Authority Mask Override Register) so that
	 * the hypervisor and guest can setup IAMR (Instruction Authority Mask
	 * Register), enable key 0 and set it to 1.
	 *
	 * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
	 */
	mtspr(SPRN_AMOR, (3ul << 62));
}

static void radix_init_iamr(void)
{
	unsigned long iamr;

	/*
	 * The IAMR should be set to 0 on DD1.
	 */
	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
		iamr = 0;
	else
		iamr = (1ul << 62);

	/*
	 * Radix always uses key0 of the IAMR to determine if an access is
	 * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
	 * fetch.
	 */
	mtspr(SPRN_IAMR, iamr);
}

void __init radix__early_init_mmu(void)
{
	unsigned long lpcr;

#ifdef CONFIG_PPC_64K_PAGES
	/* PAGE_SIZE mappings */
	mmu_virtual_psize = MMU_PAGE_64K;
#else
	mmu_virtual_psize = MMU_PAGE_4K;
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* vmemmap mapping */
	mmu_vmemmap_psize = mmu_virtual_psize;
#endif
	/*
	 * initialize page table size
	 */
	__pte_index_size = RADIX_PTE_INDEX_SIZE;
	__pmd_index_size = RADIX_PMD_INDEX_SIZE;
	__pud_index_size = RADIX_PUD_INDEX_SIZE;
	__pgd_index_size = RADIX_PGD_INDEX_SIZE;
	__pmd_cache_index = RADIX_PMD_INDEX_SIZE;
	__pte_table_size = RADIX_PTE_TABLE_SIZE;
	__pmd_table_size = RADIX_PMD_TABLE_SIZE;
	__pud_table_size = RADIX_PUD_TABLE_SIZE;
	__pgd_table_size = RADIX_PGD_TABLE_SIZE;

	__pmd_val_bits = RADIX_PMD_VAL_BITS;
	__pud_val_bits = RADIX_PUD_VAL_BITS;
	__pgd_val_bits = RADIX_PGD_VAL_BITS;

	__kernel_virt_start = RADIX_KERN_VIRT_START;
	__kernel_virt_size = RADIX_KERN_VIRT_SIZE;
	__vmalloc_start = RADIX_VMALLOC_START;
	__vmalloc_end = RADIX_VMALLOC_END;
	vmemmap = (struct page *)RADIX_VMEMMAP_BASE;
	ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PCI
	pci_io_base = ISA_IO_BASE;
#endif

	/*
	 * For now radix also uses the same frag size
	 */
	__pte_frag_nr = H_PTE_FRAG_NR;
	__pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		radix_init_native();
		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
			update_hid_for_radix();
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
		radix_init_partition_table();
		radix_init_amor();
	} else {
		radix_init_pseries();
	}

	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);

	radix_init_iamr();
	radix_init_pgtable();
}

void radix__early_init_mmu_secondary(void)
{
	unsigned long lpcr;
	/*
	 * update partition table control register and UPRT
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR)) {

		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
			update_hid_for_radix();

		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);

		mtspr(SPRN_PTCR,
		      __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
		radix_init_amor();
	}
	radix_init_iamr();
}

void radix__mmu_cleanup_all(void)
{
	unsigned long lpcr;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
		mtspr(SPRN_PTCR, 0);
		powernv_set_nmmu_ptcr(0);
		radix__flush_tlb_all();
	}
}

void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
				       phys_addr_t first_memblock_size)
{
	/* We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors
	 */
	BUG_ON(first_memblock_base != 0);
	/*
	 * We limit the allocations that depend on ppc64_rma_size
	 * to first_memblock_size. We also clamp it to 1GB to
	 * avoid some funky things such as RTAS bugs.
	 *
	 * On a radix config we really don't have a limitation
	 * on real mode access. But keeping it as above works
	 * well enough.
	 */
	ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
	/*
	 * Finally limit subsequent allocations. We really don't want
	 * to limit the memblock allocations to rma_size. FIXME!! should
	 * we even limit at all ?
	 */
	memblock_set_current_limit(first_memblock_base + first_memblock_size);
}

#ifdef CONFIG_MEMORY_HOTPLUG
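/*
 * Teardown helpers for memory hot-remove. A page table page is freed
 * only once every entry in it is none; the upper level entry pointing
 * to it is then cleared.
 */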
static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	pte_free_kernel(&init_mm, pte_start);
	pmd_clear(pmd);
}

static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	pmd_free(&init_mm, pmd_start);
	pud_clear(pud);
}

static void remove_pte_table(pte_t *pte_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(next)) {
			/*
			 * The vmemmap_free() and remove_section_mapping()
			 * codepaths call us with aligned addresses.
			 */
			WARN_ONCE(1, "%s: unaligned range\n", __func__);
			continue;
		}

		pte_clear(&init_mm, addr, pte);
	}
}

static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte_base;
	pmd_t *pmd;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_huge(*pmd)) {
			if (!IS_ALIGNED(addr, PMD_SIZE) ||
			    !IS_ALIGNED(next, PMD_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			pte_clear(&init_mm, addr, (pte_t *)pmd);
			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next);
		free_pte_table(pte_base, pmd);
	}
}

static void remove_pud_table(pud_t *pud_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pmd_t *pmd_base;
	pud_t *pud;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_huge(*pud)) {
			if (!IS_ALIGNED(addr, PUD_SIZE) ||
			    !IS_ALIGNED(next, PUD_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			pte_clear(&init_mm, addr, (pte_t *)pud);
			continue;
		}

		pmd_base = (pmd_t *)pud_page_vaddr(*pud);
		remove_pmd_table(pmd_base, addr, next);
		free_pmd_table(pmd_base, pud);
	}
}

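/*
 * Unmap and free the kernel page tables for [start, end): walk top-down,
 * clear huge entries in place, recurse into the lower levels and free
 * any tables that become empty. Runs under init_mm.page_table_lock and
 * finishes with a TLB flush of the whole range.
 */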
static void remove_pagetable(unsigned long start, unsigned long end)
{
	unsigned long addr, next;
	pud_t *pud_base;
	pgd_t *pgd;

	spin_lock(&init_mm.page_table_lock);

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		if (pgd_huge(*pgd)) {
			if (!IS_ALIGNED(addr, PGDIR_SIZE) ||
			    !IS_ALIGNED(next, PGDIR_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			pte_clear(&init_mm, addr, (pte_t *)pgd);
			continue;
		}

		pud_base = (pud_t *)pgd_page_vaddr(*pgd);
		remove_pud_table(pud_base, addr, next);
	}

	spin_unlock(&init_mm.page_table_lock);
	radix__flush_tlb_kernel_range(start, end);
}

int __ref radix__create_section_mapping(unsigned long start, unsigned long end)
{
	return create_physical_mapping(start, end);
}

int radix__remove_section_mapping(unsigned long start, unsigned long end)
{
	remove_pagetable(start, end);
	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit radix__vmemmap_create_mapping(unsigned long start,
					    unsigned long page_size,
					    unsigned long phys)
{
	/* Create a PTE encoding */
	unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;

	BUG_ON(radix__map_kernel_page(start, phys, __pgprot(flags), page_size));
	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
{
	remove_pagetable(start, start + page_size);
}
#endif
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
					 pmd_t *pmdp, unsigned long clr,
					 unsigned long set)
{
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(&mm->page_table_lock);
#endif

	old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
	trace_hugepage_update(addr, old, clr, set);

	return old;
}

pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
				 pmd_t *pmdp)

{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
	VM_BUG_ON(pmd_devmap(*pmdp));
	/*
	 * khugepaged calls this for a normal pmd
	 */
	pmd = *pmdp;
	pmd_clear(pmdp);
	/* FIXME!! Verify whether we need this kick below */
	kick_all_cpus_sync();
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}

/*
 * For us pgtable_t is pte_t *. In order to save the deposited
 * page table, we consider the allocated page table as a list
 * head. On withdraw we need to make sure we zero out the used
 * list_head memory area.
 */
void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pte_t *ptep;
	pgtable_t pgtable;
	struct list_head *lh;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	*ptep = __pte(0);
	ptep++;
	*ptep = __pte(0);
	return pgtable;
}


pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
				     unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	unsigned long old;

	old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	/*
	 * Serialize against find_linux_pte_or_hugepte which does lock-less
	 * lookup in page tables with local interrupts disabled. For huge pages
	 * it casts pmd_t to pte_t. Since the format of pte_t is different from
	 * pmd_t we want to prevent transit from pmd pointing to page table
	 * to pmd pointing to huge page (and back) while interrupts are disabled.
	 * We clear pmd to possibly replace it with page table pointer in
	 * different code paths. So make sure we wait for the parallel
	 * find_linux_pte_or_hugepte to finish.
	 */
	kick_all_cpus_sync();
	return old_pmd;
}

int radix__has_transparent_hugepage(void)
{
	/* For radix, 2M at the PMD level means THP */
	if (mmu_psize_defs[MMU_PAGE_2M].shift == PMD_SHIFT)
		return 1;
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */