/*
 * Page table handling routines for radix page table.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "radix-mmu: " fmt

#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/mm.h>
#include <linux/string_helpers.h>
#include <linux/stop_machine.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/powernv.h>
#include <asm/sections.h>
#include <asm/trace.h>

#include <trace/events/thp.h>

unsigned int mmu_pid_bits;
unsigned int mmu_base_pid;

static int native_register_process_table(unsigned long base, unsigned long pg_sz,
					  unsigned long table_size)
{
	unsigned long patb0, patb1;

	patb0 = be64_to_cpu(partition_tb[0].patb0);
	patb1 = base | table_size | PATB_GR;

	mmu_partition_table_set_entry(0, patb0, patb1);

	return 0;
}
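
/*
 * Illustrative note: patb1 above packs the process table base address, a
 * size encoding and PATB_GR (radix guest translation) into the second
 * doubleword of partition table entry 0. For the host process table set
 * up in radix_init_pgtable() below this works out to, schematically:
 *
 *	patb1 = __pa(process_tb) | (PRTB_SIZE_SHIFT - 12) | PATB_GR;
 */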

static __ref void *early_alloc_pgtable(unsigned long size, int nid,
			unsigned long region_start, unsigned long region_end)
{
	unsigned long pa = 0;
	void *pt;

	if (region_start || region_end) /* has region hint */
		pa = memblock_alloc_range(size, size, region_start, region_end,
					  MEMBLOCK_NONE);
	else if (nid != -1) /* has node hint */
		pa = memblock_alloc_base_nid(size, size,
					     MEMBLOCK_ALLOC_ANYWHERE,
					     nid, MEMBLOCK_NONE);

	if (!pa)
		pa = memblock_alloc_base(size, size, MEMBLOCK_ALLOC_ANYWHERE);

	BUG_ON(!pa);

	pt = __va(pa);
	memset(pt, 0, size);

	return pt;
}
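
/*
 * The allocation above falls back in order: an explicit physical region
 * hint, then a NUMA node hint, then anywhere in memory. All page table
 * memory is zeroed before use.
 */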

static int early_map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size,
			  int nid,
			  unsigned long region_start, unsigned long region_end)
{
	unsigned long pfn = pa >> PAGE_SHIFT;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(ea);
	if (pgd_none(*pgdp)) {
		pudp = early_alloc_pgtable(PUD_TABLE_SIZE, nid,
					   region_start, region_end);
		pgd_populate(&init_mm, pgdp, pudp);
	}
	pudp = pud_offset(pgdp, ea);
	if (map_page_size == PUD_SIZE) {
		ptep = (pte_t *)pudp;
		goto set_the_pte;
	}
	if (pud_none(*pudp)) {
		pmdp = early_alloc_pgtable(PMD_TABLE_SIZE, nid,
					   region_start, region_end);
		pud_populate(&init_mm, pudp, pmdp);
	}
	pmdp = pmd_offset(pudp, ea);
	if (map_page_size == PMD_SIZE) {
		ptep = pmdp_ptep(pmdp);
		goto set_the_pte;
	}
	if (!pmd_present(*pmdp)) {
		ptep = early_alloc_pgtable(PAGE_SIZE, nid,
					   region_start, region_end);
		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}
	ptep = pte_offset_kernel(pmdp, ea);

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
	smp_wmb();
	return 0;
}

/*
 * nid, region_start, and region_end are hints to try to place the page
 * table memory in the same node or region.
 */
static int __map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size,
			  int nid,
			  unsigned long region_start, unsigned long region_end)
{
	unsigned long pfn = pa >> PAGE_SHIFT;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	/*
	 * Make sure task size is correct as per the max addr
	 */
	BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);

	if (unlikely(!slab_is_available()))
		return early_map_kernel_page(ea, pa, flags, map_page_size,
					     nid, region_start, region_end);

	/*
	 * The page table allocation functions should be able to take a
	 * node, so that we can place kernel page tables on the right
	 * nodes after boot.
	 */
	pgdp = pgd_offset_k(ea);
	pudp = pud_alloc(&init_mm, pgdp, ea);
	if (!pudp)
		return -ENOMEM;
	if (map_page_size == PUD_SIZE) {
		ptep = (pte_t *)pudp;
		goto set_the_pte;
	}
	pmdp = pmd_alloc(&init_mm, pudp, ea);
	if (!pmdp)
		return -ENOMEM;
	if (map_page_size == PMD_SIZE) {
		ptep = pmdp_ptep(pmdp);
		goto set_the_pte;
	}
	ptep = pte_alloc_kernel(pmdp, ea);
	if (!ptep)
		return -ENOMEM;

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
	smp_wmb();
	return 0;
}

int radix__map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size)
{
	return __map_kernel_page(ea, pa, flags, map_page_size, -1, 0, 0);
}

#ifdef CONFIG_STRICT_KERNEL_RWX
void radix__change_memory_range(unsigned long start, unsigned long end,
				unsigned long clear)
{
	unsigned long idx;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = PAGE_ALIGN(end); // aligns up

	pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n",
		 start, end, clear);

	for (idx = start; idx < end; idx += PAGE_SIZE) {
		pgdp = pgd_offset_k(idx);
		pudp = pud_alloc(&init_mm, pgdp, idx);
		if (!pudp)
			continue;
		if (pud_huge(*pudp)) {
			ptep = (pte_t *)pudp;
			goto update_the_pte;
		}
		pmdp = pmd_alloc(&init_mm, pudp, idx);
		if (!pmdp)
			continue;
		if (pmd_huge(*pmdp)) {
			ptep = pmdp_ptep(pmdp);
			goto update_the_pte;
		}
		ptep = pte_alloc_kernel(pmdp, idx);
		if (!ptep)
			continue;
update_the_pte:
		radix__pte_update(&init_mm, idx, ptep, clear, 0, 0);
	}

	radix__flush_tlb_kernel_range(start, end);
}
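
/*
 * The two helpers below use radix__change_memory_range() purely to clear
 * permission bits: _PAGE_WRITE over the kernel text and rodata
 * (_stext.._init_begin) to make them read-only, and _PAGE_EXEC over
 * initmem so freed init code becomes non-executable.
 */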

void radix__mark_rodata_ro(void)
{
	unsigned long start, end;

	/*
	 * mark_rodata_ro() will mark itself as !writable at some point.
	 * Due to the DD1 workaround in radix__pte_update(), we'll end up
	 * with an invalid pte and the system will crash quite severely.
	 */
	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		pr_warn("Warning: Unable to mark rodata read only on P9 DD1\n");
		return;
	}

	start = (unsigned long)_stext;
	end = (unsigned long)__init_begin;

	radix__change_memory_range(start, end, _PAGE_WRITE);
}

void radix__mark_initmem_nx(void)
{
	unsigned long start = (unsigned long)__init_begin;
	unsigned long end = (unsigned long)__init_end;

	radix__change_memory_range(start, end, _PAGE_EXEC);
}
#endif /* CONFIG_STRICT_KERNEL_RWX */

static inline void __meminit print_mapping(unsigned long start,
					   unsigned long end,
					   unsigned long size)
{
	char buf[10];

	if (end <= start)
		return;

	string_get_size(size, 1, STRING_UNITS_2, buf, sizeof(buf));

	pr_info("Mapped 0x%016lx-0x%016lx with %s pages\n", start, end, buf);
}

static int __meminit create_physical_mapping(unsigned long start,
					     unsigned long end,
					     int nid)
{
	unsigned long vaddr, addr, mapping_size = 0;
	pgprot_t prot;
	unsigned long max_mapping_size;
#ifdef CONFIG_STRICT_KERNEL_RWX
	int split_text_mapping = 1;
#else
	int split_text_mapping = 0;
#endif

	start = _ALIGN_UP(start, PAGE_SIZE);
	for (addr = start; addr < end; addr += mapping_size) {
		unsigned long gap, previous_size;
		int rc;

		gap = end - addr;
		previous_size = mapping_size;
		max_mapping_size = PUD_SIZE;

retry:
		if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
		    mmu_psize_defs[MMU_PAGE_1G].shift &&
		    PUD_SIZE <= max_mapping_size)
			mapping_size = PUD_SIZE;
		else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
			 mmu_psize_defs[MMU_PAGE_2M].shift)
			mapping_size = PMD_SIZE;
		else
			mapping_size = PAGE_SIZE;

		if (split_text_mapping && (mapping_size == PUD_SIZE) &&
		    (addr <= __pa_symbol(__init_begin)) &&
		    (addr + mapping_size) >= __pa_symbol(_stext)) {
			max_mapping_size = PMD_SIZE;
			goto retry;
		}

		if (split_text_mapping && (mapping_size == PMD_SIZE) &&
		    (addr <= __pa_symbol(__init_begin)) &&
		    (addr + mapping_size) >= __pa_symbol(_stext))
			mapping_size = PAGE_SIZE;

		if (mapping_size != previous_size) {
			print_mapping(start, addr, previous_size);
			start = addr;
		}

		vaddr = (unsigned long)__va(addr);

		if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
		    overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size))
			prot = PAGE_KERNEL_X;
		else
			prot = PAGE_KERNEL;

		rc = __map_kernel_page(vaddr, addr, prot, mapping_size, nid, start, end);
		if (rc)
			return rc;
	}

	print_mapping(start, addr, mapping_size);
	return 0;
}
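
/*
 * Illustrative example: with 1G and 2M page sizes available, a memblock
 * covering the first 4GB of RAM is mapped with 1G pages, except that a
 * mapping overlapping the kernel text (_stext..__init_begin) is retried
 * at 2M and then at PAGE_SIZE granularity, so STRICT_KERNEL_RWX can later
 * change permissions on the text without splitting live huge mappings.
 */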

void __init radix_init_pgtable(void)
{
	unsigned long rts_field;
	struct memblock_region *reg;

	/* We don't support slb for radix */
	mmu_slb_size = 0;
	/*
	 * Create the linear mapping, using standard page size for now
	 */
	for_each_memblock(memory, reg) {
		/*
		 * The memblock allocator is up at this point, so the
		 * page tables will be allocated within the range. No
		 * need for a node (which we don't have yet).
		 */
		WARN_ON(create_physical_mapping(reg->base,
						reg->base + reg->size,
						-1));
	}

	/* Find out how many PID bits are supported */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		if (!mmu_pid_bits)
			mmu_pid_bits = 20;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		/*
		 * When KVM is possible, we only use the top half of the
		 * PID space to avoid collisions between host and guest PIDs
		 * which can cause problems due to prefetch when exiting the
		 * guest with AIL=3
		 */
		mmu_base_pid = 1 << (mmu_pid_bits - 1);
#else
		mmu_base_pid = 1;
#endif
	} else {
		/* The guest uses the bottom half of the PID space */
		if (!mmu_pid_bits)
			mmu_pid_bits = 19;
		mmu_base_pid = 1;
	}
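
	/*
	 * Worked example (illustrative): with the default mmu_pid_bits = 20
	 * and KVM possible, mmu_base_pid = 1 << 19 = 0x80000, so the host
	 * allocates PIDs from the top half 0x80000..0xfffff of the 2^20 PID
	 * space while guests use the bottom half.
	 */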

	/*
	 * Allocate Partition table and process table for the
	 * host.
	 */
	BUG_ON(PRTB_SIZE_SHIFT > 36);
	process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT, -1, 0, 0);
	/*
	 * Fill in the process table.
	 */
	rts_field = radix__get_tree_size();
	process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
	/*
	 * Fill in the partition table. We are supposed to use the effective
	 * address of the process table here, but our linear mapping also
	 * allows us to use the physical address.
	 */
	register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
	pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
	asm volatile("ptesync" : : : "memory");
	asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
		     "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
	trace_tlbie(0, 0, TLBIEL_INVAL_SET_LPID, 0, 2, 1, 1);

	/*
	 * The init_mm context is given the first available (non-zero) PID,
	 * which is the "guard PID" and contains no page table. PIDR should
	 * never be set to zero because that duplicates the kernel address
	 * space at the 0x0... offset (quadrant 0)!
	 *
	 * An arbitrary PID that may later be allocated by the PID allocator
	 * for userspace processes must not be used either, because that
	 * would cause stale user mappings for that PID on CPUs outside of
	 * the TLB invalidation scheme (because it won't be in mm_cpumask).
	 *
	 * So permanently carve out one PID for the purpose of a guard PID.
	 */
	init_mm.context.id = mmu_base_pid;
	mmu_base_pid++;
}

static void __init radix_init_partition_table(void)
{
	unsigned long rts_field, dw0;

	mmu_partition_table_init();
	rts_field = radix__get_tree_size();
	dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
	mmu_partition_table_set_entry(0, dw0, 0);

	pr_info("Initializing Radix MMU\n");
	pr_info("Partition table %p\n", partition_tb);
}

void __init radix_init_native(void)
{
	register_process_table = native_register_process_table;
}

static int __init get_idx_from_shift(unsigned int shift)
{
	int idx = -1;

	switch (shift) {
	case 0xc:
		idx = MMU_PAGE_4K;
		break;
	case 0x10:
		idx = MMU_PAGE_64K;
		break;
	case 0x15:
		idx = MMU_PAGE_2M;
		break;
	case 0x1e:
		idx = MMU_PAGE_1G;
		break;
	}
	return idx;
}

static int __init radix_dt_scan_page_sizes(unsigned long node,
					   const char *uname, int depth,
					   void *data)
{
	int size = 0;
	int shift, idx;
	unsigned int ap;
	const __be32 *prop;
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	/* Find MMU PID size */
	prop = of_get_flat_dt_prop(node, "ibm,mmu-pid-bits", &size);
	if (prop && size == 4)
		mmu_pid_bits = be32_to_cpup(prop);

	/* Grab page size encodings */
	prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
	if (!prop)
		return 0;

	pr_info("Page sizes from device-tree:\n");
	for (; size >= 4; size -= 4, ++prop) {
		struct mmu_psize_def *def;

		/* top 3 bits are the AP encoding */
		shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
		ap = be32_to_cpu(prop[0]) >> 29;
		pr_info("Page size shift = %d AP=0x%x\n", shift, ap);

		idx = get_idx_from_shift(shift);
		if (idx < 0)
			continue;

		def = &mmu_psize_defs[idx];
		def->shift = shift;
		def->ap = ap;
	}

	/* needed ? */
	cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
	return 1;
}
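
/*
 * Decoding example (illustrative): a cell of 0xa0000010 in
 * "ibm,processor-radix-AP-encodings" gives ap = 0xa0000010 >> 29 = 0x5
 * and shift = 0xa0000010 & 0x1fffffff = 0x10, i.e. 64K pages with AP
 * encoding 5, matching the fallback values assumed in
 * radix__early_init_devtree() below.
 */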

void __init radix__early_init_devtree(void)
{
	int rc;

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
	if (rc != 0) /* Found */
		goto found;
	/*
	 * Let's assume we have 4K and 64K page support
	 */
	mmu_psize_defs[MMU_PAGE_4K].shift = 12;
	mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;

	mmu_psize_defs[MMU_PAGE_64K].shift = 16;
	mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
found:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	if (mmu_psize_defs[MMU_PAGE_2M].shift) {
		/*
		 * map vmemmap using 2M if available
		 */
		mmu_vmemmap_psize = MMU_PAGE_2M;
	}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
	return;
}

static void update_hid_for_radix(void)
{
	unsigned long hid0;
	unsigned long rb = 3UL << PPC_BITLSHIFT(53); /* IS = 3 */

	asm volatile("ptesync": : :"memory");
	/* prs = 0, ric = 2, rs = 0, r = 1 is = 3 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(1), "i"(0), "i"(2), "r"(0) : "memory");
	/* prs = 1, ric = 2, rs = 0, r = 1 is = 3 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(1), "i"(1), "i"(2), "r"(0) : "memory");
	asm volatile("eieio; tlbsync; ptesync; isync; slbia": : :"memory");
	trace_tlbie(0, 0, rb, 0, 2, 0, 1);
	trace_tlbie(0, 0, rb, 0, 2, 1, 1);

	/*
	 * Now switch the HID
	 */
	hid0 = mfspr(SPRN_HID0);
	hid0 |= HID0_POWER9_RADIX;
	mtspr(SPRN_HID0, hid0);
	asm volatile("isync": : :"memory");

	/* Wait for it to happen */
	while (!(mfspr(SPRN_HID0) & HID0_POWER9_RADIX))
		cpu_relax();
}

static void radix_init_amor(void)
{
	/*
	 * In HV mode, we init AMOR (Authority Mask Override Register) so
	 * that the hypervisor and guest can set up the IAMR (Instruction
	 * Authority Mask Register), enable key 0 and set it to 1.
	 *
	 * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
	 */
	mtspr(SPRN_AMOR, (3ul << 62));
}

static void radix_init_iamr(void)
{
	unsigned long iamr;

	/*
	 * The IAMR should be set to 0 on DD1.
	 */
	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
		iamr = 0;
	else
		iamr = (1ul << 62);

	/*
	 * Radix always uses key0 of the IAMR to determine if an access is
	 * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
	 * fetch.
	 */
	mtspr(SPRN_IAMR, iamr);
}

void __init radix__early_init_mmu(void)
{
	unsigned long lpcr;

#ifdef CONFIG_PPC_64K_PAGES
	/* PAGE_SIZE mappings */
	mmu_virtual_psize = MMU_PAGE_64K;
#else
	mmu_virtual_psize = MMU_PAGE_4K;
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* vmemmap mapping */
	mmu_vmemmap_psize = mmu_virtual_psize;
#endif
	/*
	 * initialize page table size
	 */
	__pte_index_size = RADIX_PTE_INDEX_SIZE;
	__pmd_index_size = RADIX_PMD_INDEX_SIZE;
	__pud_index_size = RADIX_PUD_INDEX_SIZE;
	__pgd_index_size = RADIX_PGD_INDEX_SIZE;
	__pud_cache_index = RADIX_PUD_INDEX_SIZE;
	__pte_table_size = RADIX_PTE_TABLE_SIZE;
	__pmd_table_size = RADIX_PMD_TABLE_SIZE;
	__pud_table_size = RADIX_PUD_TABLE_SIZE;
	__pgd_table_size = RADIX_PGD_TABLE_SIZE;

	__pmd_val_bits = RADIX_PMD_VAL_BITS;
	__pud_val_bits = RADIX_PUD_VAL_BITS;
	__pgd_val_bits = RADIX_PGD_VAL_BITS;

	__kernel_virt_start = RADIX_KERN_VIRT_START;
	__kernel_virt_size = RADIX_KERN_VIRT_SIZE;
	__vmalloc_start = RADIX_VMALLOC_START;
	__vmalloc_end = RADIX_VMALLOC_END;
	__kernel_io_start = RADIX_KERN_IO_START;
	vmemmap = (struct page *)RADIX_VMEMMAP_BASE;
	ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PCI
	pci_io_base = ISA_IO_BASE;
#endif
	__pte_frag_nr = RADIX_PTE_FRAG_NR;
	__pte_frag_size_shift = RADIX_PTE_FRAG_SIZE_SHIFT;
	__pmd_frag_nr = RADIX_PMD_FRAG_NR;
	__pmd_frag_size_shift = RADIX_PMD_FRAG_SIZE_SHIFT;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		radix_init_native();
		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
			update_hid_for_radix();
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
		radix_init_partition_table();
		radix_init_amor();
	} else {
		radix_init_pseries();
	}

	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);

	radix_init_iamr();
	radix_init_pgtable();
	/* Switch to the guard PID before turning on MMU */
	radix__switch_mmu_context(NULL, &init_mm);
	if (cpu_has_feature(CPU_FTR_HVMODE))
		tlbiel_all();
}

void radix__early_init_mmu_secondary(void)
{
	unsigned long lpcr;
	/*
	 * Update the partition table control register and UPRT
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
			update_hid_for_radix();

		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);

		mtspr(SPRN_PTCR,
		      __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
		radix_init_amor();
	}
	radix_init_iamr();

	radix__switch_mmu_context(NULL, &init_mm);
	if (cpu_has_feature(CPU_FTR_HVMODE))
		tlbiel_all();
}

void radix__mmu_cleanup_all(void)
{
	unsigned long lpcr;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
		mtspr(SPRN_PTCR, 0);
		powernv_set_nmmu_ptcr(0);
		radix__flush_tlb_all();
	}
}

void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/*
	 * We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors.
	 */
	BUG_ON(first_memblock_base != 0);

	/*
	 * Radix mode is not limited by RMA / VRMA addressing.
	 */
	ppc64_rma_size = ULONG_MAX;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	pte_free_kernel(&init_mm, pte_start);
	pmd_clear(pmd);
}

static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	pmd_free(&init_mm, pmd_start);
	pud_clear(pud);
}

struct change_mapping_params {
	pte_t *pte;
	unsigned long start;
	unsigned long end;
	unsigned long aligned_start;
	unsigned long aligned_end;
};

static int __meminit stop_machine_change_mapping(void *data)
{
	struct change_mapping_params *params =
		(struct change_mapping_params *)data;

	if (!data)
		return -1;

	spin_unlock(&init_mm.page_table_lock);
	pte_clear(&init_mm, params->aligned_start, params->pte);
	create_physical_mapping(params->aligned_start, params->start, -1);
	create_physical_mapping(params->end, params->aligned_end, -1);
	spin_lock(&init_mm.page_table_lock);
	return 0;
}
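
/*
 * stop_machine() ensures no other CPU is executing through the mapping
 * being split. The page table lock (taken by the remove_pagetable() path
 * below) is dropped across the remap, most likely because
 * create_physical_mapping() allocates page tables and may take it again.
 */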

static void remove_pte_table(pte_t *pte_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(next)) {
			/*
			 * The vmemmap_free() and remove_section_mapping()
			 * codepaths call us with aligned addresses.
			 */
			WARN_ONCE(1, "%s: unaligned range\n", __func__);
			continue;
		}

		pte_clear(&init_mm, addr, pte);
	}
}

/*
 * clear the pte and potentially split the mapping helper
 */
static void __meminit split_kernel_mapping(unsigned long addr, unsigned long end,
				unsigned long size, pte_t *pte)
{
	unsigned long mask = ~(size - 1);
	unsigned long aligned_start = addr & mask;
	unsigned long aligned_end = addr + size;
	struct change_mapping_params params;
	bool split_region = false;

	if ((end - addr) < size) {
		/*
		 * We're going to clear the PTE, but we have not yet flushed
		 * the mapping, so it is time to remap and flush. If the
		 * effects are visible outside the processor, or if we are
		 * running in code close to the mapping we cleared, we are
		 * in trouble.
		 */
		if (overlaps_kernel_text(aligned_start, addr) ||
		    overlaps_kernel_text(end, aligned_end)) {
			/*
			 * Hack, just return, don't pte_clear
			 */
			WARN_ONCE(1, "Linear mapping %lx->%lx overlaps kernel "
				  "text, not splitting\n", addr, end);
			return;
		}
		split_region = true;
	}

	if (split_region) {
		params.pte = pte;
		params.start = addr;
		params.end = end;
		params.aligned_start = addr & ~(size - 1);
		params.aligned_end = min_t(unsigned long, aligned_end,
				(unsigned long)__va(memblock_end_of_DRAM()));
		stop_machine(stop_machine_change_mapping, &params, NULL);
		return;
	}

	pte_clear(&init_mm, addr, pte);
}

static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte_base;
	pmd_t *pmd;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_huge(*pmd)) {
			split_kernel_mapping(addr, end, PMD_SIZE, (pte_t *)pmd);
			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next);
		free_pte_table(pte_base, pmd);
	}
}

static void remove_pud_table(pud_t *pud_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pmd_t *pmd_base;
	pud_t *pud;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_huge(*pud)) {
			split_kernel_mapping(addr, end, PUD_SIZE, (pte_t *)pud);
			continue;
		}

		pmd_base = (pmd_t *)pud_page_vaddr(*pud);
		remove_pmd_table(pmd_base, addr, next);
		free_pmd_table(pmd_base, pud);
	}
}

static void __meminit remove_pagetable(unsigned long start, unsigned long end)
{
	unsigned long addr, next;
	pud_t *pud_base;
	pgd_t *pgd;

	spin_lock(&init_mm.page_table_lock);

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		if (pgd_huge(*pgd)) {
			split_kernel_mapping(addr, end, PGDIR_SIZE, (pte_t *)pgd);
			continue;
		}

		pud_base = (pud_t *)pgd_page_vaddr(*pgd);
		remove_pud_table(pud_base, addr, next);
	}

	spin_unlock(&init_mm.page_table_lock);
	radix__flush_tlb_kernel_range(start, end);
}

int __meminit radix__create_section_mapping(unsigned long start, unsigned long end, int nid)
{
	return create_physical_mapping(start, end, nid);
}

int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
{
	remove_pagetable(start, end);
	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static int __map_kernel_page_nid(unsigned long ea, unsigned long pa,
				 pgprot_t flags, unsigned int map_page_size,
				 int nid)
{
	return __map_kernel_page(ea, pa, flags, map_page_size, nid, 0, 0);
}

int __meminit radix__vmemmap_create_mapping(unsigned long start,
				      unsigned long page_size,
				      unsigned long phys)
{
	/* Create a PTE encoding */
	unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;
	int nid = early_pfn_to_nid(phys >> PAGE_SHIFT);
	int ret;

	ret = __map_kernel_page_nid(start, phys, __pgprot(flags), page_size, nid);
	BUG_ON(ret);

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
{
	remove_pagetable(start, start + page_size);
}
#endif
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
					 pmd_t *pmdp, unsigned long clr,
					 unsigned long set)
{
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
#endif

	old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
	trace_hugepage_update(addr, old, clr, set);

	return old;
}

pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			pmd_t *pmdp)
{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
	VM_BUG_ON(pmd_devmap(*pmdp));
	/*
	 * khugepaged calls this for normal pmd
	 */
	pmd = *pmdp;
	pmd_clear(pmdp);

	/* FIXME!! Verify whether we need this kick below */
	serialize_against_pte_lookup(vma->vm_mm);

	radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);

	return pmd;
}

/*
 * For us pgtable_t is pte_t *. In order to save the deposited
 * page table, we consider the allocated page table as a list
 * head. On withdraw we need to make sure we zero out the used
 * list_head memory area.
 */
void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				 pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pte_t *ptep;
	pgtable_t pgtable;
	struct list_head *lh;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	*ptep = __pte(0);
	ptep++;
	*ptep = __pte(0);
	return pgtable;
}
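
/*
 * Illustrative note: the deposited page table page itself stores the
 * list_head, so no extra allocation is needed; the withdraw above zeroes
 * the first two pte slots (16 bytes), which is exactly where the embedded
 * list_head lived.
 */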

pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
				     unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	unsigned long old;

	old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	/*
	 * Serialize against find_current_mm_pte, which does a lock-less
	 * lookup in page tables with local interrupts disabled. For huge pages
	 * it casts pmd_t to pte_t. Since the format of pte_t is different from
	 * pmd_t we want to prevent transit from pmd pointing to page table
	 * to pmd pointing to huge page (and back) while interrupts are disabled.
	 * We clear pmd to possibly replace it with page table pointer in
	 * different code paths. So make sure we wait for the parallel
	 * find_current_mm_pte to finish.
	 */
	serialize_against_pte_lookup(mm);
	return old_pmd;
}

int radix__has_transparent_hugepage(void)
{
	/* For radix 2M at PMD level means thp */
	if (mmu_psize_defs[MMU_PAGE_2M].shift == PMD_SHIFT)
		return 1;
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
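
/*
 * Set access flags (dirty/accessed/write/exec) on a pte. When the address
 * space is attached to a nest MMU (copros attached), or on P9 DD1, the
 * sequence below first invalidates the pte and flushes the TLB before
 * installing the more permissive value, rather than relaxing permissions
 * in place.
 */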

void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
				  pte_t entry, unsigned long address, int psize)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
					      _PAGE_RW | _PAGE_EXEC);
	/*
	 * To avoid an NMMU hang while relaxing access, we need to mark
	 * the pte invalid in between.
	 */
	if (cpu_has_feature(CPU_FTR_POWER9_DD1) ||
	    atomic_read(&mm->context.copros) > 0) {
		unsigned long old_pte, new_pte;

		old_pte = __radix_pte_update(ptep, ~0, 0);
		/*
		 * new value of pte
		 */
		new_pte = old_pte | set;
		radix__flush_tlb_page_psize(mm, address, psize);
		__radix_pte_update(ptep, 0, new_pte);
	} else {
		__radix_pte_update(ptep, 0, set);
		/*
		 * Book3S does not require a TLB flush when relaxing access
		 * restrictions when the address space is not attached to a
		 * NMMU, because the core MMU will reload the pte after taking
		 * an access fault, which is defined by the architecture.
		 */
	}
	/* See ptesync comment in radix__set_pte_at */
}