/*
 * Page table handling routines for radix page table.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "radix-mmu: " fmt

#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/mm.h>
#include <linux/string_helpers.h>
#include <linux/stop_machine.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/powernv.h>
#include <asm/sections.h>
#include <asm/trace.h>

#include <trace/events/thp.h>

unsigned int mmu_pid_bits;
unsigned int mmu_base_pid;

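/*
 * Install partition table entry 0 for a bare-metal host: the first
 * doubleword (patb0) is preserved as already set up, and the second
 * (patb1) points at the process table, with PATB_GR marking it as a
 * radix process table.
 */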
static int native_register_process_table(unsigned long base, unsigned long pg_sz,
					 unsigned long table_size)
{
	unsigned long patb0, patb1;

	patb0 = be64_to_cpu(partition_tb[0].patb0);
	patb1 = base | table_size | PATB_GR;

	mmu_partition_table_set_entry(0, patb0, patb1);

	return 0;
}

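/*
 * Boot-time page table allocator, for use before the slab allocator is
 * up. The nid and region_start/region_end arguments are only placement
 * hints for memblock; failure is fatal this early, hence the panic().
 */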
static __ref void *early_alloc_pgtable(unsigned long size, int nid,
				       unsigned long region_start, unsigned long region_end)
{
	phys_addr_t min_addr = MEMBLOCK_LOW_LIMIT;
	phys_addr_t max_addr = MEMBLOCK_ALLOC_ANYWHERE;
	void *ptr;

	if (region_start)
		min_addr = region_start;
	if (region_end)
		max_addr = region_end;

	ptr = memblock_alloc_try_nid(size, size, min_addr, max_addr, nid);

	if (!ptr)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa max_addr=%pa\n",
		      __func__, size, size, nid, &min_addr, &max_addr);

	return ptr;
}

static int early_map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size,
			  int nid,
			  unsigned long region_start, unsigned long region_end)
{
	unsigned long pfn = pa >> PAGE_SHIFT;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(ea);
	if (pgd_none(*pgdp)) {
		pudp = early_alloc_pgtable(PUD_TABLE_SIZE, nid,
					   region_start, region_end);
		pgd_populate(&init_mm, pgdp, pudp);
	}
	pudp = pud_offset(pgdp, ea);
	if (map_page_size == PUD_SIZE) {
		ptep = (pte_t *)pudp;
		goto set_the_pte;
	}
	if (pud_none(*pudp)) {
		pmdp = early_alloc_pgtable(PMD_TABLE_SIZE, nid,
					   region_start, region_end);
		pud_populate(&init_mm, pudp, pmdp);
	}
	pmdp = pmd_offset(pudp, ea);
	if (map_page_size == PMD_SIZE) {
		ptep = pmdp_ptep(pmdp);
		goto set_the_pte;
	}
	if (!pmd_present(*pmdp)) {
		ptep = early_alloc_pgtable(PAGE_SIZE, nid,
					   region_start, region_end);
		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}
	ptep = pte_offset_kernel(pmdp, ea);

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
	smp_wmb();
	return 0;
}

/*
 * nid, region_start, and region_end are hints to try to place the page
 * table memory in the same node or region.
 */
static int __map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size,
			  int nid,
			  unsigned long region_start, unsigned long region_end)
{
	unsigned long pfn = pa >> PAGE_SHIFT;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	/*
	 * Make sure task size is correct as per the max addr
	 */
	BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);

	if (unlikely(!slab_is_available()))
		return early_map_kernel_page(ea, pa, flags, map_page_size,
					     nid, region_start, region_end);

	/*
	 * Should make page table allocation functions be able to take a
	 * node, so we can place kernel page tables on the right nodes after
	 * boot.
	 */
	pgdp = pgd_offset_k(ea);
	pudp = pud_alloc(&init_mm, pgdp, ea);
	if (!pudp)
		return -ENOMEM;
	if (map_page_size == PUD_SIZE) {
		ptep = (pte_t *)pudp;
		goto set_the_pte;
	}
	pmdp = pmd_alloc(&init_mm, pudp, ea);
	if (!pmdp)
		return -ENOMEM;
	if (map_page_size == PMD_SIZE) {
		ptep = pmdp_ptep(pmdp);
		goto set_the_pte;
	}
	ptep = pte_alloc_kernel(pmdp, ea);
	if (!ptep)
		return -ENOMEM;

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
	smp_wmb();
	return 0;
}

int radix__map_kernel_page(unsigned long ea, unsigned long pa,
			   pgprot_t flags,
			   unsigned int map_page_size)
{
	return __map_kernel_page(ea, pa, flags, map_page_size, -1, 0, 0);
}

#ifdef CONFIG_STRICT_KERNEL_RWX
void radix__change_memory_range(unsigned long start, unsigned long end,
				unsigned long clear)
{
	unsigned long idx;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = PAGE_ALIGN(end); // aligns up

	pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n",
		 start, end, clear);

	for (idx = start; idx < end; idx += PAGE_SIZE) {
		pgdp = pgd_offset_k(idx);
		pudp = pud_alloc(&init_mm, pgdp, idx);
		if (!pudp)
			continue;
		if (pud_huge(*pudp)) {
			ptep = (pte_t *)pudp;
			goto update_the_pte;
		}
		pmdp = pmd_alloc(&init_mm, pudp, idx);
		if (!pmdp)
			continue;
		if (pmd_huge(*pmdp)) {
			ptep = pmdp_ptep(pmdp);
			goto update_the_pte;
		}
		ptep = pte_alloc_kernel(pmdp, idx);
		if (!ptep)
			continue;
update_the_pte:
		radix__pte_update(&init_mm, idx, ptep, clear, 0, 0);
	}

	radix__flush_tlb_kernel_range(start, end);
}

void radix__mark_rodata_ro(void)
{
	unsigned long start, end;

	start = (unsigned long)_stext;
	end = (unsigned long)__init_begin;

	radix__change_memory_range(start, end, _PAGE_WRITE);
}

void radix__mark_initmem_nx(void)
{
	unsigned long start = (unsigned long)__init_begin;
	unsigned long end = (unsigned long)__init_end;

	radix__change_memory_range(start, end, _PAGE_EXEC);
}
#endif /* CONFIG_STRICT_KERNEL_RWX */

static inline void __meminit
print_mapping(unsigned long start, unsigned long end, unsigned long size, bool exec)
{
	char buf[10];

	if (end <= start)
		return;

	string_get_size(size, 1, STRING_UNITS_2, buf, sizeof(buf));

	pr_info("Mapped 0x%016lx-0x%016lx with %s pages%s\n", start, end, buf,
		exec ? " (exec)" : "");
}

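/*
 * With CONFIG_STRICT_KERNEL_RWX, mapping permissions change at the
 * __init_begin boundary, so don't let a single large page straddle it:
 * returning the boundary makes create_physical_mapping() end the
 * current mapping there and start a new one.
 */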
static unsigned long next_boundary(unsigned long addr, unsigned long end)
{
#ifdef CONFIG_STRICT_KERNEL_RWX
	if (addr < __pa_symbol(__init_begin))
		return __pa_symbol(__init_begin);
#endif
	return end;
}

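/*
 * Map a physical range into the kernel linear mapping, using the
 * largest page size that the alignment of the current address and the
 * remaining gap allow: 1G, then 2M, then the base page size. A summary
 * line is printed whenever the page size or execute permission of the
 * mapping changes.
 */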
static int __meminit create_physical_mapping(unsigned long start,
					     unsigned long end,
					     int nid)
{
	unsigned long vaddr, addr, mapping_size = 0;
	bool prev_exec, exec = false;
	pgprot_t prot;
	int psize;

	start = _ALIGN_UP(start, PAGE_SIZE);
	for (addr = start; addr < end; addr += mapping_size) {
		unsigned long gap, previous_size;
		int rc;

		gap = next_boundary(addr, end) - addr;
		previous_size = mapping_size;
		prev_exec = exec;

		if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
		    mmu_psize_defs[MMU_PAGE_1G].shift) {
			mapping_size = PUD_SIZE;
			psize = MMU_PAGE_1G;
		} else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
			   mmu_psize_defs[MMU_PAGE_2M].shift) {
			mapping_size = PMD_SIZE;
			psize = MMU_PAGE_2M;
		} else {
			mapping_size = PAGE_SIZE;
			psize = mmu_virtual_psize;
		}

		vaddr = (unsigned long)__va(addr);

		if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
		    overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size)) {
			prot = PAGE_KERNEL_X;
			exec = true;
		} else {
			prot = PAGE_KERNEL;
			exec = false;
		}

		if (mapping_size != previous_size || exec != prev_exec) {
			print_mapping(start, addr, previous_size, prev_exec);
			start = addr;
		}

		rc = __map_kernel_page(vaddr, addr, prot, mapping_size, nid, start, end);
		if (rc)
			return rc;

		update_page_count(psize, 1);
	}

	print_mapping(start, addr, mapping_size, exec);
	return 0;
}

void __init radix_init_pgtable(void)
{
	unsigned long rts_field;
	struct memblock_region *reg;

	/* We don't support slb for radix */
	mmu_slb_size = 0;
	/*
	 * Create the linear mapping, using standard page size for now
	 */
	for_each_memblock(memory, reg) {
		/*
		 * The memblock allocator is up at this point, so the
		 * page tables will be allocated within the range. No
		 * need for a node (which we don't have yet).
		 */
		WARN_ON(create_physical_mapping(reg->base,
						reg->base + reg->size,
						-1));
	}

	/* Find out how many PID bits are supported */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		if (!mmu_pid_bits)
			mmu_pid_bits = 20;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		/*
		 * When KVM is possible, we only use the top half of the
		 * PID space to avoid collisions between host and guest PIDs
		 * which can cause problems due to prefetch when exiting the
		 * guest with AIL=3
		 */
		mmu_base_pid = 1 << (mmu_pid_bits - 1);
#else
		mmu_base_pid = 1;
#endif
	} else {
		/* The guest uses the bottom half of the PID space */
		if (!mmu_pid_bits)
			mmu_pid_bits = 19;
		mmu_base_pid = 1;
	}

	/*
	 * Allocate Partition table and process table for the
	 * host.
	 */
	BUG_ON(PRTB_SIZE_SHIFT > 36);
	process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT, -1, 0, 0);
	/*
	 * Fill in the process table.
	 */
	rts_field = radix__get_tree_size();
	process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
	/*
	 * Fill in the partition table. We are supposed to use the effective
	 * address of the process table here, but our linear mapping also
	 * enables us to use the physical address.
	 */
	register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
	pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
	asm volatile("ptesync" : : : "memory");
	asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
		     "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
	trace_tlbie(0, 0, TLBIEL_INVAL_SET_LPID, 0, 2, 1, 1);

	/*
	 * The init_mm context is given the first available (non-zero) PID,
	 * which is the "guard PID" and contains no page table. PIDR should
	 * never be set to zero because that duplicates the kernel address
	 * space at the 0x0... offset (quadrant 0)!
	 *
	 * An arbitrary PID that may later be allocated by the PID allocator
	 * for userspace processes must not be used either, because that
	 * would cause stale user mappings for that PID on CPUs outside of
	 * the TLB invalidation scheme (because it won't be in mm_cpumask).
	 *
	 * So permanently carve out one PID for the purpose of a guard PID.
	 */
	init_mm.context.id = mmu_base_pid;
	mmu_base_pid++;
}

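/*
 * Bare-metal boot: point partition table entry 0 at the host radix
 * tree (PATB_HR plus the kernel PGD and its index size). The process
 * table doubleword is left zero here; it is filled in later when
 * register_process_table() is called from radix_init_pgtable().
 */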
static void __init radix_init_partition_table(void)
{
	unsigned long rts_field, dw0;

	mmu_partition_table_init();
	rts_field = radix__get_tree_size();
	dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
	mmu_partition_table_set_entry(0, dw0, 0);

	pr_info("Initializing Radix MMU\n");
	pr_info("Partition table %p\n", partition_tb);
}

void __init radix_init_native(void)
{
	register_process_table = native_register_process_table;
}

static int __init get_idx_from_shift(unsigned int shift)
{
	int idx = -1;

	switch (shift) {
	case 0xc:
		idx = MMU_PAGE_4K;
		break;
	case 0x10:
		idx = MMU_PAGE_64K;
		break;
	case 0x15:
		idx = MMU_PAGE_2M;
		break;
	case 0x1e:
		idx = MMU_PAGE_1G;
		break;
	}
	return idx;
}

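/*
 * Each 4-byte cell of the "ibm,processor-radix-AP-encodings" property
 * packs an AP (actual page size) encoding in the top three bits and
 * the page shift in the remaining bits, decoded below.
 */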
static int __init radix_dt_scan_page_sizes(unsigned long node,
					   const char *uname, int depth,
					   void *data)
{
	int size = 0;
	int shift, idx;
	unsigned int ap;
	const __be32 *prop;
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	/* Find MMU PID size */
	prop = of_get_flat_dt_prop(node, "ibm,mmu-pid-bits", &size);
	if (prop && size == 4)
		mmu_pid_bits = be32_to_cpup(prop);

	/* Grab page size encodings */
	prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
	if (!prop)
		return 0;

	pr_info("Page sizes from device-tree:\n");
	for (; size >= 4; size -= 4, ++prop) {

		struct mmu_psize_def *def;

		/* top 3 bits are the AP encoding */
		shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
		ap = be32_to_cpu(prop[0]) >> 29;
		pr_info("Page size shift = %d AP=0x%x\n", shift, ap);

		idx = get_idx_from_shift(shift);
		if (idx < 0)
			continue;

		def = &mmu_psize_defs[idx];
		def->shift = shift;
		def->ap = ap;
	}

	/* needed ? */
	cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
	return 1;
}

void __init radix__early_init_devtree(void)
{
	int rc;

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
	if (rc != 0) /* Found */
		goto found;
	/*
	 * let's assume we have page 4k and 64k support
	 */
	mmu_psize_defs[MMU_PAGE_4K].shift = 12;
	mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;

	mmu_psize_defs[MMU_PAGE_64K].shift = 16;
	mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
found:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	if (mmu_psize_defs[MMU_PAGE_2M].shift) {
		/*
		 * map vmemmap using 2M if available
		 */
		mmu_vmemmap_psize = MMU_PAGE_2M;
	}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
	return;
}

static void radix_init_amor(void)
{
	/*
	 * In HV mode, we init AMOR (Authority Mask Override Register) so that
	 * the hypervisor and guest can setup IAMR (Instruction Authority Mask
	 * Register), enable key 0 and set it to 1.
	 *
	 * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
	 */
	mtspr(SPRN_AMOR, (3ul << 62));
}

#ifdef CONFIG_PPC_KUEP
void setup_kuep(bool disabled)
{
	if (disabled || !early_radix_enabled())
		return;

	if (smp_processor_id() == boot_cpuid)
		pr_info("Activating Kernel Userspace Execution Prevention\n");

	/*
	 * Radix always uses key0 of the IAMR to determine if an access is
	 * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
	 * fetch.
	 */
	mtspr(SPRN_IAMR, (1ul << 62));
}
#endif

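/*
 * Boot-CPU radix MMU setup: pick the base and vmemmap page sizes,
 * publish the radix page table geometry to the generic 64-bit pgtable
 * code, set up the partition and process tables (natively or via the
 * hypervisor), then switch to the guard PID and, in HV mode, flush the
 * TLB with tlbiel_all().
 */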
void __init radix__early_init_mmu(void)
{
	unsigned long lpcr;

#ifdef CONFIG_PPC_64K_PAGES
	/* PAGE_SIZE mappings */
	mmu_virtual_psize = MMU_PAGE_64K;
#else
	mmu_virtual_psize = MMU_PAGE_4K;
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* vmemmap mapping */
	mmu_vmemmap_psize = mmu_virtual_psize;
#endif
	/*
	 * initialize page table size
	 */
	__pte_index_size = RADIX_PTE_INDEX_SIZE;
	__pmd_index_size = RADIX_PMD_INDEX_SIZE;
	__pud_index_size = RADIX_PUD_INDEX_SIZE;
	__pgd_index_size = RADIX_PGD_INDEX_SIZE;
	__pud_cache_index = RADIX_PUD_INDEX_SIZE;
	__pte_table_size = RADIX_PTE_TABLE_SIZE;
	__pmd_table_size = RADIX_PMD_TABLE_SIZE;
	__pud_table_size = RADIX_PUD_TABLE_SIZE;
	__pgd_table_size = RADIX_PGD_TABLE_SIZE;

	__pmd_val_bits = RADIX_PMD_VAL_BITS;
	__pud_val_bits = RADIX_PUD_VAL_BITS;
	__pgd_val_bits = RADIX_PGD_VAL_BITS;

	__kernel_virt_start = RADIX_KERN_VIRT_START;
	__kernel_virt_size = RADIX_KERN_VIRT_SIZE;
	__vmalloc_start = RADIX_VMALLOC_START;
	__vmalloc_end = RADIX_VMALLOC_END;
	__kernel_io_start = RADIX_KERN_IO_START;
	vmemmap = (struct page *)RADIX_VMEMMAP_BASE;
	ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PCI
	pci_io_base = ISA_IO_BASE;
#endif
	__pte_frag_nr = RADIX_PTE_FRAG_NR;
	__pte_frag_size_shift = RADIX_PTE_FRAG_SIZE_SHIFT;
	__pmd_frag_nr = RADIX_PMD_FRAG_NR;
	__pmd_frag_size_shift = RADIX_PMD_FRAG_SIZE_SHIFT;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		radix_init_native();
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
		radix_init_partition_table();
		radix_init_amor();
	} else {
		radix_init_pseries();
	}

	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);

	radix_init_pgtable();
	/* Switch to the guard PID before turning on MMU */
	radix__switch_mmu_context(NULL, &init_mm);
	if (cpu_has_feature(CPU_FTR_HVMODE))
		tlbiel_all();
}

void radix__early_init_mmu_secondary(void)
{
	unsigned long lpcr;
	/*
	 * update partition table control register and UPRT
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);

		mtspr(SPRN_PTCR,
		      __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
		radix_init_amor();
	}

	radix__switch_mmu_context(NULL, &init_mm);
	if (cpu_has_feature(CPU_FTR_HVMODE))
		tlbiel_all();
}

void radix__mmu_cleanup_all(void)
{
	unsigned long lpcr;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
		mtspr(SPRN_PTCR, 0);
		powernv_set_nmmu_ptcr(0);
		radix__flush_tlb_all();
	}
}

void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
				       phys_addr_t first_memblock_size)
{
	/*
	 * We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors
	 */
	BUG_ON(first_memblock_base != 0);

	/*
	 * Radix mode is not limited by RMA / VRMA addressing.
	 */
	ppc64_rma_size = ULONG_MAX;
}

#ifdef CONFIG_MEMORY_HOTPLUG
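/*
 * Memory hot-unplug helpers. A page table page is freed only once
 * every entry in it is clear, at which point the entry referencing it
 * at the level above is cleared as well.
 */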
static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	pte_free_kernel(&init_mm, pte_start);
	pmd_clear(pmd);
}

static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	pmd_free(&init_mm, pmd_start);
	pud_clear(pud);
}

struct change_mapping_params {
	pte_t *pte;
	unsigned long start;
	unsigned long end;
	unsigned long aligned_start;
	unsigned long aligned_end;
};

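/*
 * Runs under stop_machine(), so no other CPU can be walking the linear
 * mapping while the huge PTE is cleared and the surrounding sub-ranges
 * are remapped with smaller pages. init_mm.page_table_lock is dropped
 * across the remap because create_physical_mapping() allocates page
 * tables, which takes the same lock.
 */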
static int __meminit stop_machine_change_mapping(void *data)
{
	struct change_mapping_params *params =
			(struct change_mapping_params *)data;

	if (!data)
		return -1;

	spin_unlock(&init_mm.page_table_lock);
	pte_clear(&init_mm, params->aligned_start, params->pte);
	create_physical_mapping(params->aligned_start, params->start, -1);
	create_physical_mapping(params->end, params->aligned_end, -1);
	spin_lock(&init_mm.page_table_lock);
	return 0;
}

static void remove_pte_table(pte_t *pte_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(next)) {
			/*
			 * The vmemmap_free() and remove_section_mapping()
			 * codepaths call us with aligned addresses.
			 */
			WARN_ONCE(1, "%s: unaligned range\n", __func__);
			continue;
		}

		pte_clear(&init_mm, addr, pte);
	}
}

/*
 * Helper to clear the PTE and potentially split the mapping.
 */
static void __meminit split_kernel_mapping(unsigned long addr, unsigned long end,
					   unsigned long size, pte_t *pte)
{
	unsigned long mask = ~(size - 1);
	unsigned long aligned_start = addr & mask;
	unsigned long aligned_end = addr + size;
	struct change_mapping_params params;
	bool split_region = false;

	if ((end - addr) < size) {
		/*
		 * We're going to clear the PTE, but we have not yet
		 * flushed the mapping, so it is time to remap and
		 * flush. If the effects are visible outside the
		 * processor, or if we are running in code close to the
		 * mapping we cleared, we are in trouble.
		 */
		if (overlaps_kernel_text(aligned_start, addr) ||
			overlaps_kernel_text(end, aligned_end)) {
			/*
			 * Hack, just return, don't pte_clear
			 */
			WARN_ONCE(1, "Linear mapping %lx->%lx overlaps kernel "
				  "text, not splitting\n", addr, end);
			return;
		}
		split_region = true;
	}

	if (split_region) {
		params.pte = pte;
		params.start = addr;
		params.end = end;
		params.aligned_start = addr & ~(size - 1);
		params.aligned_end = min_t(unsigned long, aligned_end,
				(unsigned long)__va(memblock_end_of_DRAM()));
		stop_machine(stop_machine_change_mapping, &params, NULL);
		return;
	}

	pte_clear(&init_mm, addr, pte);
}

static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte_base;
	pmd_t *pmd;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_huge(*pmd)) {
			split_kernel_mapping(addr, end, PMD_SIZE, (pte_t *)pmd);
			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next);
		free_pte_table(pte_base, pmd);
	}
}

static void remove_pud_table(pud_t *pud_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pmd_t *pmd_base;
	pud_t *pud;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_huge(*pud)) {
			split_kernel_mapping(addr, end, PUD_SIZE, (pte_t *)pud);
			continue;
		}

		pmd_base = (pmd_t *)pud_page_vaddr(*pud);
		remove_pmd_table(pmd_base, addr, next);
		free_pmd_table(pmd_base, pud);
	}
}

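/*
 * Tear down the kernel mapping for [start, end): walk down from the
 * PGD, splitting any huge mapping that straddles the range, clearing
 * PTEs and freeing page table pages that become empty, then flush the
 * TLB for the whole range once at the end.
 */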
static void __meminit remove_pagetable(unsigned long start, unsigned long end)
{
	unsigned long addr, next;
	pud_t *pud_base;
	pgd_t *pgd;

	spin_lock(&init_mm.page_table_lock);

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		if (pgd_huge(*pgd)) {
			split_kernel_mapping(addr, end, PGDIR_SIZE, (pte_t *)pgd);
			continue;
		}

		pud_base = (pud_t *)pgd_page_vaddr(*pgd);
		remove_pud_table(pud_base, addr, next);
	}

	spin_unlock(&init_mm.page_table_lock);
	radix__flush_tlb_kernel_range(start, end);
}

int __meminit radix__create_section_mapping(unsigned long start, unsigned long end, int nid)
{
	return create_physical_mapping(start, end, nid);
}

int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
{
	remove_pagetable(start, end);
	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static int __map_kernel_page_nid(unsigned long ea, unsigned long pa,
				 pgprot_t flags, unsigned int map_page_size,
				 int nid)
{
	return __map_kernel_page(ea, pa, flags, map_page_size, nid, 0, 0);
}

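/*
 * vmemmap pages are placed on the node that owns the backing physical
 * memory, looked up with early_pfn_to_nid().
 */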
int __meminit radix__vmemmap_create_mapping(unsigned long start,
					    unsigned long page_size,
					    unsigned long phys)
{
	/* Create a PTE encoding */
	unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;
	int nid = early_pfn_to_nid(phys >> PAGE_SHIFT);
	int ret;

	ret = __map_kernel_page_nid(start, phys, __pgprot(flags), page_size, nid);
	BUG_ON(ret);

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
{
	remove_pagetable(start, start + page_size);
}
#endif
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

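/*
 * Clear and/or set bits in a huge PMD in place and return the old
 * value, so callers can see exactly what changed; the update is also
 * recorded via the hugepage_update tracepoint.
 */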
unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
					 pmd_t *pmdp, unsigned long clr,
					 unsigned long set)
{
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
#endif

	old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
	trace_hugepage_update(addr, old, clr, set);

	return old;
}

pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
				 pmd_t *pmdp)

{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
	VM_BUG_ON(pmd_devmap(*pmdp));
	/*
	 * khugepaged calls this for normal pmd
	 */
	pmd = *pmdp;
	pmd_clear(pmdp);

	/*FIXME!! Verify whether we need this kick below */
	serialize_against_pte_lookup(vma->vm_mm);

	radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);

	return pmd;
}

/*
 * For us pgtable_t is pte_t *. In order to save the deposited
 * page table, we consider the allocated page table as a list
 * head. On withdraw we need to make sure we zero out the used
 * list_head memory area.
 */
void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pte_t *ptep;
	pgtable_t pgtable;
	struct list_head *lh;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	*ptep = __pte(0);
	ptep++;
	*ptep = __pte(0);
	return pgtable;
}

pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
				     unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	unsigned long old;

	old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	/*
	 * Serialize against find_current_mm_pte which does lock-less
	 * lookup in page tables with local interrupts disabled. For huge pages
	 * it casts pmd_t to pte_t. Since format of pte_t is different from
	 * pmd_t we want to prevent transit from pmd pointing to page table
	 * to pmd pointing to huge page (and back) while interrupts are disabled.
	 * We clear pmd to possibly replace it with page table pointer in
	 * different code paths. So make sure we wait for the parallel
	 * find_current_mm_pte to finish.
	 */
	serialize_against_pte_lookup(mm);
	return old_pmd;
}

int radix__has_transparent_hugepage(void)
{
	/* For radix 2M at PMD level means thp */
	if (mmu_psize_defs[MMU_PAGE_2M].shift == PMD_SHIFT)
		return 1;
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
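
/*
 * Relaxing access rights on a PTE that a nest MMU (NMMU) may be using
 * cannot be done in place: the PTE is first marked invalid, the TLB
 * entry is flushed, and only then is the new value installed.
 */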
void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
				  pte_t entry, unsigned long address, int psize)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
					      _PAGE_RW | _PAGE_EXEC);

	unsigned long change = pte_val(entry) ^ pte_val(*ptep);
	/*
	 * To avoid NMMU hang while relaxing access, we need to mark
	 * the pte invalid in between.
	 */
	if ((change & _PAGE_RW) && atomic_read(&mm->context.copros) > 0) {
		unsigned long old_pte, new_pte;

		old_pte = __radix_pte_update(ptep, _PAGE_PRESENT, _PAGE_INVALID);
		/*
		 * new value of pte
		 */
		new_pte = old_pte | set;
		radix__flush_tlb_page_psize(mm, address, psize);
		__radix_pte_update(ptep, _PAGE_INVALID, new_pte);
	} else {
		__radix_pte_update(ptep, 0, set);
		/*
		 * Book3S does not require a TLB flush when relaxing access
		 * restrictions when the address space is not attached to a
		 * NMMU, because the core MMU will reload the pte after taking
		 * an access fault, which is defined by the architecture.
		 */
	}
	/* See ptesync comment in radix__set_pte_at */
}

void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep,
				    pte_t old_pte, pte_t pte)
{
	struct mm_struct *mm = vma->vm_mm;

	/*
	 * To avoid NMMU hang while relaxing access we need to flush the tlb before
	 * we set the new value. We need to do this only for radix, because hash
	 * translation does flush when updating the linux pte.
	 */
	if (is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) &&
	    (atomic_read(&mm->context.copros) > 0))
		radix__flush_tlb_page(vma, addr);

	set_pte_at(mm, addr, ptep, pte);
}