// SPDX-License-Identifier: GPL-2.0
/*
 *  arch/sparc64/mm/init.c
 *
 *  Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/extable.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
#include <linux/cache.h>
#include <linux/sort.h>
#include <linux/ioport.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/gfp.h>

#include <asm/head.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/spitfire.h>
#include <asm/sections.h>
#include <asm/tsb.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/cpudata.h>
#include <asm/setup.h>
#include <asm/irq.h>

#include "init_64.h"

unsigned long kern_linear_pte_xor[4] __read_mostly;
static unsigned long page_cache4v_flag;

/* A bitmap, two bits for every 256MB of physical memory.  These two
 * bits determine what page size we use for kernel linear
 * translations.  They form an index into kern_linear_pte_xor[].  The
 * value in the indexed slot is XOR'd with the TLB miss virtual
 * address to form the resulting TTE.  The mapping is:
 *
 *	0	==>	4MB
 *	1	==>	256MB
 *	2	==>	2GB
 *	3	==>	16GB
 *
 * All sun4v chips support 256MB pages.  Only SPARC-T4 and later
 * support 2GB pages, and hopefully future cpus will support the 16GB
 * pages as well.  For slots 2 and 3, we encode a 256MB TTE xor there
 * if these larger page sizes are not supported by the cpu.
 *
 * It would be nice to determine this from the machine description
 * 'cpu' properties, but we need to have this table setup before the
 * MDESC is initialized.
 */
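
/* Illustrative restatement of the xor trick above (a sketch, not copied
 * from the actual miss handler; 'linear_page_size' is a hypothetical
 * name for the size selected by the two-bit index i):
 *
 *	tte = (vaddr & ~(linear_page_size - 1)) ^ kern_linear_pte_xor[i];
 *
 * Since PAGE_OFFSET maps physical memory linearly, the precomputed xor
 * value simultaneously cancels the PAGE_OFFSET bits of the virtual
 * address and merges in the TTE valid/size/protection bits.
 */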

#ifndef CONFIG_DEBUG_PAGEALLOC
/* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings.
 * Space is allocated for this right after the trap table in
 * arch/sparc64/kernel/head.S
 */
extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
#endif
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

static unsigned long cpu_pgsz_mask;

#define MAX_BANKS	1024

static struct linux_prom64_registers pavail[MAX_BANKS];
static int pavail_ents;

u64 numa_latency[MAX_NUMNODES][MAX_NUMNODES];

static int cmp_p64(const void *a, const void *b)
{
	const struct linux_prom64_registers *x = a, *y = b;

	if (x->phys_addr > y->phys_addr)
		return 1;
	if (x->phys_addr < y->phys_addr)
		return -1;
	return 0;
}

static void __init read_obp_memory(const char *property,
				   struct linux_prom64_registers *regs,
				   int *num_ents)
{
	phandle node = prom_finddevice("/memory");
	int prop_size = prom_getproplen(node, property);
	int ents, ret, i;

	ents = prop_size / sizeof(struct linux_prom64_registers);
	if (ents > MAX_BANKS) {
		prom_printf("The machine has more %s property entries than "
			    "this kernel can support (%d).\n",
			    property, MAX_BANKS);
		prom_halt();
	}

	ret = prom_getproperty(node, property, (char *) regs, prop_size);
	if (ret == -1) {
		prom_printf("Couldn't get %s property from /memory.\n",
			    property);
		prom_halt();
	}

	/* Sanitize what we got from the firmware, by page aligning
	 * everything.
	 */
	for (i = 0; i < ents; i++) {
		unsigned long base, size;

		base = regs[i].phys_addr;
		size = regs[i].reg_size;

		size &= PAGE_MASK;
		if (base & ~PAGE_MASK) {
			unsigned long new_base = PAGE_ALIGN(base);

			size -= new_base - base;
			if ((long) size < 0L)
				size = 0UL;
			base = new_base;
		}
		if (size == 0UL) {
			/* If it is empty, simply get rid of it.
			 * This simplifies the logic of the other
			 * functions that process these arrays.
			 */
			memmove(&regs[i], &regs[i + 1],
				(ents - i - 1) * sizeof(regs[0]));
			i--;
			ents--;
			continue;
		}
		regs[i].phys_addr = base;
		regs[i].reg_size = size;
	}

	*num_ents = ents;

	sort(regs, ents, sizeof(struct linux_prom64_registers),
	     cmp_p64, NULL);
}

/* Kernel physical address base and size in bytes.  */
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero __read_mostly;
EXPORT_SYMBOL(mem_map_zero);

unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;

unsigned long sparc64_kern_pri_context __read_mostly;
unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
unsigned long sparc64_kern_sec_context __read_mostly;

int num_kernel_image_mappings;

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif

inline void flush_dcache_page_impl(struct page *page)
{
	BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping_file(page) != NULL));
#else
	if (page_mapping_file(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

#define PG_dcache_dirty		PG_arch_1
#define PG_dcache_cpu_shift	32UL
#define PG_dcache_cpu_mask	\
	((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)

#define dcache_dirty_cpu(page) \
	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
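
/* Illustrative layout of the encoding above: bit PG_dcache_dirty
 * (an alias of PG_arch_1) marks that the page has dirty D-cache lines,
 * and the cpu owning them sits at PG_dcache_cpu_shift (32),
 * PG_dcache_cpu_mask wide.  E.g. with NR_CPUS == 64 the cpu field is
 * six bits at [37:32], so marking cpu 5 dirty sets
 * (5UL << 32) | (1UL << PG_dcache_dirty) in page->flags.
 */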

static inline void set_dcache_dirty(struct page *page, int this_cpu)
{
	unsigned long mask = this_cpu;
	unsigned long non_cpu_bits;

	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

	__asm__ __volatile__("1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "and	%%g7, %1, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g1", "g7");
}

static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "srlx	%%g7, %4, %%g1\n\t"
			     "and	%%g1, %3, %%g1\n\t"
			     "cmp	%%g1, %0\n\t"
			     "bne,pn	%%icc, 2f\n\t"
			     " andn	%%g7, %1, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop\n"
			     "2:"
			     : /* no outputs */
			     : "r" (cpu), "r" (mask), "r" (&page->flags),
			       "i" (PG_dcache_cpu_mask),
			       "i" (PG_dcache_cpu_shift)
			     : "g1", "g7");
}

static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
{
	unsigned long tsb_addr = (unsigned long) ent;

	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		tsb_addr = __pa(tsb_addr);

	__tsb_insert(tsb_addr, tag, pte);
}

unsigned long _PAGE_ALL_SZ_BITS __read_mostly;

static void flush_dcache(unsigned long pfn)
{
	struct page *page;

	page = pfn_to_page(pfn);
	if (page) {
		unsigned long pg_flags;

		pg_flags = page->flags;
		if (pg_flags & (1UL << PG_dcache_dirty)) {
			int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
				   PG_dcache_cpu_mask);
			int this_cpu = get_cpu();

			/* This is just to optimize away some function calls
			 * in the SMP case.
			 */
			if (cpu == this_cpu)
				flush_dcache_page_impl(page);
			else
				smp_flush_dcache_page_impl(page, cpu);

			clear_dcache_dirty_cpu(page, cpu);

			put_cpu();
		}
	}
}

/* mm->context.lock must be held */
static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_index,
				    unsigned long tsb_hash_shift, unsigned long address,
				    unsigned long tte)
{
	struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
	unsigned long tag;

	if (unlikely(!tsb))
		return;

	tsb += ((address >> tsb_hash_shift) &
		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
	tag = (address >> 22UL);
	tsb_insert(tsb, tag, tte);
}

#ifdef CONFIG_HUGETLB_PAGE
static void __init add_huge_page_size(unsigned long size)
{
	unsigned int order;

	if (size_to_hstate(size))
		return;

	order = ilog2(size) - PAGE_SHIFT;
	hugetlb_add_hstate(order);
}

static int __init hugetlbpage_init(void)
{
	add_huge_page_size(1UL << HPAGE_64K_SHIFT);
	add_huge_page_size(1UL << HPAGE_SHIFT);
	add_huge_page_size(1UL << HPAGE_256MB_SHIFT);
	add_huge_page_size(1UL << HPAGE_2GB_SHIFT);

	return 0;
}

arch_initcall(hugetlbpage_init);

static void __init pud_huge_patch(void)
{
	struct pud_huge_patch_entry *p;
	unsigned long addr;

	p = &__pud_huge_patch;
	addr = p->addr;
	*(unsigned int *)addr = p->insn;

	__asm__ __volatile__("flush %0" : : "r" (addr));
}

static int __init setup_hugepagesz(char *string)
{
	unsigned long long hugepage_size;
	unsigned int hugepage_shift;
	unsigned short hv_pgsz_idx;
	unsigned int hv_pgsz_mask;
	int rc = 0;

	hugepage_size = memparse(string, &string);
	hugepage_shift = ilog2(hugepage_size);

	switch (hugepage_shift) {
	case HPAGE_16GB_SHIFT:
		hv_pgsz_mask = HV_PGSZ_MASK_16GB;
		hv_pgsz_idx = HV_PGSZ_IDX_16GB;
		pud_huge_patch();
		break;
	case HPAGE_2GB_SHIFT:
		hv_pgsz_mask = HV_PGSZ_MASK_2GB;
		hv_pgsz_idx = HV_PGSZ_IDX_2GB;
		break;
	case HPAGE_256MB_SHIFT:
		hv_pgsz_mask = HV_PGSZ_MASK_256MB;
		hv_pgsz_idx = HV_PGSZ_IDX_256MB;
		break;
	case HPAGE_SHIFT:
		hv_pgsz_mask = HV_PGSZ_MASK_4MB;
		hv_pgsz_idx = HV_PGSZ_IDX_4MB;
		break;
	case HPAGE_64K_SHIFT:
		hv_pgsz_mask = HV_PGSZ_MASK_64K;
		hv_pgsz_idx = HV_PGSZ_IDX_64K;
		break;
	default:
		hv_pgsz_mask = 0;
	}

	if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) {
		hugetlb_bad_size();
		pr_err("hugepagesz=%llu not supported by MMU.\n",
		       hugepage_size);
		goto out;
	}

	add_huge_page_size(hugepage_size);
	rc = 1;

out:
	return rc;
}
__setup("hugepagesz=", setup_hugepagesz);
#endif	/* CONFIG_HUGETLB_PAGE */

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	struct mm_struct *mm;
	unsigned long flags;
	bool is_huge_tsb;
	pte_t pte = *ptep;

	if (tlb_type != hypervisor) {
		unsigned long pfn = pte_pfn(pte);

		if (pfn_valid(pfn))
			flush_dcache(pfn);
	}

	mm = vma->vm_mm;

	/* Don't insert a non-valid PTE into the TSB, we'll deadlock.  */
	if (!pte_accessible(mm, pte))
		return;

	spin_lock_irqsave(&mm->context.lock, flags);

	is_huge_tsb = false;
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	if (mm->context.hugetlb_pte_count || mm->context.thp_pte_count) {
		unsigned long hugepage_size = PAGE_SIZE;

		if (is_vm_hugetlb_page(vma))
			hugepage_size = huge_page_size(hstate_vma(vma));

		if (hugepage_size >= PUD_SIZE) {
			unsigned long mask = 0x1ffc00000UL;

			/* Transfer bits [32:22] from address to resolve
			 * at 4M granularity.
			 */
			pte_val(pte) &= ~mask;
			pte_val(pte) |= (address & mask);
		} else if (hugepage_size >= PMD_SIZE) {
			/* We are fabricating 8MB pages using 4MB
			 * real hw pages.
			 */
			pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT));
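			/* Illustrative note (assuming REAL_HPAGE_SHIFT == 22,
			 * i.e. 4MB real pages, as on sparc64): this copies
			 * vaddr bit 22 into the TTE, selecting which 4MB
			 * half of the fabricated 8MB page this entry maps.
			 */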
		}

		if (hugepage_size >= PMD_SIZE) {
			__update_mmu_tsb_insert(mm, MM_TSB_HUGE,
				REAL_HPAGE_SHIFT, address, pte_val(pte));
			is_huge_tsb = true;
		}
	}
#endif
	if (!is_huge_tsb)
		__update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
					address, pte_val(pte));

	spin_unlock_irqrestore(&mm->context.lock, flags);
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	/* Do not bother with the expensive D-cache flush if it
	 * is merely the zero page.  The 'bigcore' testcase in GDB
	 * causes this case to run millions of times.
	 */
	if (page == ZERO_PAGE(0))
		return;

	this_cpu = get_cpu();

	mapping = page_mapping_file(page);
	if (mapping && !mapping_mapped(mapping)) {
		int dirty = test_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			int dirty_cpu = dcache_dirty_cpu(page);

			if (dirty_cpu == this_cpu)
				goto out;
			smp_flush_dcache_page_impl(page, dirty_cpu);
		}
		set_dcache_dirty(page, this_cpu);
	} else {
		/* We could delay the flush for the !page_mapping
		 * case too.  But that case is for exec env/arg
		 * pages and those are 99% certainly going to get
		 * faulted into the tlb (and thus flushed) anyways.
		 */
		flush_dcache_page_impl(page);
	}

out:
	put_cpu();
}
EXPORT_SYMBOL(flush_dcache_page);

void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
	/* Cheetah and Hypervisor platform cpus have coherent I-cache. */
	if (tlb_type == spitfire) {
		unsigned long kaddr;

		/* This code only runs on Spitfire cpus so this is
		 * why we can assume _PAGE_PADDR_4U.
		 */
		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
			unsigned long paddr, mask = _PAGE_PADDR_4U;

			if (kaddr >= PAGE_OFFSET)
				paddr = kaddr & mask;
			else {
				pgd_t *pgdp = pgd_offset_k(kaddr);
				pud_t *pudp = pud_offset(pgdp, kaddr);
				pmd_t *pmdp = pmd_offset(pudp, kaddr);
				pte_t *ptep = pte_offset_kernel(pmdp, kaddr);

				paddr = pte_val(*ptep) & mask;
			}
			__flush_icache_page(paddr);
		}
	}
}
EXPORT_SYMBOL(flush_icache_range);

void mmu_info(struct seq_file *m)
{
	static const char *pgsz_strings[] = {
		"8K", "64K", "512K", "4MB", "32MB",
		"256MB", "2GB", "16GB",
	};
	int i, printed;

	if (tlb_type == cheetah)
		seq_printf(m, "MMU Type\t: Cheetah\n");
	else if (tlb_type == cheetah_plus)
		seq_printf(m, "MMU Type\t: Cheetah+\n");
	else if (tlb_type == spitfire)
		seq_printf(m, "MMU Type\t: Spitfire\n");
	else if (tlb_type == hypervisor)
		seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
	else
		seq_printf(m, "MMU Type\t: ???\n");

	seq_printf(m, "MMU PGSZs\t: ");
	printed = 0;
	for (i = 0; i < ARRAY_SIZE(pgsz_strings); i++) {
		if (cpu_pgsz_mask & (1UL << i)) {
			seq_printf(m, "%s%s",
				   printed ? "," : "", pgsz_strings[i]);
			printed++;
		}
	}
	seq_putc(m, '\n');

#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}

struct linux_prom_translation prom_trans[512] __read_mostly;
unsigned int prom_trans_ents __read_mostly;

unsigned long kern_locked_tte_data;

/* The obp translations are saved based on 8k pagesize, since obp can
 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
 * HI_OBP_ADDRESS range are handled in ktlb.S.
 */
static inline int in_obp_range(unsigned long vaddr)
{
	return (vaddr >= LOW_OBP_ADDRESS &&
		vaddr < HI_OBP_ADDRESS);
}

static int cmp_ptrans(const void *a, const void *b)
{
	const struct linux_prom_translation *x = a, *y = b;

	if (x->virt > y->virt)
		return 1;
	if (x->virt < y->virt)
		return -1;
	return 0;
}

/* Read OBP translations property into 'prom_trans[]'.  */
static void __init read_obp_translations(void)
{
	int n, node, ents, first, last, i;

	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (unlikely(n == 0 || n == -1)) {
		prom_printf("prom_mappings: Couldn't get size.\n");
		prom_halt();
	}
	if (unlikely(n > sizeof(prom_trans))) {
		prom_printf("prom_mappings: Size %d is too big.\n", n);
		prom_halt();
	}

	if ((n = prom_getproperty(node, "translations",
				  (char *)&prom_trans[0],
				  sizeof(prom_trans))) == -1) {
		prom_printf("prom_mappings: Couldn't get property.\n");
		prom_halt();
	}

	n = n / sizeof(struct linux_prom_translation);

	ents = n;

	sort(prom_trans, ents, sizeof(struct linux_prom_translation),
	     cmp_ptrans, NULL);

	/* Now kick out all the non-OBP entries.  */
	for (i = 0; i < ents; i++) {
		if (in_obp_range(prom_trans[i].virt))
			break;
	}
	first = i;
	for (; i < ents; i++) {
		if (!in_obp_range(prom_trans[i].virt))
			break;
	}
	last = i;

	for (i = 0; i < (last - first); i++) {
		struct linux_prom_translation *src = &prom_trans[i + first];
		struct linux_prom_translation *dest = &prom_trans[i];

		*dest = *src;
	}
	for (; i < ents; i++) {
		struct linux_prom_translation *dest = &prom_trans[i];
		dest->virt = dest->size = dest->data = 0x0UL;
	}

	prom_trans_ents = last - first;

	if (tlb_type == spitfire) {
		/* Clear diag TTE bits. */
		for (i = 0; i < prom_trans_ents; i++)
			prom_trans[i].data &= ~0x0003fe0000000000UL;
	}

	/* Force execute bit on.  */
	for (i = 0; i < prom_trans_ents; i++)
		prom_trans[i].data |= (tlb_type == hypervisor ?
				       _PAGE_EXEC_4V : _PAGE_EXEC_4U);
}

static void __init hypervisor_tlb_lock(unsigned long vaddr,
				       unsigned long pte,
				       unsigned long mmu)
{
	unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);

	if (ret != 0) {
		prom_printf("hypervisor_tlb_lock[%lx:%x:%lx:%lx]: "
			    "errors with %lx\n", vaddr, 0, pte, mmu, ret);
		prom_halt();
	}
}

static unsigned long kern_large_tte(unsigned long paddr);

static void __init remap_kernel(void)
{
	unsigned long phys_page, tte_vaddr, tte_data;
	int i, tlb_ent = sparc64_highest_locked_tlbent();

	tte_vaddr = (unsigned long) KERNBASE;
	phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
	tte_data = kern_large_tte(phys_page);

	kern_locked_tte_data = tte_data;

	/* Now lock us into the TLBs via Hypervisor or OBP. */
	if (tlb_type == hypervisor) {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
	} else {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
			prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
		sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
	}
	if (tlb_type == cheetah_plus) {
		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
					    CTX_CHEETAH_PLUS_NUC);
		sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
		sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
	}
}


static void __init inherit_prom_mappings(void)
{
	/* Now fixup OBP's idea about where we really are mapped. */
	printk("Remapping the kernel... ");
	remap_kernel();
	printk("done.\n");
}

void prom_world(int enter)
{
	if (!enter)
		set_fs(get_fs());

	__asm__ __volatile__("flushw");
}

void __flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long va;

	if (tlb_type == spitfire) {
		int n = 0;

		for (va = start; va < end; va += 32) {
			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
			if (++n >= 512)
				break;
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		start = __pa(start);
		end = __pa(end);
		for (va = start; va < end; va += 32)
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (va),
					       "i" (ASI_DCACHE_INVALIDATE));
	}
}
EXPORT_SYMBOL(__flush_dcache_range);

/* get_new_mmu_context() uses "cache + 1".  */
DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION;
#define MAX_CTX_NR	(1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0};

static void mmu_context_wrap(void)
{
	unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK;
	unsigned long new_ver, new_ctx, old_ctx;
	struct mm_struct *mm;
	int cpu;

	bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS);

	/* Reserve kernel context */
	set_bit(0, mmu_context_bmap);

	new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
	if (unlikely(new_ver == 0))
		new_ver = CTX_FIRST_VERSION;
	tlb_context_cache = new_ver;

	/*
	 * Make sure that any new mm that is added into per_cpu_secondary_mm
	 * is going to go through the get_new_mmu_context() path.
	 */
	mb();

	/*
	 * Update versions to current on those CPUs that had valid secondary
	 * contexts.
	 */
	for_each_online_cpu(cpu) {
		/*
		 * If a new mm is stored after we took this mm from the array,
		 * it will go into get_new_mmu_context() path, because we
		 * already bumped the version in tlb_context_cache.
		 */
		mm = per_cpu(per_cpu_secondary_mm, cpu);

		if (unlikely(!mm || mm == &init_mm))
			continue;

		old_ctx = mm->context.sparc64_ctx_val;
		if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) {
			new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver;
			set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap);
			mm->context.sparc64_ctx_val = new_ctx;
		}
	}
}

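/* Sketch of the context value layout assumed by this allocator
 * (derived from the code below, for illustration): a context packs a
 * generation "version" in the high bits and a context number in the
 * low CTX_NR_BITS,
 *
 *	ctx_val = (tlb_context_cache & CTX_VERSION_MASK) | ctx_nr;
 *
 * so a version mismatch against tlb_context_cache makes CTX_VALID()
 * false and funnels the mm back through get_new_mmu_context().
 */
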
/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) or we ever use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 *
 * Always invoked with interrupts disabled.
 */
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;
	unsigned long orig_pgsz_bits;

	spin_lock(&ctx_alloc_lock);
retry:
	/* wrap might have happened, test again if our context became valid */
	if (unlikely(CTX_VALID(mm->context)))
		goto out;
	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
	if (new_ctx >= (1 << CTX_NR_BITS)) {
		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
		if (new_ctx >= ctx) {
			mmu_context_wrap();
			goto retry;
		}
	}
	if (mm->context.sparc64_ctx_val)
		cpumask_clear(mm_cpumask(mm));
	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
	tlb_context_cache = new_ctx;
	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
out:
	spin_unlock(&ctx_alloc_lock);
}

static int numa_enabled = 1;
static int numa_debug;

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	return 0;
}
early_param("numa", early_numa);

#define numadbg(f, a...) \
do {	if (numa_debug) \
		printk(KERN_INFO f, ## a); \
} while (0)

static void __init find_ramdisk(unsigned long phys_base)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
		unsigned long ramdisk_image;

		/* Older versions of the bootloader only supported a
		 * 32-bit physical address for the ramdisk image
		 * location, stored at sparc_ramdisk_image.  Newer
		 * SILO versions set sparc_ramdisk_image to zero and
		 * provide a full 64-bit physical address at
		 * sparc_ramdisk_image64.
		 */
		ramdisk_image = sparc_ramdisk_image;
		if (!ramdisk_image)
			ramdisk_image = sparc_ramdisk_image64;

		/* Another bootloader quirk.  The bootloader normalizes
		 * the physical address to KERNBASE, so we have to
		 * factor that back out and add in the lowest valid
		 * physical page address to get the true physical address.
		 */
		ramdisk_image -= KERNBASE;
		ramdisk_image += phys_base;

		numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
			ramdisk_image, sparc_ramdisk_size);

		initrd_start = ramdisk_image;
		initrd_end = ramdisk_image + sparc_ramdisk_size;

		memblock_reserve(initrd_start, sparc_ramdisk_size);

		initrd_start += PAGE_OFFSET;
		initrd_end += PAGE_OFFSET;
	}
#endif
}

struct node_mem_mask {
	unsigned long mask;
	unsigned long match;
};
static struct node_mem_mask node_masks[MAX_NUMNODES];
static int num_node_masks;

#ifdef CONFIG_NEED_MULTIPLE_NODES

struct mdesc_mlgroup {
	u64 node;
	u64 latency;
	u64 match;
	u64 mask;
};

static struct mdesc_mlgroup *mlgroups;
static int num_mlgroups;

int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];

struct mdesc_mblock {
	u64 base;
	u64 size;
	u64 offset; /* RA-to-PA */
};
static struct mdesc_mblock *mblocks;
static int num_mblocks;

static struct mdesc_mblock * __init addr_to_mblock(unsigned long addr)
{
	struct mdesc_mblock *m = NULL;
	int i;

	for (i = 0; i < num_mblocks; i++) {
		m = &mblocks[i];

		if (addr >= m->base &&
		    addr < (m->base + m->size)) {
			break;
		}
	}

	return m;
}

static u64 __init memblock_nid_range_sun4u(u64 start, u64 end, int *nid)
{
	int prev_nid, new_nid;

	prev_nid = -1;
	for ( ; start < end; start += PAGE_SIZE) {
		for (new_nid = 0; new_nid < num_node_masks; new_nid++) {
			struct node_mem_mask *p = &node_masks[new_nid];

			if ((start & p->mask) == p->match) {
				if (prev_nid == -1)
					prev_nid = new_nid;
				break;
			}
		}

		if (new_nid == num_node_masks) {
			prev_nid = 0;
			WARN_ONCE(1, "addr[%Lx] doesn't match a NUMA node rule. Some memory will be owned by node 0.",
				  start);
			break;
		}

		if (prev_nid != new_nid)
			break;
	}
	*nid = prev_nid;

	return start > end ? end : start;
}

static u64 __init memblock_nid_range(u64 start, u64 end, int *nid)
{
	u64 ret_end, pa_start, m_mask, m_match, m_end;
	struct mdesc_mblock *mblock;
	int _nid, i;

	if (tlb_type != hypervisor)
		return memblock_nid_range_sun4u(start, end, nid);

	mblock = addr_to_mblock(start);
	if (!mblock) {
		WARN_ONCE(1, "memblock_nid_range: Can't find mblock addr[%Lx]",
			  start);

		_nid = 0;
		ret_end = end;
		goto done;
	}

	pa_start = start + mblock->offset;
	m_match = 0;
	m_mask = 0;

	for (_nid = 0; _nid < num_node_masks; _nid++) {
		struct node_mem_mask *const m = &node_masks[_nid];

		if ((pa_start & m->mask) == m->match) {
			m_match = m->match;
			m_mask = m->mask;
			break;
		}
	}

	if (num_node_masks == _nid) {
		/* We could not find a NUMA group, so default to 0, but
		 * search for a latency group so we can calculate the
		 * correct end address to return.
		 */
		_nid = 0;

		for (i = 0; i < num_mlgroups; i++) {
			struct mdesc_mlgroup *const m = &mlgroups[i];

			if ((pa_start & m->mask) == m->match) {
				m_match = m->match;
				m_mask = m->mask;
				break;
			}
		}

		if (i == num_mlgroups) {
			WARN_ONCE(1, "memblock_nid_range: Can't find latency group addr[%Lx]",
				  start);

			ret_end = end;
			goto done;
		}
	}

	/*
	 * Each latency group has match and mask, and each memory block has an
	 * offset.  An address belongs to a latency group if its address matches
	 * the following formula: ((addr + offset) & mask) == match
	 * It is, however, slow to check every single page if it matches a
	 * particular latency group.  As an optimization we calculate the end
	 * value using bit arithmetic.
	 */
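	/* Worked example (illustrative): with mask == 0x1c0000000,
	 * match == 0x040000000 and offset == 0, __ffs(mask) == 30 gives a
	 * 1GB window and fls64(mask) == 33, so m_end below becomes
	 * match + 1GB plus the bits of pa_start above bit 32, i.e. the
	 * first PA at which (pa & mask) stops equalling match (converted
	 * back to an RA by the offset subtraction).
	 */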
	m_end = m_match + (1ul << __ffs(m_mask)) - mblock->offset;
	m_end += pa_start & ~((1ul << fls64(m_mask)) - 1);
	ret_end = m_end > end ? end : m_end;

done:
	*nid = _nid;
	return ret_end;
}
#endif

/* This must be invoked after performing all of the necessary
 * memblock_set_node() calls for 'nid'.  We need to be able to get
 * correct data from get_pfn_range_for_nid().
 */
static void __init allocate_node_data(int nid)
{
	struct pglist_data *p;
	unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
	unsigned long paddr;

	paddr = memblock_phys_alloc_try_nid(sizeof(struct pglist_data),
					    SMP_CACHE_BYTES, nid);
	if (!paddr) {
		prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
		prom_halt();
	}
	NODE_DATA(nid) = __va(paddr);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->node_id = nid;
#endif

	p = NODE_DATA(nid);

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
	p->node_start_pfn = start_pfn;
	p->node_spanned_pages = end_pfn - start_pfn;
}

static void init_node_masks_nonnuma(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int i;
#endif

	numadbg("Initializing tables for non-numa.\n");

	node_masks[0].mask = 0;
	node_masks[0].match = 0;
	num_node_masks = 1;

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for (i = 0; i < NR_CPUS; i++)
		numa_cpu_lookup_table[i] = 0;

	cpumask_setall(&numa_cpumask_lookup_table[0]);
#endif
}

#ifdef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);

static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
				   u32 cfg_handle)
{
	u64 arc;

	mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const u64 *val;

		val = mdesc_get_property(md, target,
					 "cfg-handle", NULL);
		if (val && *val == cfg_handle)
			return 0;
	}
	return -ENODEV;
}

static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
				    u32 cfg_handle)
{
	u64 arc, candidate, best_latency = ~(u64)0;

	candidate = MDESC_NODE_NULL;
	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *val;

		if (strcmp(name, "pio-latency-group"))
			continue;

		val = mdesc_get_property(md, target, "latency", NULL);
		if (!val)
			continue;

		if (*val < best_latency) {
			candidate = target;
			best_latency = *val;
		}
	}

	if (candidate == MDESC_NODE_NULL)
		return -ENODEV;

	return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
}

int of_node_to_nid(struct device_node *dp)
{
	const struct linux_prom64_registers *regs;
	struct mdesc_handle *md;
	u32 cfg_handle;
	int count, nid;
	u64 grp;

	/* This is the right thing to do on currently supported
	 * SUN4U NUMA platforms as well, as the PCI controller does
	 * not sit behind any particular memory controller.
	 */
	if (!mlgroups)
		return -1;

	regs = of_get_property(dp, "reg", NULL);
	if (!regs)
		return -1;

	cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	md = mdesc_grab();

	count = 0;
	nid = -1;
	mdesc_for_each_node_by_name(md, grp, "group") {
		if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
			nid = count;
			break;
		}
		count++;
	}

	mdesc_release(md);

	return nid;
}

static void __init add_node_ranges(void)
{
	struct memblock_region *reg;
	unsigned long prev_max;

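	/* memblock_set_node() below may split regions and grow memblock's
	 * memory array while we iterate it, leaving 'reg' stale.  Detect
	 * this via memblock.memory.max and restart the whole walk.
	 */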
memblock_resized:
	prev_max = memblock.memory.max;

	for_each_memblock(memory, reg) {
		unsigned long size = reg->size;
		unsigned long start, end;

		start = reg->base;
		end = start + size;
		while (start < end) {
			unsigned long this_end;
			int nid;

			this_end = memblock_nid_range(start, end, &nid);

			numadbg("Setting memblock NUMA node nid[%d] "
				"start[%lx] end[%lx]\n",
				nid, start, this_end);

			memblock_set_node(start, this_end - start,
					  &memblock.memory, nid);
			if (memblock.memory.max != prev_max)
				goto memblock_resized;
			start = this_end;
		}
	}
}

static int __init grab_mlgroups(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "memory-latency-group")
		count++;
	if (!count)
		return -ENOENT;

	paddr = memblock_phys_alloc(count * sizeof(struct mdesc_mlgroup),
				    SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mlgroups = __va(paddr);
	num_mlgroups = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
		struct mdesc_mlgroup *m = &mlgroups[count++];
		const u64 *val;

		m->node = node;

		val = mdesc_get_property(md, node, "latency", NULL);
		m->latency = *val;
		val = mdesc_get_property(md, node, "address-match", NULL);
		m->match = *val;
		val = mdesc_get_property(md, node, "address-mask", NULL);
		m->mask = *val;

		numadbg("MLGROUP[%d]: node[%llx] latency[%llx] "
			"match[%llx] mask[%llx]\n",
			count - 1, m->node, m->latency, m->match, m->mask);
	}

	return 0;
}

static int __init grab_mblocks(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "mblock")
		count++;
	if (!count)
		return -ENOENT;

	paddr = memblock_phys_alloc(count * sizeof(struct mdesc_mblock),
				    SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mblocks = __va(paddr);
	num_mblocks = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "mblock") {
		struct mdesc_mblock *m = &mblocks[count++];
		const u64 *val;

		val = mdesc_get_property(md, node, "base", NULL);
		m->base = *val;
		val = mdesc_get_property(md, node, "size", NULL);
		m->size = *val;
		val = mdesc_get_property(md, node,
					 "address-congruence-offset", NULL);

		/* The address-congruence-offset property is optional.
		 * Explicitly zero the offset to identify this case.
		 */
1333 if (val)
1334 m->offset = *val;
1335 else
1336 m->offset = 0UL;
David S. Miller919ee672008-04-23 05:40:25 -07001337
Sam Ravnborg90181132009-01-06 13:19:28 -08001338 numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
David S. Miller919ee672008-04-23 05:40:25 -07001339 count - 1, m->base, m->size, m->offset);
1340 }
1341
1342 return 0;
1343}
1344
1345static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
1346 u64 grp, cpumask_t *mask)
1347{
1348 u64 arc;
1349
KOSAKI Motohirofb1fece2011-05-16 13:38:07 -07001350 cpumask_clear(mask);
David S. Miller919ee672008-04-23 05:40:25 -07001351
1352 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
1353 u64 target = mdesc_arc_target(md, arc);
1354 const char *name = mdesc_node_name(md, target);
1355 const u64 *id;
1356
1357 if (strcmp(name, "cpu"))
1358 continue;
1359 id = mdesc_get_property(md, target, "id", NULL);
Rusty Russelle305cb8f2009-03-16 14:40:23 +10301360 if (*id < nr_cpu_ids)
KOSAKI Motohirofb1fece2011-05-16 13:38:07 -07001361 cpumask_set_cpu(*id, mask);
David S. Miller919ee672008-04-23 05:40:25 -07001362 }
1363}
1364
1365static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
1366{
1367 int i;
1368
1369 for (i = 0; i < num_mlgroups; i++) {
1370 struct mdesc_mlgroup *m = &mlgroups[i];
1371 if (m->node == node)
1372 return m;
1373 }
1374 return NULL;
1375}
1376
Nitin Gupta52708d62015-11-02 16:30:24 -05001377int __node_distance(int from, int to)
1378{
1379 if ((from >= MAX_NUMNODES) || (to >= MAX_NUMNODES)) {
1380 pr_warn("Returning default NUMA distance value for %d->%d\n",
1381 from, to);
1382 return (from == to) ? LOCAL_DISTANCE : REMOTE_DISTANCE;
1383 }
1384 return numa_latency[from][to];
1385}
David S. Miller2b4792e2018-10-26 15:11:56 -07001386EXPORT_SYMBOL(__node_distance);
Nitin Gupta52708d62015-11-02 16:30:24 -05001387
Paul Gortmakerbdf2f592016-08-06 00:31:48 -04001388static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
Nitin Gupta52708d62015-11-02 16:30:24 -05001389{
1390 int i;
1391
1392 for (i = 0; i < MAX_NUMNODES; i++) {
1393 struct node_mem_mask *n = &node_masks[i];
1394
Pavel Tatashin1537b262017-02-16 15:05:58 -05001395 if ((grp->mask == n->mask) && (grp->match == n->match))
Nitin Gupta52708d62015-11-02 16:30:24 -05001396 break;
1397 }
1398 return i;
1399}
1400
Paul Gortmakerbdf2f592016-08-06 00:31:48 -04001401static void __init find_numa_latencies_for_group(struct mdesc_handle *md,
1402 u64 grp, int index)
Nitin Gupta52708d62015-11-02 16:30:24 -05001403{
1404 u64 arc;
1405
1406 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
1407 int tnode;
1408 u64 target = mdesc_arc_target(md, arc);
1409 struct mdesc_mlgroup *m = find_mlgroup(target);
1410
1411 if (!m)
1412 continue;
1413 tnode = find_best_numa_node_for_mlgroup(m);
1414 if (tnode == MAX_NUMNODES)
1415 continue;
1416 numa_latency[index][tnode] = m->latency;
1417 }
1418}
1419
David S. Miller919ee672008-04-23 05:40:25 -07001420static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
1421 int index)
1422{
1423 struct mdesc_mlgroup *candidate = NULL;
1424 u64 arc, best_latency = ~(u64)0;
1425 struct node_mem_mask *n;
1426
1427 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
1428 u64 target = mdesc_arc_target(md, arc);
1429 struct mdesc_mlgroup *m = find_mlgroup(target);
1430 if (!m)
1431 continue;
1432 if (m->latency < best_latency) {
1433 candidate = m;
1434 best_latency = m->latency;
1435 }
1436 }
1437 if (!candidate)
1438 return -ENOENT;
1439
1440 if (num_node_masks != index) {
1441 printk(KERN_ERR "Inconsistent NUMA state, "
1442 "index[%d] != num_node_masks[%d]\n",
1443 index, num_node_masks);
1444 return -EINVAL;
1445 }
1446
1447 n = &node_masks[num_node_masks++];
1448
1449 n->mask = candidate->mask;
Pavel Tatashin1537b262017-02-16 15:05:58 -05001450 n->match = candidate->match;
David S. Miller919ee672008-04-23 05:40:25 -07001451
Pavel Tatashin1537b262017-02-16 15:05:58 -05001452 numadbg("NUMA NODE[%d]: mask[%lx] match[%lx] (latency[%llx])\n",
1453 index, n->mask, n->match, candidate->latency);
David S. Miller919ee672008-04-23 05:40:25 -07001454
1455 return 0;
1456}
1457
1458static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
1459 int index)
1460{
1461 cpumask_t mask;
1462 int cpu;
1463
1464 numa_parse_mdesc_group_cpus(md, grp, &mask);
1465
KOSAKI Motohirofb1fece2011-05-16 13:38:07 -07001466 for_each_cpu(cpu, &mask)
David S. Miller919ee672008-04-23 05:40:25 -07001467 numa_cpu_lookup_table[cpu] = index;
KOSAKI Motohirofb1fece2011-05-16 13:38:07 -07001468 cpumask_copy(&numa_cpumask_lookup_table[index], &mask);
David S. Miller919ee672008-04-23 05:40:25 -07001469
1470 if (numa_debug) {
1471 printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
KOSAKI Motohirofb1fece2011-05-16 13:38:07 -07001472 for_each_cpu(cpu, &mask)
David S. Miller919ee672008-04-23 05:40:25 -07001473 printk("%d ", cpu);
1474 printk("]\n");
1475 }
1476
1477 return numa_attach_mlgroup(md, grp, index);
1478}
1479
1480static int __init numa_parse_mdesc(void)
1481{
1482 struct mdesc_handle *md = mdesc_grab();
Nitin Gupta52708d62015-11-02 16:30:24 -05001483 int i, j, err, count;
David S. Miller919ee672008-04-23 05:40:25 -07001484 u64 node;
1485
1486 node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
1487 if (node == MDESC_NODE_NULL) {
1488 mdesc_release(md);
1489 return -ENOENT;
1490 }
1491
1492 err = grab_mblocks(md);
1493 if (err < 0)
1494 goto out;
1495
1496 err = grab_mlgroups(md);
1497 if (err < 0)
1498 goto out;
1499
1500 count = 0;
1501 mdesc_for_each_node_by_name(md, node, "group") {
1502 err = numa_parse_mdesc_group(md, node, count);
1503 if (err < 0)
1504 break;
1505 count++;
1506 }
1507
Nitin Gupta52708d62015-11-02 16:30:24 -05001508 count = 0;
1509 mdesc_for_each_node_by_name(md, node, "group") {
1510 find_numa_latencies_for_group(md, node, count);
1511 count++;
1512 }
1513
1514 /* Normalize numa latency matrix according to ACPI SLIT spec. */
1515 for (i = 0; i < MAX_NUMNODES; i++) {
1516 u64 self_latency = numa_latency[i][i];
1517
1518 for (j = 0; j < MAX_NUMNODES; j++) {
1519 numa_latency[i][j] =
1520 (numa_latency[i][j] * LOCAL_DISTANCE) /
1521 self_latency;
1522 }
1523 }
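	/* Worked example (latencies hypothetical): with a raw local
	 * latency of 100 and a raw remote latency of 250, the loop above
	 * yields 100 * 10 / 100 = 10 and 250 * 10 / 100 = 25, matching
	 * the SLIT convention that a node's distance to itself is
	 * LOCAL_DISTANCE (10 in the generic headers).
	 */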
1524
David S. Miller919ee672008-04-23 05:40:25 -07001525 add_node_ranges();
1526
1527 for (i = 0; i < num_node_masks; i++) {
1528 allocate_node_data(i);
1529 node_set_online(i);
1530 }
1531
1532 err = 0;
1533out:
1534 mdesc_release(md);
1535 return err;
1536}
1537
David S. Miller072bd412008-08-18 20:36:17 -07001538static int __init numa_parse_jbus(void)
1539{
1540 unsigned long cpu, index;
1541
1542 /* NUMA node id is encoded in bits 36 and higher, and there is
1543 * a 1-to-1 mapping from CPU ID to NUMA node ID.
1544 */
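	/* Illustrative sketch (values hypothetical): under this encoding
	 * a physical address pa belongs to node n exactly when
	 *
	 *	(pa & ~((1UL << 36UL) - 1UL)) == ((u64)n << 36UL)
	 *
	 * i.e. bits [63:36] of pa carry the node id, which is the
	 * mask/match pair installed below; CPU 2, for example, owns
	 * the physical range [2UL << 36, 3UL << 36).
	 */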
1545 index = 0;
1546 for_each_present_cpu(cpu) {
1547 numa_cpu_lookup_table[cpu] = index;
KOSAKI Motohirofb1fece2011-05-16 13:38:07 -07001548 cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
David S. Miller072bd412008-08-18 20:36:17 -07001549 node_masks[index].mask = ~((1UL << 36UL) - 1UL);
Pavel Tatashin1537b262017-02-16 15:05:58 -05001550 node_masks[index].match = cpu << 36UL;
David S. Miller072bd412008-08-18 20:36:17 -07001551
1552 index++;
1553 }
1554 num_node_masks = index;
1555
1556 add_node_ranges();
1557
1558 for (index = 0; index < num_node_masks; index++) {
1559 allocate_node_data(index);
1560 node_set_online(index);
1561 }
1562
1563 return 0;
1564}
1565
David S. Miller919ee672008-04-23 05:40:25 -07001566static int __init numa_parse_sun4u(void)
1567{
David S. Miller072bd412008-08-18 20:36:17 -07001568 if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1569 unsigned long ver;
1570
1571 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
1572 if ((ver >> 32UL) == __JALAPENO_ID ||
1573 (ver >> 32UL) == __SERRANO_ID)
1574 return numa_parse_jbus();
1575 }
David S. Miller919ee672008-04-23 05:40:25 -07001576 return -1;
1577}
1578
1579static int __init bootmem_init_numa(void)
1580{
Nitin Gupta36beca62016-01-05 22:35:35 -08001581 int i, j;
David S. Miller919ee672008-04-23 05:40:25 -07001582 int err = -1;
1583
1584 numadbg("bootmem_init_numa()\n");
1585
Nitin Gupta36beca62016-01-05 22:35:35 -08001586 /* Some sane defaults for numa latency values */
1587 for (i = 0; i < MAX_NUMNODES; i++) {
1588 for (j = 0; j < MAX_NUMNODES; j++)
1589 numa_latency[i][j] = (i == j) ?
1590 LOCAL_DISTANCE : REMOTE_DISTANCE;
1591 }
1592
David S. Miller919ee672008-04-23 05:40:25 -07001593 if (numa_enabled) {
1594 if (tlb_type == hypervisor)
1595 err = numa_parse_mdesc();
1596 else
1597 err = numa_parse_sun4u();
1598 }
1599 return err;
1600}
1601
1602#else
1603
1604static int bootmem_init_numa(void)
1605{
1606 return -1;
1607}
1608
1609#endif
1610
1611static void __init bootmem_init_nonnuma(void)
1612{
Yinghai Lu95f72d12010-07-12 14:36:09 +10001613 unsigned long top_of_ram = memblock_end_of_DRAM();
1614 unsigned long total_ram = memblock_phys_mem_size();
David S. Miller919ee672008-04-23 05:40:25 -07001615
1616 numadbg("bootmem_init_nonnuma()\n");
1617
1618 printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
1619 top_of_ram, total_ram);
1620 printk(KERN_INFO "Memory hole size: %ldMB\n",
1621 (top_of_ram - total_ram) >> 20);
1622
1623 init_node_masks_nonnuma();
Stefan Agnerd7dc8992018-06-14 15:28:02 -07001624 memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
David S. Miller919ee672008-04-23 05:40:25 -07001625 allocate_node_data(0);
David S. Miller919ee672008-04-23 05:40:25 -07001626 node_set_online(0);
1627}
1628
David S. Miller919ee672008-04-23 05:40:25 -07001629static unsigned long __init bootmem_init(unsigned long phys_base)
1630{
1631 unsigned long end_pfn;
David S. Miller919ee672008-04-23 05:40:25 -07001632
Yinghai Lu95f72d12010-07-12 14:36:09 +10001633 end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001634 max_pfn = max_low_pfn = end_pfn;
David S. Millerd1112012006-03-08 02:16:07 -08001635 min_low_pfn = (phys_base >> PAGE_SHIFT);
1636
David S. Miller919ee672008-04-23 05:40:25 -07001637 if (bootmem_init_numa() < 0)
1638 bootmem_init_nonnuma();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001639
David S. Miller625d6932012-04-25 13:13:43 -07001640 /* Dump memblock with node info. */
1641 memblock_dump_all();
1642
David S. Miller919ee672008-04-23 05:40:25 -07001643 /* XXX cpu notifier XXX */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644
David S. Miller625d6932012-04-25 13:13:43 -07001645 sparse_memory_present_with_active_regions(MAX_NUMNODES);
David S. Millerd1112012006-03-08 02:16:07 -08001646 sparse_init();
1647
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648 return end_pfn;
1649}
1650
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001651static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
1652static int pall_ents __initdata;
1653
David S. Miller0dd5b7b2014-09-24 20:56:11 -07001654static unsigned long max_phys_bits = 40;
1655
1656bool kern_addr_valid(unsigned long addr)
1657{
David S. Miller0dd5b7b2014-09-24 20:56:11 -07001658 pgd_t *pgd;
1659 pud_t *pud;
1660 pmd_t *pmd;
1661 pte_t *pte;
1662
David S. Millerbb4e6e82014-09-27 11:05:21 -07001663 if ((long)addr < 0L) {
1664 unsigned long pa = __pa(addr);
1665
bob piccoadfae8a2017-03-10 14:31:19 -05001666 if ((pa >> max_phys_bits) != 0UL)
David S. Millerbb4e6e82014-09-27 11:05:21 -07001667 return false;
1668
1669 return pfn_valid(pa >> PAGE_SHIFT);
1670 }
David S. Miller0dd5b7b2014-09-24 20:56:11 -07001671
1672 if (addr >= (unsigned long) KERNBASE &&
1673 addr < (unsigned long)&_end)
1674 return true;
1675
David S. Miller0dd5b7b2014-09-24 20:56:11 -07001676 pgd = pgd_offset_k(addr);
1677 if (pgd_none(*pgd))
1678 return 0;
1679
1680 pud = pud_offset(pgd, addr);
1681 if (pud_none(*pud))
1682 return 0;
1683
1684 if (pud_large(*pud))
1685 return pfn_valid(pud_pfn(*pud));
1686
1687 pmd = pmd_offset(pud, addr);
1688 if (pmd_none(*pmd))
1689 return 0;
1690
1691 if (pmd_large(*pmd))
1692 return pfn_valid(pmd_pfn(*pmd));
1693
1694 pte = pte_offset_kernel(pmd, addr);
1695 if (pte_none(*pte))
1696 return 0;
1697
1698 return pfn_valid(pte_pfn(*pte));
1699}
1700EXPORT_SYMBOL(kern_addr_valid);
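/* In short: "negative" addresses above are linear-mapping aliases and are
 * validated purely by physical-range checks, while lower-half addresses
 * (kernel image aside) are validated by walking the kernel page tables,
 * honoring huge PUD and PMD mappings along the way.
 */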
1701
1702static unsigned long __ref kernel_map_hugepud(unsigned long vstart,
1703 unsigned long vend,
1704 pud_t *pud)
1705{
1706 const unsigned long mask16gb = (1UL << 34) - 1UL;
1707 u64 pte_val = vstart;
1708
1709 /* Each PUD is 8GB */
1710 if ((vstart & mask16gb) ||
1711 (vend - vstart <= mask16gb)) {
1712 pte_val ^= kern_linear_pte_xor[2];
1713 pud_val(*pud) = pte_val | _PAGE_PUD_HUGE;
1714
1715 return vstart + PUD_SIZE;
1716 }
1717
1718 pte_val ^= kern_linear_pte_xor[3];
1719 pte_val |= _PAGE_PUD_HUGE;
1720
1721 vend = vstart + mask16gb + 1UL;
1722 while (vstart < vend) {
1723 pud_val(*pud) = pte_val;
1724
1725 pte_val += PUD_SIZE;
1726 vstart += PUD_SIZE;
1727 pud++;
1728 }
1729 return vstart;
1730}
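/* A sketch of the fall-through case above: vend - vstart is forced to
 * mask16gb + 1UL (16GB), and with 8GB per PUD the loop writes two
 * consecutive PUD entries sharing the kern_linear_pte_xor[3] encoding.
 */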
1731
1732static bool kernel_can_map_hugepud(unsigned long vstart, unsigned long vend,
1733 bool guard)
1734{
1735 if (guard && !(vstart & ~PUD_MASK) && (vend - vstart) >= PUD_SIZE)
1736 return true;
1737
1738 return false;
1739}
1740
1741static unsigned long __ref kernel_map_hugepmd(unsigned long vstart,
1742 unsigned long vend,
1743 pmd_t *pmd)
1744{
1745 const unsigned long mask256mb = (1UL << 28) - 1UL;
1746 const unsigned long mask2gb = (1UL << 31) - 1UL;
1747 u64 pte_val = vstart;
1748
1749 /* Each PMD is 8MB */
1750 if ((vstart & mask256mb) ||
1751 (vend - vstart <= mask256mb)) {
1752 pte_val ^= kern_linear_pte_xor[0];
1753 pmd_val(*pmd) = pte_val | _PAGE_PMD_HUGE;
1754
1755 return vstart + PMD_SIZE;
1756 }
1757
1758 if ((vstart & mask2gb) ||
1759 (vend - vstart <= mask2gb)) {
1760 pte_val ^= kern_linear_pte_xor[1];
1761 pte_val |= _PAGE_PMD_HUGE;
1762 vend = vstart + mask256mb + 1UL;
1763 } else {
1764 pte_val ^= kern_linear_pte_xor[2];
1765 pte_val |= _PAGE_PMD_HUGE;
1766 vend = vstart + mask2gb + 1UL;
1767 }
1768
1769 while (vstart < vend) {
1770 pmd_val(*pmd) = pte_val;
1771
1772 pte_val += PMD_SIZE;
1773 vstart += PMD_SIZE;
1774 pmd++;
1775 }
1776
1777 return vstart;
1778}
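/* Similar arithmetic for the spans above: the kern_linear_pte_xor[1]
 * branch covers mask256mb + 1UL (256MB), i.e. 256MB / 8MB = 32
 * consecutive PMD entries, while the kern_linear_pte_xor[2] branch
 * covers 2GB, i.e. 256 entries.
 */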
1779
1780static bool kernel_can_map_hugepmd(unsigned long vstart, unsigned long vend,
1781 bool guard)
1782{
1783 if (guard && !(vstart & ~PMD_MASK) && (vend - vstart) >= PMD_SIZE)
1784 return true;
1785
1786 return false;
1787}
1788
Sam Ravnborg896aef42008-02-24 19:49:52 -08001789static unsigned long __ref kernel_map_range(unsigned long pstart,
David S. Miller0dd5b7b2014-09-24 20:56:11 -07001790 unsigned long pend, pgprot_t prot,
1791 bool use_huge)
David S. Miller56425302005-09-25 16:46:57 -07001792{
1793 unsigned long vstart = PAGE_OFFSET + pstart;
1794 unsigned long vend = PAGE_OFFSET + pend;
1795 unsigned long alloc_bytes = 0UL;
1796
1797 if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
David S. Miller13edad72005-09-29 17:58:26 -07001798 prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
David S. Miller56425302005-09-25 16:46:57 -07001799 vstart, vend);
1800 prom_halt();
1801 }
1802
1803 while (vstart < vend) {
1804 unsigned long this_end, paddr = __pa(vstart);
1805 pgd_t *pgd = pgd_offset_k(vstart);
1806 pud_t *pud;
1807 pmd_t *pmd;
1808 pte_t *pte;
1809
David S. Millerac55c762014-09-26 21:19:46 -07001810 if (pgd_none(*pgd)) {
1811 pud_t *new;
1812
Mike Rapoport4fc4a092018-10-30 15:09:03 -07001813 new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
1814 PAGE_SIZE);
David S. Millerac55c762014-09-26 21:19:46 -07001815 alloc_bytes += PAGE_SIZE;
1816 pgd_populate(&init_mm, pgd, new);
1817 }
David S. Miller56425302005-09-25 16:46:57 -07001818 pud = pud_offset(pgd, vstart);
1819 if (pud_none(*pud)) {
1820 pmd_t *new;
1821
David S. Miller0dd5b7b2014-09-24 20:56:11 -07001822 if (kernel_can_map_hugepud(vstart, vend, use_huge)) {
1823 vstart = kernel_map_hugepud(vstart, vend, pud);
1824 continue;
1825 }
Mike Rapoport4fc4a092018-10-30 15:09:03 -07001826 new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
1827 PAGE_SIZE);
David S. Miller56425302005-09-25 16:46:57 -07001828 alloc_bytes += PAGE_SIZE;
1829 pud_populate(&init_mm, pud, new);
1830 }
1831
1832 pmd = pmd_offset(pud, vstart);
David S. Miller0dd5b7b2014-09-24 20:56:11 -07001833 if (pmd_none(*pmd)) {
David S. Miller56425302005-09-25 16:46:57 -07001834 pte_t *new;
1835
David S. Miller0dd5b7b2014-09-24 20:56:11 -07001836 if (kernel_can_map_hugepmd(vstart, vend, use_huge)) {
1837 vstart = kernel_map_hugepmd(vstart, vend, pmd);
1838 continue;
1839 }
Mike Rapoport4fc4a092018-10-30 15:09:03 -07001840 new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
1841 PAGE_SIZE);
David S. Miller56425302005-09-25 16:46:57 -07001842 alloc_bytes += PAGE_SIZE;
1843 pmd_populate_kernel(&init_mm, pmd, new);
1844 }
1845
1846 pte = pte_offset_kernel(pmd, vstart);
1847 this_end = (vstart + PMD_SIZE) & PMD_MASK;
1848 if (this_end > vend)
1849 this_end = vend;
1850
1851 while (vstart < this_end) {
1852 pte_val(*pte) = (paddr | pgprot_val(prot));
1853
1854 vstart += PAGE_SIZE;
1855 paddr += PAGE_SIZE;
1856 pte++;
1857 }
1858 }
1859
1860 return alloc_bytes;
1861}
1862
David S. Miller0dd5b7b2014-09-24 20:56:11 -07001863static void __init flush_all_kernel_tsbs(void)
1864{
1865 int i;
1866
1867 for (i = 0; i < KERNEL_TSB_NENTRIES; i++) {
1868 struct tsb *ent = &swapper_tsb[i];
1869
1870 ent->tag = (1UL << TSB_TAG_INVALID_BIT);
1871 }
1872#ifndef CONFIG_DEBUG_PAGEALLOC
1873 for (i = 0; i < KERNEL_TSB4M_NENTRIES; i++) {
1874 struct tsb *ent = &swapper_4m_tsb[i];
1875
1876 ent->tag = (1UL << TSB_TAG_INVALID_BIT);
1877 }
1878#endif
1879}
1880
David S. Miller56425302005-09-25 16:46:57 -07001881extern unsigned int kvmap_linear_patch[1];
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001882
David S. Miller8f3614532007-12-13 06:13:38 -08001883static void __init kernel_physical_mapping_init(void)
1884{
David S. Miller8f3614532007-12-13 06:13:38 -08001885 unsigned long i, mem_alloced = 0UL;
David S. Miller0dd5b7b2014-09-24 20:56:11 -07001886 bool use_huge = true;
David S. Miller8f3614532007-12-13 06:13:38 -08001887
David S. Miller0dd5b7b2014-09-24 20:56:11 -07001888#ifdef CONFIG_DEBUG_PAGEALLOC
1889 use_huge = false;
1890#endif
David S. Miller8f3614532007-12-13 06:13:38 -08001891 for (i = 0; i < pall_ents; i++) {
1892 unsigned long phys_start, phys_end;
1893
1894 phys_start = pall[i].phys_addr;
1895 phys_end = phys_start + pall[i].reg_size;
1896
David S. Miller56425302005-09-25 16:46:57 -07001897 mem_alloced += kernel_map_range(phys_start, phys_end,
David S. Miller0dd5b7b2014-09-24 20:56:11 -07001898 PAGE_KERNEL, use_huge);
David S. Miller56425302005-09-25 16:46:57 -07001899 }
1900
1901 printk("Allocated %ld bytes for kernel page tables.\n",
1902 mem_alloced);
1903
1904 kvmap_linear_patch[0] = 0x01000000; /* nop */
1905 flushi(&kvmap_linear_patch[0]);
1906
David S. Miller0dd5b7b2014-09-24 20:56:11 -07001907 flush_all_kernel_tsbs();
1908
David S. Miller56425302005-09-25 16:46:57 -07001909 __flush_tlb_all();
1910}
1911
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001912#ifdef CONFIG_DEBUG_PAGEALLOC
Joonsoo Kim031bc572014-12-12 16:55:52 -08001913void __kernel_map_pages(struct page *page, int numpages, int enable)
David S. Miller56425302005-09-25 16:46:57 -07001914{
1915 unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
1916 unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
1917
1918 kernel_map_range(phys_start, phys_end,
David S. Miller0dd5b7b2014-09-24 20:56:11 -07001919 (enable ? PAGE_KERNEL : __pgprot(0)), false);
David S. Miller56425302005-09-25 16:46:57 -07001920
David S. Miller74bf4312006-01-31 18:29:18 -08001921 flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
1922 PAGE_OFFSET + phys_end);
1923
David S. Miller56425302005-09-25 16:46:57 -07001924	/* We should perform an IPI and flush all TLBs,
1925	 * but that can deadlock, so flush only the current cpu.
1926 */
1927 __flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
1928 PAGE_OFFSET + phys_end);
1929}
1930#endif
1931
David S. Miller10147572005-09-28 21:46:43 -07001932unsigned long __init find_ecache_flush_span(unsigned long size)
1933{
David S. Miller13edad72005-09-29 17:58:26 -07001934 int i;
David S. Miller10147572005-09-28 21:46:43 -07001935
David S. Miller13edad72005-09-29 17:58:26 -07001936 for (i = 0; i < pavail_ents; i++) {
1937 if (pavail[i].reg_size >= size)
1938 return pavail[i].phys_addr;
David S. Miller10147572005-09-28 21:46:43 -07001939 }
1940
1941 return ~0UL;
1942}
1943
David S. Millerb2d43832013-09-20 21:50:41 -07001944unsigned long PAGE_OFFSET;
1945EXPORT_SYMBOL(PAGE_OFFSET);
1946
David S. Millerbb4e6e82014-09-27 11:05:21 -07001947unsigned long VMALLOC_END = 0x0000010000000000UL;
1948EXPORT_SYMBOL(VMALLOC_END);
1949
David S. Miller4397bed2014-09-26 21:58:33 -07001950unsigned long sparc64_va_hole_top = 0xfffff80000000000UL;
1951unsigned long sparc64_va_hole_bottom = 0x0000080000000000UL;
1952
David S. Millerb2d43832013-09-20 21:50:41 -07001953static void __init setup_page_offset(void)
1954{
David S. Millerb2d43832013-09-20 21:50:41 -07001955 if (tlb_type == cheetah || tlb_type == cheetah_plus) {
David S. Miller4397bed2014-09-26 21:58:33 -07001956 /* Cheetah/Panther support a full 64-bit virtual
1957 * address, so we can use all that our page tables
1958 * support.
1959 */
1960 sparc64_va_hole_top = 0xfff0000000000000UL;
1961 sparc64_va_hole_bottom = 0x0010000000000000UL;
1962
David S. Millerb2d43832013-09-20 21:50:41 -07001963 max_phys_bits = 42;
1964 } else if (tlb_type == hypervisor) {
1965 switch (sun4v_chip_type) {
1966 case SUN4V_CHIP_NIAGARA1:
1967 case SUN4V_CHIP_NIAGARA2:
David S. Miller4397bed2014-09-26 21:58:33 -07001968 /* T1 and T2 support 48-bit virtual addresses. */
1969 sparc64_va_hole_top = 0xffff800000000000UL;
1970 sparc64_va_hole_bottom = 0x0000800000000000UL;
1971
David S. Millerb2d43832013-09-20 21:50:41 -07001972 max_phys_bits = 39;
1973 break;
1974 case SUN4V_CHIP_NIAGARA3:
David S. Miller4397bed2014-09-26 21:58:33 -07001975 /* T3 supports 48-bit virtual addresses. */
1976 sparc64_va_hole_top = 0xffff800000000000UL;
1977 sparc64_va_hole_bottom = 0x0000800000000000UL;
1978
David S. Millerb2d43832013-09-20 21:50:41 -07001979 max_phys_bits = 43;
1980 break;
1981 case SUN4V_CHIP_NIAGARA4:
1982 case SUN4V_CHIP_NIAGARA5:
1983 case SUN4V_CHIP_SPARC64X:
David S. Miller7c0fa0f2014-09-24 21:49:29 -07001984 case SUN4V_CHIP_SPARC_M6:
David S. Miller4397bed2014-09-26 21:58:33 -07001985 /* T4 and later support 52-bit virtual addresses. */
1986 sparc64_va_hole_top = 0xfff8000000000000UL;
1987 sparc64_va_hole_bottom = 0x0008000000000000UL;
David S. Millerb2d43832013-09-20 21:50:41 -07001988 max_phys_bits = 47;
1989 break;
David S. Miller7c0fa0f2014-09-24 21:49:29 -07001990 case SUN4V_CHIP_SPARC_M7:
Khalid Azizc5b8b5b2016-04-19 11:12:54 -06001991 case SUN4V_CHIP_SPARC_SN:
David S. Miller7c0fa0f2014-09-24 21:49:29 -07001992 /* M7 and later support 52-bit virtual addresses. */
1993 sparc64_va_hole_top = 0xfff8000000000000UL;
1994 sparc64_va_hole_bottom = 0x0008000000000000UL;
1995 max_phys_bits = 49;
1996 break;
Vijay Kumarfdaccf72017-07-28 19:29:32 -06001997 case SUN4V_CHIP_SPARC_M8:
1998 default:
1999 /* M8 and later support 54-bit virtual addresses.
2000			 * However, we restrict M8 and above to 53 VA bits,
2001			 * as a 4-level page table cannot support more than
2002			 * 53 VA bits.
2003 */
2004 sparc64_va_hole_top = 0xfff0000000000000UL;
2005 sparc64_va_hole_bottom = 0x0010000000000000UL;
2006 max_phys_bits = 51;
2007 break;
David S. Millerb2d43832013-09-20 21:50:41 -07002008 }
2009 }
2010
2011 if (max_phys_bits > MAX_PHYS_ADDRESS_BITS) {
2012 prom_printf("MAX_PHYS_ADDRESS_BITS is too small, need %lu\n",
2013 max_phys_bits);
2014 prom_halt();
2015 }
2016
David S. Millerbb4e6e82014-09-27 11:05:21 -07002017 PAGE_OFFSET = sparc64_va_hole_top;
2018 VMALLOC_END = ((sparc64_va_hole_bottom >> 1) +
2019 (sparc64_va_hole_bottom >> 2));
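	/* Equivalently VMALLOC_END = 3/4 * sparc64_va_hole_bottom; e.g. a
	 * hole bottom of 0x0008000000000000 gives 0x0006000000000000.
	 */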
David S. Millerb2d43832013-09-20 21:50:41 -07002020
David S. Millerbb4e6e82014-09-27 11:05:21 -07002021 pr_info("MM: PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n",
David S. Millerb2d43832013-09-20 21:50:41 -07002022 PAGE_OFFSET, max_phys_bits);
David S. Millerbb4e6e82014-09-27 11:05:21 -07002023 pr_info("MM: VMALLOC [0x%016lx --> 0x%016lx]\n",
2024 VMALLOC_START, VMALLOC_END);
2025 pr_info("MM: VMEMMAP [0x%016lx --> 0x%016lx]\n",
2026 VMEMMAP_BASE, VMEMMAP_BASE << 1);
David S. Millerb2d43832013-09-20 21:50:41 -07002027}
2028
David S. Miller517af332006-02-01 15:55:21 -08002029static void __init tsb_phys_patch(void)
2030{
David S. Millerd257d5d2006-02-06 23:44:37 -08002031 struct tsb_ldquad_phys_patch_entry *pquad;
David S. Miller517af332006-02-01 15:55:21 -08002032 struct tsb_phys_patch_entry *p;
2033
David S. Millerd257d5d2006-02-06 23:44:37 -08002034 pquad = &__tsb_ldquad_phys_patch;
2035 while (pquad < &__tsb_ldquad_phys_patch_end) {
2036 unsigned long addr = pquad->addr;
2037
2038 if (tlb_type == hypervisor)
2039 *(unsigned int *) addr = pquad->sun4v_insn;
2040 else
2041 *(unsigned int *) addr = pquad->sun4u_insn;
2042 wmb();
2043 __asm__ __volatile__("flush %0"
2044 : /* no outputs */
2045 : "r" (addr));
2046
2047 pquad++;
2048 }
2049
David S. Miller517af332006-02-01 15:55:21 -08002050 p = &__tsb_phys_patch;
2051 while (p < &__tsb_phys_patch_end) {
2052 unsigned long addr = p->addr;
2053
2054 *(unsigned int *) addr = p->insn;
2055 wmb();
2056 __asm__ __volatile__("flush %0"
2057 : /* no outputs */
2058 : "r" (addr));
2059
2060 p++;
2061 }
2062}
2063
David S. Miller490384e2006-02-11 14:41:18 -08002064/* Don't mark as init, we give this to the Hypervisor. */
David S. Millerd1acb422007-03-16 17:20:28 -07002065#ifndef CONFIG_DEBUG_PAGEALLOC
2066#define NUM_KTSB_DESCR 2
2067#else
2068#define NUM_KTSB_DESCR 1
2069#endif
2070static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
David S. Miller490384e2006-02-11 14:41:18 -08002071
David S. Miller8c82dc02014-09-17 10:14:56 -07002072/* The swapper TSBs are loaded with a base sequence of:
2073 *
2074 * sethi %uhi(SYMBOL), REG1
2075 * sethi %hi(SYMBOL), REG2
2076 * or REG1, %ulo(SYMBOL), REG1
2077 * or REG2, %lo(SYMBOL), REG2
2078 * sllx REG1, 32, REG1
2079 * or REG1, REG2, REG1
2080 *
2081 * When we use physical addressing for the TSB accesses, we patch the
2082 * first four instructions in the above sequence.
2083 */
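/* Worked example (pa value hypothetical): for pa = 0x000001234567a000,
 * high_bits = 0x00000123 and low_bits = 0x4567a000. The patcher below
 * rewrites the two sethi immediates with bits [31:10] of each half
 * (0x123 >> 10 == 0x0, 0x4567a000 >> 10 == 0x1159e8) and the two or
 * immediates with bits [9:0] (0x123 and 0x0); the unpatched sllx/or
 * tail then reassembles the full 64-bit physical base at runtime.
 */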
2084
David S. Miller9076d0e2011-08-05 00:53:57 -07002085static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
2086{
David S. Miller8c82dc02014-09-17 10:14:56 -07002087 unsigned long high_bits, low_bits;
2088
2089 high_bits = (pa >> 32) & 0xffffffff;
2090 low_bits = (pa >> 0) & 0xffffffff;
David S. Miller9076d0e2011-08-05 00:53:57 -07002091
2092 while (start < end) {
2093 unsigned int *ia = (unsigned int *)(unsigned long)*start;
2094
David S. Miller8c82dc02014-09-17 10:14:56 -07002095 ia[0] = (ia[0] & ~0x3fffff) | (high_bits >> 10);
David S. Miller9076d0e2011-08-05 00:53:57 -07002096 __asm__ __volatile__("flush %0" : : "r" (ia));
2097
David S. Miller8c82dc02014-09-17 10:14:56 -07002098 ia[1] = (ia[1] & ~0x3fffff) | (low_bits >> 10);
David S. Miller9076d0e2011-08-05 00:53:57 -07002099 __asm__ __volatile__("flush %0" : : "r" (ia + 1));
2100
David S. Miller8c82dc02014-09-17 10:14:56 -07002101 ia[2] = (ia[2] & ~0x1fff) | (high_bits & 0x3ff);
2102 __asm__ __volatile__("flush %0" : : "r" (ia + 2));
2103
2104 ia[3] = (ia[3] & ~0x1fff) | (low_bits & 0x3ff);
2105 __asm__ __volatile__("flush %0" : : "r" (ia + 3));
2106
David S. Miller9076d0e2011-08-05 00:53:57 -07002107 start++;
2108 }
2109}
2110
2111static void ktsb_phys_patch(void)
2112{
2113 extern unsigned int __swapper_tsb_phys_patch;
2114 extern unsigned int __swapper_tsb_phys_patch_end;
David S. Miller9076d0e2011-08-05 00:53:57 -07002115 unsigned long ktsb_pa;
2116
2117 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
2118 patch_one_ktsb_phys(&__swapper_tsb_phys_patch,
2119 &__swapper_tsb_phys_patch_end, ktsb_pa);
2120#ifndef CONFIG_DEBUG_PAGEALLOC
David S. Miller0785a8e2011-08-06 05:26:35 -07002121 {
2122 extern unsigned int __swapper_4m_tsb_phys_patch;
2123 extern unsigned int __swapper_4m_tsb_phys_patch_end;
David S. Miller9076d0e2011-08-05 00:53:57 -07002124 ktsb_pa = (kern_base +
2125 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
2126 patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch,
2127 &__swapper_4m_tsb_phys_patch_end, ktsb_pa);
David S. Miller0785a8e2011-08-06 05:26:35 -07002128 }
David S. Miller9076d0e2011-08-05 00:53:57 -07002129#endif
2130}
2131
David S. Miller490384e2006-02-11 14:41:18 -08002132static void __init sun4v_ktsb_init(void)
2133{
2134 unsigned long ktsb_pa;
2135
David S. Millerd7744a02006-02-21 22:31:11 -08002136 /* First KTSB for PAGE_SIZE mappings. */
David S. Miller490384e2006-02-11 14:41:18 -08002137 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
2138
2139 switch (PAGE_SIZE) {
2140 case 8 * 1024:
2141 default:
2142 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
2143 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
2144 break;
2145
2146 case 64 * 1024:
2147 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
2148 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
2149 break;
2150
2151 case 512 * 1024:
2152 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
2153 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
2154 break;
2155
2156 case 4 * 1024 * 1024:
2157 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
2158 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
2159 break;
Joe Perches6cb79b32011-06-03 14:45:23 +00002160 }
David S. Miller490384e2006-02-11 14:41:18 -08002161
David S. Miller3f19a842006-02-17 12:03:20 -08002162 ktsb_descr[0].assoc = 1;
David S. Miller490384e2006-02-11 14:41:18 -08002163 ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
2164 ktsb_descr[0].ctx_idx = 0;
2165 ktsb_descr[0].tsb_base = ktsb_pa;
2166 ktsb_descr[0].resv = 0;
2167
David S. Millerd1acb422007-03-16 17:20:28 -07002168#ifndef CONFIG_DEBUG_PAGEALLOC
David S. Miller4f93d212012-09-06 18:13:58 -07002169 /* Second KTSB for 4MB/256MB/2GB/16GB mappings. */
David S. Millerd7744a02006-02-21 22:31:11 -08002170 ktsb_pa = (kern_base +
2171 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
2172
2173 ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
David S. Millerc69ad0a2012-09-06 20:35:36 -07002174 ktsb_descr[1].pgsz_mask = ((HV_PGSZ_MASK_4MB |
2175 HV_PGSZ_MASK_256MB |
2176 HV_PGSZ_MASK_2GB |
2177 HV_PGSZ_MASK_16GB) &
2178 cpu_pgsz_mask);
David S. Millerd7744a02006-02-21 22:31:11 -08002179 ktsb_descr[1].assoc = 1;
2180 ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
2181 ktsb_descr[1].ctx_idx = 0;
2182 ktsb_descr[1].tsb_base = ktsb_pa;
2183 ktsb_descr[1].resv = 0;
David S. Millerd1acb422007-03-16 17:20:28 -07002184#endif
David S. Miller490384e2006-02-11 14:41:18 -08002185}
2186
Paul Gortmaker2066aad2013-06-17 15:43:14 -04002187void sun4v_ktsb_register(void)
David S. Miller490384e2006-02-11 14:41:18 -08002188{
David S. Miller7db35f32007-05-29 02:22:14 -07002189 unsigned long pa, ret;
David S. Miller490384e2006-02-11 14:41:18 -08002190
2191 pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);
2192
David S. Miller7db35f32007-05-29 02:22:14 -07002193 ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
2194 if (ret != 0) {
2195 prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
2196 "errors with %lx\n", pa, ret);
2197 prom_halt();
2198 }
David S. Miller490384e2006-02-11 14:41:18 -08002199}
2200
David S. Millerc69ad0a2012-09-06 20:35:36 -07002201static void __init sun4u_linear_pte_xor_finalize(void)
2202{
2203#ifndef CONFIG_DEBUG_PAGEALLOC
2204 /* This is where we would add Panther support for
2205 * 32MB and 256MB pages.
2206 */
2207#endif
2208}
2209
2210static void __init sun4v_linear_pte_xor_finalize(void)
2211{
Khalid Aziz494e5b62015-05-27 10:00:46 -06002212 unsigned long pagecv_flag;
2213
2214	/* Bit 9 of the TTE is no longer the CV bit on the M7 processor;
2215	 * there it enables MCD error reporting instead. Do not set bit 9 on M7.
2216 */
2217 switch (sun4v_chip_type) {
2218 case SUN4V_CHIP_SPARC_M7:
Allen Pais7d484ac2017-07-24 11:44:18 +05302219 case SUN4V_CHIP_SPARC_M8:
Khalid Azizc5b8b5b2016-04-19 11:12:54 -06002220 case SUN4V_CHIP_SPARC_SN:
Khalid Aziz494e5b62015-05-27 10:00:46 -06002221 pagecv_flag = 0x00;
2222 break;
2223 default:
2224 pagecv_flag = _PAGE_CV_4V;
2225 break;
2226 }
David S. Millerc69ad0a2012-09-06 20:35:36 -07002227#ifndef CONFIG_DEBUG_PAGEALLOC
2228 if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) {
2229 kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
David S. Miller922631b2013-09-18 12:00:00 -07002230 PAGE_OFFSET;
Khalid Aziz494e5b62015-05-27 10:00:46 -06002231 kern_linear_pte_xor[1] |= (_PAGE_CP_4V | pagecv_flag |
David S. Millerc69ad0a2012-09-06 20:35:36 -07002232 _PAGE_P_4V | _PAGE_W_4V);
2233 } else {
2234 kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
2235 }
2236
2237 if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) {
2238 kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^
David S. Miller922631b2013-09-18 12:00:00 -07002239 PAGE_OFFSET;
Khalid Aziz494e5b62015-05-27 10:00:46 -06002240 kern_linear_pte_xor[2] |= (_PAGE_CP_4V | pagecv_flag |
David S. Millerc69ad0a2012-09-06 20:35:36 -07002241 _PAGE_P_4V | _PAGE_W_4V);
2242 } else {
2243 kern_linear_pte_xor[2] = kern_linear_pte_xor[1];
2244 }
2245
2246 if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) {
2247 kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^
David S. Miller922631b2013-09-18 12:00:00 -07002248 PAGE_OFFSET;
Khalid Aziz494e5b62015-05-27 10:00:46 -06002249 kern_linear_pte_xor[3] |= (_PAGE_CP_4V | pagecv_flag |
David S. Millerc69ad0a2012-09-06 20:35:36 -07002250 _PAGE_P_4V | _PAGE_W_4V);
2251 } else {
2252 kern_linear_pte_xor[3] = kern_linear_pte_xor[2];
2253 }
2254#endif
2255}
2256
Linus Torvalds1da177e2005-04-16 15:20:36 -07002257/* paging_init() sets up the page tables */
2258
Linus Torvalds1da177e2005-04-16 15:20:36 -07002259static unsigned long last_valid_pfn;
David S. Millerac55c762014-09-26 21:19:46 -07002260
David S. Millerc4bce902006-02-11 21:57:54 -08002261static void sun4u_pgprot_init(void);
2262static void sun4v_pgprot_init(void);
2263
bob picco7c21d532014-09-16 09:29:54 -04002264static phys_addr_t __init available_memory(void)
2265{
2266 phys_addr_t available = 0ULL;
2267 phys_addr_t pa_start, pa_end;
2268 u64 i;
2269
Tony Luckfc6daaf2015-06-24 16:58:09 -07002270 for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &pa_start,
2271 &pa_end, NULL)
bob picco7c21d532014-09-16 09:29:54 -04002272 available = available + (pa_end - pa_start);
2273
2274 return available;
2275}
2276
Khalid Aziz494e5b62015-05-27 10:00:46 -06002277#define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U)
2278#define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V)
2279#define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
2280#define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
2281#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
2282#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
2283
bob picco7c21d532014-09-16 09:29:54 -04002284/* We need to exclude reserved regions; this exclusion includes vmlinux
2285 * and the initrd. To be more precise, the initrd size could be used to
2286 * compute a new lower limit, because the initrd is freed later in boot.
2287 */
2288static void __init reduce_memory(phys_addr_t limit_ram)
2289{
2290 phys_addr_t avail_ram = available_memory();
2291 phys_addr_t pa_start, pa_end;
2292 u64 i;
2293
2294 if (limit_ram >= avail_ram)
2295 return;
2296
Tony Luckfc6daaf2015-06-24 16:58:09 -07002297 for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &pa_start,
2298 &pa_end, NULL) {
bob picco7c21d532014-09-16 09:29:54 -04002299 phys_addr_t region_size = pa_end - pa_start;
2300 phys_addr_t clip_start = pa_start;
2301
2302 avail_ram = avail_ram - region_size;
2303 /* Are we consuming too much? */
2304 if (avail_ram < limit_ram) {
2305 phys_addr_t give_back = limit_ram - avail_ram;
2306
2307 region_size = region_size - give_back;
2308 clip_start = clip_start + give_back;
2309 }
2310
2311 memblock_remove(clip_start, region_size);
2312
2313 if (avail_ram <= limit_ram)
2314 break;
2315 i = 0UL;
2316 }
2317}
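/* Worked example (sizes hypothetical): with two free 2GB regions and
 * limit_ram = 3GB, the first iteration drops avail_ram to 2GB, computes
 * give_back = 1GB, and memblock_remove()s the final 1GB of that region,
 * leaving exactly 3GB before the loop breaks.
 */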
2318
Linus Torvalds1da177e2005-04-16 15:20:36 -07002319void __init paging_init(void)
2320{
David S. Miller919ee672008-04-23 05:40:25 -07002321 unsigned long end_pfn, shift, phys_base;
David S. Miller0836a0e2005-09-28 21:38:08 -07002322 unsigned long real_end, i;
2323
David S. Millerb2d43832013-09-20 21:50:41 -07002324 setup_page_offset();
2325
David S. Miller22adb352007-05-26 01:14:43 -07002326	/* These build-time checks make sure that the dcache_dirty_cpu()
2327 * page->flags usage will work.
2328 *
2329 * When a page gets marked as dcache-dirty, we store the
2330 * cpu number starting at bit 32 in the page->flags. Also,
2331 * functions like clear_dcache_dirty_cpu use the cpu mask
2332 * in 13-bit signed-immediate instruction fields.
2333 */
Christoph Lameter9223b4192008-04-28 02:12:48 -07002334
2335 /*
2336 * Page flags must not reach into upper 32 bits that are used
2337 * for the cpu number
2338 */
2339 BUILD_BUG_ON(NR_PAGEFLAGS > 32);
2340
2341 /*
2342 * The bit fields placed in the high range must not reach below
2343 * the 32 bit boundary. Otherwise we cannot place the cpu field
2344 * at the 32 bit boundary.
2345 */
David S. Miller22adb352007-05-26 01:14:43 -07002346 BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
Christoph Lameter9223b4192008-04-28 02:12:48 -07002347 ilog2(roundup_pow_of_two(NR_CPUS)) > 32);
2348
David S. Miller22adb352007-05-26 01:14:43 -07002349 BUILD_BUG_ON(NR_CPUS > 4096);
2350
David S. Miller0eef3312014-05-03 22:52:50 -07002351 kern_base = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
David S. Miller481295f2006-02-07 21:51:08 -08002352 kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
2353
David S. Millerd7744a02006-02-21 22:31:11 -08002354 /* Invalidate both kernel TSBs. */
David S. Miller8b234272006-02-17 18:01:02 -08002355 memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
David S. Millerd1acb422007-03-16 17:20:28 -07002356#ifndef CONFIG_DEBUG_PAGEALLOC
David S. Millerd7744a02006-02-21 22:31:11 -08002357 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
David S. Millerd1acb422007-03-16 17:20:28 -07002358#endif
David S. Miller8b234272006-02-17 18:01:02 -08002359
Khalid Aziz494e5b62015-05-27 10:00:46 -06002360	/* The TTE.cv bit on sparc v9 occupies the same position as the
2361	 * TTE.mcde bit on the M7 processor; this is a conflicting usage
2362	 * of the same bit. Enabling TTE.cv on M7 would turn on Memory
2363	 * Corruption Detection errors on all pages, which would cause
2364	 * problems later because the kernel does not run with MCD
2365	 * enabled and never takes the remaining steps needed to fully
2366	 * configure memory corruption detection. We must therefore
2367	 * ensure TTE.mcde is not set on the M7 processor. Compute the
2368	 * cacheability flag for later use with this in mind.
2369 */
2370 switch (sun4v_chip_type) {
2371 case SUN4V_CHIP_SPARC_M7:
Allen Pais7d484ac2017-07-24 11:44:18 +05302372 case SUN4V_CHIP_SPARC_M8:
Khalid Azizc5b8b5b2016-04-19 11:12:54 -06002373 case SUN4V_CHIP_SPARC_SN:
Khalid Aziz494e5b62015-05-27 10:00:46 -06002374 page_cache4v_flag = _PAGE_CP_4V;
2375 break;
2376 default:
2377 page_cache4v_flag = _PAGE_CACHE_4V;
2378 break;
2379 }
2380
David S. Millerc4bce902006-02-11 21:57:54 -08002381 if (tlb_type == hypervisor)
2382 sun4v_pgprot_init();
2383 else
2384 sun4u_pgprot_init();
2385
David S. Millerd257d5d2006-02-06 23:44:37 -08002386 if (tlb_type == cheetah_plus ||
David S. Miller9076d0e2011-08-05 00:53:57 -07002387 tlb_type == hypervisor) {
David S. Miller517af332006-02-01 15:55:21 -08002388 tsb_phys_patch();
David S. Miller9076d0e2011-08-05 00:53:57 -07002389 ktsb_phys_patch();
2390 }
David S. Miller517af332006-02-01 15:55:21 -08002391
David S. Millerc69ad0a2012-09-06 20:35:36 -07002392 if (tlb_type == hypervisor)
David S. Millerd257d5d2006-02-06 23:44:37 -08002393 sun4v_patch_tlb_handlers();
2394
David S. Millera94a1722008-05-11 21:04:48 -07002395 /* Find available physical memory...
2396 *
2397 * Read it twice in order to work around a bug in openfirmware.
2398 * The call to grab this table itself can cause openfirmware to
2399 * allocate memory, which in turn can take away some space from
2400 * the list of available memory. Reading it twice makes sure
2401 * we really do get the final value.
2402 */
2403 read_obp_translations();
2404 read_obp_memory("reg", &pall[0], &pall_ents);
2405 read_obp_memory("available", &pavail[0], &pavail_ents);
David S. Miller13edad72005-09-29 17:58:26 -07002406 read_obp_memory("available", &pavail[0], &pavail_ents);
David S. Miller0836a0e2005-09-28 21:38:08 -07002407
2408 phys_base = 0xffffffffffffffffUL;
David S. Miller3b2a7e22008-02-13 18:13:20 -08002409 for (i = 0; i < pavail_ents; i++) {
David S. Miller13edad72005-09-29 17:58:26 -07002410 phys_base = min(phys_base, pavail[i].phys_addr);
Yinghai Lu95f72d12010-07-12 14:36:09 +10002411 memblock_add(pavail[i].phys_addr, pavail[i].reg_size);
David S. Miller3b2a7e22008-02-13 18:13:20 -08002412 }
2413
Yinghai Lu95f72d12010-07-12 14:36:09 +10002414 memblock_reserve(kern_base, kern_size);
David S. Miller0836a0e2005-09-28 21:38:08 -07002415
David S. Miller4e82c9a2008-02-13 18:00:03 -08002416 find_ramdisk(phys_base);
2417
bob picco7c21d532014-09-16 09:29:54 -04002418 if (cmdline_memory_size)
2419 reduce_memory(cmdline_memory_size);
David S. Miller25b0c652008-02-13 18:20:14 -08002420
Tejun Heo1aadc052011-12-08 10:22:08 -08002421 memblock_allow_resize();
Yinghai Lu95f72d12010-07-12 14:36:09 +10002422 memblock_dump_all();
David S. Miller3b2a7e22008-02-13 18:13:20 -08002423
Linus Torvalds1da177e2005-04-16 15:20:36 -07002424 set_bit(0, mmu_context_bmap);
2425
David S. Miller2bdb3cb2005-09-22 01:08:57 -07002426 shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
2427
Linus Torvalds1da177e2005-04-16 15:20:36 -07002428 real_end = (unsigned long)_end;
David S. Miller0eef3312014-05-03 22:52:50 -07002429 num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB);
David S. Miller64658742008-03-21 17:01:38 -07002430 printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
2431 num_kernel_image_mappings);
David S. Miller2bdb3cb2005-09-22 01:08:57 -07002432
2433 /* Set kernel pgd to upper alias so physical page computations
Linus Torvalds1da177e2005-04-16 15:20:36 -07002434 * work.
2435 */
2436 init_mm.pgd += ((shift) / (sizeof(pgd_t)));
2437
David S. Millerd195b712014-09-27 21:30:57 -07002438 memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
David S. Miller0dd5b7b2014-09-24 20:56:11 -07002439
David S. Millerc9c10832005-10-12 12:22:46 -07002440 inherit_prom_mappings();
David S. Miller5085b4a2005-09-22 00:45:41 -07002441
David S. Millera8b900d2006-01-31 18:33:37 -08002442 /* Ok, we can use our TLB miss and window trap handlers safely. */
2443 setup_tba();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002444
David S. Millerc9c10832005-10-12 12:22:46 -07002445 __flush_tlb_all();
David S. Miller9ad98c52005-10-05 15:12:00 -07002446
David S. Millerad072002008-02-13 19:21:51 -08002447 prom_build_devicetree();
David S. Millerb696fdc2009-05-26 22:37:25 -07002448 of_populate_present_mask();
David S. Millerb99c6eb2009-06-18 01:44:19 -07002449#ifndef CONFIG_SMP
2450 of_fill_in_cpu_data();
2451#endif
David S. Millerad072002008-02-13 19:21:51 -08002452
David S. Miller890db402009-04-01 03:13:15 -07002453 if (tlb_type == hypervisor) {
David S. Miller4a283332008-02-13 19:22:23 -08002454 sun4v_mdesc_init();
Stephen Rothwell6ac5c612009-06-15 03:06:18 -07002455 mdesc_populate_present_mask(cpu_all_mask);
David S. Millerb99c6eb2009-06-18 01:44:19 -07002456#ifndef CONFIG_SMP
2457 mdesc_fill_in_cpu_data(cpu_all_mask);
2458#endif
David S. Millerce33fdc2012-09-06 19:01:25 -07002459 mdesc_get_page_sizes(cpu_all_mask, &cpu_pgsz_mask);
David S. Millerc69ad0a2012-09-06 20:35:36 -07002460
2461 sun4v_linear_pte_xor_finalize();
2462
2463 sun4v_ktsb_init();
2464 sun4v_ktsb_register();
David S. Millerce33fdc2012-09-06 19:01:25 -07002465 } else {
2466 unsigned long impl, ver;
2467
2468 cpu_pgsz_mask = (HV_PGSZ_MASK_8K | HV_PGSZ_MASK_64K |
2469 HV_PGSZ_MASK_512K | HV_PGSZ_MASK_4MB);
2470
2471 __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
2472 impl = ((ver >> 32) & 0xffff);
2473 if (impl == PANTHER_IMPL)
2474 cpu_pgsz_mask |= (HV_PGSZ_MASK_32MB |
2475 HV_PGSZ_MASK_256MB);
David S. Millerc69ad0a2012-09-06 20:35:36 -07002476
2477 sun4u_linear_pte_xor_finalize();
David S. Miller890db402009-04-01 03:13:15 -07002478 }
David S. Miller4a283332008-02-13 19:22:23 -08002479
David S. Millerc69ad0a2012-09-06 20:35:36 -07002480 /* Flush the TLBs and the 4M TSB so that the updated linear
2481 * pte XOR settings are realized for all mappings.
2482 */
2483 __flush_tlb_all();
2484#ifndef CONFIG_DEBUG_PAGEALLOC
2485 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
2486#endif
2487 __flush_tlb_all();
2488
David S. Miller2bdb3cb2005-09-22 01:08:57 -07002489 /* Setup bootmem... */
David S. Miller919ee672008-04-23 05:40:25 -07002490 last_valid_pfn = end_pfn = bootmem_init(phys_base);
David S. Millerd1112012006-03-08 02:16:07 -08002491
David S. Miller56425302005-09-25 16:46:57 -07002492 kernel_physical_mapping_init();
David S. Miller56425302005-09-25 16:46:57 -07002493
Linus Torvalds1da177e2005-04-16 15:20:36 -07002494 {
David S. Miller919ee672008-04-23 05:40:25 -07002495 unsigned long max_zone_pfns[MAX_NR_ZONES];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002496
David S. Miller919ee672008-04-23 05:40:25 -07002497 memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002498
David S. Miller919ee672008-04-23 05:40:25 -07002499 max_zone_pfns[ZONE_NORMAL] = end_pfn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002500
David S. Miller919ee672008-04-23 05:40:25 -07002501 free_area_init_nodes(max_zone_pfns);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002502 }
2503
David S. Miller3c62a2d2008-02-17 23:22:50 -08002504 printk("Booting Linux...\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002505}
2506
Greg Kroah-Hartman7c9503b2012-12-21 14:03:26 -08002507int page_in_phys_avail(unsigned long paddr)
David S. Miller919ee672008-04-23 05:40:25 -07002508{
2509 int i;
2510
2511 paddr &= PAGE_MASK;
2512
2513 for (i = 0; i < pavail_ents; i++) {
2514 unsigned long start, end;
2515
2516 start = pavail[i].phys_addr;
2517 end = start + pavail[i].reg_size;
2518
2519 if (paddr >= start && paddr < end)
2520 return 1;
2521 }
2522 if (paddr >= kern_base && paddr < (kern_base + kern_size))
2523 return 1;
2524#ifdef CONFIG_BLK_DEV_INITRD
2525 if (paddr >= __pa(initrd_start) &&
2526 paddr < __pa(PAGE_ALIGN(initrd_end)))
2527 return 1;
2528#endif
2529
2530 return 0;
2531}
2532
Yinghai Lu961f8fa2012-11-16 19:39:21 -08002533static void __init register_page_bootmem_info(void)
2534{
2535#ifdef CONFIG_NEED_MULTIPLE_NODES
2536 int i;
2537
2538 for_each_online_node(i)
2539 if (NODE_DATA(i)->node_spanned_pages)
2540 register_page_bootmem_info_node(NODE_DATA(i));
2541#endif
2542}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002543void __init mem_init(void)
2544{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002545 high_memory = __va(last_valid_pfn << PAGE_SHIFT);
2546
Mike Rapoportc6ffc5c2018-10-30 15:09:30 -07002547 memblock_free_all();
David S. Miller919ee672008-04-23 05:40:25 -07002548
Linus Torvalds1da177e2005-04-16 15:20:36 -07002549 /*
Pavel Tatashin2a20aa12017-11-15 17:36:18 -08002550 * Must be done after boot memory is put on freelist, because here we
2551 * might set fields in deferred struct pages that have not yet been
Mike Rapoportc6ffc5c2018-10-30 15:09:30 -07002552 * initialized, and memblock_free_all() initializes all the reserved
Pavel Tatashin2a20aa12017-11-15 17:36:18 -08002553 * deferred pages for us.
2554 */
2555 register_page_bootmem_info();
2556
2557 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002558 * Set up the zero page, mark it reserved, so that page count
2559 * is not manipulated when freeing the page from user ptes.
2560 */
2561 mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
2562 if (mem_map_zero == NULL) {
2563 prom_printf("paging_init: Cannot alloc zero page.\n");
2564 prom_halt();
2565 }
Jiang Liu70affe42013-05-07 16:18:08 -07002566 mark_page_reserved(mem_map_zero);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002567
Jiang Liudceccbe2013-07-03 15:04:14 -07002568 mem_init_print_info(NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002569
2570 if (tlb_type == cheetah || tlb_type == cheetah_plus)
2571 cheetah_ecache_flush_init();
2572}
2573
David S. Miller898cf0e2005-09-23 11:59:44 -07002574void free_initmem(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002575{
2576 unsigned long addr, initend;
David S. Millerf2b60792008-08-14 01:45:41 -07002577 int do_free = 1;
2578
2579 /* If the physical memory maps were trimmed by kernel command
2580 * line options, don't even try freeing this initmem stuff up.
2581 * The kernel image could have been in the trimmed out region
2582 * and if so the freeing below will free invalid page structs.
2583 */
2584 if (cmdline_memory_size)
2585 do_free = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002586
2587 /*
2588	 * The init section is aligned to 8k in vmlinux.lds. Page-align for >8k page sizes.
2589 */
2590 addr = PAGE_ALIGN((unsigned long)(__init_begin));
2591 initend = (unsigned long)(__init_end) & PAGE_MASK;
2592 for (; addr < initend; addr += PAGE_SIZE) {
2593 unsigned long page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002594
2595 page = (addr +
2596 ((unsigned long) __va(kern_base)) -
2597 ((unsigned long) KERNBASE));
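		/* i.e. translate this init page from its KERNBASE alias
		 * to its PAGE_OFFSET linear-map alias, which is the
		 * address the page allocator knows it by.
		 */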
Randy Dunlapc9cf5522006-06-27 02:53:52 -07002598 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002599
Jiang Liu70affe42013-05-07 16:18:08 -07002600 if (do_free)
2601 free_reserved_page(virt_to_page(page));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002602 }
2603}
2604
2605#ifdef CONFIG_BLK_DEV_INITRD
2606void free_initrd_mem(unsigned long start, unsigned long end)
2607{
Jiang Liudceccbe2013-07-03 15:04:14 -07002608 free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
2609 "initrd");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002610}
2611#endif
David S. Millerc4bce902006-02-11 21:57:54 -08002612
David S. Millerc4bce902006-02-11 21:57:54 -08002613pgprot_t PAGE_KERNEL __read_mostly;
2614EXPORT_SYMBOL(PAGE_KERNEL);
2615
2616pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
2617pgprot_t PAGE_COPY __read_mostly;
David S. Miller0f159522006-02-18 12:43:16 -08002618
2619pgprot_t PAGE_SHARED __read_mostly;
2620EXPORT_SYMBOL(PAGE_SHARED);
2621
David S. Millerc4bce902006-02-11 21:57:54 -08002622unsigned long pg_iobits __read_mostly;
2623
2624unsigned long _PAGE_IE __read_mostly;
David S. Miller987c74f2006-06-25 01:34:43 -07002625EXPORT_SYMBOL(_PAGE_IE);
David S. Millerb2bef442006-02-23 01:55:55 -08002626
David S. Millerc4bce902006-02-11 21:57:54 -08002627unsigned long _PAGE_E __read_mostly;
David S. Millerb2bef442006-02-23 01:55:55 -08002628EXPORT_SYMBOL(_PAGE_E);
2629
David S. Millerc4bce902006-02-11 21:57:54 -08002630unsigned long _PAGE_CACHE __read_mostly;
David S. Millerb2bef442006-02-23 01:55:55 -08002631EXPORT_SYMBOL(_PAGE_CACHE);
David S. Millerc4bce902006-02-11 21:57:54 -08002632
David Miller46644c22007-10-16 01:24:16 -07002633#ifdef CONFIG_SPARSEMEM_VMEMMAP
Johannes Weiner0aad8182013-04-29 15:07:50 -07002634int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
Christoph Hellwig7b73d972017-12-29 08:53:54 +01002635 int node, struct vmem_altmap *altmap)
David Miller46644c22007-10-16 01:24:16 -07002636{
David Miller46644c22007-10-16 01:24:16 -07002637 unsigned long pte_base;
2638
2639 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2640 _PAGE_CP_4U | _PAGE_CV_4U |
2641 _PAGE_P_4U | _PAGE_W_4U);
2642 if (tlb_type == hypervisor)
2643 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
Khalid Aziz494e5b62015-05-27 10:00:46 -06002644 page_cache4v_flag | _PAGE_P_4V | _PAGE_W_4V);
David Miller46644c22007-10-16 01:24:16 -07002645
David S. Millerc06240c2014-09-24 21:20:14 -07002646 pte_base |= _PAGE_PMD_HUGE;
David Miller46644c22007-10-16 01:24:16 -07002647
David S. Millerc06240c2014-09-24 21:20:14 -07002648 vstart = vstart & PMD_MASK;
2649 vend = ALIGN(vend, PMD_SIZE);
2650 for (; vstart < vend; vstart += PMD_SIZE) {
Pavel Tatashindf8ee572017-11-15 17:36:22 -08002651 pgd_t *pgd = vmemmap_pgd_populate(vstart, node);
David S. Millerc06240c2014-09-24 21:20:14 -07002652 unsigned long pte;
2653 pud_t *pud;
2654 pmd_t *pmd;
2655
Pavel Tatashindf8ee572017-11-15 17:36:22 -08002656 if (!pgd)
2657 return -ENOMEM;
David S. Millerc06240c2014-09-24 21:20:14 -07002658
Pavel Tatashindf8ee572017-11-15 17:36:22 -08002659 pud = vmemmap_pud_populate(pgd, vstart, node);
2660 if (!pud)
2661 return -ENOMEM;
David S. Millerc06240c2014-09-24 21:20:14 -07002662
2663 pmd = pmd_offset(pud, vstart);
David S. Millerc06240c2014-09-24 21:20:14 -07002664 pte = pmd_val(*pmd);
2665 if (!(pte & _PAGE_VALID)) {
2666 void *block = vmemmap_alloc_block(PMD_SIZE, node);
2667
David Miller46644c22007-10-16 01:24:16 -07002668 if (!block)
2669 return -ENOMEM;
2670
David S. Millerc06240c2014-09-24 21:20:14 -07002671 pmd_val(*pmd) = pte_base | __pa(block);
David Miller46644c22007-10-16 01:24:16 -07002672 }
2673 }
David S. Miller2856cc22012-08-15 00:37:29 -07002674
David S. Millerc06240c2014-09-24 21:20:14 -07002675 return 0;
David S. Miller2856cc22012-08-15 00:37:29 -07002676}
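/* Back-of-envelope sketch (assuming the common 64-byte struct page): each
 * 8MB block allocated above holds 8MB / 64 = 128K struct pages, which at
 * an 8KB base page size describes 128K * 8KB = 1GB of physical memory.
 */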
Yasuaki Ishimatsu46723bf2013-02-22 16:33:00 -08002677
Christoph Hellwig24b6d412017-12-29 08:53:56 +01002678void vmemmap_free(unsigned long start, unsigned long end,
2679 struct vmem_altmap *altmap)
Tang Chen01975182013-02-22 16:33:08 -08002680{
2681}
David Miller46644c22007-10-16 01:24:16 -07002682#endif /* CONFIG_SPARSEMEM_VMEMMAP */
2683
David S. Millerc4bce902006-02-11 21:57:54 -08002684static void prot_init_common(unsigned long page_none,
2685 unsigned long page_shared,
2686 unsigned long page_copy,
2687 unsigned long page_readonly,
2688 unsigned long page_exec_bit)
2689{
2690 PAGE_COPY = __pgprot(page_copy);
David S. Miller0f159522006-02-18 12:43:16 -08002691 PAGE_SHARED = __pgprot(page_shared);
David S. Millerc4bce902006-02-11 21:57:54 -08002692
2693 protection_map[0x0] = __pgprot(page_none);
2694 protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
2695 protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
2696 protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
2697 protection_map[0x4] = __pgprot(page_readonly);
2698 protection_map[0x5] = __pgprot(page_readonly);
2699 protection_map[0x6] = __pgprot(page_copy);
2700 protection_map[0x7] = __pgprot(page_copy);
2701 protection_map[0x8] = __pgprot(page_none);
2702 protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
2703 protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
2704 protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
2705 protection_map[0xc] = __pgprot(page_readonly);
2706 protection_map[0xd] = __pgprot(page_readonly);
2707 protection_map[0xe] = __pgprot(page_shared);
2708 protection_map[0xf] = __pgprot(page_shared);
2709}
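/* The table index is the low four vm_flags bits (following the generic
 * protection_map convention): bit 0 read, bit 1 write, bit 2 exec,
 * bit 3 shared. So 0x0-0x7 are the private (copy-on-write) permutations
 * and 0x8-0xf the shared ones, e.g. 0x3 (private read+write) maps to
 * page_copy with the exec bit cleared.
 */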
2710
2711static void __init sun4u_pgprot_init(void)
2712{
2713 unsigned long page_none, page_shared, page_copy, page_readonly;
2714 unsigned long page_exec_bit;
David S. Miller4f93d212012-09-06 18:13:58 -07002715 int i;
David S. Millerc4bce902006-02-11 21:57:54 -08002716
2717 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2718 _PAGE_CACHE_4U | _PAGE_P_4U |
2719 __ACCESS_BITS_4U | __DIRTY_BITS_4U |
2720 _PAGE_EXEC_4U);
2721 PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2722 _PAGE_CACHE_4U | _PAGE_P_4U |
2723 __ACCESS_BITS_4U | __DIRTY_BITS_4U |
2724 _PAGE_EXEC_4U | _PAGE_L_4U);
David S. Millerc4bce902006-02-11 21:57:54 -08002725
2726 _PAGE_IE = _PAGE_IE_4U;
2727 _PAGE_E = _PAGE_E_4U;
2728 _PAGE_CACHE = _PAGE_CACHE_4U;
2729
2730 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
2731 __ACCESS_BITS_4U | _PAGE_E_4U);
2732
David S. Millerd1acb422007-03-16 17:20:28 -07002733#ifdef CONFIG_DEBUG_PAGEALLOC
David S. Miller922631b2013-09-18 12:00:00 -07002734 kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
David S. Millerd1acb422007-03-16 17:20:28 -07002735#else
David S. Miller9cc3a1a2006-02-21 20:51:13 -08002736 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
David S. Miller922631b2013-09-18 12:00:00 -07002737 PAGE_OFFSET;
David S. Millerd1acb422007-03-16 17:20:28 -07002738#endif
David S. Miller9cc3a1a2006-02-21 20:51:13 -08002739 kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
2740 _PAGE_P_4U | _PAGE_W_4U);
2741
David S. Miller4f93d212012-09-06 18:13:58 -07002742 for (i = 1; i < 4; i++)
2743 kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
David S. Millerc4bce902006-02-11 21:57:54 -08002744
David S. Millerc4bce902006-02-11 21:57:54 -08002745 _PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
2746 _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
2747 _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);
2748
2749
2750 page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
2751 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2752 __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
2753 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2754 __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2755 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2756 __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2757
2758 page_exec_bit = _PAGE_EXEC_4U;
2759
2760 prot_init_common(page_none, page_shared, page_copy, page_readonly,
2761 page_exec_bit);
2762}
2763
2764static void __init sun4v_pgprot_init(void)
2765{
2766 unsigned long page_none, page_shared, page_copy, page_readonly;
2767 unsigned long page_exec_bit;
David S. Miller4f93d212012-09-06 18:13:58 -07002768 int i;
David S. Millerc4bce902006-02-11 21:57:54 -08002769
2770 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
Khalid Aziz494e5b62015-05-27 10:00:46 -06002771 page_cache4v_flag | _PAGE_P_4V |
David S. Millerc4bce902006-02-11 21:57:54 -08002772 __ACCESS_BITS_4V | __DIRTY_BITS_4V |
2773 _PAGE_EXEC_4V);
2774 PAGE_KERNEL_LOCKED = PAGE_KERNEL;
David S. Millerc4bce902006-02-11 21:57:54 -08002775
2776 _PAGE_IE = _PAGE_IE_4V;
2777 _PAGE_E = _PAGE_E_4V;
Khalid Aziz494e5b62015-05-27 10:00:46 -06002778 _PAGE_CACHE = page_cache4v_flag;
David S. Millerc4bce902006-02-11 21:57:54 -08002779
David S. Millerd1acb422007-03-16 17:20:28 -07002780#ifdef CONFIG_DEBUG_PAGEALLOC
David S. Miller922631b2013-09-18 12:00:00 -07002781 kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
David S. Millerd1acb422007-03-16 17:20:28 -07002782#else
David S. Miller9cc3a1a2006-02-21 20:51:13 -08002783 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
David S. Miller922631b2013-09-18 12:00:00 -07002784 PAGE_OFFSET;
David S. Millerd1acb422007-03-16 17:20:28 -07002785#endif
Khalid Aziz494e5b62015-05-27 10:00:46 -06002786 kern_linear_pte_xor[0] |= (page_cache4v_flag | _PAGE_P_4V |
2787 _PAGE_W_4V);
David S. Miller9cc3a1a2006-02-21 20:51:13 -08002788
David S. Millerc69ad0a2012-09-06 20:35:36 -07002789 for (i = 1; i < 4; i++)
2790 kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
David S. Miller4f93d212012-09-06 18:13:58 -07002791
David S. Millerc4bce902006-02-11 21:57:54 -08002792 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
2793 __ACCESS_BITS_4V | _PAGE_E_4V);
2794
David S. Millerc4bce902006-02-11 21:57:54 -08002795 _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
2796 _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
2797 _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
2798 _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);
2799
Khalid Aziz494e5b62015-05-27 10:00:46 -06002800 page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | page_cache4v_flag;
2801 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
David S. Millerc4bce902006-02-11 21:57:54 -08002802 __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
Khalid Aziz494e5b62015-05-27 10:00:46 -06002803 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
David S. Millerc4bce902006-02-11 21:57:54 -08002804 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
Khalid Aziz494e5b62015-05-27 10:00:46 -06002805 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
David S. Millerc4bce902006-02-11 21:57:54 -08002806 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
2807
2808 page_exec_bit = _PAGE_EXEC_4V;
2809
2810 prot_init_common(page_none, page_shared, page_copy, page_readonly,
2811 page_exec_bit);
2812}
2813
2814unsigned long pte_sz_bits(unsigned long sz)
2815{
2816 if (tlb_type == hypervisor) {
2817 switch (sz) {
2818 case 8 * 1024:
2819 default:
2820 return _PAGE_SZ8K_4V;
2821 case 64 * 1024:
2822 return _PAGE_SZ64K_4V;
2823 case 512 * 1024:
2824 return _PAGE_SZ512K_4V;
2825 case 4 * 1024 * 1024:
2826 return _PAGE_SZ4MB_4V;
Joe Perches6cb79b32011-06-03 14:45:23 +00002827 }
David S. Millerc4bce902006-02-11 21:57:54 -08002828 } else {
2829 switch (sz) {
2830 case 8 * 1024:
2831 default:
2832 return _PAGE_SZ8K_4U;
2833 case 64 * 1024:
2834 return _PAGE_SZ64K_4U;
2835 case 512 * 1024:
2836 return _PAGE_SZ512K_4U;
2837 case 4 * 1024 * 1024:
2838 return _PAGE_SZ4MB_4U;
Joe Perches6cb79b32011-06-03 14:45:23 +00002839 }
David S. Millerc4bce902006-02-11 21:57:54 -08002840 }
2841}
2842
2843pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
2844{
2845 pte_t pte;
David S. Millercf627152006-02-12 21:10:07 -08002846
2847 pte_val(pte) = page | pgprot_val(pgprot_noncached(prot));
David S. Millerc4bce902006-02-11 21:57:54 -08002848 pte_val(pte) |= (((unsigned long)space) << 32);
2849 pte_val(pte) |= pte_sz_bits(page_size);
David S. Millercf627152006-02-12 21:10:07 -08002850
David S. Millerc4bce902006-02-11 21:57:54 -08002851 return pte;
2852}
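/* Usage sketch (values are illustrative only, not from any real
 * driver):
 *
 *	pte = mk_pte_io(0x00400000UL, PAGE_KERNEL, 0x1f, 512 * 1024);
 *
 * maps one 512K page of I/O space 0x1f: pgprot_noncached() strips
 * the cacheability bits and sets the side-effect (_PAGE_E) bit, and
 * the space number is shifted into bits 63:32 of the TTE's physical
 * address, so the result is suitable for device registers.
 */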
2853
David S. Millerc4bce902006-02-11 21:57:54 -08002854static unsigned long kern_large_tte(unsigned long paddr)
2855{
2856 unsigned long val;
2857
2858 val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2859 _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
2860 _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
2861 if (tlb_type == hypervisor)
2862 val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
Khalid Aziz494e5b62015-05-27 10:00:46 -06002863 page_cache4v_flag | _PAGE_P_4V |
David S. Millerc4bce902006-02-11 21:57:54 -08002864 _PAGE_EXEC_4V | _PAGE_W_4V);
2865
2866 return val | paddr;
2867}
2868
David S. Millerc4bce902006-02-11 21:57:54 -08002869/* If not locked, zap it. */
2870void __flush_tlb_all(void)
2871{
2872 unsigned long pstate;
2873 int i;
2874
2875 __asm__ __volatile__("flushw\n\t"
2876 "rdpr %%pstate, %0\n\t"
2877 "wrpr %0, %1, %%pstate"
2878 : "=r" (pstate)
2879 : "i" (PSTATE_IE));
David S. Miller8f3614532007-12-13 06:13:38 -08002880 if (tlb_type == hypervisor) {
2881 sun4v_mmu_demap_all();
2882 } else if (tlb_type == spitfire) {
David S. Millerc4bce902006-02-11 21:57:54 -08002883 for (i = 0; i < 64; i++) {
2884 /* Spitfire Errata #32 workaround */
2885 /* NOTE: Always runs on spitfire, so no
2886 * cheetah+ page size encodings.
2887 */
2888 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
2889 "flush %%g6"
2890 : /* No outputs */
2891 : "r" (0),
2892 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
2893
2894 if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
2895 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
2896 "membar #Sync"
2897 : /* no outputs */
2898 : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
2899 spitfire_put_dtlb_data(i, 0x0UL);
2900 }
2901
2902 /* Spitfire Errata #32 workaround */
2903 /* NOTE: Always runs on spitfire, so no
2904 * cheetah+ page size encodings.
2905 */
2906 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
2907 "flush %%g6"
2908 : /* No outputs */
2909 : "r" (0),
2910 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
2911
2912 if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
2913 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
2914 "membar #Sync"
2915 : /* no outputs */
2916 : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
2917 spitfire_put_itlb_data(i, 0x0UL);
2918 }
2919 }
2920 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
2921 cheetah_flush_dtlb_all();
2922 cheetah_flush_itlb_all();
2923 }
2924 __asm__ __volatile__("wrpr %0, 0, %%pstate"
2925 : : "r" (pstate));
2926}
David Millerc460bec2012-10-08 16:34:22 -07002927
Joel Fernandes (Google)4cf58922019-01-03 15:28:34 -08002928pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
David Millerc460bec2012-10-08 16:34:22 -07002929{
Levin, Alexander (Sasha Levin)75f296d2017-11-15 17:35:54 -08002930 struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
David S. Miller37b3a8f2013-09-25 13:48:49 -07002931 pte_t *pte = NULL;
David Millerc460bec2012-10-08 16:34:22 -07002932
David Millerc460bec2012-10-08 16:34:22 -07002933 if (page)
2934 pte = (pte_t *) page_address(page);
2935
2936 return pte;
2937}
2938
Joel Fernandes (Google)4cf58922019-01-03 15:28:34 -08002939pgtable_t pte_alloc_one(struct mm_struct *mm)
David Millerc460bec2012-10-08 16:34:22 -07002940{
Levin, Alexander (Sasha Levin)75f296d2017-11-15 17:35:54 -08002941 struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
Kirill A. Shutemov1ae9ae52013-11-14 14:31:42 -08002942 if (!page)
2943 return NULL;
2944 if (!pgtable_page_ctor(page)) {
Mel Gorman2d4894b2017-11-15 17:37:59 -08002945 free_unref_page(page);
Kirill A. Shutemov1ae9ae52013-11-14 14:31:42 -08002946 return NULL;
David Millerc460bec2012-10-08 16:34:22 -07002947 }
Kirill A. Shutemov1ae9ae52013-11-14 14:31:42 -08002948 return (pte_t *) page_address(page);
David Millerc460bec2012-10-08 16:34:22 -07002949}
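/* Note the asymmetry with pte_alloc_one_kernel() above: user pte
 * pages go through pgtable_page_ctor(), which sets up the split
 * page-table lock and page-table accounting, and must therefore be
 * released via pgtable_page_dtor() (see __pte_free() below), while
 * kernel pte pages skip the ctor and are freed with plain
 * free_page().
 */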
2950
2951void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
2952{
David S. Miller37b3a8f2013-09-25 13:48:49 -07002953 free_page((unsigned long)pte);
David Millerc460bec2012-10-08 16:34:22 -07002954}
2955
2956static void __pte_free(pgtable_t pte)
2957{
2958 struct page *page = virt_to_page(pte);
David S. Miller37b3a8f2013-09-25 13:48:49 -07002959
2960 pgtable_page_dtor(page);
2961 __free_page(page);
David Millerc460bec2012-10-08 16:34:22 -07002962}
2963
2964void pte_free(struct mm_struct *mm, pgtable_t pte)
2965{
2966 __pte_free(pte);
2967}
2968
2969void pgtable_free(void *table, bool is_page)
2970{
2971 if (is_page)
2972 __pte_free(table);
2973 else
2974 kmem_cache_free(pgtable_cache, table);
2975}
David Miller9e695d22012-10-08 16:34:29 -07002976
2977#ifdef CONFIG_TRANSPARENT_HUGEPAGE
David Miller9e695d22012-10-08 16:34:29 -07002978void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
2979 pmd_t *pmd)
2980{
2981 unsigned long pte, flags;
2982 struct mm_struct *mm;
2983 pmd_t entry = *pmd;
David Miller9e695d22012-10-08 16:34:29 -07002984
2985 if (!pmd_large(entry) || !pmd_young(entry))
2986 return;
2987
David S. Millera7b94032013-09-26 13:45:15 -07002988 pte = pmd_val(entry);
David Miller9e695d22012-10-08 16:34:29 -07002989
David S. Miller18f38132014-08-04 16:34:01 -07002990 /* Don't insert a non-valid PMD into the TSB, we'll deadlock. */
2991 if (!(pte & _PAGE_VALID))
2992 return;
2993
David S. Miller37b3a8f2013-09-25 13:48:49 -07002994 /* We are fabricating 8MB pages using 4MB real hw pages. */
2995 pte |= (addr & (1UL << REAL_HPAGE_SHIFT));
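	/* Worked example, assuming HPAGE_SHIFT == 23 and
	 * REAL_HPAGE_SHIFT == 22 (an 8MB huge page backed by two real
	 * 4MB hw TTEs):
	 *
	 *	addr 0x...000000 - 0x...3fffff -> bit 22 clear -> low  4MB
	 *	addr 0x...400000 - 0x...7fffff -> bit 22 set   -> high 4MB
	 *
	 * OR-ing that address bit into the paddr field of the TTE makes
	 * the two halves resolve to consecutive real 4MB pages.
	 */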
David Miller9e695d22012-10-08 16:34:29 -07002996
2997 mm = vma->vm_mm;
2998
2999 spin_lock_irqsave(&mm->context.lock, flags);
3000
3001 if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL)
David S. Miller37b3a8f2013-09-25 13:48:49 -07003002 __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
David Miller9e695d22012-10-08 16:34:29 -07003003 addr, pte);
3004
3005 spin_unlock_irqrestore(&mm->context.lock, flags);
3006}
3007#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
3008
3009#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
3010static void context_reload(void *__data)
3011{
3012 struct mm_struct *mm = __data;
3013
3014 if (mm == current->mm)
3015 load_secondary_context(mm);
3016}
3017
David S. Miller0fbebed2013-02-19 22:34:10 -08003018void hugetlb_setup(struct pt_regs *regs)
David Miller9e695d22012-10-08 16:34:29 -07003019{
David S. Miller0fbebed2013-02-19 22:34:10 -08003020 struct mm_struct *mm = current->mm;
3021 struct tsb_config *tp;
David Miller9e695d22012-10-08 16:34:29 -07003022
David Hildenbrand70ffdb92015-05-11 17:52:11 +02003023 if (faulthandler_disabled() || !mm) {
David S. Miller0fbebed2013-02-19 22:34:10 -08003024 const struct exception_table_entry *entry;
David Miller9e695d22012-10-08 16:34:29 -07003025
David S. Miller0fbebed2013-02-19 22:34:10 -08003026 entry = search_exception_tables(regs->tpc);
3027 if (entry) {
3028 regs->tpc = entry->fixup;
3029 regs->tnpc = regs->tpc + 4;
3030 return;
3031 }
3032 pr_alert("Unexpected HugeTLB setup in atomic context.\n");
3033 die_if_kernel("HugeTSB in atomic", regs);
3034 }
3035
3036 tp = &mm->context.tsb_block[MM_TSB_HUGE];
3037 if (likely(tp->tsb == NULL))
3038 tsb_grow(mm, MM_TSB_HUGE, 0);
3039
David Miller9e695d22012-10-08 16:34:29 -07003040 tsb_context_switch(mm);
3041 smp_tsb_sync(mm);
3042
3043 /* On UltraSPARC-III+ and later, configure the second half of
3044 * the Data-TLB for huge pages.
3045 */
3046 if (tlb_type == cheetah_plus) {
David S. Miller9ea46abe2016-05-25 12:51:20 -07003047 bool need_context_reload = false;
David Miller9e695d22012-10-08 16:34:29 -07003048 unsigned long ctx;
3049
David S. Miller9ea46abe2016-05-25 12:51:20 -07003050 spin_lock_irq(&ctx_alloc_lock);
David Miller9e695d22012-10-08 16:34:29 -07003051 ctx = mm->context.sparc64_ctx_val;
3052 ctx &= ~CTX_PGSZ_MASK;
3053 ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
3054 ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;
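		/* Sketch of the field layout being built (assuming
		 * CTX_PGSZ_BASE == CTX_PGSZ_8KB and CTX_PGSZ_HUGE ==
		 * CTX_PGSZ_4MB): PGSZ0 tells the first half of the
		 * Data-TLB to assume 8K pages, while PGSZ1 dedicates
		 * the second half to 4MB (real huge page) translations.
		 */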
3055
3056 if (ctx != mm->context.sparc64_ctx_val) {
3057 /* When changing the page size fields, we
3058 * must perform a context flush so that no
3059 * stale entries match. This flush must
3060 * occur with the original context register
3061 * settings.
3062 */
3063 do_flush_tlb_mm(mm);
3064
3065 /* Reload the context register of all processors
3066 * also executing in this address space.
3067 */
3068 mm->context.sparc64_ctx_val = ctx;
David S. Miller9ea46abe2016-05-25 12:51:20 -07003069 need_context_reload = true;
David Miller9e695d22012-10-08 16:34:29 -07003070 }
David S. Miller9ea46abe2016-05-25 12:51:20 -07003071 spin_unlock_irq(&ctx_alloc_lock);
3072
3073 if (need_context_reload)
3074 on_each_cpu(context_reload, mm, 0);
David Miller9e695d22012-10-08 16:34:29 -07003075 }
3076}
3077#endif
bob piccof6d4fb52014-03-03 11:54:42 -05003078
3079static struct resource code_resource = {
3080 .name = "Kernel code",
Toshi Kani35d98e92016-01-26 21:57:22 +01003081 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
bob piccof6d4fb52014-03-03 11:54:42 -05003082};
3083
3084static struct resource data_resource = {
3085 .name = "Kernel data",
Toshi Kani35d98e92016-01-26 21:57:22 +01003086 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
bob piccof6d4fb52014-03-03 11:54:42 -05003087};
3088
3089static struct resource bss_resource = {
3090 .name = "Kernel bss",
Toshi Kani35d98e92016-01-26 21:57:22 +01003091 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
bob piccof6d4fb52014-03-03 11:54:42 -05003092};
3093
3094static inline resource_size_t compute_kern_paddr(void *addr)
3095{
3096 return (resource_size_t) (addr - KERNBASE + kern_base);
3097}
3098
3099static void __init kernel_lds_init(void)
3100{
3101 code_resource.start = compute_kern_paddr(_text);
3102 code_resource.end = compute_kern_paddr(_etext - 1);
3103 data_resource.start = compute_kern_paddr(_etext);
3104 data_resource.end = compute_kern_paddr(_edata - 1);
3105 bss_resource.start = compute_kern_paddr(__bss_start);
3106 bss_resource.end = compute_kern_paddr(_end - 1);
3107}
3108
3109static int __init report_memory(void)
3110{
3111 int i;
3112 struct resource *res;
3113
3114 kernel_lds_init();
3115
3116 for (i = 0; i < pavail_ents; i++) {
3117 res = kzalloc(sizeof(struct resource), GFP_KERNEL);
3118
3119 if (!res) {
3120 pr_warn("Failed to allocate source.\n");
3121 break;
3122 }
3123
3124 res->name = "System RAM";
3125 res->start = pavail[i].phys_addr;
3126 res->end = pavail[i].phys_addr + pavail[i].reg_size - 1;
Toshi Kani35d98e92016-01-26 21:57:22 +01003127 res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
bob piccof6d4fb52014-03-03 11:54:42 -05003128
3129 if (insert_resource(&iomem_resource, res) < 0) {
3130 pr_warn("Resource insertion failed.\n");
3131 break;
3132 }
3133
3134 insert_resource(res, &code_resource);
3135 insert_resource(res, &data_resource);
3136 insert_resource(res, &bss_resource);
3137 }
3138
3139 return 0;
3140}
David S. Miller3c081582015-03-18 19:15:28 -07003141arch_initcall(report_memory);
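/* The nested resources built above appear in /proc/iomem roughly as
 * follows (addresses illustrative only):
 *
 *	00000000-3fffffff : System RAM
 *	  00404000-009fd0e3 : Kernel code
 *	  009fd0e4-00be3a3f : Kernel data
 *	  00c4a000-00d4533f : Kernel bss
 */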
David S. Millere9011d02014-08-05 18:57:18 -07003142
David S. Miller4ca9a232014-08-04 20:07:37 -07003143#ifdef CONFIG_SMP
3144#define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range
3145#else
3146#define do_flush_tlb_kernel_range __flush_tlb_kernel_range
3147#endif
3148
3149void flush_tlb_kernel_range(unsigned long start, unsigned long end)
3150{
3151 if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
3152 if (start < LOW_OBP_ADDRESS) {
3153 flush_tsb_kernel_range(start, LOW_OBP_ADDRESS);
3154 do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
3155 }
3156 if (end > HI_OBP_ADDRESS) {
David S. Miller473ad7f2014-10-04 21:05:14 -07003157 flush_tsb_kernel_range(HI_OBP_ADDRESS, end);
3158 do_flush_tlb_kernel_range(HI_OBP_ADDRESS, end);
David S. Miller4ca9a232014-08-04 20:07:37 -07003159 }
3160 } else {
3161 flush_tsb_kernel_range(start, end);
3162 do_flush_tlb_kernel_range(start, end);
3163 }
3164}
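/* A sketch of the split above, assuming the usual sparc64 values
 * LOW_OBP_ADDRESS == 0xf0000000 and HI_OBP_ADDRESS == 0x100000000:
 *
 *	flush_tlb_kernel_range(0xe0000000, 0x180000000)
 *		-> flush [0xe0000000,  0xf0000000)  below the hole
 *		-> flush [0x100000000, 0x180000000) above the hole
 *
 * The OBP firmware range in between is deliberately left alone so
 * the firmware's translations survive.
 */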
Khalid Aziz74a04962018-02-23 15:46:41 -07003165
3166void copy_user_highpage(struct page *to, struct page *from,
3167 unsigned long vaddr, struct vm_area_struct *vma)
3168{
3169 char *vfrom, *vto;
3170
3171 vfrom = kmap_atomic(from);
3172 vto = kmap_atomic(to);
3173 copy_user_page(vto, vfrom, vaddr, to);
3174 kunmap_atomic(vto);
3175 kunmap_atomic(vfrom);
3176
3177 /* If this page has ADI enabled, copy over any ADI tags
3178 * as well
3179 */
3180 if (vma->vm_flags & VM_SPARC_ADI) {
3181 unsigned long pfrom, pto, i, adi_tag;
3182
3183 pfrom = page_to_phys(from);
3184 pto = page_to_phys(to);
3185
3186 for (i = pfrom; i < (pfrom + PAGE_SIZE); i += adi_blksize()) {
3187 asm volatile("ldxa [%1] %2, %0\n\t"
3188 : "=r" (adi_tag)
3189 : "r" (i), "i" (ASI_MCD_REAL));
3190 asm volatile("stxa %0, [%1] %2\n\t"
3191 :
3192 : "r" (adi_tag), "r" (pto),
3193 "i" (ASI_MCD_REAL));
3194 pto += adi_blksize();
3195 }
3196 asm volatile("membar #Sync\n\t");
3197 }
3198}
3199EXPORT_SYMBOL(copy_user_highpage);
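/* A note on the loop above (and its twin in copy_highpage() below):
 * adi_blksize() is the ADI block size from the machine description,
 * typically one 64-byte cacheline.  For each block of the source
 * page, the ldxa at ASI_MCD_REAL reads the version tag bound to that
 * physical block and the stxa rebinds it to the corresponding block
 * of the destination, so a tagged page keeps matching its users'
 * pointer versions after COW or migration.
 */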
3200
3201void copy_highpage(struct page *to, struct page *from)
3202{
3203 char *vfrom, *vto;
3204
3205 vfrom = kmap_atomic(from);
3206 vto = kmap_atomic(to);
3207 copy_page(vto, vfrom);
3208 kunmap_atomic(vto);
3209 kunmap_atomic(vfrom);
3210
3211 	/* If this platform is ADI capable, copy any ADI tags
3212 * as well
3213 */
3214 if (adi_capable()) {
3215 unsigned long pfrom, pto, i, adi_tag;
3216
3217 pfrom = page_to_phys(from);
3218 pto = page_to_phys(to);
3219
3220 for (i = pfrom; i < (pfrom + PAGE_SIZE); i += adi_blksize()) {
3221 asm volatile("ldxa [%1] %2, %0\n\t"
3222 : "=r" (adi_tag)
3223 : "r" (i), "i" (ASI_MCD_REAL));
3224 asm volatile("stxa %0, [%1] %2\n\t"
3225 :
3226 : "r" (adi_tag), "r" (pto),
3227 "i" (ASI_MCD_REAL));
3228 pto += adi_blksize();
3229 }
3230 asm volatile("membar #Sync\n\t");
3231 }
3232}
3233EXPORT_SYMBOL(copy_highpage);