/*
 * arch/sparc64/mm/init.c
 *
 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/extable.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
#include <linux/cache.h>
#include <linux/sort.h>
#include <linux/ioport.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/gfp.h>

#include <asm/head.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/spitfire.h>
#include <asm/sections.h>
#include <asm/tsb.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/cpudata.h>
#include <asm/setup.h>
#include <asm/irq.h>

#include "init_64.h"

unsigned long kern_linear_pte_xor[4] __read_mostly;
static unsigned long page_cache4v_flag;

/* A bitmap, two bits for every 256MB of physical memory.  These two
 * bits determine what page size we use for kernel linear
 * translations.  They form an index into kern_linear_pte_xor[].  The
 * value in the indexed slot is XOR'd with the TLB miss virtual
 * address to form the resulting TTE.  The mapping is:
 *
 *	0	==>	4MB
 *	1	==>	256MB
 *	2	==>	2GB
 *	3	==>	16GB
 *
 * All sun4v chips support 256MB pages.  Only SPARC-T4 and later
 * support 2GB pages, and hopefully future cpus will support the 16GB
 * pages as well.  For slots 2 and 3, we encode a 256MB TTE xor there
 * if these larger page sizes are not supported by the cpu.
 *
 * It would be nice to determine this from the machine description
 * 'cpu' properties, but we need to have this table setup before the
 * MDESC is initialized.
 */
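
/* For illustration only (not from the original source): a physical
 * address PA has its two bitmap bits at index (PA >> 28), since each
 * 256MB (1 << 28) region gets one 2-bit slot.  A TSB miss at linear
 * virtual address VA then builds its TTE roughly as
 * VA ^ kern_linear_pte_xor[slot_value].
 */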

#ifndef CONFIG_DEBUG_PAGEALLOC
/* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings.
 * Space is allocated for this right after the trap table in
 * arch/sparc64/kernel/head.S
 */
extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
#endif
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

static unsigned long cpu_pgsz_mask;

#define MAX_BANKS	1024

static struct linux_prom64_registers pavail[MAX_BANKS];
static int pavail_ents;

u64 numa_latency[MAX_NUMNODES][MAX_NUMNODES];

static int cmp_p64(const void *a, const void *b)
{
	const struct linux_prom64_registers *x = a, *y = b;

	if (x->phys_addr > y->phys_addr)
		return 1;
	if (x->phys_addr < y->phys_addr)
		return -1;
	return 0;
}

static void __init read_obp_memory(const char *property,
				   struct linux_prom64_registers *regs,
				   int *num_ents)
{
	phandle node = prom_finddevice("/memory");
	int prop_size = prom_getproplen(node, property);
	int ents, ret, i;

	ents = prop_size / sizeof(struct linux_prom64_registers);
	if (ents > MAX_BANKS) {
		prom_printf("The machine has more %s property entries than "
			    "this kernel can support (%d).\n",
			    property, MAX_BANKS);
		prom_halt();
	}

	ret = prom_getproperty(node, property, (char *) regs, prop_size);
	if (ret == -1) {
		prom_printf("Couldn't get %s property from /memory.\n",
			    property);
		prom_halt();
	}

	/* Sanitize what we got from the firmware, by page aligning
	 * everything.
	 */
	for (i = 0; i < ents; i++) {
		unsigned long base, size;

		base = regs[i].phys_addr;
		size = regs[i].reg_size;

		size &= PAGE_MASK;
		if (base & ~PAGE_MASK) {
			unsigned long new_base = PAGE_ALIGN(base);

			size -= new_base - base;
			if ((long) size < 0L)
				size = 0UL;
			base = new_base;
		}
		if (size == 0UL) {
			/* If it is empty, simply get rid of it.
			 * This simplifies the logic of the other
			 * functions that process these arrays.
			 */
			memmove(&regs[i], &regs[i + 1],
				(ents - i - 1) * sizeof(regs[0]));
			i--;
			ents--;
			continue;
		}
		regs[i].phys_addr = base;
		regs[i].reg_size = size;
	}

	*num_ents = ents;

	sort(regs, ents, sizeof(struct linux_prom64_registers),
	     cmp_p64, NULL);
}

/* Kernel physical address base and size in bytes.  */
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero __read_mostly;
EXPORT_SYMBOL(mem_map_zero);

unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;

unsigned long sparc64_kern_pri_context __read_mostly;
unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
unsigned long sparc64_kern_sec_context __read_mostly;

int num_kernel_image_mappings;

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif

inline void flush_dcache_page_impl(struct page *page)
{
	BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

#define PG_dcache_dirty		PG_arch_1
#define PG_dcache_cpu_shift	32UL
#define PG_dcache_cpu_mask	\
	((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)

#define dcache_dirty_cpu(page) \
	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
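
/* For illustration only (not part of the original source): on 64-bit
 * page->flags this state packs as
 *
 *	bit  PG_dcache_dirty (PG_arch_1):  page has dirty D-cache lines
 *	bits [32, 32 + ilog2(NR_CPUS)):    cpu that last dirtied the page
 *
 * so dcache_dirty_cpu() is a plain shift-and-mask read of that field.
 */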

static inline void set_dcache_dirty(struct page *page, int this_cpu)
{
	unsigned long mask = this_cpu;
	unsigned long non_cpu_bits;

	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

	__asm__ __volatile__("1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "and	%%g7, %1, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g1", "g7");
}
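
/* Rough C paraphrase of the casx loop above, for readability only
 * (not code from the original file):
 *
 *	do {
 *		old = page->flags;
 *		new = (old & non_cpu_bits) | mask;
 *	} while (cmpxchg(&page->flags, old, new) != old);
 */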

static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "srlx	%%g7, %4, %%g1\n\t"
			     "and	%%g1, %3, %%g1\n\t"
			     "cmp	%%g1, %0\n\t"
			     "bne,pn	%%icc, 2f\n\t"
			     " andn	%%g7, %1, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop\n"
			     "2:"
			     : /* no outputs */
			     : "r" (cpu), "r" (mask), "r" (&page->flags),
			       "i" (PG_dcache_cpu_mask),
			       "i" (PG_dcache_cpu_shift)
			     : "g1", "g7");
}

static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
{
	unsigned long tsb_addr = (unsigned long) ent;

	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		tsb_addr = __pa(tsb_addr);

	__tsb_insert(tsb_addr, tag, pte);
}

unsigned long _PAGE_ALL_SZ_BITS __read_mostly;

static void flush_dcache(unsigned long pfn)
{
	struct page *page;

	page = pfn_to_page(pfn);
	if (page) {
		unsigned long pg_flags;

		pg_flags = page->flags;
		if (pg_flags & (1UL << PG_dcache_dirty)) {
			int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
				   PG_dcache_cpu_mask);
			int this_cpu = get_cpu();

			/* This is just to optimize away some function calls
			 * in the SMP case.
			 */
			if (cpu == this_cpu)
				flush_dcache_page_impl(page);
			else
				smp_flush_dcache_page_impl(page, cpu);

			clear_dcache_dirty_cpu(page, cpu);

			put_cpu();
		}
	}
}

/* mm->context.lock must be held */
static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_index,
				    unsigned long tsb_hash_shift, unsigned long address,
				    unsigned long tte)
{
	struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
	unsigned long tag;

	if (unlikely(!tsb))
		return;

	tsb += ((address >> tsb_hash_shift) &
		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
	tag = (address >> 22UL);
	tsb_insert(tsb, tag, tte);
}
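
/* Worked example (numbers assumed for illustration): with an 8K base
 * page size (PAGE_SHIFT == 13) and a 512-entry TSB, a fault at
 * address 0x100002000 hashes to slot (0x100002000 >> 13) & 511 == 1
 * and is stored with tag 0x100002000 >> 22 == 0x400.
 */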

#ifdef CONFIG_HUGETLB_PAGE
static int __init setup_hugepagesz(char *string)
{
	unsigned long long hugepage_size;
	unsigned int hugepage_shift;
	unsigned short hv_pgsz_idx;
	unsigned int hv_pgsz_mask;
	int rc = 0;

	hugepage_size = memparse(string, &string);
	hugepage_shift = ilog2(hugepage_size);

	switch (hugepage_shift) {
	case HPAGE_2GB_SHIFT:
		hv_pgsz_mask = HV_PGSZ_MASK_2GB;
		hv_pgsz_idx = HV_PGSZ_IDX_2GB;
		break;
	case HPAGE_256MB_SHIFT:
		hv_pgsz_mask = HV_PGSZ_MASK_256MB;
		hv_pgsz_idx = HV_PGSZ_IDX_256MB;
		break;
	case HPAGE_SHIFT:
		hv_pgsz_mask = HV_PGSZ_MASK_4MB;
		hv_pgsz_idx = HV_PGSZ_IDX_4MB;
		break;
	case HPAGE_64K_SHIFT:
		hv_pgsz_mask = HV_PGSZ_MASK_64K;
		hv_pgsz_idx = HV_PGSZ_IDX_64K;
		break;
	default:
		hv_pgsz_mask = 0;
	}

	if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) {
		hugetlb_bad_size();
		pr_err("hugepagesz=%llu not supported by MMU.\n",
		       hugepage_size);
		goto out;
	}

	hugetlb_add_hstate(hugepage_shift - PAGE_SHIFT);
	rc = 1;

out:
	return rc;
}
__setup("hugepagesz=", setup_hugepagesz);
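/* Usage sketch (assumed, not in the original file): booting with
 * "hugepagesz=256M hugepages=16" lands here via __setup(); sizes the
 * MMU cannot back are rejected through hugetlb_bad_size() rather
 * than silently registering an unusable hstate.
 */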
#endif	/* CONFIG_HUGETLB_PAGE */

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	struct mm_struct *mm;
	unsigned long flags;
	pte_t pte = *ptep;

	if (tlb_type != hypervisor) {
		unsigned long pfn = pte_pfn(pte);

		if (pfn_valid(pfn))
			flush_dcache(pfn);
	}

	mm = vma->vm_mm;

	/* Don't insert a non-valid PTE into the TSB, we'll deadlock.  */
	if (!pte_accessible(mm, pte))
		return;

	spin_lock_irqsave(&mm->context.lock, flags);

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	if ((mm->context.hugetlb_pte_count || mm->context.thp_pte_count) &&
	    is_hugetlb_pmd(__pmd(pte_val(pte)))) {
		/* We are fabricating 8MB pages using 4MB real hw pages.  */
		pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT));
		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
					address, pte_val(pte));
	} else
#endif
		__update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
					address, pte_val(pte));

	spin_unlock_irqrestore(&mm->context.lock, flags);
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	/* Do not bother with the expensive D-cache flush if it
	 * is merely the zero page.  The 'bigcore' testcase in GDB
	 * causes this case to run millions of times.
	 */
	if (page == ZERO_PAGE(0))
		return;

	this_cpu = get_cpu();

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping)) {
		int dirty = test_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			int dirty_cpu = dcache_dirty_cpu(page);

			if (dirty_cpu == this_cpu)
				goto out;
			smp_flush_dcache_page_impl(page, dirty_cpu);
		}
		set_dcache_dirty(page, this_cpu);
	} else {
		/* We could delay the flush for the !page_mapping
		 * case too.  But that case is for exec env/arg
		 * pages and those are 99% certainly going to get
		 * faulted into the tlb (and thus flushed) anyways.
		 */
		flush_dcache_page_impl(page);
	}

out:
	put_cpu();
}
EXPORT_SYMBOL(flush_dcache_page);

void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
	/* Cheetah and Hypervisor platform cpus have coherent I-cache. */
	if (tlb_type == spitfire) {
		unsigned long kaddr;

		/* This code only runs on Spitfire cpus so this is
		 * why we can assume _PAGE_PADDR_4U.
		 */
		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
			unsigned long paddr, mask = _PAGE_PADDR_4U;

			if (kaddr >= PAGE_OFFSET)
				paddr = kaddr & mask;
			else {
				pgd_t *pgdp = pgd_offset_k(kaddr);
				pud_t *pudp = pud_offset(pgdp, kaddr);
				pmd_t *pmdp = pmd_offset(pudp, kaddr);
				pte_t *ptep = pte_offset_kernel(pmdp, kaddr);

				paddr = pte_val(*ptep) & mask;
			}
			__flush_icache_page(paddr);
		}
	}
}
EXPORT_SYMBOL(flush_icache_range);

void mmu_info(struct seq_file *m)
{
	static const char *pgsz_strings[] = {
		"8K", "64K", "512K", "4MB", "32MB",
		"256MB", "2GB", "16GB",
	};
	int i, printed;

	if (tlb_type == cheetah)
		seq_printf(m, "MMU Type\t: Cheetah\n");
	else if (tlb_type == cheetah_plus)
		seq_printf(m, "MMU Type\t: Cheetah+\n");
	else if (tlb_type == spitfire)
		seq_printf(m, "MMU Type\t: Spitfire\n");
	else if (tlb_type == hypervisor)
		seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
	else
		seq_printf(m, "MMU Type\t: ???\n");

	seq_printf(m, "MMU PGSZs\t: ");
	printed = 0;
	for (i = 0; i < ARRAY_SIZE(pgsz_strings); i++) {
		if (cpu_pgsz_mask & (1UL << i)) {
			seq_printf(m, "%s%s",
				   printed ? "," : "", pgsz_strings[i]);
			printed++;
		}
	}
	seq_putc(m, '\n');

#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}

struct linux_prom_translation prom_trans[512] __read_mostly;
unsigned int prom_trans_ents __read_mostly;

unsigned long kern_locked_tte_data;

/* The obp translations are saved based on 8k pagesize, since obp can
 * use a mixture of pagesizes.  Misses to the LOW_OBP_ADDRESS ->
 * HI_OBP_ADDRESS range are handled in ktlb.S.
 */
static inline int in_obp_range(unsigned long vaddr)
{
	return (vaddr >= LOW_OBP_ADDRESS &&
		vaddr < HI_OBP_ADDRESS);
}

static int cmp_ptrans(const void *a, const void *b)
{
	const struct linux_prom_translation *x = a, *y = b;

	if (x->virt > y->virt)
		return 1;
	if (x->virt < y->virt)
		return -1;
	return 0;
}

/* Read OBP translations property into 'prom_trans[]'.  */
static void __init read_obp_translations(void)
{
	int n, node, ents, first, last, i;

	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (unlikely(n == 0 || n == -1)) {
		prom_printf("prom_mappings: Couldn't get size.\n");
		prom_halt();
	}
	if (unlikely(n > sizeof(prom_trans))) {
		prom_printf("prom_mappings: Size %d is too big.\n", n);
		prom_halt();
	}

	if ((n = prom_getproperty(node, "translations",
				  (char *)&prom_trans[0],
				  sizeof(prom_trans))) == -1) {
		prom_printf("prom_mappings: Couldn't get property.\n");
		prom_halt();
	}

	n = n / sizeof(struct linux_prom_translation);

	ents = n;

	sort(prom_trans, ents, sizeof(struct linux_prom_translation),
	     cmp_ptrans, NULL);

	/* Now kick out all the non-OBP entries.  */
	for (i = 0; i < ents; i++) {
		if (in_obp_range(prom_trans[i].virt))
			break;
	}
	first = i;
	for (; i < ents; i++) {
		if (!in_obp_range(prom_trans[i].virt))
			break;
	}
	last = i;

	for (i = 0; i < (last - first); i++) {
		struct linux_prom_translation *src = &prom_trans[i + first];
		struct linux_prom_translation *dest = &prom_trans[i];

		*dest = *src;
	}
	for (; i < ents; i++) {
		struct linux_prom_translation *dest = &prom_trans[i];
		dest->virt = dest->size = dest->data = 0x0UL;
	}

	prom_trans_ents = last - first;

	if (tlb_type == spitfire) {
		/* Clear diag TTE bits. */
		for (i = 0; i < prom_trans_ents; i++)
			prom_trans[i].data &= ~0x0003fe0000000000UL;
	}

	/* Force execute bit on.  */
	for (i = 0; i < prom_trans_ents; i++)
		prom_trans[i].data |= (tlb_type == hypervisor ?
				       _PAGE_EXEC_4V : _PAGE_EXEC_4U);
}

static void __init hypervisor_tlb_lock(unsigned long vaddr,
				       unsigned long pte,
				       unsigned long mmu)
{
	unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);

	if (ret != 0) {
		prom_printf("hypervisor_tlb_lock[%lx:%x:%lx:%lx]: "
			    "errors with %lx\n", vaddr, 0, pte, mmu, ret);
		prom_halt();
	}
}

static unsigned long kern_large_tte(unsigned long paddr);

static void __init remap_kernel(void)
{
	unsigned long phys_page, tte_vaddr, tte_data;
	int i, tlb_ent = sparc64_highest_locked_tlbent();

	tte_vaddr = (unsigned long) KERNBASE;
	phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
	tte_data = kern_large_tte(phys_page);

	kern_locked_tte_data = tte_data;

	/* Now lock us into the TLBs via Hypervisor or OBP. */
	if (tlb_type == hypervisor) {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
	} else {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
			prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
		sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
	}
	if (tlb_type == cheetah_plus) {
		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
					    CTX_CHEETAH_PLUS_NUC);
		sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
		sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
	}
}


static void __init inherit_prom_mappings(void)
{
	/* Now fixup OBP's idea about where we really are mapped. */
	printk("Remapping the kernel... ");
	remap_kernel();
	printk("done.\n");
}

void prom_world(int enter)
{
	if (!enter)
		set_fs(get_fs());

	__asm__ __volatile__("flushw");
}

void __flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long va;

	if (tlb_type == spitfire) {
		int n = 0;

		for (va = start; va < end; va += 32) {
			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
			if (++n >= 512)
				break;
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		start = __pa(start);
		end = __pa(end);
		for (va = start; va < end; va += 32)
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (va),
					       "i" (ASI_DCACHE_INVALIDATE));
	}
}
EXPORT_SYMBOL(__flush_dcache_range);

/* get_new_mmu_context() uses "cache + 1".  */
DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION;
#define MAX_CTX_NR	(1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0};

717{
718 unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK;
719 unsigned long new_ver, new_ctx, old_ctx;
720 struct mm_struct *mm;
721 int cpu;
722
723 bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS);
724
725 /* Reserve kernel context */
726 set_bit(0, mmu_context_bmap);
727
728 new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
729 if (unlikely(new_ver == 0))
730 new_ver = CTX_FIRST_VERSION;
731 tlb_context_cache = new_ver;
732
733 /*
734 * Make sure that any new mm that are added into per_cpu_secondary_mm,
735 * are going to go through get_new_mmu_context() path.
736 */
737 mb();
738
739 /*
740 * Updated versions to current on those CPUs that had valid secondary
741 * contexts
742 */
743 for_each_online_cpu(cpu) {
744 /*
745 * If a new mm is stored after we took this mm from the array,
746 * it will go into get_new_mmu_context() path, because we
747 * already bumped the version in tlb_context_cache.
748 */
749 mm = per_cpu(per_cpu_secondary_mm, cpu);
750
751 if (unlikely(!mm || mm == &init_mm))
752 continue;
753
754 old_ctx = mm->context.sparc64_ctx_val;
755 if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) {
756 new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver;
757 set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap);
758 mm->context.sparc64_ctx_val = new_ctx;
759 }
760 }
761}
762
Linus Torvalds1da177e2005-04-16 15:20:36 -0700763/* Caller does TLB context flushing on local CPU if necessary.
764 * The caller also ensures that CTX_VALID(mm->context) is false.
765 *
766 * We must be careful about boundary cases so that we never
767 * let the user have CTX 0 (nucleus) or we ever use a CTX
768 * version of zero (and thus NO_CONTEXT would not be caught
769 * by version mis-match tests in mmu_context.h).
David S. Millera0663a72006-02-23 14:19:28 -0800770 *
771 * Always invoked with interrupts disabled.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700772 */
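
/* For illustration (bit layout assumed, not from the original file):
 * a context value splits into a version field in the high bits and a
 * context number in the low CTX_NR_BITS:
 *
 *	sparc64_ctx_val = (version & CTX_VERSION_MASK) | ctx_nr
 *
 * After mmu_context_wrap() bumps the version, any mm whose stored
 * version no longer matches fails CTX_VALID() and is handed a fresh
 * ctx_nr here.
 */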
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;
	unsigned long orig_pgsz_bits;

	spin_lock(&ctx_alloc_lock);
retry:
	/* wrap might have happened, test again if our context became valid */
	if (unlikely(CTX_VALID(mm->context)))
		goto out;
	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
	if (new_ctx >= (1 << CTX_NR_BITS)) {
		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
		if (new_ctx >= ctx) {
			mmu_context_wrap();
			goto retry;
		}
	}
	if (mm->context.sparc64_ctx_val)
		cpumask_clear(mm_cpumask(mm));
	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
	tlb_context_cache = new_ctx;
	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
out:
	spin_unlock(&ctx_alloc_lock);
}

static int numa_enabled = 1;
static int numa_debug;

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	return 0;
}
early_param("numa", early_numa);

#define numadbg(f, a...) \
do {	if (numa_debug) \
		printk(KERN_INFO f, ## a); \
} while (0)

static void __init find_ramdisk(unsigned long phys_base)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
		unsigned long ramdisk_image;

		/* Older versions of the bootloader only supported a
		 * 32-bit physical address for the ramdisk image
		 * location, stored at sparc_ramdisk_image.  Newer
		 * SILO versions set sparc_ramdisk_image to zero and
		 * provide a full 64-bit physical address at
		 * sparc_ramdisk_image64.
		 */
		ramdisk_image = sparc_ramdisk_image;
		if (!ramdisk_image)
			ramdisk_image = sparc_ramdisk_image64;

		/* Another bootloader quirk.  The bootloader normalizes
		 * the physical address to KERNBASE, so we have to
		 * factor that back out and add in the lowest valid
		 * physical page address to get the true physical address.
		 */
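		/* E.g. (values assumed for illustration): with
		 * KERNBASE 0x400000, phys_base 0x20000000 and a
		 * reported image address of 0x4f000000, the true
		 * address is 0x4f000000 - 0x400000 + 0x20000000
		 * = 0x6ec00000.
		 */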
		ramdisk_image -= KERNBASE;
		ramdisk_image += phys_base;

		numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
			ramdisk_image, sparc_ramdisk_size);

		initrd_start = ramdisk_image;
		initrd_end = ramdisk_image + sparc_ramdisk_size;

		memblock_reserve(initrd_start, sparc_ramdisk_size);

		initrd_start += PAGE_OFFSET;
		initrd_end += PAGE_OFFSET;
	}
#endif
}

struct node_mem_mask {
	unsigned long mask;
	unsigned long match;
};
static struct node_mem_mask node_masks[MAX_NUMNODES];
static int num_node_masks;

#ifdef CONFIG_NEED_MULTIPLE_NODES

struct mdesc_mlgroup {
	u64	node;
	u64	latency;
	u64	match;
	u64	mask;
};

static struct mdesc_mlgroup *mlgroups;
static int num_mlgroups;

int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];

struct mdesc_mblock {
	u64	base;
	u64	size;
	u64	offset; /* RA-to-PA */
};
static struct mdesc_mblock *mblocks;
static int num_mblocks;

static struct mdesc_mblock * __init addr_to_mblock(unsigned long addr)
{
	struct mdesc_mblock *m = NULL;
	int i;

	for (i = 0; i < num_mblocks; i++) {
		m = &mblocks[i];

		if (addr >= m->base &&
		    addr < (m->base + m->size)) {
			break;
		}
	}

	return m;
}

static u64 __init memblock_nid_range_sun4u(u64 start, u64 end, int *nid)
{
	int prev_nid, new_nid;

	prev_nid = -1;
	for ( ; start < end; start += PAGE_SIZE) {
		for (new_nid = 0; new_nid < num_node_masks; new_nid++) {
			struct node_mem_mask *p = &node_masks[new_nid];

			if ((start & p->mask) == p->match) {
				if (prev_nid == -1)
					prev_nid = new_nid;
				break;
			}
		}

		if (new_nid == num_node_masks) {
			prev_nid = 0;
			WARN_ONCE(1, "addr[%Lx] doesn't match a NUMA node rule. Some memory will be owned by node 0.",
				  start);
			break;
		}

		if (prev_nid != new_nid)
			break;
	}
	*nid = prev_nid;

	return start > end ? end : start;
}

static u64 __init memblock_nid_range(u64 start, u64 end, int *nid)
{
	u64 ret_end, pa_start, m_mask, m_match, m_end;
	struct mdesc_mblock *mblock;
	int _nid, i;

	if (tlb_type != hypervisor)
		return memblock_nid_range_sun4u(start, end, nid);

	mblock = addr_to_mblock(start);
	if (!mblock) {
		WARN_ONCE(1, "memblock_nid_range: Can't find mblock addr[%Lx]",
			  start);

		_nid = 0;
		ret_end = end;
		goto done;
	}

	pa_start = start + mblock->offset;
	m_match = 0;
	m_mask = 0;

	for (_nid = 0; _nid < num_node_masks; _nid++) {
		struct node_mem_mask *const m = &node_masks[_nid];

		if ((pa_start & m->mask) == m->match) {
			m_match = m->match;
			m_mask = m->mask;
			break;
		}
	}

	if (num_node_masks == _nid) {
		/* We could not find a NUMA group, so default to 0, but
		 * let's search for a latency group so we can calculate
		 * the correct end address to return.
		 */
		_nid = 0;

		for (i = 0; i < num_mlgroups; i++) {
			struct mdesc_mlgroup *const m = &mlgroups[i];

			if ((pa_start & m->mask) == m->match) {
				m_match = m->match;
				m_mask = m->mask;
				break;
			}
		}

		if (i == num_mlgroups) {
			WARN_ONCE(1, "memblock_nid_range: Can't find latency group addr[%Lx]",
				  start);

			ret_end = end;
			goto done;
		}
	}

	/*
	 * Each latency group has match and mask, and each memory block has
	 * an offset.  An address belongs to a latency group if its address
	 * matches the following formula: ((addr + offset) & mask) == match
	 * It is, however, slow to check every single page if it matches a
	 * particular latency group.  As an optimization we calculate the
	 * end value using bit arithmetic.
	 */
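	/* Worked example (numbers assumed for illustration): with
	 * m_mask 0xf00000000, m_match 0x300000000, offset 0 and
	 * pa_start 0x1300000000: __ffs(m_mask) == 32, fls64(m_mask)
	 * == 36, so m_end = 0x300000000 + (1ul << 32) + 0x1000000000
	 * = 0x1400000000, the first address past this latency group.
	 */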
	m_end = m_match + (1ul << __ffs(m_mask)) - mblock->offset;
	m_end += pa_start & ~((1ul << fls64(m_mask)) - 1);
	ret_end = m_end > end ? end : m_end;

done:
	*nid = _nid;
	return ret_end;
}
#endif

/* This must be invoked after performing all of the necessary
 * memblock_set_node() calls for 'nid'.  We need to be able to get
 * correct data from get_pfn_range_for_nid().
 */
static void __init allocate_node_data(int nid)
{
	struct pglist_data *p;
	unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
	unsigned long paddr;

	paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid);
	if (!paddr) {
		prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
		prom_halt();
	}
	NODE_DATA(nid) = __va(paddr);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->node_id = nid;
#endif

	p = NODE_DATA(nid);

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
	p->node_start_pfn = start_pfn;
	p->node_spanned_pages = end_pfn - start_pfn;
}

static void init_node_masks_nonnuma(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int i;
#endif

	numadbg("Initializing tables for non-numa.\n");

	node_masks[0].mask = 0;
	node_masks[0].match = 0;
	num_node_masks = 1;

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for (i = 0; i < NR_CPUS; i++)
		numa_cpu_lookup_table[i] = 0;

	cpumask_setall(&numa_cpumask_lookup_table[0]);
#endif
}

#ifdef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);

static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
				   u32 cfg_handle)
{
	u64 arc;

	mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const u64 *val;

		val = mdesc_get_property(md, target,
					 "cfg-handle", NULL);
		if (val && *val == cfg_handle)
			return 0;
	}
	return -ENODEV;
}

static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
				    u32 cfg_handle)
{
	u64 arc, candidate, best_latency = ~(u64)0;

	candidate = MDESC_NODE_NULL;
	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *val;

		if (strcmp(name, "pio-latency-group"))
			continue;

		val = mdesc_get_property(md, target, "latency", NULL);
		if (!val)
			continue;

		if (*val < best_latency) {
			candidate = target;
			best_latency = *val;
		}
	}

	if (candidate == MDESC_NODE_NULL)
		return -ENODEV;

	return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
}

int of_node_to_nid(struct device_node *dp)
{
	const struct linux_prom64_registers *regs;
	struct mdesc_handle *md;
	u32 cfg_handle;
	int count, nid;
	u64 grp;

	/* This is the right thing to do on currently supported
	 * SUN4U NUMA platforms as well, as the PCI controller does
	 * not sit behind any particular memory controller.
	 */
	if (!mlgroups)
		return -1;

	regs = of_get_property(dp, "reg", NULL);
	if (!regs)
		return -1;

	cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	md = mdesc_grab();

	count = 0;
	nid = -1;
	mdesc_for_each_node_by_name(md, grp, "group") {
		if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
			nid = count;
			break;
		}
		count++;
	}

	mdesc_release(md);

	return nid;
}

static void __init add_node_ranges(void)
{
	struct memblock_region *reg;
	unsigned long prev_max;

memblock_resized:
	prev_max = memblock.memory.max;

	for_each_memblock(memory, reg) {
		unsigned long size = reg->size;
		unsigned long start, end;

		start = reg->base;
		end = start + size;
		while (start < end) {
			unsigned long this_end;
			int nid;

			this_end = memblock_nid_range(start, end, &nid);

			numadbg("Setting memblock NUMA node nid[%d] "
				"start[%lx] end[%lx]\n",
				nid, start, this_end);

			memblock_set_node(start, this_end - start,
					  &memblock.memory, nid);
			if (memblock.memory.max != prev_max)
				goto memblock_resized;
			start = this_end;
		}
	}
}

static int __init grab_mlgroups(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "memory-latency-group")
		count++;
	if (!count)
		return -ENOENT;

	paddr = memblock_alloc(count * sizeof(struct mdesc_mlgroup),
			       SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mlgroups = __va(paddr);
	num_mlgroups = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
		struct mdesc_mlgroup *m = &mlgroups[count++];
		const u64 *val;

		m->node = node;

		val = mdesc_get_property(md, node, "latency", NULL);
		m->latency = *val;
		val = mdesc_get_property(md, node, "address-match", NULL);
		m->match = *val;
		val = mdesc_get_property(md, node, "address-mask", NULL);
		m->mask = *val;

		numadbg("MLGROUP[%d]: node[%llx] latency[%llx] "
			"match[%llx] mask[%llx]\n",
			count - 1, m->node, m->latency, m->match, m->mask);
	}

	return 0;
}

static int __init grab_mblocks(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "mblock")
		count++;
	if (!count)
		return -ENOENT;

	paddr = memblock_alloc(count * sizeof(struct mdesc_mblock),
			       SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mblocks = __va(paddr);
	num_mblocks = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "mblock") {
		struct mdesc_mblock *m = &mblocks[count++];
		const u64 *val;

		val = mdesc_get_property(md, node, "base", NULL);
		m->base = *val;
		val = mdesc_get_property(md, node, "size", NULL);
		m->size = *val;
		val = mdesc_get_property(md, node,
					 "address-congruence-offset", NULL);

		/* The address-congruence-offset property is optional.
		 * Explicitly zero it to identify this.
		 */
1269 if (val)
1270 m->offset = *val;
1271 else
1272 m->offset = 0UL;
David S. Miller919ee672008-04-23 05:40:25 -07001273
Sam Ravnborg90181132009-01-06 13:19:28 -08001274 numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
David S. Miller919ee672008-04-23 05:40:25 -07001275 count - 1, m->base, m->size, m->offset);
1276 }
1277
1278 return 0;
1279}
1280
1281static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
1282 u64 grp, cpumask_t *mask)
1283{
1284 u64 arc;
1285
KOSAKI Motohirofb1fece2011-05-16 13:38:07 -07001286 cpumask_clear(mask);
David S. Miller919ee672008-04-23 05:40:25 -07001287
1288 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
1289 u64 target = mdesc_arc_target(md, arc);
1290 const char *name = mdesc_node_name(md, target);
1291 const u64 *id;
1292
1293 if (strcmp(name, "cpu"))
1294 continue;
1295 id = mdesc_get_property(md, target, "id", NULL);
Rusty Russelle305cb8f2009-03-16 14:40:23 +10301296 if (*id < nr_cpu_ids)
KOSAKI Motohirofb1fece2011-05-16 13:38:07 -07001297 cpumask_set_cpu(*id, mask);
David S. Miller919ee672008-04-23 05:40:25 -07001298 }
1299}
1300
1301static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
1302{
1303 int i;
1304
1305 for (i = 0; i < num_mlgroups; i++) {
1306 struct mdesc_mlgroup *m = &mlgroups[i];
1307 if (m->node == node)
1308 return m;
1309 }
1310 return NULL;
1311}
1312
Nitin Gupta52708d62015-11-02 16:30:24 -05001313int __node_distance(int from, int to)
1314{
1315 if ((from >= MAX_NUMNODES) || (to >= MAX_NUMNODES)) {
1316 pr_warn("Returning default NUMA distance value for %d->%d\n",
1317 from, to);
1318 return (from == to) ? LOCAL_DISTANCE : REMOTE_DISTANCE;
1319 }
1320 return numa_latency[from][to];
1321}
1322
Paul Gortmakerbdf2f592016-08-06 00:31:48 -04001323static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
Nitin Gupta52708d62015-11-02 16:30:24 -05001324{
1325 int i;
1326
1327 for (i = 0; i < MAX_NUMNODES; i++) {
1328 struct node_mem_mask *n = &node_masks[i];
1329
Pavel Tatashin1537b262017-02-16 15:05:58 -05001330 if ((grp->mask == n->mask) && (grp->match == n->match))
Nitin Gupta52708d62015-11-02 16:30:24 -05001331 break;
1332 }
1333 return i;
1334}
1335
Paul Gortmakerbdf2f592016-08-06 00:31:48 -04001336static void __init find_numa_latencies_for_group(struct mdesc_handle *md,
1337 u64 grp, int index)
Nitin Gupta52708d62015-11-02 16:30:24 -05001338{
1339 u64 arc;
1340
1341 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
1342 int tnode;
1343 u64 target = mdesc_arc_target(md, arc);
1344 struct mdesc_mlgroup *m = find_mlgroup(target);
1345
1346 if (!m)
1347 continue;
1348 tnode = find_best_numa_node_for_mlgroup(m);
1349 if (tnode == MAX_NUMNODES)
1350 continue;
1351 numa_latency[index][tnode] = m->latency;
1352 }
1353}
1354
David S. Miller919ee672008-04-23 05:40:25 -07001355static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
1356 int index)
1357{
1358 struct mdesc_mlgroup *candidate = NULL;
1359 u64 arc, best_latency = ~(u64)0;
1360 struct node_mem_mask *n;
1361
1362 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
1363 u64 target = mdesc_arc_target(md, arc);
1364 struct mdesc_mlgroup *m = find_mlgroup(target);
1365 if (!m)
1366 continue;
1367 if (m->latency < best_latency) {
1368 candidate = m;
1369 best_latency = m->latency;
1370 }
1371 }
1372 if (!candidate)
1373 return -ENOENT;
1374
1375 if (num_node_masks != index) {
1376 printk(KERN_ERR "Inconsistent NUMA state, "
1377 "index[%d] != num_node_masks[%d]\n",
1378 index, num_node_masks);
1379 return -EINVAL;
1380 }
1381
1382 n = &node_masks[num_node_masks++];
1383
1384 n->mask = candidate->mask;
Pavel Tatashin1537b262017-02-16 15:05:58 -05001385 n->match = candidate->match;
David S. Miller919ee672008-04-23 05:40:25 -07001386
Pavel Tatashin1537b262017-02-16 15:05:58 -05001387 numadbg("NUMA NODE[%d]: mask[%lx] match[%lx] (latency[%llx])\n",
1388 index, n->mask, n->match, candidate->latency);
David S. Miller919ee672008-04-23 05:40:25 -07001389
1390 return 0;
1391}
1392
1393static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
1394 int index)
1395{
1396 cpumask_t mask;
1397 int cpu;
1398
1399 numa_parse_mdesc_group_cpus(md, grp, &mask);
1400
KOSAKI Motohirofb1fece2011-05-16 13:38:07 -07001401 for_each_cpu(cpu, &mask)
David S. Miller919ee672008-04-23 05:40:25 -07001402 numa_cpu_lookup_table[cpu] = index;
KOSAKI Motohirofb1fece2011-05-16 13:38:07 -07001403 cpumask_copy(&numa_cpumask_lookup_table[index], &mask);
David S. Miller919ee672008-04-23 05:40:25 -07001404
1405 if (numa_debug) {
1406 printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
KOSAKI Motohirofb1fece2011-05-16 13:38:07 -07001407 for_each_cpu(cpu, &mask)
David S. Miller919ee672008-04-23 05:40:25 -07001408 printk("%d ", cpu);
1409 printk("]\n");
1410 }
1411
1412 return numa_attach_mlgroup(md, grp, index);
1413}
1414
1415static int __init numa_parse_mdesc(void)
1416{
1417 struct mdesc_handle *md = mdesc_grab();
Nitin Gupta52708d62015-11-02 16:30:24 -05001418 int i, j, err, count;
David S. Miller919ee672008-04-23 05:40:25 -07001419 u64 node;
1420
1421 node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
1422 if (node == MDESC_NODE_NULL) {
1423 mdesc_release(md);
1424 return -ENOENT;
1425 }
1426
1427 err = grab_mblocks(md);
1428 if (err < 0)
1429 goto out;
1430
1431 err = grab_mlgroups(md);
1432 if (err < 0)
1433 goto out;
1434
1435 count = 0;
1436 mdesc_for_each_node_by_name(md, node, "group") {
1437 err = numa_parse_mdesc_group(md, node, count);
1438 if (err < 0)
1439 break;
1440 count++;
1441 }
1442
Nitin Gupta52708d62015-11-02 16:30:24 -05001443 count = 0;
1444 mdesc_for_each_node_by_name(md, node, "group") {
1445 find_numa_latencies_for_group(md, node, count);
1446 count++;
1447 }
1448
1449 /* Normalize numa latency matrix according to ACPI SLIT spec. */
1450 for (i = 0; i < MAX_NUMNODES; i++) {
1451 u64 self_latency = numa_latency[i][i];
1452
1453 for (j = 0; j < MAX_NUMNODES; j++) {
1454 numa_latency[i][j] =
1455 (numa_latency[i][j] * LOCAL_DISTANCE) /
1456 self_latency;
1457 }
1458 }
1459
David S. Miller919ee672008-04-23 05:40:25 -07001460 add_node_ranges();
1461
1462 for (i = 0; i < num_node_masks; i++) {
1463 allocate_node_data(i);
1464 node_set_online(i);
1465 }
1466
1467 err = 0;
1468out:
1469 mdesc_release(md);
1470 return err;
1471}
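/* Annotation: illustrative sketch, standalone userspace C, not part of
 * this file. The normalization loop above rescales the raw mdesc
 * latencies so that each node's latency to itself becomes
 * LOCAL_DISTANCE (10), the convention of the ACPI SLIT table. A
 * demonstration with made-up latencies:
 */
#include <stdio.h>

#define LOCAL_DISTANCE	10	/* same value the kernel uses */

int main(void)
{
	/* Hypothetical raw latencies reported by the machine description. */
	unsigned long lat[2][2] = { { 400, 1200 }, { 1200, 400 } };
	int i, j;

	for (i = 0; i < 2; i++) {
		unsigned long self_latency = lat[i][i];

		for (j = 0; j < 2; j++)
			lat[i][j] = (lat[i][j] * LOCAL_DISTANCE) /
				    self_latency;
	}
	/* Prints "10 30" and "30 10": local cost is 10, remote scales to 30. */
	for (i = 0; i < 2; i++)
		printf("%lu %lu\n", lat[i][0], lat[i][1]);
	return 0;
}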
1472
David S. Miller072bd412008-08-18 20:36:17 -07001473static int __init numa_parse_jbus(void)
1474{
1475 unsigned long cpu, index;
1476
1477 /* NUMA node id is encoded in bits 36 and higher, and there is
1478 * a 1-to-1 mapping from CPU ID to NUMA node ID.
1479 */
1480 index = 0;
1481 for_each_present_cpu(cpu) {
1482 numa_cpu_lookup_table[cpu] = index;
KOSAKI Motohirofb1fece2011-05-16 13:38:07 -07001483 cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
David S. Miller072bd412008-08-18 20:36:17 -07001484 node_masks[index].mask = ~((1UL << 36UL) - 1UL);
Pavel Tatashin1537b262017-02-16 15:05:58 -05001485 node_masks[index].match = cpu << 36UL;
David S. Miller072bd412008-08-18 20:36:17 -07001486
1487 index++;
1488 }
1489 num_node_masks = index;
1490
1491 add_node_ranges();
1492
1493 for (index = 0; index < num_node_masks; index++) {
1494 allocate_node_data(index);
1495 node_set_online(index);
1496 }
1497
1498 return 0;
1499}
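/* Annotation: illustrative sketch, standalone userspace C, not part of
 * this file. With the JBUS layout set up above, a physical address
 * belongs to node i exactly when (paddr & mask) == match, i.e. when its
 * bits 36 and up equal the CPU id:
 */
#include <stdio.h>

int main(void)
{
	unsigned long mask = ~((1UL << 36UL) - 1UL);
	unsigned long match1 = 1UL << 36UL;		/* cpu/node 1 */
	unsigned long paddr = (1UL << 36UL) | 0x1000UL;	/* page on node 1 */

	printf("on node 1: %d\n", (paddr & mask) == match1);	/* 1 */
	printf("on node 0: %d\n", (paddr & mask) == 0UL);	/* 0 */
	return 0;
}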
1500
David S. Miller919ee672008-04-23 05:40:25 -07001501static int __init numa_parse_sun4u(void)
1502{
David S. Miller072bd412008-08-18 20:36:17 -07001503 if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1504 unsigned long ver;
1505
1506 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
1507 if ((ver >> 32UL) == __JALAPENO_ID ||
1508 (ver >> 32UL) == __SERRANO_ID)
1509 return numa_parse_jbus();
1510 }
David S. Miller919ee672008-04-23 05:40:25 -07001511 return -1;
1512}
1513
1514static int __init bootmem_init_numa(void)
1515{
Nitin Gupta36beca62016-01-05 22:35:35 -08001516 int i, j;
David S. Miller919ee672008-04-23 05:40:25 -07001517 int err = -1;
1518
1519 numadbg("bootmem_init_numa()\n");
1520
Nitin Gupta36beca62016-01-05 22:35:35 -08001521 /* Some sane defaults for numa latency values */
1522 for (i = 0; i < MAX_NUMNODES; i++) {
1523 for (j = 0; j < MAX_NUMNODES; j++)
1524 numa_latency[i][j] = (i == j) ?
1525 LOCAL_DISTANCE : REMOTE_DISTANCE;
1526 }
1527
David S. Miller919ee672008-04-23 05:40:25 -07001528 if (numa_enabled) {
1529 if (tlb_type == hypervisor)
1530 err = numa_parse_mdesc();
1531 else
1532 err = numa_parse_sun4u();
1533 }
1534 return err;
1535}
1536
1537#else
1538
1539static int bootmem_init_numa(void)
1540{
1541 return -1;
1542}
1543
1544#endif
1545
1546static void __init bootmem_init_nonnuma(void)
1547{
Yinghai Lu95f72d12010-07-12 14:36:09 +10001548 unsigned long top_of_ram = memblock_end_of_DRAM();
1549 unsigned long total_ram = memblock_phys_mem_size();
David S. Miller919ee672008-04-23 05:40:25 -07001550
1551 numadbg("bootmem_init_nonnuma()\n");
1552
1553 printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
1554 top_of_ram, total_ram);
1555 printk(KERN_INFO "Memory hole size: %ldMB\n",
1556 (top_of_ram - total_ram) >> 20);
1557
1558 init_node_masks_nonnuma();
Tang Chene7e8de52014-01-21 15:49:26 -08001559 memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
David S. Miller919ee672008-04-23 05:40:25 -07001560 allocate_node_data(0);
David S. Miller919ee672008-04-23 05:40:25 -07001561 node_set_online(0);
1562}
1563
David S. Miller919ee672008-04-23 05:40:25 -07001564static unsigned long __init bootmem_init(unsigned long phys_base)
1565{
1566 unsigned long end_pfn;
David S. Miller919ee672008-04-23 05:40:25 -07001567
Yinghai Lu95f72d12010-07-12 14:36:09 +10001568 end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569 max_pfn = max_low_pfn = end_pfn;
David S. Millerd1112012006-03-08 02:16:07 -08001570 min_low_pfn = (phys_base >> PAGE_SHIFT);
1571
David S. Miller919ee672008-04-23 05:40:25 -07001572 if (bootmem_init_numa() < 0)
1573 bootmem_init_nonnuma();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574
David S. Miller625d6932012-04-25 13:13:43 -07001575 /* Dump memblock with node info. */
1576 memblock_dump_all();
1577
David S. Miller919ee672008-04-23 05:40:25 -07001578 /* XXX cpu notifier XXX */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001579
David S. Miller625d6932012-04-25 13:13:43 -07001580 sparse_memory_present_with_active_regions(MAX_NUMNODES);
David S. Millerd1112012006-03-08 02:16:07 -08001581 sparse_init();
1582
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583 return end_pfn;
1584}
1585
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001586static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
1587static int pall_ents __initdata;
1588
David S. Miller0dd5b7b2014-09-24 20:56:11 -07001589static unsigned long max_phys_bits = 40;
1590
1591bool kern_addr_valid(unsigned long addr)
1592{
David S. Miller0dd5b7b2014-09-24 20:56:11 -07001593 pgd_t *pgd;
1594 pud_t *pud;
1595 pmd_t *pmd;
1596 pte_t *pte;
1597
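	/* Annotation: PAGE_OFFSET sits at the very top of the address
	 * space (see setup_page_offset() below), so a negative address
	 * lies in the kernel linear mapping; validate its backing pfn.
	 */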
David S. Millerbb4e6e82014-09-27 11:05:21 -07001598 if ((long)addr < 0L) {
1599 unsigned long pa = __pa(addr);
1600
bob piccoadfae8a2017-03-10 14:31:19 -05001601 if ((pa >> max_phys_bits) != 0UL)
David S. Millerbb4e6e82014-09-27 11:05:21 -07001602 return false;
1603
1604 return pfn_valid(pa >> PAGE_SHIFT);
1605 }
David S. Miller0dd5b7b2014-09-24 20:56:11 -07001606
1607 if (addr >= (unsigned long) KERNBASE &&
1608 addr < (unsigned long)&_end)
1609 return true;
1610
David S. Miller0dd5b7b2014-09-24 20:56:11 -07001611 pgd = pgd_offset_k(addr);
1612 if (pgd_none(*pgd))
1613		return false;
1614
1615 pud = pud_offset(pgd, addr);
1616 if (pud_none(*pud))
1617		return false;
1618
1619 if (pud_large(*pud))
1620 return pfn_valid(pud_pfn(*pud));
1621
1622 pmd = pmd_offset(pud, addr);
1623 if (pmd_none(*pmd))
1624		return false;
1625
1626 if (pmd_large(*pmd))
1627 return pfn_valid(pmd_pfn(*pmd));
1628
1629 pte = pte_offset_kernel(pmd, addr);
1630 if (pte_none(*pte))
1631		return false;
1632
1633 return pfn_valid(pte_pfn(*pte));
1634}
1635EXPORT_SYMBOL(kern_addr_valid);
1636
1637static unsigned long __ref kernel_map_hugepud(unsigned long vstart,
1638 unsigned long vend,
1639 pud_t *pud)
1640{
1641 const unsigned long mask16gb = (1UL << 34) - 1UL;
1642 u64 pte_val = vstart;
1643
1644 /* Each PUD is 8GB */
1645 if ((vstart & mask16gb) ||
1646 (vend - vstart <= mask16gb)) {
1647 pte_val ^= kern_linear_pte_xor[2];
1648 pud_val(*pud) = pte_val | _PAGE_PUD_HUGE;
1649
1650 return vstart + PUD_SIZE;
1651 }
1652
1653 pte_val ^= kern_linear_pte_xor[3];
1654 pte_val |= _PAGE_PUD_HUGE;
1655
1656 vend = vstart + mask16gb + 1UL;
1657 while (vstart < vend) {
1658 pud_val(*pud) = pte_val;
1659
1660 pte_val += PUD_SIZE;
1661 vstart += PUD_SIZE;
1662 pud++;
1663 }
1664 return vstart;
1665}
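/* Annotation: illustrative sketch, standalone userspace C, not part of
 * this file. The test above only chooses the 16GB TTE when vstart is
 * 16GB-aligned and at least 16GB remain; otherwise it falls back to the
 * smaller linear mapping size. A standalone check of that condition:
 */
#include <stdio.h>

int main(void)
{
	const unsigned long mask16gb = (1UL << 34) - 1UL;
	unsigned long vstart = 4UL << 34;		/* 16GB-aligned */
	unsigned long vend = vstart + (2UL << 34);	/* 32GB span */
	int small = (vstart & mask16gb) || (vend - vstart <= mask16gb);

	printf("use 16GB pages: %d\n", !small);		/* 1 */
	return 0;
}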
1666
1667static bool kernel_can_map_hugepud(unsigned long vstart, unsigned long vend,
1668 bool guard)
1669{
1670 if (guard && !(vstart & ~PUD_MASK) && (vend - vstart) >= PUD_SIZE)
1671 return true;
1672
1673 return false;
1674}
1675
1676static unsigned long __ref kernel_map_hugepmd(unsigned long vstart,
1677 unsigned long vend,
1678 pmd_t *pmd)
1679{
1680 const unsigned long mask256mb = (1UL << 28) - 1UL;
1681 const unsigned long mask2gb = (1UL << 31) - 1UL;
1682 u64 pte_val = vstart;
1683
1684 /* Each PMD is 8MB */
1685 if ((vstart & mask256mb) ||
1686 (vend - vstart <= mask256mb)) {
1687 pte_val ^= kern_linear_pte_xor[0];
1688 pmd_val(*pmd) = pte_val | _PAGE_PMD_HUGE;
1689
1690 return vstart + PMD_SIZE;
1691 }
1692
1693 if ((vstart & mask2gb) ||
1694 (vend - vstart <= mask2gb)) {
1695 pte_val ^= kern_linear_pte_xor[1];
1696 pte_val |= _PAGE_PMD_HUGE;
1697 vend = vstart + mask256mb + 1UL;
1698 } else {
1699 pte_val ^= kern_linear_pte_xor[2];
1700 pte_val |= _PAGE_PMD_HUGE;
1701 vend = vstart + mask2gb + 1UL;
1702 }
1703
1704 while (vstart < vend) {
1705 pmd_val(*pmd) = pte_val;
1706
1707 pte_val += PMD_SIZE;
1708 vstart += PMD_SIZE;
1709 pmd++;
1710 }
1711
1712 return vstart;
1713}
1714
1715static bool kernel_can_map_hugepmd(unsigned long vstart, unsigned long vend,
1716 bool guard)
1717{
1718 if (guard && !(vstart & ~PMD_MASK) && (vend - vstart) >= PMD_SIZE)
1719 return true;
1720
1721 return false;
1722}
1723
Sam Ravnborg896aef42008-02-24 19:49:52 -08001724static unsigned long __ref kernel_map_range(unsigned long pstart,
David S. Miller0dd5b7b2014-09-24 20:56:11 -07001725 unsigned long pend, pgprot_t prot,
1726 bool use_huge)
David S. Miller56425302005-09-25 16:46:57 -07001727{
1728 unsigned long vstart = PAGE_OFFSET + pstart;
1729 unsigned long vend = PAGE_OFFSET + pend;
1730 unsigned long alloc_bytes = 0UL;
1731
1732 if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
David S. Miller13edad72005-09-29 17:58:26 -07001733 prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
David S. Miller56425302005-09-25 16:46:57 -07001734 vstart, vend);
1735 prom_halt();
1736 }
1737
1738 while (vstart < vend) {
1739 unsigned long this_end, paddr = __pa(vstart);
1740 pgd_t *pgd = pgd_offset_k(vstart);
1741 pud_t *pud;
1742 pmd_t *pmd;
1743 pte_t *pte;
1744
David S. Millerac55c762014-09-26 21:19:46 -07001745 if (pgd_none(*pgd)) {
1746 pud_t *new;
1747
1748 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1749 alloc_bytes += PAGE_SIZE;
1750 pgd_populate(&init_mm, pgd, new);
1751 }
David S. Miller56425302005-09-25 16:46:57 -07001752 pud = pud_offset(pgd, vstart);
1753 if (pud_none(*pud)) {
1754 pmd_t *new;
1755
David S. Miller0dd5b7b2014-09-24 20:56:11 -07001756 if (kernel_can_map_hugepud(vstart, vend, use_huge)) {
1757 vstart = kernel_map_hugepud(vstart, vend, pud);
1758 continue;
1759 }
David S. Miller56425302005-09-25 16:46:57 -07001760 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1761 alloc_bytes += PAGE_SIZE;
1762 pud_populate(&init_mm, pud, new);
1763 }
1764
1765 pmd = pmd_offset(pud, vstart);
David S. Miller0dd5b7b2014-09-24 20:56:11 -07001766 if (pmd_none(*pmd)) {
David S. Miller56425302005-09-25 16:46:57 -07001767 pte_t *new;
1768
David S. Miller0dd5b7b2014-09-24 20:56:11 -07001769 if (kernel_can_map_hugepmd(vstart, vend, use_huge)) {
1770 vstart = kernel_map_hugepmd(vstart, vend, pmd);
1771 continue;
1772 }
David S. Miller56425302005-09-25 16:46:57 -07001773 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1774 alloc_bytes += PAGE_SIZE;
1775 pmd_populate_kernel(&init_mm, pmd, new);
1776 }
1777
1778 pte = pte_offset_kernel(pmd, vstart);
1779 this_end = (vstart + PMD_SIZE) & PMD_MASK;
1780 if (this_end > vend)
1781 this_end = vend;
1782
1783 while (vstart < this_end) {
1784 pte_val(*pte) = (paddr | pgprot_val(prot));
1785
1786 vstart += PAGE_SIZE;
1787 paddr += PAGE_SIZE;
1788 pte++;
1789 }
1790 }
1791
1792 return alloc_bytes;
1793}
1794
David S. Miller0dd5b7b2014-09-24 20:56:11 -07001795static void __init flush_all_kernel_tsbs(void)
1796{
1797 int i;
1798
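	/* Annotation: writing TSB_TAG_INVALID_BIT into each tag makes
	 * every entry fail its tag compare, so subsequent TLB misses
	 * fall through to the handlers and refill the TSBs with
	 * current translations.
	 */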
1799 for (i = 0; i < KERNEL_TSB_NENTRIES; i++) {
1800 struct tsb *ent = &swapper_tsb[i];
1801
1802 ent->tag = (1UL << TSB_TAG_INVALID_BIT);
1803 }
1804#ifndef CONFIG_DEBUG_PAGEALLOC
1805 for (i = 0; i < KERNEL_TSB4M_NENTRIES; i++) {
1806 struct tsb *ent = &swapper_4m_tsb[i];
1807
1808 ent->tag = (1UL << TSB_TAG_INVALID_BIT);
1809 }
1810#endif
1811}
1812
David S. Miller56425302005-09-25 16:46:57 -07001813extern unsigned int kvmap_linear_patch[1];
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001814
David S. Miller8f3614532007-12-13 06:13:38 -08001815static void __init kernel_physical_mapping_init(void)
1816{
David S. Miller8f3614532007-12-13 06:13:38 -08001817 unsigned long i, mem_alloced = 0UL;
David S. Miller0dd5b7b2014-09-24 20:56:11 -07001818 bool use_huge = true;
David S. Miller8f3614532007-12-13 06:13:38 -08001819
David S. Miller0dd5b7b2014-09-24 20:56:11 -07001820#ifdef CONFIG_DEBUG_PAGEALLOC
1821 use_huge = false;
1822#endif
David S. Miller8f3614532007-12-13 06:13:38 -08001823 for (i = 0; i < pall_ents; i++) {
1824 unsigned long phys_start, phys_end;
1825
1826 phys_start = pall[i].phys_addr;
1827 phys_end = phys_start + pall[i].reg_size;
1828
David S. Miller56425302005-09-25 16:46:57 -07001829 mem_alloced += kernel_map_range(phys_start, phys_end,
David S. Miller0dd5b7b2014-09-24 20:56:11 -07001830 PAGE_KERNEL, use_huge);
David S. Miller56425302005-09-25 16:46:57 -07001831 }
1832
1833 printk("Allocated %ld bytes for kernel page tables.\n",
1834 mem_alloced);
1835
1836 kvmap_linear_patch[0] = 0x01000000; /* nop */
1837 flushi(&kvmap_linear_patch[0]);
1838
David S. Miller0dd5b7b2014-09-24 20:56:11 -07001839 flush_all_kernel_tsbs();
1840
David S. Miller56425302005-09-25 16:46:57 -07001841 __flush_tlb_all();
1842}
1843
David S. Miller9cc3a1a2006-02-21 20:51:13 -08001844#ifdef CONFIG_DEBUG_PAGEALLOC
Joonsoo Kim031bc572014-12-12 16:55:52 -08001845void __kernel_map_pages(struct page *page, int numpages, int enable)
David S. Miller56425302005-09-25 16:46:57 -07001846{
1847 unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
1848 unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
1849
1850 kernel_map_range(phys_start, phys_end,
David S. Miller0dd5b7b2014-09-24 20:56:11 -07001851 (enable ? PAGE_KERNEL : __pgprot(0)), false);
David S. Miller56425302005-09-25 16:46:57 -07001852
David S. Miller74bf4312006-01-31 18:29:18 -08001853 flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
1854 PAGE_OFFSET + phys_end);
1855
David S. Miller56425302005-09-25 16:46:57 -07001856	/* We should perform an IPI and flush all TLBs,
1857	 * but that can deadlock, so flush only the current cpu.
1858 */
1859 __flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
1860 PAGE_OFFSET + phys_end);
1861}
1862#endif
1863
David S. Miller10147572005-09-28 21:46:43 -07001864unsigned long __init find_ecache_flush_span(unsigned long size)
1865{
David S. Miller13edad72005-09-29 17:58:26 -07001866 int i;
David S. Miller10147572005-09-28 21:46:43 -07001867
David S. Miller13edad72005-09-29 17:58:26 -07001868 for (i = 0; i < pavail_ents; i++) {
1869 if (pavail[i].reg_size >= size)
1870 return pavail[i].phys_addr;
David S. Miller10147572005-09-28 21:46:43 -07001871 }
1872
1873 return ~0UL;
1874}
1875
David S. Millerb2d43832013-09-20 21:50:41 -07001876unsigned long PAGE_OFFSET;
1877EXPORT_SYMBOL(PAGE_OFFSET);
1878
David S. Millerbb4e6e82014-09-27 11:05:21 -07001879unsigned long VMALLOC_END = 0x0000010000000000UL;
1880EXPORT_SYMBOL(VMALLOC_END);
1881
David S. Miller4397bed2014-09-26 21:58:33 -07001882unsigned long sparc64_va_hole_top = 0xfffff80000000000UL;
1883unsigned long sparc64_va_hole_bottom = 0x0000080000000000UL;
1884
David S. Millerb2d43832013-09-20 21:50:41 -07001885static void __init setup_page_offset(void)
1886{
David S. Millerb2d43832013-09-20 21:50:41 -07001887 if (tlb_type == cheetah || tlb_type == cheetah_plus) {
David S. Miller4397bed2014-09-26 21:58:33 -07001888 /* Cheetah/Panther support a full 64-bit virtual
1889 * address, so we can use all that our page tables
1890 * support.
1891 */
1892 sparc64_va_hole_top = 0xfff0000000000000UL;
1893 sparc64_va_hole_bottom = 0x0010000000000000UL;
1894
David S. Millerb2d43832013-09-20 21:50:41 -07001895 max_phys_bits = 42;
1896 } else if (tlb_type == hypervisor) {
1897 switch (sun4v_chip_type) {
1898 case SUN4V_CHIP_NIAGARA1:
1899 case SUN4V_CHIP_NIAGARA2:
David S. Miller4397bed2014-09-26 21:58:33 -07001900 /* T1 and T2 support 48-bit virtual addresses. */
1901 sparc64_va_hole_top = 0xffff800000000000UL;
1902 sparc64_va_hole_bottom = 0x0000800000000000UL;
1903
David S. Millerb2d43832013-09-20 21:50:41 -07001904 max_phys_bits = 39;
1905 break;
1906 case SUN4V_CHIP_NIAGARA3:
David S. Miller4397bed2014-09-26 21:58:33 -07001907 /* T3 supports 48-bit virtual addresses. */
1908 sparc64_va_hole_top = 0xffff800000000000UL;
1909 sparc64_va_hole_bottom = 0x0000800000000000UL;
1910
David S. Millerb2d43832013-09-20 21:50:41 -07001911 max_phys_bits = 43;
1912 break;
1913 case SUN4V_CHIP_NIAGARA4:
1914 case SUN4V_CHIP_NIAGARA5:
1915 case SUN4V_CHIP_SPARC64X:
David S. Miller7c0fa0f2014-09-24 21:49:29 -07001916 case SUN4V_CHIP_SPARC_M6:
David S. Miller4397bed2014-09-26 21:58:33 -07001917 /* T4 and later support 52-bit virtual addresses. */
1918 sparc64_va_hole_top = 0xfff8000000000000UL;
1919 sparc64_va_hole_bottom = 0x0008000000000000UL;
David S. Millerb2d43832013-09-20 21:50:41 -07001920 max_phys_bits = 47;
1921 break;
David S. Miller7c0fa0f2014-09-24 21:49:29 -07001922 case SUN4V_CHIP_SPARC_M7:
Khalid Azizc5b8b5b2016-04-19 11:12:54 -06001923 case SUN4V_CHIP_SPARC_SN:
David S. Miller7c0fa0f2014-09-24 21:49:29 -07001924 default:
1925 /* M7 and later support 52-bit virtual addresses. */
1926 sparc64_va_hole_top = 0xfff8000000000000UL;
1927 sparc64_va_hole_bottom = 0x0008000000000000UL;
1928 max_phys_bits = 49;
1929 break;
David S. Millerb2d43832013-09-20 21:50:41 -07001930 }
1931 }
1932
1933 if (max_phys_bits > MAX_PHYS_ADDRESS_BITS) {
1934 prom_printf("MAX_PHYS_ADDRESS_BITS is too small, need %lu\n",
1935 max_phys_bits);
1936 prom_halt();
1937 }
1938
David S. Millerbb4e6e82014-09-27 11:05:21 -07001939 PAGE_OFFSET = sparc64_va_hole_top;
1940 VMALLOC_END = ((sparc64_va_hole_bottom >> 1) +
1941 (sparc64_va_hole_bottom >> 2));
David S. Millerb2d43832013-09-20 21:50:41 -07001942
David S. Millerbb4e6e82014-09-27 11:05:21 -07001943 pr_info("MM: PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n",
David S. Millerb2d43832013-09-20 21:50:41 -07001944 PAGE_OFFSET, max_phys_bits);
David S. Millerbb4e6e82014-09-27 11:05:21 -07001945 pr_info("MM: VMALLOC [0x%016lx --> 0x%016lx]\n",
1946 VMALLOC_START, VMALLOC_END);
1947 pr_info("MM: VMEMMAP [0x%016lx --> 0x%016lx]\n",
1948 VMEMMAP_BASE, VMEMMAP_BASE << 1);
David S. Millerb2d43832013-09-20 21:50:41 -07001949}
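/* Annotation: illustrative sketch, standalone userspace C, not part of
 * this file. Working the arithmetic above for the M7 case: the hole
 * bottom is 0x0008000000000000, so VMALLOC_END lands three quarters of
 * the way up the lower half of the address space:
 */
#include <stdio.h>

int main(void)
{
	unsigned long hole_bottom = 0x0008000000000000UL;	/* M7 case */
	unsigned long vmalloc_end = (hole_bottom >> 1) +
				    (hole_bottom >> 2);

	/* Prints 0x0006000000000000. */
	printf("VMALLOC_END = 0x%016lx\n", vmalloc_end);
	return 0;
}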
1950
David S. Miller517af332006-02-01 15:55:21 -08001951static void __init tsb_phys_patch(void)
1952{
David S. Millerd257d5d2006-02-06 23:44:37 -08001953 struct tsb_ldquad_phys_patch_entry *pquad;
David S. Miller517af332006-02-01 15:55:21 -08001954 struct tsb_phys_patch_entry *p;
1955
David S. Millerd257d5d2006-02-06 23:44:37 -08001956 pquad = &__tsb_ldquad_phys_patch;
1957 while (pquad < &__tsb_ldquad_phys_patch_end) {
1958 unsigned long addr = pquad->addr;
1959
1960 if (tlb_type == hypervisor)
1961 *(unsigned int *) addr = pquad->sun4v_insn;
1962 else
1963 *(unsigned int *) addr = pquad->sun4u_insn;
1964 wmb();
1965 __asm__ __volatile__("flush %0"
1966 : /* no outputs */
1967 : "r" (addr));
1968
1969 pquad++;
1970 }
1971
David S. Miller517af332006-02-01 15:55:21 -08001972 p = &__tsb_phys_patch;
1973 while (p < &__tsb_phys_patch_end) {
1974 unsigned long addr = p->addr;
1975
1976 *(unsigned int *) addr = p->insn;
1977 wmb();
1978 __asm__ __volatile__("flush %0"
1979 : /* no outputs */
1980 : "r" (addr));
1981
1982 p++;
1983 }
1984}
1985
David S. Miller490384e2006-02-11 14:41:18 -08001986/* Don't mark as init, we give this to the Hypervisor. */
David S. Millerd1acb422007-03-16 17:20:28 -07001987#ifndef CONFIG_DEBUG_PAGEALLOC
1988#define NUM_KTSB_DESCR 2
1989#else
1990#define NUM_KTSB_DESCR 1
1991#endif
1992static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
David S. Miller490384e2006-02-11 14:41:18 -08001993
David S. Miller8c82dc02014-09-17 10:14:56 -07001994/* The swapper TSBs are loaded with a base sequence of:
1995 *
1996 * sethi %uhi(SYMBOL), REG1
1997 * sethi %hi(SYMBOL), REG2
1998 * or REG1, %ulo(SYMBOL), REG1
1999 * or REG2, %lo(SYMBOL), REG2
2000 * sllx REG1, 32, REG1
2001 * or REG1, REG2, REG1
2002 *
2003 * When we use physical addressing for the TSB accesses, we patch the
2004 * first four instructions in the above sequence.
2005 */
2006
David S. Miller9076d0e2011-08-05 00:53:57 -07002007static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
2008{
David S. Miller8c82dc02014-09-17 10:14:56 -07002009 unsigned long high_bits, low_bits;
2010
2011 high_bits = (pa >> 32) & 0xffffffff;
2012 low_bits = (pa >> 0) & 0xffffffff;
David S. Miller9076d0e2011-08-05 00:53:57 -07002013
2014 while (start < end) {
2015 unsigned int *ia = (unsigned int *)(unsigned long)*start;
2016
David S. Miller8c82dc02014-09-17 10:14:56 -07002017 ia[0] = (ia[0] & ~0x3fffff) | (high_bits >> 10);
David S. Miller9076d0e2011-08-05 00:53:57 -07002018 __asm__ __volatile__("flush %0" : : "r" (ia));
2019
David S. Miller8c82dc02014-09-17 10:14:56 -07002020 ia[1] = (ia[1] & ~0x3fffff) | (low_bits >> 10);
David S. Miller9076d0e2011-08-05 00:53:57 -07002021 __asm__ __volatile__("flush %0" : : "r" (ia + 1));
2022
David S. Miller8c82dc02014-09-17 10:14:56 -07002023 ia[2] = (ia[2] & ~0x1fff) | (high_bits & 0x3ff);
2024 __asm__ __volatile__("flush %0" : : "r" (ia + 2));
2025
2026 ia[3] = (ia[3] & ~0x1fff) | (low_bits & 0x3ff);
2027 __asm__ __volatile__("flush %0" : : "r" (ia + 3));
2028
David S. Miller9076d0e2011-08-05 00:53:57 -07002029 start++;
2030 }
2031}
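/* Annotation: illustrative sketch, standalone userspace C, not part of
 * this file. The masks above reflect the instruction encodings: sethi
 * carries a 22-bit immediate that becomes bits 31..10 of its result,
 * and the following `or` supplies the low 10 bits, so each 32-bit half
 * of the TSB physical address is rebuilt from two patched immediates:
 */
#include <stdio.h>

int main(void)
{
	unsigned int half = 0x12345678;		/* one half of the PA */
	unsigned int sethi_imm = half >> 10;	/* bits 31..10 */
	unsigned int or_imm = half & 0x3ff;	/* bits 9..0 */

	printf("0x%x\n", (sethi_imm << 10) | or_imm);	/* 0x12345678 */
	return 0;
}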
2032
2033static void ktsb_phys_patch(void)
2034{
2035 extern unsigned int __swapper_tsb_phys_patch;
2036 extern unsigned int __swapper_tsb_phys_patch_end;
David S. Miller9076d0e2011-08-05 00:53:57 -07002037 unsigned long ktsb_pa;
2038
2039 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
2040 patch_one_ktsb_phys(&__swapper_tsb_phys_patch,
2041 &__swapper_tsb_phys_patch_end, ktsb_pa);
2042#ifndef CONFIG_DEBUG_PAGEALLOC
David S. Miller0785a8e2011-08-06 05:26:35 -07002043 {
2044 extern unsigned int __swapper_4m_tsb_phys_patch;
2045 extern unsigned int __swapper_4m_tsb_phys_patch_end;
David S. Miller9076d0e2011-08-05 00:53:57 -07002046 ktsb_pa = (kern_base +
2047 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
2048 patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch,
2049 &__swapper_4m_tsb_phys_patch_end, ktsb_pa);
David S. Miller0785a8e2011-08-06 05:26:35 -07002050 }
David S. Miller9076d0e2011-08-05 00:53:57 -07002051#endif
2052}
2053
David S. Miller490384e2006-02-11 14:41:18 -08002054static void __init sun4v_ktsb_init(void)
2055{
2056 unsigned long ktsb_pa;
2057
David S. Millerd7744a02006-02-21 22:31:11 -08002058 /* First KTSB for PAGE_SIZE mappings. */
David S. Miller490384e2006-02-11 14:41:18 -08002059 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
2060
2061 switch (PAGE_SIZE) {
2062 case 8 * 1024:
2063 default:
2064 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
2065 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
2066 break;
2067
2068 case 64 * 1024:
2069 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
2070 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
2071 break;
2072
2073 case 512 * 1024:
2074 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
2075 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
2076 break;
2077
2078 case 4 * 1024 * 1024:
2079 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
2080 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
2081 break;
Joe Perches6cb79b32011-06-03 14:45:23 +00002082 }
David S. Miller490384e2006-02-11 14:41:18 -08002083
David S. Miller3f19a842006-02-17 12:03:20 -08002084 ktsb_descr[0].assoc = 1;
David S. Miller490384e2006-02-11 14:41:18 -08002085 ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
2086 ktsb_descr[0].ctx_idx = 0;
2087 ktsb_descr[0].tsb_base = ktsb_pa;
2088 ktsb_descr[0].resv = 0;
2089
David S. Millerd1acb422007-03-16 17:20:28 -07002090#ifndef CONFIG_DEBUG_PAGEALLOC
David S. Miller4f93d212012-09-06 18:13:58 -07002091 /* Second KTSB for 4MB/256MB/2GB/16GB mappings. */
David S. Millerd7744a02006-02-21 22:31:11 -08002092 ktsb_pa = (kern_base +
2093 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
2094
2095 ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
David S. Millerc69ad0a2012-09-06 20:35:36 -07002096 ktsb_descr[1].pgsz_mask = ((HV_PGSZ_MASK_4MB |
2097 HV_PGSZ_MASK_256MB |
2098 HV_PGSZ_MASK_2GB |
2099 HV_PGSZ_MASK_16GB) &
2100 cpu_pgsz_mask);
David S. Millerd7744a02006-02-21 22:31:11 -08002101 ktsb_descr[1].assoc = 1;
2102 ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
2103 ktsb_descr[1].ctx_idx = 0;
2104 ktsb_descr[1].tsb_base = ktsb_pa;
2105 ktsb_descr[1].resv = 0;
David S. Millerd1acb422007-03-16 17:20:28 -07002106#endif
David S. Miller490384e2006-02-11 14:41:18 -08002107}
2108
Paul Gortmaker2066aad2013-06-17 15:43:14 -04002109void sun4v_ktsb_register(void)
David S. Miller490384e2006-02-11 14:41:18 -08002110{
David S. Miller7db35f32007-05-29 02:22:14 -07002111 unsigned long pa, ret;
David S. Miller490384e2006-02-11 14:41:18 -08002112
2113 pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);
2114
David S. Miller7db35f32007-05-29 02:22:14 -07002115 ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
2116 if (ret != 0) {
2117 prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
2118 "errors with %lx\n", pa, ret);
2119 prom_halt();
2120 }
David S. Miller490384e2006-02-11 14:41:18 -08002121}
2122
David S. Millerc69ad0a2012-09-06 20:35:36 -07002123static void __init sun4u_linear_pte_xor_finalize(void)
2124{
2125#ifndef CONFIG_DEBUG_PAGEALLOC
2126 /* This is where we would add Panther support for
2127 * 32MB and 256MB pages.
2128 */
2129#endif
2130}
2131
2132static void __init sun4v_linear_pte_xor_finalize(void)
2133{
Khalid Aziz494e5b62015-05-27 10:00:46 -06002134 unsigned long pagecv_flag;
2135
2136	/* Bit 9 of the TTE is no longer the CV bit on the M7 processor; it
2137	 * instead enables MCD errors. Do not set bit 9 on M7.
2138 */
2139 switch (sun4v_chip_type) {
2140 case SUN4V_CHIP_SPARC_M7:
Khalid Azizc5b8b5b2016-04-19 11:12:54 -06002141 case SUN4V_CHIP_SPARC_SN:
Khalid Aziz494e5b62015-05-27 10:00:46 -06002142 pagecv_flag = 0x00;
2143 break;
2144 default:
2145 pagecv_flag = _PAGE_CV_4V;
2146 break;
2147 }
David S. Millerc69ad0a2012-09-06 20:35:36 -07002148#ifndef CONFIG_DEBUG_PAGEALLOC
2149 if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) {
2150 kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
David S. Miller922631b2013-09-18 12:00:00 -07002151 PAGE_OFFSET;
Khalid Aziz494e5b62015-05-27 10:00:46 -06002152 kern_linear_pte_xor[1] |= (_PAGE_CP_4V | pagecv_flag |
David S. Millerc69ad0a2012-09-06 20:35:36 -07002153 _PAGE_P_4V | _PAGE_W_4V);
2154 } else {
2155 kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
2156 }
2157
2158 if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) {
2159 kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^
David S. Miller922631b2013-09-18 12:00:00 -07002160 PAGE_OFFSET;
Khalid Aziz494e5b62015-05-27 10:00:46 -06002161 kern_linear_pte_xor[2] |= (_PAGE_CP_4V | pagecv_flag |
David S. Millerc69ad0a2012-09-06 20:35:36 -07002162 _PAGE_P_4V | _PAGE_W_4V);
2163 } else {
2164 kern_linear_pte_xor[2] = kern_linear_pte_xor[1];
2165 }
2166
2167 if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) {
2168 kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^
David S. Miller922631b2013-09-18 12:00:00 -07002169 PAGE_OFFSET;
Khalid Aziz494e5b62015-05-27 10:00:46 -06002170 kern_linear_pte_xor[3] |= (_PAGE_CP_4V | pagecv_flag |
David S. Millerc69ad0a2012-09-06 20:35:36 -07002171 _PAGE_P_4V | _PAGE_W_4V);
2172 } else {
2173 kern_linear_pte_xor[3] = kern_linear_pte_xor[2];
2174 }
2175#endif
2176}
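/* Annotation: illustrative sketch, standalone userspace C, not part of
 * this file. The XOR values built above work because PAGE_OFFSET has
 * only high bits set, so a linear address is PAGE_OFFSET + paddr with
 * no carries. XOR-ing that address with (tte_bits ^ PAGE_OFFSET)
 * therefore strips PAGE_OFFSET and merges the TTE bits in one step.
 * The flag value below is made up for the demonstration:
 */
#include <stdio.h>

int main(void)
{
	unsigned long page_offset = 0xfff8000000000000UL; /* M7-class */
	unsigned long tte_bits = (1UL << 63) | 0x7UL;	/* made-up flags */
	unsigned long xor_val = tte_bits ^ page_offset;
	unsigned long paddr = 0x40000000UL;		/* 4MB-aligned */
	unsigned long vaddr = page_offset + paddr;

	/* Prints 0x8000000040000007, i.e. paddr | tte_bits. */
	printf("tte = 0x%016lx\n", vaddr ^ xor_val);
	return 0;
}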
2177
Linus Torvalds1da177e2005-04-16 15:20:36 -07002178/* paging_init() sets up the page tables */
2179
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180static unsigned long last_valid_pfn;
David S. Millerac55c762014-09-26 21:19:46 -07002181
David S. Millerc4bce902006-02-11 21:57:54 -08002182static void sun4u_pgprot_init(void);
2183static void sun4v_pgprot_init(void);
2184
bob picco7c21d532014-09-16 09:29:54 -04002185static phys_addr_t __init available_memory(void)
2186{
2187 phys_addr_t available = 0ULL;
2188 phys_addr_t pa_start, pa_end;
2189 u64 i;
2190
Tony Luckfc6daaf2015-06-24 16:58:09 -07002191 for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &pa_start,
2192 &pa_end, NULL)
bob picco7c21d532014-09-16 09:29:54 -04002193 available = available + (pa_end - pa_start);
2194
2195 return available;
2196}
2197
Khalid Aziz494e5b62015-05-27 10:00:46 -06002198#define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U)
2199#define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V)
2200#define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
2201#define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
2202#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
2203#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
2204
bob picco7c21d532014-09-16 09:29:54 -04002205/* We need to exclude reserved regions; this will exclude vmlinux and
2206 * the initrd. To be more precise, the initrd size could be used to compute
2207 * a new lower limit, because it is freed later during initialization.
2208 */
2209static void __init reduce_memory(phys_addr_t limit_ram)
2210{
2211 phys_addr_t avail_ram = available_memory();
2212 phys_addr_t pa_start, pa_end;
2213 u64 i;
2214
2215 if (limit_ram >= avail_ram)
2216 return;
2217
Tony Luckfc6daaf2015-06-24 16:58:09 -07002218 for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &pa_start,
2219 &pa_end, NULL) {
bob picco7c21d532014-09-16 09:29:54 -04002220 phys_addr_t region_size = pa_end - pa_start;
2221 phys_addr_t clip_start = pa_start;
2222
2223 avail_ram = avail_ram - region_size;
2224 /* Are we consuming too much? */
2225 if (avail_ram < limit_ram) {
2226 phys_addr_t give_back = limit_ram - avail_ram;
2227
2228 region_size = region_size - give_back;
2229 clip_start = clip_start + give_back;
2230 }
2231
2232 memblock_remove(clip_start, region_size);
2233
2234 if (avail_ram <= limit_ram)
2235 break;
2236 i = 0UL;
2237 }
2238}
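/* Annotation: illustrative sketch, standalone userspace C, not part of
 * this file. One pass of the clipping logic above with concrete
 * numbers: 8GB available, a 6GB command line limit and a single free
 * region means 2GB is clipped from the front of that region and
 * removed from the memblock map:
 */
#include <stdio.h>

int main(void)
{
	unsigned long avail = 8UL << 30, limit = 6UL << 30;
	unsigned long clip_start = 0, region = 8UL << 30;

	avail -= region;			/* consuming too much? */
	if (avail < limit) {
		unsigned long give_back = limit - avail;

		region -= give_back;
		clip_start += give_back;
	}
	/* Prints: remove [0x180000000, 0x200000000), keeping 6GB. */
	printf("remove [0x%lx, 0x%lx)\n", clip_start, clip_start + region);
	return 0;
}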
2239
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240void __init paging_init(void)
2241{
David S. Miller919ee672008-04-23 05:40:25 -07002242 unsigned long end_pfn, shift, phys_base;
David S. Miller0836a0e2005-09-28 21:38:08 -07002243 unsigned long real_end, i;
2244
David S. Millerb2d43832013-09-20 21:50:41 -07002245 setup_page_offset();
2246
David S. Miller22adb352007-05-26 01:14:43 -07002247	/* These build time checks make sure that the dcache_dirty_cpu()
2248 * page->flags usage will work.
2249 *
2250 * When a page gets marked as dcache-dirty, we store the
2251 * cpu number starting at bit 32 in the page->flags. Also,
2252 * functions like clear_dcache_dirty_cpu use the cpu mask
2253 * in 13-bit signed-immediate instruction fields.
2254 */
Christoph Lameter9223b4192008-04-28 02:12:48 -07002255
2256 /*
2257 * Page flags must not reach into upper 32 bits that are used
2258 * for the cpu number
2259 */
2260 BUILD_BUG_ON(NR_PAGEFLAGS > 32);
2261
2262 /*
2263 * The bit fields placed in the high range must not reach below
2264 * the 32 bit boundary. Otherwise we cannot place the cpu field
2265 * at the 32 bit boundary.
2266 */
David S. Miller22adb352007-05-26 01:14:43 -07002267 BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
Christoph Lameter9223b4192008-04-28 02:12:48 -07002268 ilog2(roundup_pow_of_two(NR_CPUS)) > 32);
2269
David S. Miller22adb352007-05-26 01:14:43 -07002270 BUILD_BUG_ON(NR_CPUS > 4096);
2271
David S. Miller0eef3312014-05-03 22:52:50 -07002272 kern_base = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
David S. Miller481295f2006-02-07 21:51:08 -08002273 kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
2274
David S. Millerd7744a02006-02-21 22:31:11 -08002275 /* Invalidate both kernel TSBs. */
David S. Miller8b234272006-02-17 18:01:02 -08002276 memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
David S. Millerd1acb422007-03-16 17:20:28 -07002277#ifndef CONFIG_DEBUG_PAGEALLOC
David S. Millerd7744a02006-02-21 22:31:11 -08002278 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
David S. Millerd1acb422007-03-16 17:20:28 -07002279#endif
David S. Miller8b234272006-02-17 18:01:02 -08002280
Khalid Aziz494e5b62015-05-27 10:00:46 -06002281	/* The TTE.cv bit on sparc v9 occupies the same position as the
2282	 * TTE.mcde bit on the M7 processor. This is a conflicting use of
2283	 * the same bit. Enabling TTE.cv on M7 would turn on Memory
2284	 * Corruption Detection errors on all pages, which will lead to
2285	 * problems later. The kernel does not run with MCD enabled, and
2286	 * hence the rest of the steps required to fully configure memory
2287	 * corruption detection are not taken. We must ensure TTE.mcde is
2288	 * not set on the M7 processor. Compute the cacheability flag for
2289	 * later use with this in mind.
2290 */
2291 switch (sun4v_chip_type) {
2292 case SUN4V_CHIP_SPARC_M7:
Khalid Azizc5b8b5b2016-04-19 11:12:54 -06002293 case SUN4V_CHIP_SPARC_SN:
Khalid Aziz494e5b62015-05-27 10:00:46 -06002294 page_cache4v_flag = _PAGE_CP_4V;
2295 break;
2296 default:
2297 page_cache4v_flag = _PAGE_CACHE_4V;
2298 break;
2299 }
2300
David S. Millerc4bce902006-02-11 21:57:54 -08002301 if (tlb_type == hypervisor)
2302 sun4v_pgprot_init();
2303 else
2304 sun4u_pgprot_init();
2305
David S. Millerd257d5d2006-02-06 23:44:37 -08002306 if (tlb_type == cheetah_plus ||
David S. Miller9076d0e2011-08-05 00:53:57 -07002307 tlb_type == hypervisor) {
David S. Miller517af332006-02-01 15:55:21 -08002308 tsb_phys_patch();
David S. Miller9076d0e2011-08-05 00:53:57 -07002309 ktsb_phys_patch();
2310 }
David S. Miller517af332006-02-01 15:55:21 -08002311
David S. Millerc69ad0a2012-09-06 20:35:36 -07002312 if (tlb_type == hypervisor)
David S. Millerd257d5d2006-02-06 23:44:37 -08002313 sun4v_patch_tlb_handlers();
2314
David S. Millera94a1722008-05-11 21:04:48 -07002315 /* Find available physical memory...
2316 *
2317 * Read it twice in order to work around a bug in openfirmware.
2318 * The call to grab this table itself can cause openfirmware to
2319 * allocate memory, which in turn can take away some space from
2320 * the list of available memory. Reading it twice makes sure
2321 * we really do get the final value.
2322 */
2323 read_obp_translations();
2324 read_obp_memory("reg", &pall[0], &pall_ents);
2325 read_obp_memory("available", &pavail[0], &pavail_ents);
David S. Miller13edad72005-09-29 17:58:26 -07002326 read_obp_memory("available", &pavail[0], &pavail_ents);
David S. Miller0836a0e2005-09-28 21:38:08 -07002327
2328 phys_base = 0xffffffffffffffffUL;
David S. Miller3b2a7e22008-02-13 18:13:20 -08002329 for (i = 0; i < pavail_ents; i++) {
David S. Miller13edad72005-09-29 17:58:26 -07002330 phys_base = min(phys_base, pavail[i].phys_addr);
Yinghai Lu95f72d12010-07-12 14:36:09 +10002331 memblock_add(pavail[i].phys_addr, pavail[i].reg_size);
David S. Miller3b2a7e22008-02-13 18:13:20 -08002332 }
2333
Yinghai Lu95f72d12010-07-12 14:36:09 +10002334 memblock_reserve(kern_base, kern_size);
David S. Miller0836a0e2005-09-28 21:38:08 -07002335
David S. Miller4e82c9a2008-02-13 18:00:03 -08002336 find_ramdisk(phys_base);
2337
bob picco7c21d532014-09-16 09:29:54 -04002338 if (cmdline_memory_size)
2339 reduce_memory(cmdline_memory_size);
David S. Miller25b0c652008-02-13 18:20:14 -08002340
Tejun Heo1aadc052011-12-08 10:22:08 -08002341 memblock_allow_resize();
Yinghai Lu95f72d12010-07-12 14:36:09 +10002342 memblock_dump_all();
David S. Miller3b2a7e22008-02-13 18:13:20 -08002343
Linus Torvalds1da177e2005-04-16 15:20:36 -07002344 set_bit(0, mmu_context_bmap);
2345
David S. Miller2bdb3cb2005-09-22 01:08:57 -07002346 shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
2347
Linus Torvalds1da177e2005-04-16 15:20:36 -07002348 real_end = (unsigned long)_end;
David S. Miller0eef3312014-05-03 22:52:50 -07002349 num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB);
David S. Miller64658742008-03-21 17:01:38 -07002350 printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
2351 num_kernel_image_mappings);
David S. Miller2bdb3cb2005-09-22 01:08:57 -07002352
2353 /* Set kernel pgd to upper alias so physical page computations
Linus Torvalds1da177e2005-04-16 15:20:36 -07002354 * work.
2355 */
2356 init_mm.pgd += ((shift) / (sizeof(pgd_t)));
2357
David S. Millerd195b712014-09-27 21:30:57 -07002358 memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
David S. Miller0dd5b7b2014-09-24 20:56:11 -07002359
David S. Millerc9c10832005-10-12 12:22:46 -07002360 inherit_prom_mappings();
David S. Miller5085b4a2005-09-22 00:45:41 -07002361
David S. Millera8b900d2006-01-31 18:33:37 -08002362 /* Ok, we can use our TLB miss and window trap handlers safely. */
2363 setup_tba();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002364
David S. Millerc9c10832005-10-12 12:22:46 -07002365 __flush_tlb_all();
David S. Miller9ad98c52005-10-05 15:12:00 -07002366
David S. Millerad072002008-02-13 19:21:51 -08002367 prom_build_devicetree();
David S. Millerb696fdc2009-05-26 22:37:25 -07002368 of_populate_present_mask();
David S. Millerb99c6eb2009-06-18 01:44:19 -07002369#ifndef CONFIG_SMP
2370 of_fill_in_cpu_data();
2371#endif
David S. Millerad072002008-02-13 19:21:51 -08002372
David S. Miller890db402009-04-01 03:13:15 -07002373 if (tlb_type == hypervisor) {
David S. Miller4a283332008-02-13 19:22:23 -08002374 sun4v_mdesc_init();
Stephen Rothwell6ac5c612009-06-15 03:06:18 -07002375 mdesc_populate_present_mask(cpu_all_mask);
David S. Millerb99c6eb2009-06-18 01:44:19 -07002376#ifndef CONFIG_SMP
2377 mdesc_fill_in_cpu_data(cpu_all_mask);
2378#endif
David S. Millerce33fdc2012-09-06 19:01:25 -07002379 mdesc_get_page_sizes(cpu_all_mask, &cpu_pgsz_mask);
David S. Millerc69ad0a2012-09-06 20:35:36 -07002380
2381 sun4v_linear_pte_xor_finalize();
2382
2383 sun4v_ktsb_init();
2384 sun4v_ktsb_register();
David S. Millerce33fdc2012-09-06 19:01:25 -07002385 } else {
2386 unsigned long impl, ver;
2387
2388 cpu_pgsz_mask = (HV_PGSZ_MASK_8K | HV_PGSZ_MASK_64K |
2389 HV_PGSZ_MASK_512K | HV_PGSZ_MASK_4MB);
2390
2391 __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
2392 impl = ((ver >> 32) & 0xffff);
2393 if (impl == PANTHER_IMPL)
2394 cpu_pgsz_mask |= (HV_PGSZ_MASK_32MB |
2395 HV_PGSZ_MASK_256MB);
David S. Millerc69ad0a2012-09-06 20:35:36 -07002396
2397 sun4u_linear_pte_xor_finalize();
David S. Miller890db402009-04-01 03:13:15 -07002398 }
David S. Miller4a283332008-02-13 19:22:23 -08002399
David S. Millerc69ad0a2012-09-06 20:35:36 -07002400 /* Flush the TLBs and the 4M TSB so that the updated linear
2401 * pte XOR settings are realized for all mappings.
2402 */
2403 __flush_tlb_all();
2404#ifndef CONFIG_DEBUG_PAGEALLOC
2405 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
2406#endif
2407 __flush_tlb_all();
2408
David S. Miller2bdb3cb2005-09-22 01:08:57 -07002409 /* Setup bootmem... */
David S. Miller919ee672008-04-23 05:40:25 -07002410 last_valid_pfn = end_pfn = bootmem_init(phys_base);
David S. Millerd1112012006-03-08 02:16:07 -08002411
David S. Miller56425302005-09-25 16:46:57 -07002412 kernel_physical_mapping_init();
David S. Miller56425302005-09-25 16:46:57 -07002413
Linus Torvalds1da177e2005-04-16 15:20:36 -07002414 {
David S. Miller919ee672008-04-23 05:40:25 -07002415 unsigned long max_zone_pfns[MAX_NR_ZONES];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002416
David S. Miller919ee672008-04-23 05:40:25 -07002417 memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002418
David S. Miller919ee672008-04-23 05:40:25 -07002419 max_zone_pfns[ZONE_NORMAL] = end_pfn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002420
David S. Miller919ee672008-04-23 05:40:25 -07002421 free_area_init_nodes(max_zone_pfns);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002422 }
2423
David S. Miller3c62a2d2008-02-17 23:22:50 -08002424 printk("Booting Linux...\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002425}
2426
Greg Kroah-Hartman7c9503b2012-12-21 14:03:26 -08002427int page_in_phys_avail(unsigned long paddr)
David S. Miller919ee672008-04-23 05:40:25 -07002428{
2429 int i;
2430
2431 paddr &= PAGE_MASK;
2432
2433 for (i = 0; i < pavail_ents; i++) {
2434 unsigned long start, end;
2435
2436 start = pavail[i].phys_addr;
2437 end = start + pavail[i].reg_size;
2438
2439 if (paddr >= start && paddr < end)
2440 return 1;
2441 }
2442 if (paddr >= kern_base && paddr < (kern_base + kern_size))
2443 return 1;
2444#ifdef CONFIG_BLK_DEV_INITRD
2445 if (paddr >= __pa(initrd_start) &&
2446 paddr < __pa(PAGE_ALIGN(initrd_end)))
2447 return 1;
2448#endif
2449
2450 return 0;
2451}
2452
Yinghai Lu961f8fa2012-11-16 19:39:21 -08002453static void __init register_page_bootmem_info(void)
2454{
2455#ifdef CONFIG_NEED_MULTIPLE_NODES
2456 int i;
2457
2458 for_each_online_node(i)
2459 if (NODE_DATA(i)->node_spanned_pages)
2460 register_page_bootmem_info_node(NODE_DATA(i));
2461#endif
2462}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002463void __init mem_init(void)
2464{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002465 high_memory = __va(last_valid_pfn << PAGE_SHIFT);
2466
Yinghai Lu961f8fa2012-11-16 19:39:21 -08002467 register_page_bootmem_info();
Jiang Liu0c988532013-07-03 15:03:24 -07002468 free_all_bootmem();
David S. Miller919ee672008-04-23 05:40:25 -07002469
Linus Torvalds1da177e2005-04-16 15:20:36 -07002470 /*
2471 * Set up the zero page, mark it reserved, so that page count
2472 * is not manipulated when freeing the page from user ptes.
2473 */
2474 mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
2475 if (mem_map_zero == NULL) {
2476 prom_printf("paging_init: Cannot alloc zero page.\n");
2477 prom_halt();
2478 }
Jiang Liu70affe42013-05-07 16:18:08 -07002479 mark_page_reserved(mem_map_zero);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002480
Jiang Liudceccbe2013-07-03 15:04:14 -07002481 mem_init_print_info(NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002482
2483 if (tlb_type == cheetah || tlb_type == cheetah_plus)
2484 cheetah_ecache_flush_init();
2485}
2486
David S. Miller898cf0e2005-09-23 11:59:44 -07002487void free_initmem(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002488{
2489 unsigned long addr, initend;
David S. Millerf2b60792008-08-14 01:45:41 -07002490 int do_free = 1;
2491
2492 /* If the physical memory maps were trimmed by kernel command
2493 * line options, don't even try freeing this initmem stuff up.
2494 * The kernel image could have been in the trimmed out region
2495 * and if so the freeing below will free invalid page structs.
2496 */
2497 if (cmdline_memory_size)
2498 do_free = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002499
2500 /*
2501	 * The init section is aligned to 8k in vmlinux.lds. Page align for page sizes larger than 8k.
2502 */
2503 addr = PAGE_ALIGN((unsigned long)(__init_begin));
2504 initend = (unsigned long)(__init_end) & PAGE_MASK;
2505 for (; addr < initend; addr += PAGE_SIZE) {
2506 unsigned long page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002507
2508 page = (addr +
2509 ((unsigned long) __va(kern_base)) -
2510 ((unsigned long) KERNBASE));
Randy Dunlapc9cf5522006-06-27 02:53:52 -07002511 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002512
Jiang Liu70affe42013-05-07 16:18:08 -07002513 if (do_free)
2514 free_reserved_page(virt_to_page(page));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002515 }
2516}
2517
2518#ifdef CONFIG_BLK_DEV_INITRD
2519void free_initrd_mem(unsigned long start, unsigned long end)
2520{
Jiang Liudceccbe2013-07-03 15:04:14 -07002521 free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
2522 "initrd");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002523}
2524#endif
David S. Millerc4bce902006-02-11 21:57:54 -08002525
David S. Millerc4bce902006-02-11 21:57:54 -08002526pgprot_t PAGE_KERNEL __read_mostly;
2527EXPORT_SYMBOL(PAGE_KERNEL);
2528
2529pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
2530pgprot_t PAGE_COPY __read_mostly;
David S. Miller0f159522006-02-18 12:43:16 -08002531
2532pgprot_t PAGE_SHARED __read_mostly;
2533EXPORT_SYMBOL(PAGE_SHARED);
2534
David S. Millerc4bce902006-02-11 21:57:54 -08002535unsigned long pg_iobits __read_mostly;
2536
2537unsigned long _PAGE_IE __read_mostly;
David S. Miller987c74f2006-06-25 01:34:43 -07002538EXPORT_SYMBOL(_PAGE_IE);
David S. Millerb2bef442006-02-23 01:55:55 -08002539
David S. Millerc4bce902006-02-11 21:57:54 -08002540unsigned long _PAGE_E __read_mostly;
David S. Millerb2bef442006-02-23 01:55:55 -08002541EXPORT_SYMBOL(_PAGE_E);
2542
David S. Millerc4bce902006-02-11 21:57:54 -08002543unsigned long _PAGE_CACHE __read_mostly;
David S. Millerb2bef442006-02-23 01:55:55 -08002544EXPORT_SYMBOL(_PAGE_CACHE);
David S. Millerc4bce902006-02-11 21:57:54 -08002545
David Miller46644c22007-10-16 01:24:16 -07002546#ifdef CONFIG_SPARSEMEM_VMEMMAP
Johannes Weiner0aad8182013-04-29 15:07:50 -07002547int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
2548 int node)
David Miller46644c22007-10-16 01:24:16 -07002549{
David Miller46644c22007-10-16 01:24:16 -07002550 unsigned long pte_base;
2551
2552 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2553 _PAGE_CP_4U | _PAGE_CV_4U |
2554 _PAGE_P_4U | _PAGE_W_4U);
2555 if (tlb_type == hypervisor)
2556 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
Khalid Aziz494e5b62015-05-27 10:00:46 -06002557 page_cache4v_flag | _PAGE_P_4V | _PAGE_W_4V);
David Miller46644c22007-10-16 01:24:16 -07002558
David S. Millerc06240c2014-09-24 21:20:14 -07002559 pte_base |= _PAGE_PMD_HUGE;
David Miller46644c22007-10-16 01:24:16 -07002560
David S. Millerc06240c2014-09-24 21:20:14 -07002561 vstart = vstart & PMD_MASK;
2562 vend = ALIGN(vend, PMD_SIZE);
2563 for (; vstart < vend; vstart += PMD_SIZE) {
2564 pgd_t *pgd = pgd_offset_k(vstart);
2565 unsigned long pte;
2566 pud_t *pud;
2567 pmd_t *pmd;
2568
2569 if (pgd_none(*pgd)) {
2570 pud_t *new = vmemmap_alloc_block(PAGE_SIZE, node);
2571
2572 if (!new)
2573 return -ENOMEM;
2574 pgd_populate(&init_mm, pgd, new);
2575 }
2576
2577 pud = pud_offset(pgd, vstart);
2578 if (pud_none(*pud)) {
2579 pmd_t *new = vmemmap_alloc_block(PAGE_SIZE, node);
2580
2581 if (!new)
2582 return -ENOMEM;
2583 pud_populate(&init_mm, pud, new);
2584 }
2585
2586 pmd = pmd_offset(pud, vstart);
2587
2588 pte = pmd_val(*pmd);
2589 if (!(pte & _PAGE_VALID)) {
2590 void *block = vmemmap_alloc_block(PMD_SIZE, node);
2591
David Miller46644c22007-10-16 01:24:16 -07002592 if (!block)
2593 return -ENOMEM;
2594
David S. Millerc06240c2014-09-24 21:20:14 -07002595 pmd_val(*pmd) = pte_base | __pa(block);
David Miller46644c22007-10-16 01:24:16 -07002596 }
2597 }
David S. Miller2856cc22012-08-15 00:37:29 -07002598
David S. Millerc06240c2014-09-24 21:20:14 -07002599 return 0;
David S. Miller2856cc22012-08-15 00:37:29 -07002600}
Yasuaki Ishimatsu46723bf2013-02-22 16:33:00 -08002601
Johannes Weiner0aad8182013-04-29 15:07:50 -07002602void vmemmap_free(unsigned long start, unsigned long end)
Tang Chen01975182013-02-22 16:33:08 -08002603{
2604}
David Miller46644c22007-10-16 01:24:16 -07002605#endif /* CONFIG_SPARSEMEM_VMEMMAP */
2606
David S. Millerc4bce902006-02-11 21:57:54 -08002607static void prot_init_common(unsigned long page_none,
2608 unsigned long page_shared,
2609 unsigned long page_copy,
2610 unsigned long page_readonly,
2611 unsigned long page_exec_bit)
2612{
2613 PAGE_COPY = __pgprot(page_copy);
David S. Miller0f159522006-02-18 12:43:16 -08002614 PAGE_SHARED = __pgprot(page_shared);
David S. Millerc4bce902006-02-11 21:57:54 -08002615
2616 protection_map[0x0] = __pgprot(page_none);
2617 protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
2618 protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
2619 protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
2620 protection_map[0x4] = __pgprot(page_readonly);
2621 protection_map[0x5] = __pgprot(page_readonly);
2622 protection_map[0x6] = __pgprot(page_copy);
2623 protection_map[0x7] = __pgprot(page_copy);
2624 protection_map[0x8] = __pgprot(page_none);
2625 protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
2626 protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
2627 protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
2628 protection_map[0xc] = __pgprot(page_readonly);
2629 protection_map[0xd] = __pgprot(page_readonly);
2630 protection_map[0xe] = __pgprot(page_shared);
2631 protection_map[0xf] = __pgprot(page_shared);
2632}
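/* Annotation: the sixteen slots above are indexed by the low four
 * vm_flags bits (VM_READ=1, VM_WRITE=2, VM_EXEC=4, VM_SHARED=8). That
 * is why slot 0x3, a private read/write mapping, gets page_copy for
 * copy-on-write, while its shared counterpart in slot 0xb gets
 * page_shared with the hardware write bit enabled.
 */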
2633
2634static void __init sun4u_pgprot_init(void)
2635{
2636 unsigned long page_none, page_shared, page_copy, page_readonly;
2637 unsigned long page_exec_bit;
David S. Miller4f93d212012-09-06 18:13:58 -07002638 int i;
David S. Millerc4bce902006-02-11 21:57:54 -08002639
2640 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2641 _PAGE_CACHE_4U | _PAGE_P_4U |
2642 __ACCESS_BITS_4U | __DIRTY_BITS_4U |
2643 _PAGE_EXEC_4U);
2644 PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2645 _PAGE_CACHE_4U | _PAGE_P_4U |
2646 __ACCESS_BITS_4U | __DIRTY_BITS_4U |
2647 _PAGE_EXEC_4U | _PAGE_L_4U);
David S. Millerc4bce902006-02-11 21:57:54 -08002648
2649 _PAGE_IE = _PAGE_IE_4U;
2650 _PAGE_E = _PAGE_E_4U;
2651 _PAGE_CACHE = _PAGE_CACHE_4U;
2652
2653 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
2654 __ACCESS_BITS_4U | _PAGE_E_4U);
2655
David S. Millerd1acb422007-03-16 17:20:28 -07002656#ifdef CONFIG_DEBUG_PAGEALLOC
David S. Miller922631b2013-09-18 12:00:00 -07002657 kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
David S. Millerd1acb422007-03-16 17:20:28 -07002658#else
David S. Miller9cc3a1a2006-02-21 20:51:13 -08002659 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
David S. Miller922631b2013-09-18 12:00:00 -07002660 PAGE_OFFSET;
David S. Millerd1acb422007-03-16 17:20:28 -07002661#endif
David S. Miller9cc3a1a2006-02-21 20:51:13 -08002662 kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
2663 _PAGE_P_4U | _PAGE_W_4U);
2664
David S. Miller4f93d212012-09-06 18:13:58 -07002665 for (i = 1; i < 4; i++)
2666 kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
David S. Millerc4bce902006-02-11 21:57:54 -08002667
David S. Millerc4bce902006-02-11 21:57:54 -08002668 _PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
2669 _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
2670 _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);
2671
2672
2673 page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
2674 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2675 __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
2676 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2677 __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2678 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2679 __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2680
2681 page_exec_bit = _PAGE_EXEC_4U;
2682
2683 prot_init_common(page_none, page_shared, page_copy, page_readonly,
2684 page_exec_bit);
2685}
2686
2687static void __init sun4v_pgprot_init(void)
2688{
2689 unsigned long page_none, page_shared, page_copy, page_readonly;
2690 unsigned long page_exec_bit;
David S. Miller4f93d212012-09-06 18:13:58 -07002691 int i;
David S. Millerc4bce902006-02-11 21:57:54 -08002692
2693 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
Khalid Aziz494e5b62015-05-27 10:00:46 -06002694 page_cache4v_flag | _PAGE_P_4V |
David S. Millerc4bce902006-02-11 21:57:54 -08002695 __ACCESS_BITS_4V | __DIRTY_BITS_4V |
2696 _PAGE_EXEC_4V);
2697 PAGE_KERNEL_LOCKED = PAGE_KERNEL;
David S. Millerc4bce902006-02-11 21:57:54 -08002698
2699 _PAGE_IE = _PAGE_IE_4V;
2700 _PAGE_E = _PAGE_E_4V;
Khalid Aziz494e5b62015-05-27 10:00:46 -06002701 _PAGE_CACHE = page_cache4v_flag;
David S. Millerc4bce902006-02-11 21:57:54 -08002702
David S. Millerd1acb422007-03-16 17:20:28 -07002703#ifdef CONFIG_DEBUG_PAGEALLOC
David S. Miller922631b2013-09-18 12:00:00 -07002704 kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
David S. Millerd1acb422007-03-16 17:20:28 -07002705#else
David S. Miller9cc3a1a2006-02-21 20:51:13 -08002706 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
David S. Miller922631b2013-09-18 12:00:00 -07002707 PAGE_OFFSET;
David S. Millerd1acb422007-03-16 17:20:28 -07002708#endif
Khalid Aziz494e5b62015-05-27 10:00:46 -06002709 kern_linear_pte_xor[0] |= (page_cache4v_flag | _PAGE_P_4V |
2710 _PAGE_W_4V);
David S. Miller9cc3a1a2006-02-21 20:51:13 -08002711
David S. Millerc69ad0a2012-09-06 20:35:36 -07002712 for (i = 1; i < 4; i++)
2713 kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
David S. Miller4f93d212012-09-06 18:13:58 -07002714
David S. Millerc4bce902006-02-11 21:57:54 -08002715 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
2716 __ACCESS_BITS_4V | _PAGE_E_4V);
2717
David S. Millerc4bce902006-02-11 21:57:54 -08002718 _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
2719 _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
2720 _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
2721 _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);
2722
Khalid Aziz494e5b62015-05-27 10:00:46 -06002723 page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | page_cache4v_flag;
2724 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
David S. Millerc4bce902006-02-11 21:57:54 -08002725 __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
Khalid Aziz494e5b62015-05-27 10:00:46 -06002726 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
David S. Millerc4bce902006-02-11 21:57:54 -08002727 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
Khalid Aziz494e5b62015-05-27 10:00:46 -06002728 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
David S. Millerc4bce902006-02-11 21:57:54 -08002729 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
2730
2731 page_exec_bit = _PAGE_EXEC_4V;
2732
2733 prot_init_common(page_none, page_shared, page_copy, page_readonly,
2734 page_exec_bit);
2735}
2736
2737unsigned long pte_sz_bits(unsigned long sz)
2738{
2739 if (tlb_type == hypervisor) {
2740 switch (sz) {
2741 case 8 * 1024:
2742 default:
2743 return _PAGE_SZ8K_4V;
2744 case 64 * 1024:
2745 return _PAGE_SZ64K_4V;
2746 case 512 * 1024:
2747 return _PAGE_SZ512K_4V;
2748 case 4 * 1024 * 1024:
2749 return _PAGE_SZ4MB_4V;
Joe Perches6cb79b32011-06-03 14:45:23 +00002750 }
David S. Millerc4bce902006-02-11 21:57:54 -08002751 } else {
2752 switch (sz) {
2753 case 8 * 1024:
2754 default:
2755 return _PAGE_SZ8K_4U;
2756 case 64 * 1024:
2757 return _PAGE_SZ64K_4U;
2758 case 512 * 1024:
2759 return _PAGE_SZ512K_4U;
2760 case 4 * 1024 * 1024:
2761 return _PAGE_SZ4MB_4U;
Joe Perches6cb79b32011-06-03 14:45:23 +00002762 }
David S. Millerc4bce902006-02-11 21:57:54 -08002763 }
2764}
2765
2766pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
2767{
2768 pte_t pte;
David S. Millercf627152006-02-12 21:10:07 -08002769
2770 pte_val(pte) = page | pgprot_val(pgprot_noncached(prot));
David S. Millerc4bce902006-02-11 21:57:54 -08002771 pte_val(pte) |= (((unsigned long)space) << 32);
2772 pte_val(pte) |= pte_sz_bits(page_size);
David S. Millercf627152006-02-12 21:10:07 -08002773
David S. Millerc4bce902006-02-11 21:57:54 -08002774 return pte;
2775}
2776
David S. Millerc4bce902006-02-11 21:57:54 -08002777static unsigned long kern_large_tte(unsigned long paddr)
2778{
2779 unsigned long val;
2780
2781 val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2782 _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
2783 _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
2784 if (tlb_type == hypervisor)
2785 val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
Khalid Aziz494e5b62015-05-27 10:00:46 -06002786 page_cache4v_flag | _PAGE_P_4V |
David S. Millerc4bce902006-02-11 21:57:54 -08002787 _PAGE_EXEC_4V | _PAGE_W_4V);
2788
2789 return val | paddr;
2790}
2791
/* If not locked, zap it. */
void __flush_tlb_all(void)
{
	unsigned long pstate;
	int i;

	__asm__ __volatile__("flushw\n\t"
			     "rdpr %%pstate, %0\n\t"
			     "wrpr %0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));
	if (tlb_type == hypervisor) {
		sun4v_mmu_demap_all();
	} else if (tlb_type == spitfire) {
		for (i = 0; i < 64; i++) {
			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 * cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "flush %%g6"
					     : /* No outputs */
					     : "r" (0),
					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);
			}

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 * cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "flush %%g6"
					     : /* No outputs */
					     : "r" (0),
					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				spitfire_put_itlb_data(i, 0x0UL);
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		cheetah_flush_dtlb_all();
		cheetah_flush_itlb_all();
	}
	__asm__ __volatile__("wrpr %0, 0, %%pstate"
			     : : "r" (pstate));
}

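/* PTE table allocation: each table is one zeroed page. The kernel
 * variant returns the bare page; pte_alloc_one() below additionally
 * runs pgtable_page_ctor() so the page can host the split page-table
 * lock, releasing the page again if the constructor fails.
 */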
pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
			    unsigned long address)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	pte_t *pte = NULL;

	if (page)
		pte = (pte_t *) page_address(page);

	return pte;
}

pgtable_t pte_alloc_one(struct mm_struct *mm,
			unsigned long address)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		free_hot_cold_page(page, 0);
		return NULL;
	}
	return (pte_t *) page_address(page);
}

void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

static void __pte_free(pgtable_t pte)
{
	struct page *page = virt_to_page(pte);

	pgtable_page_dtor(page);
	__free_page(page);
}

void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	__pte_free(pte);
}

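/* Common free path for page table memory: is_page distinguishes
 * page-backed PTE tables, freed via __pte_free(), from tables carved
 * out of pgtable_cache (the higher levels of the page table tree).
 */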
void pgtable_free(void *table, bool is_page)
{
	if (is_page)
		__pte_free(table);
	else
		kmem_cache_free(pgtable_cache, table);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
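/* Invoked after a huge PMD has been installed: preload the huge-page
 * TSB so the next access does not take a TSB miss. Only valid, large,
 * young PMDs are inserted, and the address bit at REAL_HPAGE_SHIFT
 * selects which of the two 4MB hardware pages backing the 8MB
 * pseudo-huge page this access touched.
 */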
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	unsigned long pte, flags;
	struct mm_struct *mm;
	pmd_t entry = *pmd;

	if (!pmd_large(entry) || !pmd_young(entry))
		return;

	pte = pmd_val(entry);

	/* Don't insert a non-valid PMD into the TSB, we'll deadlock. */
	if (!(pte & _PAGE_VALID))
		return;

	/* We are fabricating 8MB pages using 4MB real hw pages. */
	pte |= (addr & (1UL << REAL_HPAGE_SHIFT));

	mm = vma->vm_mm;

	spin_lock_irqsave(&mm->context.lock, flags);

	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL)
		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
					addr, pte);

	spin_unlock_irqrestore(&mm->context.lock, flags);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
static void context_reload(void *__data)
{
	struct mm_struct *mm = __data;

	if (mm == current->mm)
		load_secondary_context(mm);
}

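/* First huge-page fault in an address space: grow the huge-page TSB
 * and, on UltraSPARC-III+ (cheetah_plus), encode the huge page size
 * into the second page-size field of the context register, reloading
 * that register on every CPU running this mm. Reached from the TLB
 * miss path, hence the exception-table escape for atomic context.
 */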
void hugetlb_setup(struct pt_regs *regs)
{
	struct mm_struct *mm = current->mm;
	struct tsb_config *tp;

	if (faulthandler_disabled() || !mm) {
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->tpc);
		if (entry) {
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			return;
		}
		pr_alert("Unexpected HugeTLB setup in atomic context.\n");
		die_if_kernel("HugeTSB in atomic", regs);
	}

	tp = &mm->context.tsb_block[MM_TSB_HUGE];
	if (likely(tp->tsb == NULL))
		tsb_grow(mm, MM_TSB_HUGE, 0);

	tsb_context_switch(mm);
	smp_tsb_sync(mm);

	/* On UltraSPARC-III+ and later, configure the second half of
	 * the Data-TLB for huge pages.
	 */
	if (tlb_type == cheetah_plus) {
		bool need_context_reload = false;
		unsigned long ctx;

		spin_lock_irq(&ctx_alloc_lock);
		ctx = mm->context.sparc64_ctx_val;
		ctx &= ~CTX_PGSZ_MASK;
		ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
		ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;

		if (ctx != mm->context.sparc64_ctx_val) {
			/* When changing the page size fields, we
			 * must perform a context flush so that no
			 * stale entries match. This flush must
			 * occur with the original context register
			 * settings.
			 */
			do_flush_tlb_mm(mm);

			/* Reload the context register of all processors
			 * also executing in this address space.
			 */
			mm->context.sparc64_ctx_val = ctx;
			need_context_reload = true;
		}
		spin_unlock_irq(&ctx_alloc_lock);

		if (need_context_reload)
			on_each_cpu(context_reload, mm, 0);
	}
}
#endif

static struct resource code_resource = {
	.name = "Kernel code",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource data_resource = {
	.name = "Kernel data",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource bss_resource = {
	.name = "Kernel bss",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

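/* The kernel image is linked at KERNBASE but loaded at kern_base, so
 * image virtual addresses translate to physical by a constant delta.
 * With illustrative values only, KERNBASE = 0x400000 and kern_base =
 * 0x4000000, a symbol at 0x404000 sits at physical 0x4004000.
 */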
static inline resource_size_t compute_kern_paddr(void *addr)
{
	return (resource_size_t) (addr - KERNBASE + kern_base);
}

static void __init kernel_lds_init(void)
{
	code_resource.start = compute_kern_paddr(_text);
	code_resource.end = compute_kern_paddr(_etext - 1);
	data_resource.start = compute_kern_paddr(_etext);
	data_resource.end = compute_kern_paddr(_edata - 1);
	bss_resource.start = compute_kern_paddr(__bss_start);
	bss_resource.end = compute_kern_paddr(_end - 1);
}

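/* Publish RAM and the kernel image ranges in the resource tree, which
 * is what /proc/iomem reports after boot. A plausible (illustrative,
 * machine-dependent) excerpt:
 *
 *	00000000-3fffffff : System RAM
 *	  00404000-00b3dfff : Kernel code
 *	  00b3e000-00ee0bff : Kernel data
 *	  00f22000-0104cfff : Kernel bss
 */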
static int __init report_memory(void)
{
	int i;
	struct resource *res;

	kernel_lds_init();

	for (i = 0; i < pavail_ents; i++) {
		res = kzalloc(sizeof(struct resource), GFP_KERNEL);

		if (!res) {
			pr_warn("Failed to allocate resource.\n");
			break;
		}

		res->name = "System RAM";
		res->start = pavail[i].phys_addr;
		res->end = pavail[i].phys_addr + pavail[i].reg_size - 1;
		res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;

		if (insert_resource(&iomem_resource, res) < 0) {
			pr_warn("Resource insertion failed.\n");
			break;
		}

		insert_resource(res, &code_resource);
		insert_resource(res, &data_resource);
		insert_resource(res, &bss_resource);
	}

	return 0;
}
arch_initcall(report_memory);

#ifdef CONFIG_SMP
#define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range
#else
#define do_flush_tlb_kernel_range __flush_tlb_kernel_range
#endif

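/* Kernel TLB range flushes must skip the firmware (OBP) window
 * between LOW_OBP_ADDRESS and HI_OBP_ADDRESS. A range straddling the
 * window is split, so flushing [start, end) with start below and end
 * above it becomes two flushes: [start, LOW_OBP_ADDRESS) and
 * [HI_OBP_ADDRESS, end).
 */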
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
		if (start < LOW_OBP_ADDRESS) {
			flush_tsb_kernel_range(start, LOW_OBP_ADDRESS);
			do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
		}
		if (end > HI_OBP_ADDRESS) {
			flush_tsb_kernel_range(HI_OBP_ADDRESS, end);
			do_flush_tlb_kernel_range(HI_OBP_ADDRESS, end);
		}
	} else {
		flush_tsb_kernel_range(start, end);
		do_flush_tlb_kernel_range(start, end);
	}
}