// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/parisc/mm/init.c
 *
 *  Copyright (C) 1995	Linus Torvalds
 *  Copyright 1999 SuSE GmbH
 *    changed by Philipp Rumpf
 *  Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 *  Copyright 2004 Randolph Chung (tausq@debian.org)
 *  Copyright 2006-2007 Helge Deller (deller@gmx.de)
 *
 */


#include <linux/module.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/unistd.h>
#include <linux/nodemask.h>	/* for node_online_map */
#include <linux/pagemap.h>	/* for release_pages */
#include <linux/compat.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/pdc_chassis.h>
#include <asm/mmzone.h>
#include <asm/sections.h>
#include <asm/msgbuf.h>
#include <asm/sparsemem.h>

extern int  data_start;
extern void parisc_kernel_start(void);	/* Kernel entry point in head.S */

#if CONFIG_PGTABLE_LEVELS == 3
/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
 * with the first pmd adjacent to the pgd and below it. gcc doesn't actually
 * guarantee that global objects will be laid out in memory in the same order
 * as the order of declaration, so put these in different sections and use
 * the linker script to order them. */
pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE)));
#endif

pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE)));
pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE)));

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource pdcdata_resource = {
	.name	= "PDC data (Page Zero)",
	.start	= 0,
	.end	= 0x9ff,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __ro_after_init;

/* The following array is initialized from the firmware specific
 * information retrieved in kernel/inventory.c.
 */

physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __initdata;
int npmem_ranges __initdata;

#ifdef CONFIG_64BIT
#define MAX_MEM         (1UL << MAX_PHYSMEM_BITS)
#else /* !CONFIG_64BIT */
#define MAX_MEM         (3584U*1024U*1024U)
#endif /* !CONFIG_64BIT */

static unsigned long mem_limit __read_mostly = MAX_MEM;

static void __init mem_limit_func(void)
{
	char *cp, *end;
	unsigned long limit;

	/* We need this before __setup() functions are called */

	limit = MAX_MEM;
	for (cp = boot_command_line; *cp; ) {
		if (memcmp(cp, "mem=", 4) == 0) {
			cp += 4;
			limit = memparse(cp, &end);
			if (end != cp)
				break;
			cp = end;
		} else {
			while (*cp != ' ' && *cp)
				++cp;
			while (*cp == ' ')
				++cp;
		}
	}

	if (limit < mem_limit)
		mem_limit = limit;
}
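
/* For illustration (not part of the original source): booting with
 * "mem=512M" makes memparse() return 0x20000000, so mem_limit is capped
 * at 512 MB and any RAM above that is truncated in setup_bootmem() below. */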

#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)
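/* Illustrative note: with 4 kB pages (PAGE_SHIFT == 12) this works out to
 * MAX_GAP == 0x40000 pfns, so two ranges separated by more than 1 GB are
 * treated as discontiguous below; the value depends on the page size. */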

static void __init setup_bootmem(void)
{
	unsigned long mem_max;
#ifndef CONFIG_SPARSEMEM
	physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
	int npmem_holes;
#endif
	int i, sysram_resource_count;

	disable_sr_hashing(); /* Turn off space register hashing */

	/*
	 * Sort the ranges. Since the number of ranges is typically
	 * small, and performance is not an issue here, just do
	 * a simple insertion sort.
	 */

	for (i = 1; i < npmem_ranges; i++) {
		int j;

		for (j = i; j > 0; j--) {
			physmem_range_t tmp;

			if (pmem_ranges[j-1].start_pfn <
			    pmem_ranges[j].start_pfn) {
				break;
			}
			tmp = pmem_ranges[j-1];
			pmem_ranges[j-1] = pmem_ranges[j];
			pmem_ranges[j] = tmp;
		}
	}

#ifndef CONFIG_SPARSEMEM
	/*
	 * Throw out ranges that are too far apart (controlled by
	 * MAX_GAP).
	 */

	for (i = 1; i < npmem_ranges; i++) {
		if (pmem_ranges[i].start_pfn -
			(pmem_ranges[i-1].start_pfn +
			 pmem_ranges[i-1].pages) > MAX_GAP) {
			npmem_ranges = i;
			printk("Large gap in memory detected (%ld pages). "
			       "Consider turning on CONFIG_SPARSEMEM\n",
			       pmem_ranges[i].start_pfn -
			       (pmem_ranges[i-1].start_pfn +
				pmem_ranges[i-1].pages));
			break;
		}
	}
#endif

	/* Print the memory ranges */
	pr_info("Memory Ranges:\n");

	for (i = 0; i < npmem_ranges; i++) {
		struct resource *res = &sysram_resources[i];
		unsigned long start;
		unsigned long size;

		size = (pmem_ranges[i].pages << PAGE_SHIFT);
		start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
		pr_info("%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
			i, start, start + (size - 1), size >> 20);

		/* request memory resource */
		res->name = "System RAM";
		res->start = start;
		res->end = start + size - 1;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);
	}

	sysram_resource_count = npmem_ranges;

	/*
	 * For 32 bit kernels we limit the amount of memory we can
	 * support, in order to preserve enough kernel address space
	 * for other purposes. For 64 bit kernels we don't normally
	 * limit the memory, but this mechanism can be used to
	 * artificially limit the amount of memory (and it is written
	 * to work with multiple memory ranges).
	 */

	mem_limit_func();       /* check for "mem=" argument */

	mem_max = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long rsize;

		rsize = pmem_ranges[i].pages << PAGE_SHIFT;
		if ((mem_max + rsize) > mem_limit) {
			printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20);
			if (mem_max == mem_limit)
				npmem_ranges = i;
			else {
				pmem_ranges[i].pages = (mem_limit >> PAGE_SHIFT)
						       - (mem_max >> PAGE_SHIFT);
				npmem_ranges = i + 1;
				mem_max = mem_limit;
			}
			break;
		}
		mem_max += rsize;
	}

	printk(KERN_INFO "Total Memory: %ld MB\n", mem_max >> 20);

#ifndef CONFIG_SPARSEMEM
	/* Merge the ranges, keeping track of the holes */
	{
		unsigned long end_pfn;
		unsigned long hole_pages;

		npmem_holes = 0;
		end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
		for (i = 1; i < npmem_ranges; i++) {

			hole_pages = pmem_ranges[i].start_pfn - end_pfn;
			if (hole_pages) {
				pmem_holes[npmem_holes].start_pfn = end_pfn;
				pmem_holes[npmem_holes++].pages = hole_pages;
				end_pfn += hole_pages;
			}
			end_pfn += pmem_ranges[i].pages;
		}

		pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
		npmem_ranges = 1;
	}
#endif

	/*
	 * Initialize and free the full range of memory in each range.
	 */

	max_pfn = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long start_pfn;
		unsigned long npages;
		unsigned long start;
		unsigned long size;

		start_pfn = pmem_ranges[i].start_pfn;
		npages = pmem_ranges[i].pages;

		start = start_pfn << PAGE_SHIFT;
		size = npages << PAGE_SHIFT;

		/* add system RAM memblock */
		memblock_add(start, size);

		if ((start_pfn + npages) > max_pfn)
			max_pfn = start_pfn + npages;
	}

	/*
	 * We can't use memblock top-down allocations because we only
	 * created the initial mapping up to KERNEL_INITIAL_SIZE in
	 * the assembly bootup code.
	 */
	memblock_set_bottom_up(true);

	/* IOMMU is always used to access "high mem" on those boxes
	 * that can support enough mem that a PCI device couldn't
	 * directly DMA to any physical addresses.
	 * ISA DMA support will need to revisit this.
	 */
	max_low_pfn = max_pfn;

	/* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */

#define PDC_CONSOLE_IO_IODC_SIZE 32768

	memblock_reserve(0UL, (unsigned long)(PAGE0->mem_free +
				PDC_CONSOLE_IO_IODC_SIZE));
	memblock_reserve(__pa(KERNEL_BINARY_TEXT_START),
			(unsigned long)(_end - KERNEL_BINARY_TEXT_START));

#ifndef CONFIG_SPARSEMEM

	/* reserve the holes */

	for (i = 0; i < npmem_holes; i++) {
		memblock_reserve((pmem_holes[i].start_pfn << PAGE_SHIFT),
				(pmem_holes[i].pages << PAGE_SHIFT));
	}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
		if (__pa(initrd_start) < mem_max) {
			unsigned long initrd_reserve;

			if (__pa(initrd_end) > mem_max) {
				initrd_reserve = mem_max - __pa(initrd_start);
			} else {
				initrd_reserve = initrd_end - initrd_start;
			}
			initrd_below_start_ok = 1;
			printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);

			memblock_reserve(__pa(initrd_start), initrd_reserve);
		}
	}
#endif

	data_resource.start = virt_to_phys(&data_start);
	data_resource.end = virt_to_phys(_end) - 1;
	code_resource.start = virt_to_phys(_text);
	code_resource.end = virt_to_phys(&data_start)-1;

	/* We don't know which region the kernel will be in, so try
	 * all of them.
	 */
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
	}
	request_resource(&sysram_resources[0], &pdcdata_resource);

	/* Initialize Page Deallocation Table (PDT) and check for bad memory. */
	pdc_pdt_init();

	memblock_allow_resize();
	memblock_dump_all();
}

static bool kernel_set_to_readonly;

static void __init map_pages(unsigned long start_vaddr,
			     unsigned long start_paddr, unsigned long size,
			     pgprot_t pgprot, int force)
{
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long end_paddr;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long tmp1;
	unsigned long tmp2;
	unsigned long address;
	unsigned long vaddr;
	unsigned long ro_start;
	unsigned long ro_end;
	unsigned long kernel_start, kernel_end;

	ro_start = __pa((unsigned long)_text);
	ro_end   = __pa((unsigned long)&data_start);
	kernel_start = __pa((unsigned long)&__init_begin);
	kernel_end  = __pa((unsigned long)&_end);

	end_paddr = start_paddr + size;

	/* for 2-level configuration PTRS_PER_PMD is 0 so start_pmd will be 0 */
	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = start_paddr;
	vaddr = start_vaddr;
	while (address < end_paddr) {
		pgd_t *pgd = pgd_offset_k(vaddr);
		p4d_t *p4d = p4d_offset(pgd, vaddr);
		pud_t *pud = pud_offset(p4d, vaddr);

#if CONFIG_PGTABLE_LEVELS == 3
		if (pud_none(*pud)) {
			pmd = memblock_alloc(PAGE_SIZE << PMD_ORDER,
					     PAGE_SIZE << PMD_ORDER);
			if (!pmd)
				panic("pmd allocation failed.\n");
			pud_populate(NULL, pud, pmd);
		}
#endif

		pmd = pmd_offset(pud, vaddr);
		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {
			if (pmd_none(*pmd)) {
				pg_table = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
				if (!pg_table)
					panic("page table allocation failed\n");
				pmd_populate_kernel(NULL, pmd, pg_table);
			}

			pg_table = pte_offset_kernel(pmd, vaddr);
			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
				pte_t pte;
				pgprot_t prot;
				bool huge = false;

				if (force) {
					prot = pgprot;
				} else if (address < kernel_start || address >= kernel_end) {
					/* outside kernel memory */
					prot = PAGE_KERNEL;
				} else if (!kernel_set_to_readonly) {
					/* still initializing, allow writing to RO memory */
					prot = PAGE_KERNEL_RWX;
					huge = true;
				} else if (address >= ro_start) {
					/* Code (ro) and Data areas */
					prot = (address < ro_end) ?
						PAGE_KERNEL_EXEC : PAGE_KERNEL;
					huge = true;
				} else {
					prot = PAGE_KERNEL;
				}

				pte = __mk_pte(address, prot);
				if (huge)
					pte = pte_mkhuge(pte);

				if (address >= end_paddr)
					break;

				set_pte(pg_table, pte);

				address += PAGE_SIZE;
				vaddr += PAGE_SIZE;
			}
			start_pte = 0;

			if (address >= end_paddr)
				break;
		}
		start_pmd = 0;
	}
}
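
/* Typical non-forced use, as in pagetable_init() below, maps a physical
 * range at its kernel virtual address and lets map_pages() pick protections:
 *
 *	map_pages((unsigned long)__va(start_paddr), start_paddr, size,
 *		  PAGE_KERNEL, 0);
 *
 * force=1 bypasses the kernel text/data logic and applies pgprot as given. */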

void __init set_kernel_text_rw(int enable_read_write)
{
	unsigned long start = (unsigned long) __init_begin;
	unsigned long end   = (unsigned long) &data_start;

	map_pages(start, __pa(start), end-start,
		PAGE_KERNEL_RWX, enable_read_write ? 1 : 0);

	/* force the kernel to see the new page table entries */
	flush_cache_all();
	flush_tlb_all();
}

void __ref free_initmem(void)
{
	unsigned long init_begin = (unsigned long)__init_begin;
	unsigned long init_end = (unsigned long)__init_end;
	unsigned long kernel_end  = (unsigned long)&_end;

	/* Remap kernel text and data, but do not touch init section yet. */
	kernel_set_to_readonly = true;
	map_pages(init_end, __pa(init_end), kernel_end - init_end,
		  PAGE_KERNEL, 0);

	/* The init text pages are marked R-X.  We have to
	 * flush the icache and mark them RW-
	 *
	 * This is tricky, because map_pages is in the init section.
	 * Do a dummy remap of the data section first (the data
	 * section is already PAGE_KERNEL) to pull in the TLB entries
	 * for map_pages */
	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
		  PAGE_KERNEL_RWX, 1);
	/* now remap at PAGE_KERNEL since the TLB is pre-primed to execute
	 * map_pages */
	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
		  PAGE_KERNEL, 1);

	/* force the kernel to see the new TLB entries */
	__flush_tlb_range(0, init_begin, kernel_end);

	/* finally dump all the instructions which were cached, since the
	 * pages are no longer executable */
	flush_icache_range(init_begin, init_end);

	free_initmem_default(POISON_FREE_INITMEM);

	/* set up a new LED state on systems shipped with an LED State panel */
	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
}


#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
	/* rodata memory was already mapped with KERNEL_RO access rights by
	   pagetable_init() and map_pages(). No need to do additional stuff here */
	unsigned long roai_size = __end_ro_after_init - __start_ro_after_init;

	pr_info("Write protected read-only-after-init data: %luk\n", roai_size >> 10);
}
#endif


/*
 * Just an arbitrary offset to serve as a "hole" between mapping areas
 * (between top of physical memory and a potential pcxl dma mapping
 * area, and below the vmalloc mapping area).
 *
 * The current 32K value just means that there will be a 32K "hole"
 * between mapping areas. That means that any out-of-bounds memory
 * accesses will hopefully be caught. The vmalloc() routines leave
 * a hole of 4kB between each vmalloced area for the same reason.
 */

/* Leave room for gateway page expansion */
#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
#error KERNEL_MAP_START is in gateway reserved region
#endif
#define MAP_START (KERNEL_MAP_START)

#define VM_MAP_OFFSET  (32*1024)
#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
				     & ~(VM_MAP_OFFSET-1)))
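/* Worked example (illustrative): VM_MAP_OFFSET is 0x8000, so
 * SET_MAP_OFFSET(0x12345) == (0x12345 + 0x8000) & ~0x7fffUL == 0x18000,
 * i.e. the next 32K boundary strictly above the address, which guarantees
 * the hole described in the comment above. */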

void *parisc_vmalloc_start __ro_after_init;
EXPORT_SYMBOL(parisc_vmalloc_start);

#ifdef CONFIG_PA11
unsigned long pcxl_dma_start __ro_after_init;
#endif

void __init mem_init(void)
{
	/* Do sanity checks on IPC (compat) structures */
	BUILD_BUG_ON(sizeof(struct ipc64_perm) != 48);
#ifndef CONFIG_64BIT
	BUILD_BUG_ON(sizeof(struct semid64_ds) != 80);
	BUILD_BUG_ON(sizeof(struct msqid64_ds) != 104);
	BUILD_BUG_ON(sizeof(struct shmid64_ds) != 104);
#endif
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(sizeof(struct compat_ipc64_perm) != sizeof(struct ipc64_perm));
	BUILD_BUG_ON(sizeof(struct compat_semid64_ds) != 80);
	BUILD_BUG_ON(sizeof(struct compat_msqid64_ds) != 104);
	BUILD_BUG_ON(sizeof(struct compat_shmid64_ds) != 104);
#endif

	/* Do sanity checks on page table constants */
	BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t));
	BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t));
	BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t));
	BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD
			> BITS_PER_LONG);

	high_memory = __va((max_pfn << PAGE_SHIFT));
	set_max_mapnr(max_low_pfn);
	memblock_free_all();

#ifdef CONFIG_PA11
	if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
		pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
		parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
						+ PCXL_DMA_MAP_SIZE);
	} else
#endif
		parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);

	mem_init_print_info(NULL);

#if 0
	/*
	 * Do not expose the virtual kernel memory layout to userspace.
	 * But keep code for debugging purposes.
	 */
	printk("virtual kernel memory layout:\n"
	       "     vmalloc : 0x%px - 0x%px   (%4ld MB)\n"
	       "     fixmap  : 0x%px - 0x%px   (%4ld kB)\n"
	       "     memory  : 0x%px - 0x%px   (%4ld MB)\n"
	       "       .init : 0x%px - 0x%px   (%4ld kB)\n"
	       "       .data : 0x%px - 0x%px   (%4ld kB)\n"
	       "       .text : 0x%px - 0x%px   (%4ld kB)\n",

	       (void*)VMALLOC_START, (void*)VMALLOC_END,
	       (VMALLOC_END - VMALLOC_START) >> 20,

	       (void *)FIXMAP_START, (void *)(FIXMAP_START + FIXMAP_SIZE),
	       (unsigned long)(FIXMAP_SIZE / 1024),

	       __va(0), high_memory,
	       ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

	       __init_begin, __init_end,
	       ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10,

	       _etext, _edata,
	       ((unsigned long)_edata - (unsigned long)_etext) >> 10,

	       _text, _etext,
	       ((unsigned long)_etext - (unsigned long)_text) >> 10);
#endif
}

unsigned long *empty_zero_page __ro_after_init;
EXPORT_SYMBOL(empty_zero_page);

/*
 * pagetable_init() sets up the page tables
 *
 * Note that gateway_init() places the Linux gateway page at page 0.
 * Since gateway pages cannot be dereferenced this has the desirable
 * side effect of trapping those pesky NULL-reference errors in the
 * kernel.
 */
static void __init pagetable_init(void)
{
	int range;

	/* Map each physical memory range to its kernel vaddr */

	for (range = 0; range < npmem_ranges; range++) {
		unsigned long start_paddr;
		unsigned long end_paddr;
		unsigned long size;

		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
		size = pmem_ranges[range].pages << PAGE_SHIFT;
		end_paddr = start_paddr + size;

		map_pages((unsigned long)__va(start_paddr), start_paddr,
			  size, PAGE_KERNEL, 0);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_end && initrd_end > mem_limit) {
		printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
		map_pages(initrd_start, __pa(initrd_start),
			  initrd_end - initrd_start, PAGE_KERNEL, 0);
	}
#endif

	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!empty_zero_page)
		panic("zero page allocation failed.\n");

}

static void __init gateway_init(void)
{
	unsigned long linux_gateway_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const linux_gateway_page;

	linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup Linux Gateway page.
	 *
	 * The Linux gateway page will reside in kernel space (on virtual
	 * page 0), so it doesn't need to be aliased into user space.
	 */

	map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
		  PAGE_SIZE, PAGE_GATEWAY, 1);
}

static void __init parisc_bootmem_free(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
	unsigned long holes_size[MAX_NR_ZONES] = { 0, };
	unsigned long mem_start_pfn = ~0UL, mem_end_pfn = 0, mem_size_pfn = 0;
	int i;

	for (i = 0; i < npmem_ranges; i++) {
		unsigned long start = pmem_ranges[i].start_pfn;
		unsigned long size = pmem_ranges[i].pages;
		unsigned long end = start + size;

		if (mem_start_pfn > start)
			mem_start_pfn = start;
		if (mem_end_pfn < end)
			mem_end_pfn = end;
		mem_size_pfn += size;
	}

	zones_size[0] = mem_end_pfn - mem_start_pfn;
	holes_size[0] = zones_size[0] - mem_size_pfn;

	free_area_init_node(0, zones_size, mem_start_pfn, holes_size);
}

void __init paging_init(void)
{
	setup_bootmem();
	pagetable_init();
	gateway_init();
	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local(NULL);

	/*
	 * Mark all memblocks as present for sparsemem using
	 * memory_present() and then initialize sparsemem.
	 */
	memblocks_present();
	sparse_init();
	parisc_bootmem_free();
}

#ifdef CONFIG_PA20

/*
 * Currently, all PA20 chips have 18 bit protection IDs, which is the
 * limiting factor (space ids are 32 bits).
 */

#define NR_SPACE_IDS 262144

#else

/*
 * Currently we have a one-to-one relationship between space IDs and
 * protection IDs. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
 * support 15 bit protection IDs, so that is the limiting factor.
 * PCXT' has 18 bit protection IDs, but only 16 bit spaceids, so it's
 * probably not worth the effort for a special case here.
 */

#define NR_SPACE_IDS 32768

#endif  /* !CONFIG_PA20 */

#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
#define SID_ARRAY_SIZE (NR_SPACE_IDS / (8 * sizeof(long)))
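/* For illustration: with NR_SPACE_IDS == 32768 and 64-bit longs this gives
 * SID_ARRAY_SIZE == 32768 / 64 == 512 longs for each bitmap, and recycling
 * kicks in once 16384 space ids are dirty. */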

static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
static unsigned long dirty_space_id[SID_ARRAY_SIZE];
static unsigned long space_id_index;
static unsigned long free_space_ids = NR_SPACE_IDS - 1;
static unsigned long dirty_space_ids = 0;

static DEFINE_SPINLOCK(sid_lock);

unsigned long alloc_sid(void)
{
	unsigned long index;

	spin_lock(&sid_lock);

	if (free_space_ids == 0) {
		if (dirty_space_ids != 0) {
			spin_unlock(&sid_lock);
			flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
			spin_lock(&sid_lock);
		}
		BUG_ON(free_space_ids == 0);
	}

	free_space_ids--;

	index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
	space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
	space_id_index = index;

	spin_unlock(&sid_lock);

	return index << SPACEID_SHIFT;
}
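
/* Illustrative note: the returned space id is just the bitmap index shifted
 * left by SPACEID_SHIFT; free_sid() below undoes that shift to locate the
 * bit and only marks it dirty, so the id is not reusable until the TLB has
 * actually been flushed (see recycle_sids()). */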

void free_sid(unsigned long spaceid)
{
	unsigned long index = spaceid >> SPACEID_SHIFT;
	unsigned long *dirty_space_offset;

	dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
	index &= (BITS_PER_LONG - 1);

	spin_lock(&sid_lock);

	BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */

	*dirty_space_offset |= (1L << index);
	dirty_space_ids++;

	spin_unlock(&sid_lock);
}


#ifdef CONFIG_SMP
static void get_dirty_sids(unsigned long *ndirtyptr, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	*ndirtyptr = dirty_space_ids;
	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			dirty_array[i] = dirty_space_id[i];
			dirty_space_id[i] = 0;
		}
		dirty_space_ids = 0;
	}

	return;
}

static void recycle_sids(unsigned long ndirty, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (ndirty != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_array[i];
		}

		free_space_ids += ndirty;
		space_id_index = 0;
	}
}

#else /* CONFIG_SMP */

static void recycle_sids(void)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_space_id[i];
			dirty_space_id[i] = 0;
		}

		free_space_ids += dirty_space_ids;
		dirty_space_ids = 0;
		space_id_index = 0;
	}
}
#endif

/*
 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
 * purged, we can safely reuse the space ids that were released but
 * not flushed from the tlb.
 */

#ifdef CONFIG_SMP

static unsigned long recycle_ndirty;
static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
static unsigned int recycle_inuse;

void flush_tlb_all(void)
{
	int do_recycle;

	__inc_irq_stat(irq_tlb_count);
	do_recycle = 0;
	spin_lock(&sid_lock);
	if (dirty_space_ids > RECYCLE_THRESHOLD) {
		BUG_ON(recycle_inuse);  /* FIXME: Use a semaphore/wait queue here */
		get_dirty_sids(&recycle_ndirty, recycle_dirty_array);
		recycle_inuse++;
		do_recycle++;
	}
	spin_unlock(&sid_lock);
	on_each_cpu(flush_tlb_all_local, NULL, 1);
	if (do_recycle) {
		spin_lock(&sid_lock);
		recycle_sids(recycle_ndirty, recycle_dirty_array);
		recycle_inuse = 0;
		spin_unlock(&sid_lock);
	}
}
#else
void flush_tlb_all(void)
{
	__inc_irq_stat(irq_tlb_count);
	spin_lock(&sid_lock);
	flush_tlb_all_local(NULL);
	recycle_sids();
	spin_unlock(&sid_lock);
}
#endif