/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/ctl_reg.h>
#include <asm/sclp.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));

unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

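/*
 * Allocate the block of empty zero pages and set zero_page_mask.
 * ZERO_PAGE(vaddr) uses the mask to pick one of the 2^order zero pages
 * based on the faulting address, spreading read-mostly zero mappings
 * across the cache; machine generations with larger caches get a
 * larger order.
 */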
static void __init setup_zero_pages(void)
{
	struct cpuid cpu_id;
	unsigned int order;
	struct page *page;
	int i;

	get_cpu_id(&cpu_id);
	switch (cpu_id.machine) {
	case 0x9672:	/* g5 */
	case 0x2064:	/* z900 */
	case 0x2066:	/* z900 */
	case 0x2084:	/* z990 */
	case 0x2086:	/* z990 */
	case 0x2094:	/* z9-109 */
	case 0x2096:	/* z9-109 */
		order = 0;
		break;
	case 0x2097:	/* z10 */
	case 0x2098:	/* z10 */
	case 0x2817:	/* z196 */
	case 0x2818:	/* z196 */
		order = 2;
		break;
	case 0x2827:	/* zEC12 */
	case 0x2828:	/* zEC12 */
		order = 5;
		break;
	case 0x2964:	/* z13 */
	default:
		order = 7;
		break;
	}
	/* Limit number of empty zero pages for small memory sizes */
	while (order > 2 && (totalram_pages >> 10) < (1UL << order))
		order--;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Out of memory in setup_zero_pages");

	page = virt_to_page((void *) empty_zero_page);
	split_page(page, order);
	for (i = 1 << order; i > 0; i--) {
		mark_page_reserved(page);
		page++;
	}

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type, asce_bits;

	init_mm.pgd = swapper_pg_dir;
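	/*
	 * Pick the type of the kernel address space control element
	 * (ASCE): a region-second table (four page table levels) when
	 * the vmalloc area extends beyond 4 TB, otherwise a
	 * region-third table (three levels). The resulting ASCE is
	 * loaded into control registers 1, 7 and 13 further down.
	 */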
#ifdef CONFIG_64BIT
	if (VMALLOC_END > (1UL << 42)) {
		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION2_ENTRY_EMPTY;
	} else {
		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION3_ENTRY_EMPTY;
	}
#else
	asce_bits = _ASCE_TABLE_LENGTH;
	pgd_type = _SEGMENT_ENTRY_EMPTY;
#endif
	S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
	clear_table((unsigned long *) init_mm.pgd, pgd_type,
		    sizeof(unsigned long)*2048);
	vmem_map_init();

	/* enable virtual mapping in kernel mode */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
	arch_local_irq_restore(4UL << (BITS_PER_LONG - 8));

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}

void __init mem_init(void)
{
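	/*
	 * At this point only the boot CPU is attached to init_mm.
	 * Record that in the cpu_attach_mask (used for local TLB
	 * clearing on machines that support it) and in mm_cpumask.
	 */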
	if (MACHINE_HAS_TLB_LC)
		cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(0, mm_cpumask(&init_mm));
	atomic_set(&init_mm.context.attach_count, 1);

	max_mapnr = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	/* Set up guest page hinting */
	cmma_init();

	/* this will put all low memory onto the freelists */
	free_all_bootmem();
	setup_zero_pages();	/* Set up zeroed pages. */

	mem_init_print_info(NULL);
	printk("Write protected kernel read-only data: %#lx - %#lx\n",
	       (unsigned long)&_stext,
	       PFN_ALIGN((unsigned long)&_eshared) - 1);
}

void free_initmem(void)
{
	free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
			   "initrd");
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
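/*
 * Map a hot-added memory range into the kernel 1:1 mapping and add its
 * pages to the zones they fall into; any part of the range that lies
 * outside the existing zone limits goes to ZONE_MOVABLE. The mapping
 * is torn down again if __add_pages() fails.
 */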
int arch_add_memory(int nid, u64 start, u64 size)
{
	unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long size_pages = PFN_DOWN(size);
	struct zone *zone;
	int rc;

	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;
	for_each_zone(zone) {
		if (zone_idx(zone) != ZONE_MOVABLE) {
			/* Add range within existing zone limits */
			zone_start_pfn = zone->zone_start_pfn;
			zone_end_pfn = zone->zone_start_pfn +
				       zone->spanned_pages;
		} else {
			/* Add remaining range to ZONE_MOVABLE */
			zone_start_pfn = start_pfn;
			zone_end_pfn = start_pfn + size_pages;
		}
		if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
			continue;
		nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
			   zone_end_pfn - start_pfn : size_pages;
		rc = __add_pages(nid, zone, start_pfn, nr_pages);
		if (rc)
			break;
		start_pfn += nr_pages;
		size_pages -= nr_pages;
		if (!size_pages)
			break;
	}
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}

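/*
 * sclp_get_rzm() returns the storage increment size reported by the
 * SCLP firmware interface.
 */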
unsigned long memory_block_size_bytes(void)
{
	/*
	 * Make sure the memory block size is always greater than or
	 * equal to the memory increment size.
	 */
	return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp_get_rzm());
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
	/*
	 * There is no hardware or firmware interface which could trigger a
	 * hot memory remove on s390. So there is nothing that needs to be
	 * implemented.
	 */
	return -EBUSY;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */