// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
#include <linux/cma.h>
#include <linux/gfp.h>
#include <linux/dma-direct.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/kfence.h>
#include <asm/ptdump.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/ctl_reg.h>
#include <asm/sclp.h>
#include <asm/set_memory.h>
#include <asm/kasan.h>
#include <asm/dma-mapping.h>
#include <asm/uv.h>
#include <linux/virtio_config.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
static pgd_t invalid_pg_dir[PTRS_PER_PGD] __section(".bss..invalid_pg_dir");

unsigned long s390_invalid_asce;

unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

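/*
 * Allocate and reserve a block of zero pages. ZERO_PAGE() selects one
 * of them based on the faulting virtual address (cache coloring),
 * which is what zero_page_mask is for.
 */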
static void __init setup_zero_pages(void)
{
	unsigned int order;
	struct page *page;
	int i;

	/* Latest machines require a mapping granularity of 512KB */
	order = 7;

	/* Limit number of empty zero pages for small memory sizes */
	while (order > 2 && (totalram_pages() >> 10) < (1UL << order))
		order--;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Out of memory in setup_zero_pages");

	page = virt_to_page((void *) empty_zero_page);
	split_page(page, order);
	for (i = 1 << order; i > 0; i--) {
		mark_page_reserved(page);
		page++;
	}

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type, asce_bits;
	psw_t psw;

	s390_invalid_asce  = (unsigned long)invalid_pg_dir;
	s390_invalid_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	crst_table_init((unsigned long *)invalid_pg_dir, _REGION3_ENTRY_EMPTY);
	init_mm.pgd = swapper_pg_dir;
	if (VMALLOC_END > _REGION2_SIZE) {
		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION2_ENTRY_EMPTY;
	} else {
		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION3_ENTRY_EMPTY;
	}
	init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
	S390_lowcore.kernel_asce = init_mm.context.asce;
	S390_lowcore.user_asce = s390_invalid_asce;
	crst_table_init((unsigned long *) init_mm.pgd, pgd_type);
	vmem_map_init();
	kasan_copy_shadow_mapping();

	/* enable virtual mapping in kernel mode */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.user_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
	psw.mask = __extract_psw();
	psw_bits(psw).dat = 1;
	psw_bits(psw).as = PSW_BITS_AS_HOME;
	__load_psw_mask(psw.mask);
	kasan_free_early_identity();

	sparse_init();
	zone_dma_bits = 31;
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init(max_zone_pfns);
}

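/*
 * Write protect the __ro_after_init data once the init calls are done;
 * with CONFIG_DEBUG_WX, debug_checkwx() then reports any mapping that
 * was left both writable and executable.
 */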
void mark_rodata_ro(void)
{
	unsigned long size = __end_ro_after_init - __start_ro_after_init;

	set_memory_ro((unsigned long)__start_ro_after_init, size >> PAGE_SHIFT);
	pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
	debug_checkwx();
}

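/*
 * For protected virtualization guests, "encrypted" means not shared
 * with the hypervisor: set_memory_encrypted() withdraws pages from
 * sharing and set_memory_decrypted() shares them, with one ultravisor
 * call per page.
 */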
int set_memory_encrypted(unsigned long addr, int numpages)
{
	int i;

	/* make specified pages unshared (swiotlb, dma_free) */
	for (i = 0; i < numpages; ++i) {
		uv_remove_shared(addr);
		addr += PAGE_SIZE;
	}
	return 0;
}

int set_memory_decrypted(unsigned long addr, int numpages)
{
	int i;

	/* make specified pages shared (swiotlb, dma_alloc) */
	for (i = 0; i < numpages; ++i) {
		uv_set_shared(addr);
		addr += PAGE_SIZE;
	}
	return 0;
}

/* are we a protected virtualization guest? */
bool force_dma_unencrypted(struct device *dev)
{
	return is_prot_virt_guest();
}

#ifdef CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS

int arch_has_restricted_virtio_memory_access(void)
{
	return is_prot_virt_guest();
}
EXPORT_SYMBOL(arch_has_restricted_virtio_memory_access);

#endif

/* protected virtualization */
static void pv_init(void)
{
	if (!is_prot_virt_guest())
		return;

	/* make sure bounce buffers are shared */
	swiotlb_force = SWIOTLB_FORCE;
	swiotlb_init(1);
	swiotlb_update_mem_attributes();
}

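/*
 * Hand all usable memory over to the buddy allocator and finish the
 * early memory management setup (zero pages, CMMA page hinting state).
 */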
void __init mem_init(void)
{
	cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(0, mm_cpumask(&init_mm));

	set_max_mapnr(max_low_pfn);
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	pv_init();
	kfence_split_mapping();
	/* Setup guest page hinting */
	cmma_init();

	/* this will put all low memory onto the freelists */
	memblock_free_all();
	setup_zero_pages();	/* Setup zeroed pages. */

	cmma_init_nodat();
}

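/*
 * Map the init text read-write and non-executable again, then release
 * the init sections and the no longer needed early SCCB buffer.
 */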
void free_initmem(void)
{
	__set_memory((unsigned long)_sinittext,
		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
		     SET_MEMORY_RW | SET_MEMORY_NX);
	free_reserved_area(sclp_early_sccb,
			   sclp_early_sccb + EXT_SCCB_READ_SCP,
			   POISON_FREE_INITMEM, "unused early sccb");
	free_initmem_default(POISON_FREE_INITMEM);
}

unsigned long memory_block_size_bytes(void)
{
	/*
	 * Make sure the memory block size is always greater than or
	 * equal to the memory increment size.
	 */
	return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
}

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_CMA

/* Prevent memory blocks which contain cma regions from going offline */

struct s390_cma_mem_data {
	unsigned long start;
	unsigned long end;
};

static int s390_cma_check_range(struct cma *cma, void *data)
{
	struct s390_cma_mem_data *mem_data;
	unsigned long start, end;

	mem_data = data;
	start = cma_get_base(cma);
	end = start + cma_get_size(cma);
	if (end < mem_data->start)
		return 0;
	if (start >= mem_data->end)
		return 0;
	return -EBUSY;
}

static int s390_cma_mem_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct s390_cma_mem_data mem_data;
	struct memory_notify *arg;
	int rc = 0;

	arg = data;
	mem_data.start = arg->start_pfn << PAGE_SHIFT;
	mem_data.end = mem_data.start + (arg->nr_pages << PAGE_SHIFT);
	if (action == MEM_GOING_OFFLINE)
		rc = cma_for_each_area(s390_cma_check_range, &mem_data);
	return notifier_from_errno(rc);
}

static struct notifier_block s390_cma_mem_nb = {
	.notifier_call = s390_cma_mem_notifier,
};

static int __init s390_cma_mem_init(void)
{
	return register_memory_notifier(&s390_cma_mem_nb);
}
device_initcall(s390_cma_mem_init);

#endif /* CONFIG_CMA */

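/*
 * Memory hot-add: establish the kernel mapping for the new range
 * before handing the pages to the core mm, and tear it down again
 * if __add_pages() fails.
 */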
int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long size_pages = PFN_DOWN(size);
	int rc;

	if (WARN_ON_ONCE(params->altmap))
		return -EINVAL;

	if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
		return -EINVAL;

	VM_BUG_ON(!mhp_range_allowed(start, size, true));
	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;

	rc = __add_pages(nid, start_pfn, size_pages, params);
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}

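/* Memory hot-remove: take the pages back, then drop the kernel mapping. */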
void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	vmem_remove_mapping(start, size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */