// SPDX-License-Identifier: GPL-2.0
/*
 * S390 version
 * Copyright IBM Corp. 1999
 * Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1995 Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
#include <linux/cma.h>
#include <linux/gfp.h>
#include <linux/dma-direct.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/kfence.h>
#include <asm/ptdump.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/ctl_reg.h>
#include <asm/sclp.h>
#include <asm/set_memory.h>
#include <asm/kasan.h>
#include <asm/dma-mapping.h>
#include <asm/uv.h>
#include <linux/virtio_config.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
static pgd_t invalid_pg_dir[PTRS_PER_PGD] __section(".bss..invalid_pg_dir");

unsigned long s390_invalid_asce;

unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

static void __init setup_zero_pages(void)
{
	unsigned int order;
	struct page *page;
	int i;

	/* Latest machines require a mapping granularity of 512KB */
	order = 7;

	/* Limit number of empty zero pages for small memory sizes */
	while (order > 2 && (totalram_pages() >> 10) < (1UL << order))
		order--;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Out of memory in setup_zero_pages");

	page = virt_to_page((void *) empty_zero_page);
	split_page(page, order);
	for (i = 1 << order; i > 0; i--) {
		mark_page_reserved(page);
		page++;
	}

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}
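
/*
 * A sketch of how the zero pages are consumed, assuming the usual
 * s390 ZERO_PAGE() definition in <asm/pgtable.h>: zero_page_mask
 * picks one of the 1 << order zero pages based on the virtual
 * address, so read faults on different cache colors are spread
 * over different zero pages:
 *
 *	ZERO_PAGE(vaddr) -> virt_to_page((void *)(empty_zero_page +
 *			    (((unsigned long)(vaddr)) & zero_page_mask)))
 */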

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type, asce_bits;
	psw_t psw;

	s390_invalid_asce = (unsigned long)invalid_pg_dir;
	s390_invalid_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	crst_table_init((unsigned long *)invalid_pg_dir, _REGION3_ENTRY_EMPTY);
	init_mm.pgd = swapper_pg_dir;
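	/*
	 * Pick the kernel page table geometry: a region-second table
	 * gives four translation levels (covering 8 PB), a region-third
	 * table only three (covering 4 TB). Four levels are needed only
	 * if the vmalloc area does not fit below 4 TB.
	 */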
	if (VMALLOC_END > _REGION2_SIZE) {
		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION2_ENTRY_EMPTY;
	} else {
		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION3_ENTRY_EMPTY;
	}
	init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
	S390_lowcore.kernel_asce = init_mm.context.asce;
	S390_lowcore.user_asce = s390_invalid_asce;
	crst_table_init((unsigned long *) init_mm.pgd, pgd_type);
	vmem_map_init();
	kasan_copy_shadow_mapping();

	/*
	 * Enable dynamic address translation in kernel mode: load the
	 * kernel ASCE into the primary (CR1) and home (CR13) space
	 * control registers and the invalid ASCE into the secondary
	 * (CR7) space control register.
	 */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.user_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
	psw.mask = __extract_psw();
	psw_bits(psw).dat = 1;
	psw_bits(psw).as = PSW_BITS_AS_HOME;
	__load_psw_mask(psw.mask);
	kasan_free_early_identity();

	sparse_init();
	zone_dma_bits = 31;
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init(max_zone_pfns);
}

void mark_rodata_ro(void)
{
	unsigned long size = __end_ro_after_init - __start_ro_after_init;

	set_memory_ro((unsigned long)__start_ro_after_init, size >> PAGE_SHIFT);
	pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
	debug_checkwx();
}

int set_memory_encrypted(unsigned long addr, int numpages)
{
	int i;

	/* make specified pages unshared (swiotlb, dma_free) */
	for (i = 0; i < numpages; ++i) {
		uv_remove_shared(addr);
		addr += PAGE_SIZE;
	}
	return 0;
}

int set_memory_decrypted(unsigned long addr, int numpages)
{
	int i;

	/* make specified pages shared (swiotlb, dma_alloc) */
	for (i = 0; i < numpages; ++i) {
		uv_set_shared(addr);
		addr += PAGE_SIZE;
	}
	return 0;
}
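
/*
 * Minimal usage sketch (hypothetical caller; in practice the swiotlb
 * and DMA mapping code drive this, as the comments above note):
 *
 *	set_memory_decrypted((unsigned long)buf, 1);	share with host
 *	... let the device DMA to/from buf ...
 *	set_memory_encrypted((unsigned long)buf, 1);	unshare again
 */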

/* are we a protected virtualization guest? */
bool force_dma_unencrypted(struct device *dev)
{
	return is_prot_virt_guest();
}

#ifdef CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS

int arch_has_restricted_virtio_memory_access(void)
{
	return is_prot_virt_guest();
}
EXPORT_SYMBOL(arch_has_restricted_virtio_memory_access);

#endif
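
/*
 * The virtio core uses arch_has_restricted_virtio_memory_access() to
 * reject devices that lack VIRTIO_F_ACCESS_PLATFORM in a protected
 * guest, since such devices would bypass the DMA API and the bounce
 * buffering set up below.
 */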

/* protected virtualization */
static void pv_init(void)
{
	if (!is_prot_virt_guest())
		return;

	/* make sure bounce buffers are shared */
	swiotlb_force = SWIOTLB_FORCE;
	swiotlb_init(1);
	swiotlb_update_mem_attributes();
}
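
/*
 * Note that pv_init() runs from mem_init() below, before
 * memblock_free_all(), so swiotlb_init() can still allocate its
 * bounce buffer from memblock; swiotlb_update_mem_attributes() then
 * shares that buffer with the hypervisor via set_memory_decrypted().
 */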

void __init mem_init(void)
{
	cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(0, mm_cpumask(&init_mm));

	set_max_mapnr(max_low_pfn);
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	pv_init();
	kfence_split_mapping();
	/* Set up guest page hinting */
	cmma_init();

	/* this will put all low memory onto the freelists */
	memblock_free_all();
	setup_zero_pages();	/* Set up zeroed pages. */

	cmma_init_nodat();
}

void free_initmem(void)
{
	__set_memory((unsigned long)_sinittext,
		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
		     SET_MEMORY_RW | SET_MEMORY_NX);
	free_reserved_area(sclp_early_sccb,
			   sclp_early_sccb + EXT_SCCB_READ_SCP,
			   POISON_FREE_INITMEM, "unused early sccb");
	free_initmem_default(POISON_FREE_INITMEM);
}

unsigned long memory_block_size_bytes(void)
{
	/*
	 * Make sure the memory block size is always greater than or
	 * equal to the memory increment size.
	 */
	return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
}
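
/*
 * Example with hypothetical numbers: if MIN_MEMORY_BLOCK_SIZE is
 * 256 MB and SCLP reports an increment size (sclp.rzm) of 64 MB,
 * blocks stay at 256 MB; an increment of 512 MB would raise the
 * block size to 512 MB.
 */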

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_CMA

/* Prevent memory blocks which contain CMA regions from going offline */

struct s390_cma_mem_data {
	unsigned long start;
	unsigned long end;
};

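/*
 * Return -EBUSY if the given CMA area overlaps the memory range
 * described by @data, 0 otherwise.
 */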
static int s390_cma_check_range(struct cma *cma, void *data)
{
	struct s390_cma_mem_data *mem_data;
	unsigned long start, end;

	mem_data = data;
	start = cma_get_base(cma);
	end = start + cma_get_size(cma);
	if (end < mem_data->start)
		return 0;
	if (start >= mem_data->end)
		return 0;
	return -EBUSY;
}

static int s390_cma_mem_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct s390_cma_mem_data mem_data;
	struct memory_notify *arg;
	int rc = 0;

	arg = data;
	mem_data.start = arg->start_pfn << PAGE_SHIFT;
	mem_data.end = mem_data.start + (arg->nr_pages << PAGE_SHIFT);
	if (action == MEM_GOING_OFFLINE)
		rc = cma_for_each_area(s390_cma_check_range, &mem_data);
	return notifier_from_errno(rc);
}

static struct notifier_block s390_cma_mem_nb = {
	.notifier_call = s390_cma_mem_notifier,
};

static int __init s390_cma_mem_init(void)
{
	return register_memory_notifier(&s390_cma_mem_nb);
}
device_initcall(s390_cma_mem_init);

#endif /* CONFIG_CMA */

int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long size_pages = PFN_DOWN(size);
	int rc;

	if (WARN_ON_ONCE(params->altmap))
		return -EINVAL;

	if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
		return -EINVAL;

	VM_BUG_ON(!mhp_range_allowed(start, size, true));
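	/* create the kernel mapping first, then hand the pages to the core mm */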
	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;

	rc = __add_pages(nid, start_pfn, size_pages, params);
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}

void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	vmem_remove_mapping(start, size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */