// SPDX-License-Identifier: GPL-2.0
/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section **mem_section;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static noinline struct mem_section __ref *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available()) {
		section = kzalloc_node(array_size, GFP_KERNEL, nid);
	} else {
		section = memblock_alloc_node(array_size, SMP_CACHE_BYTES,
					      nid);
		if (!section)
			panic("%s: Failed to allocate %lu bytes nid=%d\n",
			      __func__, array_size, nid);
	}

	return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;

	/*
	 * An existing section is possible in the sub-section hotplug
	 * case. First hot-add instantiates, follow-on hot-add reuses
	 * the existing section.
	 *
	 * The mem_hotplug_lock resolves the apparent race below.
	 */
	if (mem_section[root])
		return 0;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;

	mem_section[root] = section;

	return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
unsigned long __section_nr(struct mem_section *ms)
{
	unsigned long root_nr;
	struct mem_section *root = NULL;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	VM_BUG_ON(!root);

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
#else
unsigned long __section_nr(struct mem_section *ms)
{
	return (unsigned long)(ms - mem_section[0]);
}
#endif

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}
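
/*
 * Example of the encoding above: during early boot a section on node 2
 * temporarily carries (2 << SECTION_NID_SHIFT) in ->section_mem_map, and
 * sparse_early_nid() recovers the 2 by shifting it back down. The real,
 * coded mem_map pointer replaces this in sparse_init_one_section().
 */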

/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	} else if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}

/*
 * There are a number of times that we loop over NR_MEM_SECTIONS,
 * looking for section_present() on each. But, when we have very
 * large physical address spaces, NR_MEM_SECTIONS can also be
 * very large which makes the loops quite long.
 *
 * Keeping track of this gives us an easy way to break out of
 * those loops early.
 */
unsigned long __highest_present_section_nr;
static void section_mark_present(struct mem_section *ms)
{
	unsigned long section_nr = __section_nr(ms);

	if (section_nr > __highest_present_section_nr)
		__highest_present_section_nr = section_nr;

	ms->section_mem_map |= SECTION_MARKED_PRESENT;
}

#define for_each_present_section_nr(start, section_nr)		\
	for (section_nr = next_present_section_nr(start-1);	\
	     ((section_nr != -1) &&				\
	      (section_nr <= __highest_present_section_nr));	\
	     section_nr = next_present_section_nr(section_nr))

static inline unsigned long first_present_section_nr(void)
{
	return next_present_section_nr(-1);
}

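/*
 * Set the bits in @map for the subsections covered by [pfn, pfn + nr_pages).
 * For example, on a configuration with 2MB subsections and 128MB sections
 * (typical x86_64 values), a 4MB range starting 2MB into a section sets
 * bits 1 and 2 of that section's subsection_map.
 */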
static void subsection_mask_set(unsigned long *map, unsigned long pfn,
		unsigned long nr_pages)
{
	int idx = subsection_map_index(pfn);
	int end = subsection_map_index(pfn + nr_pages - 1);

	bitmap_set(map, idx, end - idx + 1);
}

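/*
 * Mark the subsections spanning [pfn, pfn + nr_pages) as present in the
 * subsection_map of every section the range touches, walking one section
 * at a time so ranges crossing a section boundary are handled correctly.
 */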
void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
{
	int end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
	unsigned long nr, start_sec = pfn_to_section_nr(pfn);

	if (!nr_pages)
		return;

	for (nr = start_sec; nr <= end_sec; nr++) {
		struct mem_section *ms;
		unsigned long pfns;

		pfns = min(nr_pages, PAGES_PER_SECTION
				- (pfn & ~PAGE_SECTION_MASK));
		ms = __nr_to_section(nr);
		subsection_mask_set(ms->usage->subsection_map, pfn, pfns);

		pr_debug("%s: sec: %lu pfns: %lu set(%d, %d)\n", __func__, nr,
				pfns, subsection_map_index(pfn),
				subsection_map_index(pfn + pfns - 1));

		pfn += pfns;
		nr_pages -= pfns;
	}
}

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

#ifdef CONFIG_SPARSEMEM_EXTREME
	if (unlikely(!mem_section)) {
		unsigned long size, align;

		size = sizeof(struct mem_section*) * NR_SECTION_ROOTS;
		align = 1 << (INTERNODE_CACHE_SHIFT);
		mem_section = memblock_alloc(size, align);
		if (!mem_section)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, size, align);
	}
#endif

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map) {
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_IS_ONLINE;
			section_mark_present(ms);
		}
	}
}

/*
 * Mark all memblocks as present using memory_present(). This is a
 * convenience function that is useful for a number of arches
 * to mark all of the system's memory as present during initialization.
 */
void __init memblocks_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		memory_present(memblock_get_region_node(reg),
			       memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
	}
}

/*
 * Subtle, we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	unsigned long coded_mem_map =
		(unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
	BUILD_BUG_ON(SECTION_MAP_LAST_BIT > (1UL<<PFN_SECTION_SHIFT));
	BUG_ON(coded_mem_map & ~SECTION_MAP_MASK);
	return coded_mem_map;
}
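
/*
 * Invariant of the encoding above: once the low flag bits are masked off,
 * (struct page *)coded_mem_map + pfn is the struct page of @pfn for every
 * pfn in the section, which is what the classic (non-vmemmap) SPARSEMEM
 * __pfn_to_page() relies on.
 */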

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}

static void __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		struct mem_section_usage *usage, unsigned long flags)
{
	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum)
		| SECTION_HAS_MEM_MAP | flags;
	ms->usage = usage;
}

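/* Size, in bytes, of one section's pageblock flags bitmap. */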
static unsigned long usemap_size(void)
{
	return BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS) * sizeof(unsigned long);
}

size_t mem_section_usage_size(void)
{
	return sizeof(struct mem_section_usage) + usemap_size();
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	struct mem_section_usage *usage;
	unsigned long goal, limit;
	int nid;
	/*
	 * A page may contain usemaps for other sections preventing the
	 * page being freed and making a section unremovable while
	 * other sections referencing the usemap remain active. Similarly,
	 * a pgdat can prevent a section being removed. If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent. This allocates usemaps
	 * from the same section as the pgdat where possible to avoid
	 * this problem.
	 */
	goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
	limit = goal + (1UL << PA_SECTION_SHIFT);
	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
	usage = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, goal, limit, nid);
	if (!usage && limit) {
		limit = 0;
		goto again;
	}
	return usage;
}

static void __init check_usemap_section_nr(int nid,
		struct mem_section_usage *usage)
{
	unsigned long usemap_snr, pgdat_snr;
	static unsigned long old_usemap_snr;
	static unsigned long old_pgdat_snr;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	/* First call */
	if (!old_usemap_snr) {
		old_usemap_snr = NR_MEM_SECTIONS;
		old_pgdat_snr = NR_MEM_SECTIONS;
	}

	usemap_snr = pfn_to_section_nr(__pa(usage) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
		/* skip redundant message */
		return;

	old_usemap_snr = usemap_snr;
	old_pgdat_snr = pgdat_snr;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		pr_info("node %d must be removed before remove section %ld\n",
			nid, usemap_snr);
		return;
	}
	/*
	 * There is a circular dependency.
	 * Some platforms allow un-removable section because they will just
	 * gather other removable sections for dynamic partitioning.
	 * Just notify un-removable section's number here.
	 */
	pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n",
		usemap_snr, pgdat_snr, nid);
}
#else
static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	return memblock_alloc_node(size, SMP_CACHE_BYTES, pgdat->node_id);
}

static void __init check_usemap_section_nr(int nid,
		struct mem_section_usage *usage)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
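/*
 * With SPARSEMEM_VMEMMAP the per-section memmap is sized and aligned to
 * PMD_SIZE so the virtual memmap can be backed by huge page mappings
 * where the architecture supports it.
 */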
static unsigned long __init section_map_size(void)
{
	return ALIGN(sizeof(struct page) * PAGES_PER_SECTION, PMD_SIZE);
}

#else
static unsigned long __init section_map_size(void)
{
	return PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
}

struct page __init *__populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	unsigned long size = section_map_size();
	struct page *map = sparse_buffer_alloc(size);
	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);

	if (map)
		return map;

	map = memblock_alloc_try_nid_raw(size, size, addr,
					 MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	if (!map)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa\n",
		      __func__, size, PAGE_SIZE, nid, &addr);

	return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

static void *sparsemap_buf __meminitdata;
static void *sparsemap_buf_end __meminitdata;

static inline void __meminit sparse_buffer_free(unsigned long size)
{
	WARN_ON(!sparsemap_buf || size == 0);
	memblock_free_early(__pa(sparsemap_buf), size);
}

static void __init sparse_buffer_init(unsigned long size, int nid)
{
	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);
	WARN_ON(sparsemap_buf);	/* forgot to call sparse_buffer_fini()? */
	/*
	 * Pre-allocated buffer is mainly used by __populate_section_memmap
	 * and we want it to be properly aligned to the section size - this is
	 * especially the case for VMEMMAP which maps memmap to PMDs
	 */
	sparsemap_buf = memblock_alloc_exact_nid_raw(size, section_map_size(),
		addr, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	sparsemap_buf_end = sparsemap_buf + size;
}

static void __init sparse_buffer_fini(void)
{
	unsigned long size = sparsemap_buf_end - sparsemap_buf;

	if (sparsemap_buf && size > 0)
		sparse_buffer_free(size);
	sparsemap_buf = NULL;
}

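/*
 * Hand out @size bytes from the pre-allocated sparsemap buffer, rounded up
 * so the returned pointer is aligned to @size. Alignment padding is given
 * back to memblock, and NULL is returned once the buffer is exhausted so
 * callers can fall back to a direct memblock allocation.
 */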
void * __meminit sparse_buffer_alloc(unsigned long size)
{
	void *ptr = NULL;

	if (sparsemap_buf) {
		ptr = (void *) roundup((unsigned long)sparsemap_buf, size);
		if (ptr + size > sparsemap_buf_end)
			ptr = NULL;
		else {
			/* Free redundant aligned space */
			if ((unsigned long)(ptr - sparsemap_buf) > 0)
				sparse_buffer_free((unsigned long)(ptr - sparsemap_buf));
			sparsemap_buf = ptr + size;
		}
	}
	return ptr;
}

void __weak __meminit vmemmap_populate_print_last(void)
{
}

/*
 * Initialize sparse on a specific node. The node spans [pnum_begin, pnum_end),
 * and the number of present sections in this node is map_count.
 */
static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
				   unsigned long pnum_end,
				   unsigned long map_count)
{
	struct mem_section_usage *usage;
	unsigned long pnum;
	struct page *map;

	usage = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nid),
			mem_section_usage_size() * map_count);
	if (!usage) {
		pr_err("%s: node[%d] usemap allocation failed", __func__, nid);
		goto failed;
	}
	sparse_buffer_init(map_count * section_map_size(), nid);
	for_each_present_section_nr(pnum_begin, pnum) {
		unsigned long pfn = section_nr_to_pfn(pnum);

		if (pnum >= pnum_end)
			break;

		map = __populate_section_memmap(pfn, PAGES_PER_SECTION,
				nid, NULL);
		if (!map) {
			pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.",
			       __func__, nid);
			pnum_begin = pnum;
			goto failed;
		}
		check_usemap_section_nr(nid, usage);
		sparse_init_one_section(__nr_to_section(pnum), pnum, map, usage,
				SECTION_IS_EARLY);
		usage = (void *) usage + mem_section_usage_size();
	}
	sparse_buffer_fini();
	return;
failed:
	/* We failed to allocate, mark all the following pnums as not present */
	for_each_present_section_nr(pnum_begin, pnum) {
		struct mem_section *ms;

		if (pnum >= pnum_end)
			break;
		ms = __nr_to_section(pnum);
		ms->section_mem_map = 0;
	}
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum_begin = first_present_section_nr();
	int nid_begin = sparse_early_nid(__nr_to_section(pnum_begin));
	unsigned long pnum_end, map_count = 1;

	/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
	set_pageblock_order();

	for_each_present_section_nr(pnum_begin + 1, pnum_end) {
		int nid = sparse_early_nid(__nr_to_section(pnum_end));

		if (nid == nid_begin) {
			map_count++;
			continue;
		}
		/* Init node with sections in range [pnum_begin, pnum_end) */
		sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
		nid_begin = nid;
		pnum_begin = pnum_end;
		map_count = 1;
	}
	/* cover the last node */
	sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
	vmemmap_populate_print_last();
}

#ifdef CONFIG_MEMORY_HOTPLUG

/* Mark all memory sections within the pfn range as online */
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/* onlining code should never touch invalid ranges */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map |= SECTION_IS_ONLINE;
	}
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/* Mark all memory sections within the pfn range as offline */
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/*
		 * TODO this needs some double checking. Offlining code makes
		 * sure to check pfn_valid but those checks might be just bogus
		 */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map &= ~SECTION_IS_ONLINE;
	}
}
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static struct page * __meminit populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	return __populate_section_memmap(pfn, nr_pages, nid, altmap);
}

static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	unsigned long start = (unsigned long) pfn_to_page(pfn);
	unsigned long end = start + nr_pages * sizeof(struct page);

	vmemmap_free(start, end, altmap);
}
static void free_map_bootmem(struct page *memmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end, NULL);
}
#else
struct page * __meminit populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	return kvmalloc_node(array_size(sizeof(struct page),
			PAGES_PER_SECTION), GFP_KERNEL, nid);
}

static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	kvfree(pfn_to_page(pfn));
}

static void free_map_bootmem(struct page *memmap)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	unsigned long magic, nr_pages;
	struct page *page = virt_to_page(memmap);

	nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
		>> PAGE_SHIFT;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = (unsigned long) page->freelist;

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page_private(page);

		/*
		 * When this function is called, the section being removed is
		 * in a logically offlined state. This means all of its pages
		 * are isolated from the page allocator. If the memmap of the
		 * section being removed is placed on that same section, it
		 * must not be freed: if it were freed, the page allocator
		 * could hand it out again even though it is about to be
		 * removed physically.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

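/*
 * Clear the bits for [pfn, pfn + nr_pages) in the owning section's
 * subsection_map. Returns -EINVAL (with a warning) if the range was not
 * fully marked present, i.e. it has already been deactivated.
 */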
static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
	DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
	struct mem_section *ms = __pfn_to_section(pfn);
	unsigned long *subsection_map = ms->usage
		? &ms->usage->subsection_map[0] : NULL;

	subsection_mask_set(map, pfn, nr_pages);
	if (subsection_map)
		bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION);

	if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
				"section already deactivated (%#lx + %ld)\n",
				pfn, nr_pages))
		return -EINVAL;

	bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
	return 0;
}

static bool is_subsection_map_empty(struct mem_section *ms)
{
	return bitmap_empty(&ms->usage->subsection_map[0],
			    SUBSECTIONS_PER_SECTION);
}

static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	bool section_is_early = early_section(ms);
	struct page *memmap = NULL;
	bool empty;

	if (clear_subsection_map(pfn, nr_pages))
		return;
	/*
	 * There are 3 cases to handle across two configurations
	 * (SPARSEMEM_VMEMMAP={y,n}):
	 *
	 * 1/ deactivation of a partial hot-added section (only possible
	 * in the SPARSEMEM_VMEMMAP=y case).
	 *    a/ section was present at memory init
	 *    b/ section was hot-added post memory init
	 * 2/ deactivation of a complete hot-added section
	 * 3/ deactivation of a complete section from memory init
	 *
	 * For 1/, when the subsection_map is not empty we will not be
	 * freeing the usage map, but still need to free the vmemmap
	 * range.
	 *
	 * For 2/ and 3/ the SPARSEMEM_VMEMMAP={y,n} cases are unified.
	 */
	empty = is_subsection_map_empty(ms);
	if (empty) {
		unsigned long section_nr = pfn_to_section_nr(pfn);

		/*
		 * When removing an early section, the usage map is kept (as the
		 * usage maps of other sections fall into the same page). It
		 * will be re-used when re-adding the section - which is then no
		 * longer an early section. If the usage map is PageReserved, it
		 * was allocated during boot.
		 */
		if (!PageReserved(virt_to_page(ms->usage))) {
			kfree(ms->usage);
			ms->usage = NULL;
		}
		memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
		/*
		 * Mark the section invalid so that valid_section()
		 * returns false. This prevents code from dereferencing
		 * the ms->usage array.
		 */
		ms->section_mem_map &= ~SECTION_HAS_MEM_MAP;
	}

	if (section_is_early && memmap)
		free_map_bootmem(memmap);
	else
		depopulate_section_memmap(pfn, nr_pages, altmap);

	if (empty)
		ms->section_mem_map = (unsigned long)NULL;
}

static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
	unsigned long *subsection_map;
	int rc = 0;

	subsection_mask_set(map, pfn, nr_pages);

	subsection_map = &ms->usage->subsection_map[0];

	if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
		rc = -EINVAL;
	else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION))
		rc = -EEXIST;
	else
		bitmap_or(subsection_map, map, subsection_map,
				SUBSECTIONS_PER_SECTION);

	return rc;
}

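/*
 * Allocate the mem_section_usage structure on first activation of the
 * section, record the new subsections in its subsection_map, and populate
 * (or, for a partially populated early section, reuse) the memmap backing
 * [pfn, pfn + nr_pages).
 */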
static struct page * __meminit section_activate(int nid, unsigned long pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	struct mem_section_usage *usage = NULL;
	struct page *memmap;
	int rc = 0;

	if (!ms->usage) {
		usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
		if (!usage)
			return ERR_PTR(-ENOMEM);
		ms->usage = usage;
	}

	rc = fill_subsection_map(pfn, nr_pages);
	if (rc) {
		if (usage)
			ms->usage = NULL;
		kfree(usage);
		return ERR_PTR(rc);
	}

	/*
	 * The early init code does not consider partially populated
	 * initial sections; it simply assumes that memory will never be
	 * referenced. If we hot-add memory into such a section then we
	 * do not need to populate the memmap and can simply reuse what
	 * is already there.
	 */
	if (nr_pages < PAGES_PER_SECTION && early_section(ms))
		return pfn_to_page(pfn);

	memmap = populate_section_memmap(pfn, nr_pages, nid, altmap);
	if (!memmap) {
		section_deactivate(pfn, nr_pages, altmap);
		return ERR_PTR(-ENOMEM);
	}

	return memmap;
}

/**
 * sparse_add_section - add a memory section, or populate an existing one
 * @nid: The node to add the section on
 * @start_pfn: start pfn of the memory range
 * @nr_pages: number of pfns to add in the section
 * @altmap: device page map
 *
 * This is only intended for hotplug.
 *
 * Return:
 * * 0		- On success.
 * * -EEXIST	- Section is already present.
 * * -ENOMEM	- Out of memory.
 */
int __meminit sparse_add_section(int nid, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct mem_section *ms;
	struct page *memmap;
	int ret;

	ret = sparse_index_init(section_nr, nid);
	if (ret < 0)
		return ret;

	memmap = section_activate(nid, start_pfn, nr_pages, altmap);
	if (IS_ERR(memmap))
		return PTR_ERR(memmap);

	/*
	 * Poison uninitialized struct pages in order to catch invalid flags
	 * combinations.
	 */
	page_init_poison(memmap, sizeof(struct page) * nr_pages);

	ms = __nr_to_section(section_nr);
	set_section_nid(section_nr, nid);
	section_mark_present(ms);

	/* Align memmap to section boundary in the subsection case */
	if (section_nr_to_pfn(section_nr) != start_pfn)
		memmap = pfn_to_page(section_nr_to_pfn(section_nr));
	sparse_init_one_section(ms, section_nr, memmap, ms->usage, 0);

	return 0;
}

#ifdef CONFIG_MEMORY_FAILURE
static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
	int i;

	/*
	 * A further optimization is to have per section refcounted
	 * num_poisoned_pages.  But that would need more space per memmap, so
	 * for now just do a quick global check to speed up this routine in the
	 * absence of bad pages.
	 */
	if (atomic_long_read(&num_poisoned_pages) == 0)
		return;

	for (i = 0; i < nr_pages; i++) {
		if (PageHWPoison(&memmap[i])) {
			num_poisoned_pages_dec();
			ClearPageHWPoison(&memmap[i]);
		}
	}
}
#else
static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
}
#endif

void sparse_remove_section(struct mem_section *ms, unsigned long pfn,
		unsigned long nr_pages, unsigned long map_offset,
		struct vmem_altmap *altmap)
{
	clear_hwpoisoned_pages(pfn_to_page(pfn) + map_offset,
			       nr_pages - map_offset);
	section_deactivate(pfn, nr_pages, altmap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */