// SPDX-License-Identifier: GPL-2.0
/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section **mem_section;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
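/*
 * Allocate one root's worth of mem_section entries for @nid: from the slab
 * once it is available, otherwise from memblock during early boot.
 */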
static noinline struct mem_section __ref *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available()) {
		section = kzalloc_node(array_size, GFP_KERNEL, nid);
	} else {
		section = memblock_alloc_node(array_size, SMP_CACHE_BYTES,
					      nid);
		if (!section)
			panic("%s: Failed to allocate %lu bytes nid=%d\n",
			      __func__, array_size, nid);
	}

	return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;

	/*
	 * An existing section is possible in the sub-section hotplug
	 * case. First hot-add instantiates, follow-on hot-add reuses
	 * the existing section.
	 *
	 * The mem_hotplug_lock resolves the apparent race below.
	 */
	if (mem_section[root])
		return 0;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;

	mem_section[root] = section;

	return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

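/*
 * Map a mem_section pointer back to its section number, either by scanning
 * the root array (SPARSEMEM_EXTREME) or by plain pointer arithmetic.
 */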
#ifdef CONFIG_SPARSEMEM_EXTREME
unsigned long __section_nr(struct mem_section *ms)
{
	unsigned long root_nr;
	struct mem_section *root = NULL;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	VM_BUG_ON(!root);

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
#else
unsigned long __section_nr(struct mem_section *ms)
{
	return (unsigned long)(ms - mem_section[0]);
}
#endif

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}

/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	} else if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}

/*
 * There are a number of times that we loop over NR_MEM_SECTIONS,
 * looking for section_present() on each. But, when we have very
 * large physical address spaces, NR_MEM_SECTIONS can also be
 * very large which makes the loops quite long.
 *
 * Keeping track of this gives us an easy way to break out of
 * those loops early.
 */
unsigned long __highest_present_section_nr;
static void section_mark_present(struct mem_section *ms)
{
	unsigned long section_nr = __section_nr(ms);

	if (section_nr > __highest_present_section_nr)
		__highest_present_section_nr = section_nr;

	ms->section_mem_map |= SECTION_MARKED_PRESENT;
}

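/*
 * Return the number of the next present section after @section_nr, or -1
 * (as an unsigned long) once __highest_present_section_nr has been passed.
 */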
static inline unsigned long next_present_section_nr(unsigned long section_nr)
{
	do {
		section_nr++;
		if (present_section_nr(section_nr))
			return section_nr;
	} while ((section_nr <= __highest_present_section_nr));

	return -1;
}
#define for_each_present_section_nr(start, section_nr)		\
	for (section_nr = next_present_section_nr(start-1);	\
	     ((section_nr != -1) &&				\
	      (section_nr <= __highest_present_section_nr));	\
	     section_nr = next_present_section_nr(section_nr))

static inline unsigned long first_present_section_nr(void)
{
	return next_present_section_nr(-1);
}

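/*
 * Set the bits in @map that cover the subsections spanned by
 * [pfn, pfn + nr_pages); subsection_map_init() below applies this to every
 * present section at boot.
 */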
void subsection_mask_set(unsigned long *map, unsigned long pfn,
		unsigned long nr_pages)
{
	int idx = subsection_map_index(pfn);
	int end = subsection_map_index(pfn + nr_pages - 1);

	bitmap_set(map, idx, end - idx + 1);
}

void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
{
	int end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
	unsigned long nr, start_sec = pfn_to_section_nr(pfn);

	if (!nr_pages)
		return;

	for (nr = start_sec; nr <= end_sec; nr++) {
		struct mem_section *ms;
		unsigned long pfns;

		pfns = min(nr_pages, PAGES_PER_SECTION
				- (pfn & ~PAGE_SECTION_MASK));
		ms = __nr_to_section(nr);
		subsection_mask_set(ms->usage->subsection_map, pfn, pfns);

		pr_debug("%s: sec: %lu pfns: %lu set(%d, %d)\n", __func__, nr,
				pfns, subsection_map_index(pfn),
				subsection_map_index(pfn + pfns - 1));

		pfn += pfns;
		nr_pages -= pfns;
	}
}

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

#ifdef CONFIG_SPARSEMEM_EXTREME
	if (unlikely(!mem_section)) {
		unsigned long size, align;

		size = sizeof(struct mem_section*) * NR_SECTION_ROOTS;
		align = 1 << (INTERNODE_CACHE_SHIFT);
		mem_section = memblock_alloc(size, align);
		if (!mem_section)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, size, align);
	}
#endif

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map) {
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_IS_ONLINE;
			section_mark_present(ms);
		}
	}
}

/*
 * Mark all memblocks as present using memory_present(). This is a
 * convenience function that is useful for a number of arches
 * to mark all of the system's memory as present during initialization.
 */
void __init memblocks_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		memory_present(memblock_get_region_node(reg),
			       memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
	}
}

/*
 * Subtle, we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	unsigned long coded_mem_map =
		(unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
	BUILD_BUG_ON(SECTION_MAP_LAST_BIT > (1UL<<PFN_SECTION_SHIFT));
	BUG_ON(coded_mem_map & ~SECTION_MAP_MASK);
	return coded_mem_map;
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}

static void __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		struct mem_section_usage *usage, unsigned long flags)
{
	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum)
		| SECTION_HAS_MEM_MAP | flags;
	ms->usage = usage;
}

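/*
 * usemap_size() is the size of one section's pageblock flags bitmap;
 * mem_section_usage_size() adds the enclosing struct mem_section_usage.
 */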
static unsigned long usemap_size(void)
{
	return BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS) * sizeof(unsigned long);
}

size_t mem_section_usage_size(void)
{
	return sizeof(struct mem_section_usage) + usemap_size();
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	struct mem_section_usage *usage;
	unsigned long goal, limit;
	int nid;
	/*
	 * A page may contain usemaps for other sections preventing the
	 * page being freed and making a section unremovable while
	 * other sections referencing the usemap remain active. Similarly,
	 * a pgdat can prevent a section being removed. If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent. This allocates usemaps
	 * from the same section as the pgdat where possible to avoid
	 * this problem.
	 */
	goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
	limit = goal + (1UL << PA_SECTION_SHIFT);
	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
	usage = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, goal, limit, nid);
	if (!usage && limit) {
		limit = 0;
		goto again;
	}
	return usage;
}

static void __init check_usemap_section_nr(int nid,
		struct mem_section_usage *usage)
{
	unsigned long usemap_snr, pgdat_snr;
	static unsigned long old_usemap_snr;
	static unsigned long old_pgdat_snr;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	/* First call */
	if (!old_usemap_snr) {
		old_usemap_snr = NR_MEM_SECTIONS;
		old_pgdat_snr = NR_MEM_SECTIONS;
	}

	usemap_snr = pfn_to_section_nr(__pa(usage) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
		/* skip redundant message */
		return;

	old_usemap_snr = usemap_snr;
	old_pgdat_snr = pgdat_snr;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		pr_info("node %d must be removed before remove section %ld\n",
			nid, usemap_snr);
		return;
	}
	/*
	 * There is a circular dependency.
	 * Some platforms allow un-removable sections because they will just
	 * gather other removable sections for dynamic partitioning.
	 * Just report the un-removable section's number here.
	 */
	pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n",
		usemap_snr, pgdat_snr, nid);
}
#else
static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	return memblock_alloc_node(size, SMP_CACHE_BYTES, pgdat->node_id);
}

static void __init check_usemap_section_nr(int nid,
		struct mem_section_usage *usage)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

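/*
 * Size of one section's memmap: PMD-aligned when vmemmap is used so it can
 * be mapped with huge pages, otherwise just page-aligned.
 */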
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static unsigned long __init section_map_size(void)
{
	return ALIGN(sizeof(struct page) * PAGES_PER_SECTION, PMD_SIZE);
}

#else
static unsigned long __init section_map_size(void)
{
	return PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
}

struct page __init *__populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	unsigned long size = section_map_size();
	struct page *map = sparse_buffer_alloc(size);
	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);

	if (map)
		return map;

	map = memblock_alloc_try_nid(size,
					  PAGE_SIZE, addr,
					  MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	if (!map)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa\n",
		      __func__, size, PAGE_SIZE, nid, &addr);

	return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

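/*
 * A per-node scratch buffer, pre-allocated by sparse_buffer_init(), from
 * which sparse_buffer_alloc() hands out chunks aligned to their own size
 * during early sparse init; the unused remainder is handed back to memblock
 * by sparse_buffer_fini().
 */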
static void *sparsemap_buf __meminitdata;
static void *sparsemap_buf_end __meminitdata;

static void __init sparse_buffer_init(unsigned long size, int nid)
{
	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);
	WARN_ON(sparsemap_buf);	/* forgot to call sparse_buffer_fini()? */
	sparsemap_buf =
		memblock_alloc_try_nid_raw(size, PAGE_SIZE,
						addr,
						MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	sparsemap_buf_end = sparsemap_buf + size;
}

static void __init sparse_buffer_fini(void)
{
	unsigned long size = sparsemap_buf_end - sparsemap_buf;

	if (sparsemap_buf && size > 0)
		memblock_free_early(__pa(sparsemap_buf), size);
	sparsemap_buf = NULL;
}

void * __meminit sparse_buffer_alloc(unsigned long size)
{
	void *ptr = NULL;

	if (sparsemap_buf) {
		ptr = PTR_ALIGN(sparsemap_buf, size);
		if (ptr + size > sparsemap_buf_end)
			ptr = NULL;
		else
			sparsemap_buf = ptr + size;
	}
	return ptr;
}

void __weak __meminit vmemmap_populate_print_last(void)
{
}

/*
 * Initialize sparse on a specific node. The node spans [pnum_begin, pnum_end)
 * and the number of present sections in this node is map_count.
 */
static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
				   unsigned long pnum_end,
				   unsigned long map_count)
{
	struct mem_section_usage *usage;
	unsigned long pnum;
	struct page *map;

	usage = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nid),
			mem_section_usage_size() * map_count);
	if (!usage) {
		pr_err("%s: node[%d] usemap allocation failed", __func__, nid);
		goto failed;
	}
	sparse_buffer_init(map_count * section_map_size(), nid);
	for_each_present_section_nr(pnum_begin, pnum) {
		unsigned long pfn = section_nr_to_pfn(pnum);

		if (pnum >= pnum_end)
			break;

		map = __populate_section_memmap(pfn, PAGES_PER_SECTION,
				nid, NULL);
		if (!map) {
			pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.",
			       __func__, nid);
			pnum_begin = pnum;
			goto failed;
		}
		check_usemap_section_nr(nid, usage);
		sparse_init_one_section(__nr_to_section(pnum), pnum, map, usage,
				SECTION_IS_EARLY);
		usage = (void *) usage + mem_section_usage_size();
	}
	sparse_buffer_fini();
	return;
failed:
	/* We failed to allocate, mark all the following pnums as not present */
	for_each_present_section_nr(pnum_begin, pnum) {
		struct mem_section *ms;

		if (pnum >= pnum_end)
			break;
		ms = __nr_to_section(pnum);
		ms->section_mem_map = 0;
	}
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum_begin = first_present_section_nr();
	int nid_begin = sparse_early_nid(__nr_to_section(pnum_begin));
	unsigned long pnum_end, map_count = 1;

	/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
	set_pageblock_order();

	for_each_present_section_nr(pnum_begin + 1, pnum_end) {
		int nid = sparse_early_nid(__nr_to_section(pnum_end));

		if (nid == nid_begin) {
			map_count++;
			continue;
		}
		/* Init node with sections in range [pnum_begin, pnum_end) */
		sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
		nid_begin = nid;
		pnum_begin = pnum_end;
		map_count = 1;
	}
	/* cover the last node */
	sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
	vmemmap_populate_print_last();
}

#ifdef CONFIG_MEMORY_HOTPLUG

/* Mark all memory sections within the pfn range as online */
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/* onlining code should never touch invalid ranges */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map |= SECTION_IS_ONLINE;
	}
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/* Mark all memory sections within the pfn range as offline */
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/*
		 * TODO this needs some double checking. Offlining code makes
		 * sure to check pfn_valid but those checks might be just bogus
		 */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map &= ~SECTION_IS_ONLINE;
	}
}
#endif

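/*
 * Memmap (de)population for hotplugged sections: backed by vmemmap pages
 * when CONFIG_SPARSEMEM_VMEMMAP=y, otherwise by alloc_pages()/vmalloc().
 */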
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static struct page *populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	return __populate_section_memmap(pfn, nr_pages, nid, altmap);
}

static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	unsigned long start = (unsigned long) pfn_to_page(pfn);
	unsigned long end = start + nr_pages * sizeof(struct page);

	vmemmap_free(start, end, altmap);
}
static void free_map_bootmem(struct page *memmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end, NULL);
}
#else
struct page *populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;

	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:

	return ret;
}

static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	struct page *memmap = pfn_to_page(pfn);

	if (is_vmalloc_addr(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * PAGES_PER_SECTION));
}

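/*
 * Release the bootmem pages backing a section's memmap via
 * put_page_bootmem(); the loop below skips pages that live in the section
 * currently being removed.
 */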
static void free_map_bootmem(struct page *memmap)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	unsigned long magic, nr_pages;
	struct page *page = virt_to_page(memmap);

	nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
		>> PAGE_SHIFT;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = (unsigned long) page->freelist;

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page_private(page);

		/*
		 * When this function is called, the section being removed is
		 * in a logically offlined state, so all of its pages are
		 * isolated from the page allocator. If the removed section's
		 * memmap is placed on that same section, it must not be freed
		 * here: if it were, the page allocator could hand it out
		 * again even though it is about to be removed physically.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

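/*
 * Clear the subsection_map bits for [pfn, pfn + nr_pages); once the map
 * becomes empty, the section's usage struct and memmap are released as well.
 */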
static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
	DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
	struct mem_section *ms = __pfn_to_section(pfn);
	bool section_is_early = early_section(ms);
	struct page *memmap = NULL;
	unsigned long *subsection_map = ms->usage
		? &ms->usage->subsection_map[0] : NULL;

	subsection_mask_set(map, pfn, nr_pages);
	if (subsection_map)
		bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION);

	if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
			"section already deactivated (%#lx + %ld)\n",
			pfn, nr_pages))
		return;

	/*
	 * There are 3 cases to handle across two configurations
	 * (SPARSEMEM_VMEMMAP={y,n}):
	 *
	 * 1/ deactivation of a partial hot-added section (only possible
	 *    in the SPARSEMEM_VMEMMAP=y case).
	 *    a/ section was present at memory init
	 *    b/ section was hot-added post memory init
	 * 2/ deactivation of a complete hot-added section
	 * 3/ deactivation of a complete section from memory init
	 *
	 * For 1/, when the subsection_map does not become empty we will
	 * not be freeing the usage map, but we still need to free the
	 * vmemmap range.
	 *
	 * For 2/ and 3/ the SPARSEMEM_VMEMMAP={y,n} cases are unified.
	 */
	bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
	if (bitmap_empty(subsection_map, SUBSECTIONS_PER_SECTION)) {
		unsigned long section_nr = pfn_to_section_nr(pfn);

		if (!section_is_early) {
			kfree(ms->usage);
			ms->usage = NULL;
		}
		memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
		ms->section_mem_map = sparse_encode_mem_map(NULL, section_nr);
	}

	if (section_is_early && memmap)
		free_map_bootmem(memmap);
	else
		depopulate_section_memmap(pfn, nr_pages, altmap);
}

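/*
 * Mark the subsections covering [pfn, pfn + nr_pages) as in use and return
 * the memmap for the range, populating it if needed.
 */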
static struct page * __meminit section_activate(int nid, unsigned long pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap)
{
	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
	struct mem_section *ms = __pfn_to_section(pfn);
	struct mem_section_usage *usage = NULL;
	unsigned long *subsection_map;
	struct page *memmap;
	int rc = 0;

	subsection_mask_set(map, pfn, nr_pages);

	if (!ms->usage) {
		usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
		if (!usage)
			return ERR_PTR(-ENOMEM);
		ms->usage = usage;
	}
	subsection_map = &ms->usage->subsection_map[0];

	if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
		rc = -EINVAL;
	else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION))
		rc = -EEXIST;
	else
		bitmap_or(subsection_map, map, subsection_map,
				SUBSECTIONS_PER_SECTION);

	if (rc) {
		if (usage)
			ms->usage = NULL;
		kfree(usage);
		return ERR_PTR(rc);
	}

	/*
	 * The early init code does not consider partially populated
	 * initial sections, it simply assumes that memory will never be
	 * referenced.  If we hot-add memory into such a section then we
	 * do not need to populate the memmap and can simply reuse what
	 * is already there.
	 */
	if (nr_pages < PAGES_PER_SECTION && early_section(ms))
		return pfn_to_page(pfn);

	memmap = populate_section_memmap(pfn, nr_pages, nid, altmap);
	if (!memmap) {
		section_deactivate(pfn, nr_pages, altmap);
		return ERR_PTR(-ENOMEM);
	}

	return memmap;
}

/**
 * sparse_add_section - add a memory section, or populate an existing one
 * @nid: The node to add section on
 * @start_pfn: start pfn of the memory range
 * @nr_pages: number of pfns to add in the section
 * @altmap: device page map
 *
 * This is only intended for hotplug.
 *
 * Return:
 * * 0		- On success.
 * * -EEXIST	- Section is already present.
 * * -ENOMEM	- Out of memory.
 */
int __meminit sparse_add_section(int nid, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct mem_section *ms;
	struct page *memmap;
	int ret;

	ret = sparse_index_init(section_nr, nid);
	if (ret < 0)
		return ret;

	memmap = section_activate(nid, start_pfn, nr_pages, altmap);
	if (IS_ERR(memmap))
		return PTR_ERR(memmap);

	/*
	 * Poison uninitialized struct pages in order to catch invalid flags
	 * combinations.
	 */
	page_init_poison(pfn_to_page(start_pfn), sizeof(struct page) * nr_pages);

	ms = __pfn_to_section(start_pfn);
	set_section_nid(section_nr, nid);
	section_mark_present(ms);

	/* Align memmap to section boundary in the subsection case */
	if (section_nr_to_pfn(section_nr) != start_pfn)
		memmap = pfn_to_kaddr(section_nr_to_pfn(section_nr));
	sparse_init_one_section(ms, section_nr, memmap, ms->usage, 0);

	return 0;
}

#ifdef CONFIG_MEMORY_FAILURE
static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
	int i;

	if (!memmap)
		return;

	/*
	 * A further optimization is to have per section refcounted
	 * num_poisoned_pages.  But that would need more space per memmap, so
	 * for now just do a quick global check to speed up this routine in the
	 * absence of bad pages.
	 */
	if (atomic_long_read(&num_poisoned_pages) == 0)
		return;

	for (i = 0; i < nr_pages; i++) {
		if (PageHWPoison(&memmap[i])) {
			atomic_long_sub(1, &num_poisoned_pages);
			ClearPageHWPoison(&memmap[i]);
		}
	}
}
#else
static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
}
#endif

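/*
 * Undo sparse_add_section(): clear hwpoison accounting for the pages past
 * @map_offset and deactivate the (sub)section.
 */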
void sparse_remove_section(struct mem_section *ms, unsigned long pfn,
		unsigned long nr_pages, unsigned long map_offset,
		struct vmem_altmap *altmap)
{
	clear_hwpoisoned_pages(pfn_to_page(pfn) + map_offset,
			nr_pages - map_offset);
	section_deactivate(pfn, nr_pages, altmap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */