// SPDX-License-Identifier: GPL-2.0
/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section **mem_section;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
        ____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif
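/*
 * Rough footprint of this table (a hedged illustration only, assuming
 * SECTION_SIZE_BITS == 27 and MAX_PHYSMEM_BITS == 46, so NR_MEM_SECTIONS
 * is 1 << 19): the u8 variant costs 512 KiB and the u16 variant 1 MiB of
 * static data.  The real numbers depend on the architecture's section and
 * physical address sizes.
 */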

int page_to_nid(const struct page *page)
{
        return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
        section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static noinline struct mem_section __ref *sparse_index_alloc(int nid)
{
        struct mem_section *section = NULL;
        unsigned long array_size = SECTIONS_PER_ROOT *
                                   sizeof(struct mem_section);

        if (slab_is_available())
                section = kzalloc_node(array_size, GFP_KERNEL, nid);
        else
                section = memblock_virt_alloc_node(array_size, nid);

        return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
        unsigned long root = SECTION_NR_TO_ROOT(section_nr);
        struct mem_section *section;

        if (mem_section[root])
                return -EEXIST;

        section = sparse_index_alloc(nid);
        if (!section)
                return -ENOMEM;

        mem_section[root] = section;

        return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
        return 0;
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
int __section_nr(struct mem_section *ms)
{
        unsigned long root_nr;
        struct mem_section *root = NULL;

        for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
                root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
                if (!root)
                        continue;

                if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
                        break;
        }

        VM_BUG_ON(!root);

        return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
#else
int __section_nr(struct mem_section *ms)
{
        return (int)(ms - mem_section[0]);
}
#endif

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
        return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
        return (section->section_mem_map >> SECTION_NID_SHIFT);
}
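/*
 * Worked example (hedged: it assumes SECTION_NID_SHIFT == 3, i.e. the
 * SECTION_* flag bits occupy bits 0-2): sparse_encode_early_nid(2) stores
 * 0x10 in section_mem_map, and sparse_early_nid() shifts the flag bits
 * away again and returns 2.  The low bits stay free for
 * SECTION_MARKED_PRESENT and friends until the real mem_map is stored.
 */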

/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
                                               unsigned long *end_pfn)
{
        unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);
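        /*
         * For example, with MAX_PHYSMEM_BITS == 46 and PAGE_SHIFT == 12
         * (typical x86-64 values, used here purely for illustration),
         * max_sparsemem_pfn is 1UL << 34, i.e. the pfns covering 64 TiB
         * of physical address space.
         */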

        /*
         * Sanity checks - do not allow an architecture to pass
         * in larger pfns than the maximum scope of sparsemem:
         */
        if (*start_pfn > max_sparsemem_pfn) {
                mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
                        "Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
                        *start_pfn, *end_pfn, max_sparsemem_pfn);
                WARN_ON_ONCE(1);
                *start_pfn = max_sparsemem_pfn;
                *end_pfn = max_sparsemem_pfn;
        } else if (*end_pfn > max_sparsemem_pfn) {
                mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
                        "End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
                        *start_pfn, *end_pfn, max_sparsemem_pfn);
                WARN_ON_ONCE(1);
                *end_pfn = max_sparsemem_pfn;
        }
}

/*
 * There are a number of times that we loop over NR_MEM_SECTIONS,
 * looking for section_present() on each.  But, when we have very
 * large physical address spaces, NR_MEM_SECTIONS can also be
 * very large which makes the loops quite long.
 *
 * Keeping track of this gives us an easy way to break out of
 * those loops early.
 */
int __highest_present_section_nr;
static void section_mark_present(struct mem_section *ms)
{
        int section_nr = __section_nr(ms);

        if (section_nr > __highest_present_section_nr)
                __highest_present_section_nr = section_nr;

        ms->section_mem_map |= SECTION_MARKED_PRESENT;
}

static inline int next_present_section_nr(int section_nr)
{
        do {
                section_nr++;
                if (present_section_nr(section_nr))
                        return section_nr;
        } while ((section_nr < NR_MEM_SECTIONS) &&
                 (section_nr <= __highest_present_section_nr));

        return -1;
}
#define for_each_present_section_nr(start, section_nr)         \
        for (section_nr = next_present_section_nr(start-1);    \
             ((section_nr >= 0) &&                             \
              (section_nr < NR_MEM_SECTIONS) &&                \
              (section_nr <= __highest_present_section_nr));   \
             section_nr = next_present_section_nr(section_nr))
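/*
 * Typical use of the iterator, as in sparse_init() further below:
 *
 *	for_each_present_section_nr(0, pnum) {
 *		struct mem_section *ms = __nr_to_section(pnum);
 *		...
 *	}
 */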

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
        unsigned long pfn;

#ifdef CONFIG_SPARSEMEM_EXTREME
        if (unlikely(!mem_section)) {
                unsigned long size, align;

                size = sizeof(struct mem_section*) * NR_SECTION_ROOTS;
                align = 1 << (INTERNODE_CACHE_SHIFT);
                mem_section = memblock_virt_alloc(size, align);
        }
#endif
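        /*
         * Size illustration for the allocation above (hedged, assuming
         * 4 KiB pages, a 32-byte struct mem_section and 2^19 sections):
         * SECTIONS_PER_ROOT is PAGE_SIZE / sizeof(struct mem_section)
         * == 128, so the root array holds 4096 pointers, i.e. 32 KiB,
         * instead of a 16 MiB static mem_section[][] array.
         */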

        start &= PAGE_SECTION_MASK;
        mminit_validate_memmodel_limits(&start, &end);
        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
                unsigned long section = pfn_to_section_nr(pfn);
                struct mem_section *ms;

                sparse_index_init(section, nid);
                set_section_nid(section, nid);

                ms = __nr_to_section(section);
                if (!ms->section_mem_map) {
                        ms->section_mem_map = sparse_encode_early_nid(nid) |
                                                        SECTION_IS_ONLINE;
                        section_mark_present(ms);
                }
        }
}

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
                                            unsigned long end_pfn)
{
        unsigned long pfn;
        unsigned long nr_pages = 0;

        mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                if (nid != early_pfn_to_nid(pfn))
                        continue;

                if (pfn_present(pfn))
                        nr_pages += PAGES_PER_SECTION;
        }

        return nr_pages * sizeof(struct page);
}
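/*
 * Rough scale of the result (hedged, assuming 128 MiB sections, 4 KiB
 * pages and a 64-byte struct page): each present section contributes
 * 32768 pages * 64 bytes == 2 MiB of mem_map to the returned size.
 */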

/*
 * Subtle, we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
        unsigned long coded_mem_map =
                (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
        BUILD_BUG_ON(SECTION_MAP_LAST_BIT > (1UL<<PFN_SECTION_SHIFT));
        BUG_ON(coded_mem_map & ~SECTION_MAP_MASK);
        return coded_mem_map;
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
        /* mask off the extra low bits of information */
        coded_mem_map &= SECTION_MAP_MASK;
        return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
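/*
 * The two helpers above are inverses: for any section number pnum,
 * sparse_decode_mem_map(sparse_encode_mem_map(map, pnum), pnum) == map.
 * Because the stored value is biased by -section_nr_to_pfn(pnum),
 * "page - coded_mem_map" yields the real pfn and "coded_mem_map + pfn"
 * yields the struct page, which is the identity the comment before
 * sparse_encode_mem_map() alludes to.
 */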

static int __meminit sparse_init_one_section(struct mem_section *ms,
                unsigned long pnum, struct page *mem_map,
                unsigned long *pageblock_bitmap)
{
        if (!present_section(ms))
                return -EINVAL;

        ms->section_mem_map &= ~SECTION_MAP_MASK;
        ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
                                                        SECTION_HAS_MEM_MAP;
        ms->pageblock_flags = pageblock_bitmap;

        return 1;
}

unsigned long usemap_size(void)
{
        return BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS) * sizeof(unsigned long);
}
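/*
 * Scale of a usemap (hedged example, assuming 128 MiB sections, 2 MiB
 * pageblocks and NR_PAGEBLOCK_BITS == 4): SECTION_BLOCKFLAGS_BITS is
 * 64 pageblocks * 4 bits == 256 bits, so usemap_size() returns 32 bytes,
 * rounded up to a whole number of unsigned longs.
 */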

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
        return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
                                         unsigned long size)
{
        unsigned long goal, limit;
        unsigned long *p;
        int nid;
        /*
         * A page may contain usemaps for other sections, preventing the
         * page from being freed and making a section unremovable while
         * other sections referencing the usemap remain active. Similarly,
         * a pgdat can prevent a section being removed. If section A
         * contains a pgdat and section B contains the usemap, both
         * sections become inter-dependent. This allocates usemaps
         * from the same section as the pgdat where possible to avoid
         * this problem.
         */
        goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
        limit = goal + (1UL << PA_SECTION_SHIFT);
        nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
        p = memblock_virt_alloc_try_nid_nopanic(size,
                                                SMP_CACHE_BYTES, goal, limit,
                                                nid);
        if (!p && limit) {
                limit = 0;
                goto again;
        }
        return p;
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
        unsigned long usemap_snr, pgdat_snr;
        static unsigned long old_usemap_snr;
        static unsigned long old_pgdat_snr;
        struct pglist_data *pgdat = NODE_DATA(nid);
        int usemap_nid;

        /* First call */
        if (!old_usemap_snr) {
                old_usemap_snr = NR_MEM_SECTIONS;
                old_pgdat_snr = NR_MEM_SECTIONS;
        }

        usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
        pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
        if (usemap_snr == pgdat_snr)
                return;

        if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
                /* skip redundant message */
                return;

        old_usemap_snr = usemap_snr;
        old_pgdat_snr = pgdat_snr;

        usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
        if (usemap_nid != nid) {
                pr_info("node %d must be removed before remove section %ld\n",
                        nid, usemap_snr);
                return;
        }
        /*
         * There is a circular dependency.
         * Some platforms allow un-removable section because they will just
         * gather other removable sections for dynamic partitioning.
         * Just notify un-removable section's number here.
         */
        pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n",
                usemap_snr, pgdat_snr, nid);
}
#else
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
                                         unsigned long size)
{
        return memblock_virt_alloc_node_nopanic(size, pgdat->node_id);
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

static void __init sparse_early_usemaps_alloc_node(void *data,
                                 unsigned long pnum_begin,
                                 unsigned long pnum_end,
                                 unsigned long usemap_count, int nodeid)
{
        void *usemap;
        unsigned long pnum;
        unsigned long **usemap_map = (unsigned long **)data;
        int size = usemap_size();

        usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
                                                          size * usemap_count);
        if (!usemap) {
                pr_warn("%s: allocation failed\n", __func__);
                return;
        }

        for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                if (!present_section_nr(pnum))
                        continue;
                usemap_map[pnum] = usemap;
                usemap += size;
                check_usemap_section_nr(nodeid, usemap_map[pnum]);
        }
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid,
                struct vmem_altmap *altmap)
{
        struct page *map;
        unsigned long size;

        map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
        if (map)
                return map;

        size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
        map = memblock_virt_alloc_try_nid(size,
                                          PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
                                          BOOTMEM_ALLOC_ACCESSIBLE, nid);
        return map;
}
void __init sparse_mem_maps_populate_node(struct page **map_map,
                                          unsigned long pnum_begin,
                                          unsigned long pnum_end,
                                          unsigned long map_count, int nodeid)
{
        void *map;
        unsigned long pnum;
        unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;

        map = alloc_remap(nodeid, size * map_count);
        if (map) {
                for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                        if (!present_section_nr(pnum))
                                continue;
                        map_map[pnum] = map;
                        map += size;
                }
                return;
        }

        size = PAGE_ALIGN(size);
        map = memblock_virt_alloc_try_nid_raw(size * map_count,
                                              PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
                                              BOOTMEM_ALLOC_ACCESSIBLE, nodeid);
        if (map) {
                for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                        if (!present_section_nr(pnum))
                                continue;
                        map_map[pnum] = map;
                        map += size;
                }
                return;
        }

        /* fallback */
        for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                struct mem_section *ms;

                if (!present_section_nr(pnum))
                        continue;
                map_map[pnum] = sparse_mem_map_populate(pnum, nodeid, NULL);
                if (map_map[pnum])
                        continue;
                ms = __nr_to_section(pnum);
                pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
                       __func__);
                ms->section_mem_map = 0;
        }
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
static void __init sparse_early_mem_maps_alloc_node(void *data,
                                 unsigned long pnum_begin,
                                 unsigned long pnum_end,
                                 unsigned long map_count, int nodeid)
{
        struct page **map_map = (struct page **)data;
        sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
                                      map_count, nodeid);
}
#else
static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
        struct page *map;
        struct mem_section *ms = __nr_to_section(pnum);
        int nid = sparse_early_nid(ms);

        map = sparse_mem_map_populate(pnum, nid, NULL);
        if (map)
                return map;

        pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
               __func__);
        ms->section_mem_map = 0;
        return NULL;
}
#endif

void __weak __meminit vmemmap_populate_print_last(void)
{
}

/**
 * alloc_usemap_and_memmap - memory allocation for pageblock flags and vmemmap
 * @alloc_func: function that allocates usemaps or memmaps for a range of sections
 * @data: usemap_map for pageblock flags or map_map for vmemmap
 */
static void __init alloc_usemap_and_memmap(void (*alloc_func)
                                (void *, unsigned long, unsigned long,
                                unsigned long, int), void *data)
{
        unsigned long pnum;
        unsigned long map_count;
        int nodeid_begin = 0;
        unsigned long pnum_begin = 0;

        for_each_present_section_nr(0, pnum) {
                struct mem_section *ms;

                ms = __nr_to_section(pnum);
                nodeid_begin = sparse_early_nid(ms);
                pnum_begin = pnum;
                break;
        }
        map_count = 1;
        for_each_present_section_nr(pnum_begin + 1, pnum) {
                struct mem_section *ms;
                int nodeid;

                ms = __nr_to_section(pnum);
                nodeid = sparse_early_nid(ms);
                if (nodeid == nodeid_begin) {
                        map_count++;
                        continue;
                }
                /* ok, we need to take care of pnum_begin to pnum - 1 */
                alloc_func(data, pnum_begin, pnum,
                                        map_count, nodeid_begin);
                /* new start, update count etc. */
                nodeid_begin = nodeid;
                pnum_begin = pnum;
                map_count = 1;
        }
        /* ok, last chunk */
        alloc_func(data, pnum_begin, NR_MEM_SECTIONS,
                                map_count, nodeid_begin);
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
        unsigned long pnum;
        struct page *map;
        unsigned long *usemap;
        unsigned long **usemap_map;
        int size;
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
        int size2;
        struct page **map_map;
#endif

        /* see include/linux/mmzone.h 'struct mem_section' definition */
        BUILD_BUG_ON(!is_power_of_2(sizeof(struct mem_section)));

        /* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
        set_pageblock_order();

        /*
         * Each mem_map is allocated with big pages (2M on 64-bit x86)
         * while a usemap is much smaller than a page (e.g. 24 bytes).
         * Allocating a 2M mem_map and a tiny usemap in turn would push
         * every following 2M allocation to the next 2M boundary, leaving
         * the memory of a big system full of holes.  So allocate all the
         * usemaps up front, which lets the 2M mem_map allocations be laid
         * out contiguously.
         *
         * powerpc needs to call sparse_init_one_section() right after each
         * sparse_early_mem_map_alloc(), so allocate usemap_map first.
         */
        size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
        usemap_map = memblock_virt_alloc(size, 0);
        if (!usemap_map)
                panic("can not allocate usemap_map\n");
        alloc_usemap_and_memmap(sparse_early_usemaps_alloc_node,
                                (void *)usemap_map);

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
        size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
        map_map = memblock_virt_alloc(size2, 0);
        if (!map_map)
                panic("can not allocate map_map\n");
        alloc_usemap_and_memmap(sparse_early_mem_maps_alloc_node,
                                (void *)map_map);
#endif

        for_each_present_section_nr(0, pnum) {
                usemap = usemap_map[pnum];
                if (!usemap)
                        continue;

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
                map = map_map[pnum];
#else
                map = sparse_early_mem_map_alloc(pnum);
#endif
                if (!map)
                        continue;

                sparse_init_one_section(__nr_to_section(pnum), pnum, map,
                                        usemap);
        }

        vmemmap_populate_print_last();

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
        memblock_free_early(__pa(map_map), size2);
#endif
        memblock_free_early(__pa(usemap_map), size);
}

#ifdef CONFIG_MEMORY_HOTPLUG

/* Mark all memory sections within the pfn range as online */
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pfn;

        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                unsigned long section_nr = pfn_to_section_nr(pfn);
                struct mem_section *ms;

                /* onlining code should never touch invalid ranges */
                if (WARN_ON(!valid_section_nr(section_nr)))
                        continue;

                ms = __nr_to_section(section_nr);
                ms->section_mem_map |= SECTION_IS_ONLINE;
        }
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/* Mark all memory sections within the pfn range as offline */
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pfn;

        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                unsigned long section_nr = pfn_to_section_nr(pfn);
                struct mem_section *ms;

                /*
                 * TODO this needs some double checking. Offlining code makes
                 * sure to check pfn_valid but those checks might be just bogus
                 */
                if (WARN_ON(!valid_section_nr(section_nr)))
                        continue;

                ms = __nr_to_section(section_nr);
                ms->section_mem_map &= ~SECTION_IS_ONLINE;
        }
}
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
                struct vmem_altmap *altmap)
{
        /* This will make the necessary allocations eventually. */
        return sparse_mem_map_populate(pnum, nid, altmap);
}
static void __kfree_section_memmap(struct page *memmap,
                struct vmem_altmap *altmap)
{
        unsigned long start = (unsigned long)memmap;
        unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

        vmemmap_free(start, end, altmap);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap)
{
        unsigned long start = (unsigned long)memmap;
        unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

        vmemmap_free(start, end, NULL);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#else
static struct page *__kmalloc_section_memmap(void)
{
        struct page *page, *ret;
        unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;

        page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
        if (page)
                goto got_map_page;

        ret = vmalloc(memmap_size);
        if (ret)
                goto got_map_ptr;

        return NULL;
got_map_page:
        ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:

        return ret;
}

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
                struct vmem_altmap *altmap)
{
        return __kmalloc_section_memmap();
}

static void __kfree_section_memmap(struct page *memmap,
                struct vmem_altmap *altmap)
{
        if (is_vmalloc_addr(memmap))
                vfree(memmap);
        else
                free_pages((unsigned long)memmap,
                           get_order(sizeof(struct page) * PAGES_PER_SECTION));
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap)
{
        unsigned long maps_section_nr, removing_section_nr, i;
        unsigned long magic, nr_pages;
        struct page *page = virt_to_page(memmap);

        nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
                >> PAGE_SHIFT;
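        /*
         * This is the number of pages backing the section's mem_map; e.g.
         * with a 2 MiB memmap and 4 KiB pages (illustrative values only),
         * nr_pages is 512.
         */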

        for (i = 0; i < nr_pages; i++, page++) {
                magic = (unsigned long) page->freelist;

                BUG_ON(magic == NODE_INFO);

                maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
                removing_section_nr = page_private(page);

                /*
                 * When this function is called, the removing section is
                 * logical offlined state. This means all pages are isolated
                 * from page allocator. If removing section's memmap is placed
                 * on the same section, it must not be freed.
                 * If it is freed, page allocator may allocate it which will
                 * be removed physically soon.
                 */
                if (maps_section_nr != removing_section_nr)
                        put_page_bootmem(page);
        }
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * returns the number of sections whose mem_maps were properly
 * set.  If this is <=0, then that means that the passed-in
 * map was not consumed and must be freed.
 */
int __meminit sparse_add_one_section(struct pglist_data *pgdat,
                unsigned long start_pfn, struct vmem_altmap *altmap)
{
        unsigned long section_nr = pfn_to_section_nr(start_pfn);
        struct mem_section *ms;
        struct page *memmap;
        unsigned long *usemap;
        unsigned long flags;
        int ret;

        /*
         * No locking is taken here: sparse_index_init() does its own
         * locking, and it may allocate with kmalloc (which can sleep),
         * so it cannot run under the pgdat resize lock taken below.
         */
        ret = sparse_index_init(section_nr, pgdat->node_id);
        if (ret < 0 && ret != -EEXIST)
                return ret;
        memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, altmap);
        if (!memmap)
                return -ENOMEM;
        usemap = __kmalloc_section_usemap();
        if (!usemap) {
                __kfree_section_memmap(memmap, altmap);
                return -ENOMEM;
        }

        pgdat_resize_lock(pgdat, &flags);

        ms = __pfn_to_section(start_pfn);
        if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
                ret = -EEXIST;
                goto out;
        }

        memset(memmap, 0, sizeof(struct page) * PAGES_PER_SECTION);

        section_mark_present(ms);

        ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
        pgdat_resize_unlock(pgdat, &flags);
        if (ret <= 0) {
                kfree(usemap);
                __kfree_section_memmap(memmap, altmap);
        }
        return ret;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
#ifdef CONFIG_MEMORY_FAILURE
static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
        int i;

        if (!memmap)
                return;

        for (i = 0; i < nr_pages; i++) {
                if (PageHWPoison(&memmap[i])) {
                        atomic_long_sub(1, &num_poisoned_pages);
                        ClearPageHWPoison(&memmap[i]);
                }
        }
}
#else
static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
}
#endif

static void free_section_usemap(struct page *memmap, unsigned long *usemap,
                struct vmem_altmap *altmap)
{
        struct page *usemap_page;

        if (!usemap)
                return;

        usemap_page = virt_to_page(usemap);
        /*
         * Check to see if allocation came from hot-plug-add
         */
        if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
                kfree(usemap);
                if (memmap)
                        __kfree_section_memmap(memmap, altmap);
                return;
        }

        /*
         * The usemap came from bootmem. This is packed with other usemaps
         * on the section which has pgdat at boot time. Just keep it as is now.
         */

        if (memmap)
                free_map_bootmem(memmap);
}

void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
                unsigned long map_offset, struct vmem_altmap *altmap)
{
        struct page *memmap = NULL;
        unsigned long *usemap = NULL, flags;
        struct pglist_data *pgdat = zone->zone_pgdat;

        pgdat_resize_lock(pgdat, &flags);
        if (ms->section_mem_map) {
                usemap = ms->pageblock_flags;
                memmap = sparse_decode_mem_map(ms->section_mem_map,
                                               __section_nr(ms));
                ms->section_mem_map = 0;
                ms->pageblock_flags = NULL;
        }
        pgdat_resize_unlock(pgdat, &flags);

        clear_hwpoisoned_pages(memmap + map_offset,
                               PAGES_PER_SECTION - map_offset);
        free_section_usemap(memmap, usemap, altmap);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_MEMORY_HOTPLUG */