// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <linux/io.h>

#include "internal.h"

#define INIT_MEMBLOCK_REGIONS			128
#define INIT_PHYSMEM_REGIONS			4

#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
# define INIT_MEMBLOCK_RESERVED_REGIONS		INIT_MEMBLOCK_REGIONS
#endif
/**
 * DOC: memblock overview
 *
 * Memblock is a method of managing memory regions during the early
 * boot period when the usual kernel memory allocators are not up and
 * running.
 *
 * Memblock views the system memory as collections of contiguous
 * regions. There are several types of these collections:
 *
 * * ``memory`` - describes the physical memory available to the
 *   kernel; this may differ from the actual physical memory installed
 *   in the system, for instance when the memory is restricted with
 *   the ``mem=`` command line parameter
 * * ``reserved`` - describes the regions that were allocated
 * * ``physmem`` - describes the actual physical memory available during
 *   boot regardless of the possible restrictions and memory hot(un)plug;
 *   the ``physmem`` type is only available on some architectures.
 *
 * Each region is represented by struct memblock_region that
 * defines the region extents, its attributes and NUMA node id on NUMA
 * systems. Every memory type is described by the struct memblock_type
 * which contains an array of memory regions along with
 * the allocator metadata. The "memory" and "reserved" types are nicely
 * wrapped with struct memblock. This structure is statically
 * initialized at build time. The region arrays are initially sized to
 * %INIT_MEMBLOCK_REGIONS for "memory" and %INIT_MEMBLOCK_RESERVED_REGIONS
 * for "reserved". The region array for "physmem" is initially sized to
 * %INIT_PHYSMEM_REGIONS.
 * The memblock_allow_resize() function enables automatic resizing of
 * the region arrays during addition of new regions. This feature should
 * be used with care so that memory allocated for the region array will
 * not overlap with areas that should be reserved, for example initrd.
 *
 * The early architecture setup should tell memblock what the physical
 * memory layout is by using memblock_add() or memblock_add_node()
 * functions. The first function does not assign the region to a NUMA
 * node and it is appropriate for UMA systems. Yet, it is possible to
 * use it on NUMA systems as well and assign the region to a NUMA node
 * later in the setup process using memblock_set_node(). The
 * memblock_add_node() performs such an assignment directly.
 *
 * Once memblock is set up the memory can be allocated using one of the
 * API variants:
 *
 * * memblock_phys_alloc*() - these functions return the **physical**
 *   address of the allocated memory
 * * memblock_alloc*() - these functions return the **virtual** address
 *   of the allocated memory.
 *
 * Note that both API variants use implicit assumptions about allowed
 * memory ranges and the fallback methods. Consult the documentation
 * of memblock_alloc_internal() and memblock_alloc_range_nid()
 * functions for a more elaborate description.
 *
 * As the system boot progresses, the architecture specific mem_init()
 * function frees all the memory to the buddy page allocator.
 *
 * Unless an architecture enables %CONFIG_ARCH_KEEP_MEMBLOCK, the
 * memblock data structures (except "physmem") will be discarded after the
 * system initialization completes.
 */

#ifndef CONFIG_NUMA
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS];
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,
	.memory.name		= "memory",

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_RESERVED_REGIONS,
	.reserved.name		= "reserved",

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
struct memblock_type physmem = {
	.regions		= memblock_physmem_init_regions,
	.cnt			= 1,	/* empty dummy entry */
	.max			= INIT_PHYSMEM_REGIONS,
	.name			= "physmem",
};
#endif

/*
 * keep a pointer to &memblock.memory in the text section to use it in
 * __next_mem_range() and its helpers.
 * For architectures that do not keep memblock data after init, this
 * pointer will be reset to NULL at memblock_discard()
 */
static __refdata struct memblock_type *memblock_memory = &memblock.memory;

#define for_each_memblock_type(i, memblock_type, rgn)			\
	for (i = 0, rgn = &memblock_type->regions[0];			\
	     i < memblock_type->cnt;					\
	     i++, rgn = &memblock_type->regions[i])
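
/*
 * Illustrative use of the iterator above (a sketch, not code taken from
 * this file): walk every region of a given type.
 *
 *	struct memblock_region *rgn;
 *	int i;
 *
 *	for_each_memblock_type(i, (&memblock.memory), rgn)
 *		pr_info("region %d: base=%pa size=%pa\n",
 *			i, &rgn->base, &rgn->size);
 */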

#define memblock_dbg(fmt, ...)						\
	do {								\
		if (memblock_debug)					\
			pr_info(fmt, ##__VA_ARGS__);			\
	} while (0)

static int memblock_debug __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

static enum memblock_flags __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, PHYS_ADDR_MAX - base);
}

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	memblock_cap_size(base, &size);

	for (i = 0; i < type->cnt; i++)
		if (memblock_addrs_overlap(base, size, type->regions[i].base,
					   type->regions[i].size))
			break;
	return i < type->cnt;
}
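
/*
 * A worked example of the overlap test above (numbers are illustrative):
 * [0x1000, 0x2000) vs [0x1800, 0x2800) overlap, because
 * 0x1000 < 0x2800 and 0x1800 < 0x2000. The adjacent pair
 * [0x1000, 0x2000) and [0x2000, 0x3000) does not, since 0x2000 < 0x2000
 * is false - the intervals are treated as half-open.
 */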

/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}
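
/*
 * Candidate arithmetic from the loop above, with illustrative numbers:
 * for a free range [0x1234, 0x9000), size = 0x2000 and align = 0x1000,
 * round_up(0x1234, 0x1000) yields cand = 0x2000; the range still holds
 * 0x9000 - 0x2000 = 0x7000 >= 0x2000 bytes, so 0x2000 is returned.
 * The top-down variant below instead computes
 * round_down(this_end - size, align) and checks it against this_start.
 */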

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					enum memblock_flags flags)
{
	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
	    end == MEMBLOCK_ALLOC_KASAN)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);

	if (memblock_bottom_up())
		return __memblock_find_range_bottom_up(start, end, size, align,
						       nid, flags);
	else
		return __memblock_find_range_top_down(start, end, size, align,
						      nid, flags);
}

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	enum memblock_flags flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					  NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
/**
 * memblock_discard - discard memory and reserved arrays if they were allocated
 */
void __init memblock_discard(void)
{
	phys_addr_t addr, size;

	if (memblock.reserved.regions != memblock_reserved_init_regions) {
		addr = __pa(memblock.reserved.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.reserved.max);
		memblock_free_late(addr, size);
	}

	if (memblock.memory.regions != memblock_memory_init_regions) {
		addr = __pa(memblock.memory.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.memory.max);
		memblock_free_late(addr, size);
	}

	memblock_memory = NULL;
}
#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * Return:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr, new_end;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new one aligned to PAGE_SIZE,
	 * so we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       type->name, type->max, type->max * 2);
		return -1;
	}

	new_end = addr + new_size - 1;
	memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]",
			type->name, type->max * 2, &addr, &new_end);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free_ptr(old_array, old_alloc_size);

	/*
	 * Reserve the new array if that comes from the memblock.  Otherwise,
	 * we needn't do it
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}
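
/*
 * A back-of-the-envelope example for the doubling above (the descriptor
 * size is an assumption - sizeof(struct memblock_region) varies with
 * config): with a 32-byte descriptor, the initial 128-entry array is
 * 4096 bytes, so old_alloc_size = PAGE_ALIGN(4096) = 4096 and
 * new_alloc_size = PAGE_ALIGN(8192) = 8192 on a 4K-page system. The
 * page alignment is what lets memblock_discard() hand the whole array
 * back with memblock_free_late().
 */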

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}
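
/*
 * Merge example (illustrative values): regions [0x1000, 0x2000) and
 * [0x2000, 0x3000) with the same nid and flags collapse into a single
 * [0x1000, 0x3000) entry; if either the node ids or the flags differ,
 * or a hole separates the two, both entries are kept as-is.
 */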

/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: node id of the new region
 * @flags: flags of the new region
 *
 * Insert new memblock region [@base, @base + @size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   enum memblock_flags flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, enum memblock_flags flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx, nr_new;
	struct memblock_region *rgn;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_NUMA
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != rgn->flags);
			nr_new++;
			if (insert)
				memblock_insert_region(type, idx++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, idx, base, end - base,
					       nid, flags);
	}

	if (!nr_new)
		return 0;

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}
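
/*
 * A worked example of the two-pass scheme above (addresses illustrative):
 * adding [0x1000, 0x5000) to a type that already holds [0x2000, 0x3000)
 * first counts nr_new = 2 - the uncovered pieces [0x1000, 0x2000) and
 * [0x3000, 0x5000) - resizing the array if needed, then inserts both on
 * the second pass; memblock_merge_regions() finally fuses all three into
 * one [0x1000, 0x5000) entry when nid and flags match.
 */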

/**
 * memblock_add_node - add new memblock region within a NUMA node
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				      int nid)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] nid=%d %pS\n", __func__,
		     &base, &end, nid, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

/**
 * memblock_add - add new memblock region
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}
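
/*
 * Typical caller-side use, e.g. from an early firmware-table parser.
 * This is a sketch - for_each_ram_bank() is a hypothetical iterator,
 * not an API provided here:
 *
 *	for_each_ram_bank(bank)
 *		memblock_add(bank->start, bank->size);
 *
 * With memblock debugging enabled, each call logs its range and caller
 * via the memblock_dbg() line above.
 */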

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base, @base + @size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx;
	struct memblock_region *rgn;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, idx, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, idx--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = idx;
			*end_rgn = idx + 1;
		}
	}

	return 0;
}
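
/*
 * Isolation example (illustrative): with a single region [0x1000, 0x4000)
 * and a request to isolate [0x2000, 0x3000), the walk splits it into
 * [0x1000, 0x2000), [0x2000, 0x3000) and [0x3000, 0x4000), then reports
 * *start_rgn = 1 and *end_rgn = 2 so that callers such as
 * memblock_remove_range() or memblock_setclr_flag() can operate on the
 * middle entry alone.
 */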

static int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_remove_range(&memblock.memory, base, size);
}

/**
 * memblock_free_ptr - free boot memory allocation
 * @ptr: starting address of the boot memory allocation
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
void __init_memblock memblock_free_ptr(void *ptr, size_t size)
{
	if (ptr)
		memblock_phys_free(__pa(ptr), size);
}

/**
 * memblock_phys_free - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
int __init_memblock memblock_phys_free(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	kmemleak_free_part_phys(base, size);
	return memblock_remove_range(&memblock.reserved, base, size);
}
| 830 | |
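| | /* |
| |  * Example (illustrative sketch, not part of the kernel tree): a |
| |  * boot-time buffer allocated from memblock and released again before |
| |  * the buddy allocator is up.  memblock_phys_free() takes a physical |
| |  * address, memblock_free_ptr() a virtual one; PAGE_SIZE is just an |
| |  * arbitrary size here. |
| |  * |
| |  *	phys_addr_t pa = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE); |
| |  * |
| |  *	if (pa) |
| |  *		memblock_phys_free(pa, PAGE_SIZE); |
| |  * |
| |  *	void *va = memblock_alloc(PAGE_SIZE, PAGE_SIZE); |
| |  * |
| |  *	if (va) |
| |  *		memblock_free_ptr(va, PAGE_SIZE); |
| |  */ |
| | |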
Alexander Kuleshov | f705ac4 | 2016-05-20 16:57:35 -0700 | [diff] [blame] | 831 | int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size) |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 832 | { |
Miles Chen | 5d63f81 | 2017-02-22 15:46:42 -0800 | [diff] [blame] | 833 | phys_addr_t end = base + size - 1; |
| 834 | |
Anshuman Khandual | a090d71 | 2020-01-30 22:14:23 -0800 | [diff] [blame] | 835 | memblock_dbg("%s: [%pa-%pa] %pS\n", __func__, |
Miles Chen | 5d63f81 | 2017-02-22 15:46:42 -0800 | [diff] [blame] | 836 | &base, &end, (void *)_RET_IP_); |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 837 | |
Alexander Kuleshov | f705ac4 | 2016-05-20 16:57:35 -0700 | [diff] [blame] | 838 | return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0); |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 839 | } |
| 840 | |
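| | /* |
| |  * Example (illustrative sketch): early platform code typically calls |
| |  * memblock_reserve() for ranges the kernel must not allocate from, |
| |  * before any memblock allocation happens.  The address and SZ_1M |
| |  * (from <linux/sizes.h>) below are hypothetical: |
| |  * |
| |  *	if (memblock_reserve(0x80000000, SZ_1M)) |
| |  *		pr_err("failed to reserve firmware region\n"); |
| |  */ |
| | |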
Anshuman Khandual | 02634a4 | 2020-01-30 22:14:20 -0800 | [diff] [blame] | 841 | #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP |
| 842 | int __init_memblock memblock_physmem_add(phys_addr_t base, phys_addr_t size) |
| 843 | { |
| 844 | phys_addr_t end = base + size - 1; |
| 845 | |
| 846 | memblock_dbg("%s: [%pa-%pa] %pS\n", __func__, |
| 847 | &base, &end, (void *)_RET_IP_); |
| 848 | |
David Hildenbrand | 7764990 | 2020-07-01 16:18:29 +0200 | [diff] [blame] | 849 | return memblock_add_range(&physmem, base, size, MAX_NUMNODES, 0); |
Anshuman Khandual | 02634a4 | 2020-01-30 22:14:20 -0800 | [diff] [blame] | 850 | } |
| 851 | #endif |
| 852 | |
Tejun Heo | 35fd080 | 2011-07-12 11:15:59 +0200 | [diff] [blame] | 853 | /** |
Mike Rapoport | 47cec44 | 2018-06-30 17:55:02 +0300 | [diff] [blame] | 854 | * memblock_setclr_flag - set or clear flag for a memory region |
| 855 | * @base: base address of the region |
| 856 | * @size: size of the region |
| 857 | * @set: set or clear the flag |
Haitao Shi | 8958b24 | 2020-12-15 20:47:26 -0800 | [diff] [blame] | 858 | * @flag: the flag to update |
Tang Chen | 66b16ed | 2014-01-21 15:49:23 -0800 | [diff] [blame] | 859 | * |
Tony Luck | 4308ce1 | 2014-12-12 16:54:59 -0800 | [diff] [blame] | 860 | * This function isolates the region [@base, @base + @size) and sets/clears @flag |
Tang Chen | 66b16ed | 2014-01-21 15:49:23 -0800 | [diff] [blame] | 861 | * |
Mike Rapoport | 47cec44 | 2018-06-30 17:55:02 +0300 | [diff] [blame] | 862 | * Return: 0 on success, -errno on failure. |
Tang Chen | 66b16ed | 2014-01-21 15:49:23 -0800 | [diff] [blame] | 863 | */ |
Tony Luck | 4308ce1 | 2014-12-12 16:54:59 -0800 | [diff] [blame] | 864 | static int __init_memblock memblock_setclr_flag(phys_addr_t base, |
| 865 | phys_addr_t size, int set, int flag) |
Tang Chen | 66b16ed | 2014-01-21 15:49:23 -0800 | [diff] [blame] | 866 | { |
| 867 | struct memblock_type *type = &memblock.memory; |
| 868 | int i, ret, start_rgn, end_rgn; |
| 869 | |
| 870 | ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn); |
| 871 | if (ret) |
| 872 | return ret; |
| 873 | |
Mike Rapoport | fe14512 | 2019-03-11 23:30:46 -0700 | [diff] [blame] | 874 | for (i = start_rgn; i < end_rgn; i++) { |
| 875 | struct memblock_region *r = &type->regions[i]; |
| 876 | |
Tony Luck | 4308ce1 | 2014-12-12 16:54:59 -0800 | [diff] [blame] | 877 | if (set) |
Mike Rapoport | fe14512 | 2019-03-11 23:30:46 -0700 | [diff] [blame] | 878 | r->flags |= flag; |
Tony Luck | 4308ce1 | 2014-12-12 16:54:59 -0800 | [diff] [blame] | 879 | else |
Mike Rapoport | fe14512 | 2019-03-11 23:30:46 -0700 | [diff] [blame] | 880 | r->flags &= ~flag; |
| 881 | } |
Tang Chen | 66b16ed | 2014-01-21 15:49:23 -0800 | [diff] [blame] | 882 | |
| 883 | memblock_merge_regions(type); |
| 884 | return 0; |
| 885 | } |
| 886 | |
| 887 | /** |
Tony Luck | 4308ce1 | 2014-12-12 16:54:59 -0800 | [diff] [blame] | 888 | * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG. |
| 889 | * @base: the base phys addr of the region |
| 890 | * @size: the size of the region |
| 891 | * |
Mike Rapoport | 47cec44 | 2018-06-30 17:55:02 +0300 | [diff] [blame] | 892 | * Return: 0 on success, -errno on failure. |
Tony Luck | 4308ce1 | 2014-12-12 16:54:59 -0800 | [diff] [blame] | 893 | */ |
| 894 | int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size) |
| 895 | { |
| 896 | return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG); |
| 897 | } |
| 898 | |
| 899 | /** |
Tang Chen | 66b16ed | 2014-01-21 15:49:23 -0800 | [diff] [blame] | 900 | * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region. |
| 901 | * @base: the base phys addr of the region |
| 902 | * @size: the size of the region |
| 903 | * |
Mike Rapoport | 47cec44 | 2018-06-30 17:55:02 +0300 | [diff] [blame] | 904 | * Return: 0 on success, -errno on failure. |
Tang Chen | 66b16ed | 2014-01-21 15:49:23 -0800 | [diff] [blame] | 905 | */ |
| 906 | int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size) |
| 907 | { |
Tony Luck | 4308ce1 | 2014-12-12 16:54:59 -0800 | [diff] [blame] | 908 | return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG); |
Tang Chen | 66b16ed | 2014-01-21 15:49:23 -0800 | [diff] [blame] | 909 | } |
| 910 | |
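| | /* |
| |  * Example (illustrative sketch): firmware-described hotpluggable |
| |  * memory can be flagged so that, with movable_node enabled, early |
| |  * allocations avoid it.  The range below is hypothetical: |
| |  * |
| |  *	memblock_mark_hotplug(0x100000000ULL, SZ_1G); |
| |  * |
| |  * Memory that must stay unmovable can later drop the flag again via |
| |  * memblock_clear_hotplug() over the same range. |
| |  */ |
| | |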
| 911 | /** |
Tony Luck | a3f5baf | 2015-06-24 16:58:12 -0700 | [diff] [blame] | 912 | * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR. |
| 913 | * @base: the base phys addr of the region |
| 914 | * @size: the size of the region |
| 915 | * |
Mike Rapoport | 47cec44 | 2018-06-30 17:55:02 +0300 | [diff] [blame] | 916 | * Return: 0 on success, -errno on failure. |
Tony Luck | a3f5baf | 2015-06-24 16:58:12 -0700 | [diff] [blame] | 917 | */ |
| 918 | int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size) |
| 919 | { |
| 920 | system_has_some_mirror = true; |
| 921 | |
| 922 | return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR); |
| 923 | } |
| 924 | |
Ard Biesheuvel | bf3d3cc | 2015-11-30 13:28:15 +0100 | [diff] [blame] | 925 | /** |
| 926 | * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP. |
| 927 | * @base: the base phys addr of the region |
| 928 | * @size: the size of the region |
| 929 | * |
Mike Rapoport | 9092d4f | 2021-06-30 18:51:16 -0700 | [diff] [blame] | 930 | * The memory regions marked with %MEMBLOCK_NOMAP will not be added to the |
| 931 | * direct mapping of the physical memory. These regions will still be |
| 932 | * covered by the memory map. The struct page representing NOMAP memory |
| 933 | * frames in the memory map will be PageReserved(). |
| 934 | * |
Mike Rapoport | 658aafc | 2021-10-21 10:09:29 +0300 | [diff] [blame] | 935 | * Note: if the memory being marked %MEMBLOCK_NOMAP was allocated from |
| 936 | * memblock, the caller must inform kmemleak to ignore that memory. |
| 937 | * |
Mike Rapoport | 47cec44 | 2018-06-30 17:55:02 +0300 | [diff] [blame] | 938 | * Return: 0 on success, -errno on failure. |
Ard Biesheuvel | bf3d3cc | 2015-11-30 13:28:15 +0100 | [diff] [blame] | 939 | */ |
| 940 | int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size) |
| 941 | { |
Mike Rapoport | 6c9a545 | 2021-10-21 10:09:28 +0300 | [diff] [blame] | 942 | return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP); |
Ard Biesheuvel | bf3d3cc | 2015-11-30 13:28:15 +0100 | [diff] [blame] | 943 | } |
Tony Luck | a3f5baf | 2015-06-24 16:58:12 -0700 | [diff] [blame] | 944 | |
| 945 | /** |
AKASHI Takahiro | 4c546b8 | 2017-04-03 11:23:54 +0900 | [diff] [blame] | 946 | * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region. |
| 947 | * @base: the base phys addr of the region |
| 948 | * @size: the size of the region |
| 949 | * |
Mike Rapoport | 47cec44 | 2018-06-30 17:55:02 +0300 | [diff] [blame] | 950 | * Return: 0 on success, -errno on failure. |
AKASHI Takahiro | 4c546b8 | 2017-04-03 11:23:54 +0900 | [diff] [blame] | 951 | */ |
| 952 | int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size) |
| 953 | { |
| 954 | return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP); |
| 955 | } |
| 956 | |
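| | /* |
| |  * Example (illustrative sketch): a region used by firmware runtime |
| |  * services can be kept out of the linear map while still getting |
| |  * struct pages; fw_base and fw_size are hypothetical values taken |
| |  * from the firmware memory description: |
| |  * |
| |  *	memblock_mark_nomap(fw_base, fw_size); |
| |  */ |
| | |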
Mike Rapoport | 9f3d5ea | 2020-10-13 16:58:25 -0700 | [diff] [blame] | 957 | static bool should_skip_region(struct memblock_type *type, |
| 958 | struct memblock_region *m, |
| 959 | int nid, int flags) |
Mike Rapoport | c9a688a | 2019-03-11 23:30:50 -0700 | [diff] [blame] | 960 | { |
| 961 | int m_nid = memblock_get_region_node(m); |
| 962 | |
Mike Rapoport | 9f3d5ea | 2020-10-13 16:58:25 -0700 | [diff] [blame] | 963 | /* we never skip regions when iterating memblock.reserved or physmem */ |
| 964 | if (type != memblock_memory) |
| 965 | return false; |
| 966 | |
Mike Rapoport | c9a688a | 2019-03-11 23:30:50 -0700 | [diff] [blame] | 967 | /* only memory regions are associated with nodes, check it */ |
| 968 | if (nid != NUMA_NO_NODE && nid != m_nid) |
| 969 | return true; |
| 970 | |
| 971 | /* skip hotpluggable memory regions if needed */ |
Mike Rapoport | 79e482e | 2021-07-23 15:50:26 -0700 | [diff] [blame] | 972 | if (movable_node_is_enabled() && memblock_is_hotpluggable(m) && |
| 973 | !(flags & MEMBLOCK_HOTPLUG)) |
Mike Rapoport | c9a688a | 2019-03-11 23:30:50 -0700 | [diff] [blame] | 974 | return true; |
| 975 | |
| 976 | /* if we want mirror memory skip non-mirror memory regions */ |
| 977 | if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m)) |
| 978 | return true; |
| 979 | |
| 980 | /* skip nomap memory unless we were asked for it explicitly */ |
| 981 | if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m)) |
| 982 | return true; |
| 983 | |
| 984 | return false; |
| 985 | } |
| 986 | |
Robin Holt | 8e7a7f8 | 2015-06-30 14:56:41 -0700 | [diff] [blame] | 987 | /** |
Mike Rapoport | a297413 | 2019-03-11 23:30:54 -0700 | [diff] [blame] | 988 | * __next_mem_range - next function for for_each_free_mem_range() etc. |
Tejun Heo | 35fd080 | 2011-07-12 11:15:59 +0200 | [diff] [blame] | 989 | * @idx: pointer to u64 loop variable |
Grygorii Strashko | b115423 | 2014-01-21 15:50:16 -0800 | [diff] [blame] | 990 | * @nid: node selector, %NUMA_NO_NODE for all nodes |
Tony Luck | fc6daaf | 2015-06-24 16:58:09 -0700 | [diff] [blame] | 991 | * @flags: pick from blocks based on memory attributes |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 992 | * @type_a: pointer to memblock_type from where the range is taken |
| 993 | * @type_b: pointer to memblock_type which excludes memory from being taken |
Wanpeng Li | dad7557 | 2012-06-20 12:53:01 -0700 | [diff] [blame] | 994 | * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL |
| 995 | * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL |
| 996 | * @out_nid: ptr to int for nid of the range, can be %NULL |
Tejun Heo | 35fd080 | 2011-07-12 11:15:59 +0200 | [diff] [blame] | 997 | * |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 998 | * Find the first area from *@idx which matches @nid, fill the out |
Tejun Heo | 35fd080 | 2011-07-12 11:15:59 +0200 | [diff] [blame] | 999 | * parameters, and update *@idx for the next iteration. The lower 32bit of |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1000 | * *@idx contains index into type_a and the upper 32bit indexes the |
| 1001 | * areas before each region in type_b. For example, if type_b regions |
Tejun Heo | 35fd080 | 2011-07-12 11:15:59 +0200 | [diff] [blame] | 1002 | * look like the following, |
| 1003 | * |
| 1004 | * 0:[0-16), 1:[32-48), 2:[128-130) |
| 1005 | * |
| 1006 | * The upper 32bit indexes the following regions. |
| 1007 | * |
| 1008 | * 0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX) |
| 1009 | * |
| 1010 | * As both region arrays are sorted, the function advances the two indices |
| 1011 | * in lockstep and returns each intersection. |
| 1012 | */ |
David Hildenbrand | 7764990 | 2020-07-01 16:18:29 +0200 | [diff] [blame] | 1013 | void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags, |
| 1014 | struct memblock_type *type_a, |
| 1015 | struct memblock_type *type_b, phys_addr_t *out_start, |
| 1016 | phys_addr_t *out_end, int *out_nid) |
Tejun Heo | 35fd080 | 2011-07-12 11:15:59 +0200 | [diff] [blame] | 1017 | { |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1018 | int idx_a = *idx & 0xffffffff; |
| 1019 | int idx_b = *idx >> 32; |
Grygorii Strashko | b115423 | 2014-01-21 15:50:16 -0800 | [diff] [blame] | 1020 | |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1021 | if (WARN_ONCE(nid == MAX_NUMNODES, |
| 1022 | "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n")) |
Grygorii Strashko | 560dca27 | 2014-01-21 15:50:55 -0800 | [diff] [blame] | 1023 | nid = NUMA_NO_NODE; |
Tejun Heo | 35fd080 | 2011-07-12 11:15:59 +0200 | [diff] [blame] | 1024 | |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1025 | for (; idx_a < type_a->cnt; idx_a++) { |
| 1026 | struct memblock_region *m = &type_a->regions[idx_a]; |
| 1027 | |
Tejun Heo | 35fd080 | 2011-07-12 11:15:59 +0200 | [diff] [blame] | 1028 | phys_addr_t m_start = m->base; |
| 1029 | phys_addr_t m_end = m->base + m->size; |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1030 | int m_nid = memblock_get_region_node(m); |
Tejun Heo | 35fd080 | 2011-07-12 11:15:59 +0200 | [diff] [blame] | 1031 | |
Mike Rapoport | 9f3d5ea | 2020-10-13 16:58:25 -0700 | [diff] [blame] | 1032 | if (should_skip_region(type_a, m, nid, flags)) |
Ard Biesheuvel | bf3d3cc | 2015-11-30 13:28:15 +0100 | [diff] [blame] | 1033 | continue; |
| 1034 | |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1035 | if (!type_b) { |
| 1036 | if (out_start) |
| 1037 | *out_start = m_start; |
| 1038 | if (out_end) |
| 1039 | *out_end = m_end; |
| 1040 | if (out_nid) |
| 1041 | *out_nid = m_nid; |
| 1042 | idx_a++; |
| 1043 | *idx = (u32)idx_a | (u64)idx_b << 32; |
| 1044 | return; |
| 1045 | } |
Tejun Heo | 35fd080 | 2011-07-12 11:15:59 +0200 | [diff] [blame] | 1046 | |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1047 | /* scan areas before each reservation */ |
| 1048 | for (; idx_b < type_b->cnt + 1; idx_b++) { |
| 1049 | struct memblock_region *r; |
| 1050 | phys_addr_t r_start; |
| 1051 | phys_addr_t r_end; |
| 1052 | |
| 1053 | r = &type_b->regions[idx_b]; |
| 1054 | r_start = idx_b ? r[-1].base + r[-1].size : 0; |
| 1055 | r_end = idx_b < type_b->cnt ? |
Stefan Agner | 1c4bc43 | 2018-06-07 17:06:15 -0700 | [diff] [blame] | 1056 | r->base : PHYS_ADDR_MAX; |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1057 | |
| 1058 | /* |
| 1059 | * if idx_b advanced past idx_a, |
| 1060 | * break out to advance idx_a |
| 1061 | */ |
Tejun Heo | 35fd080 | 2011-07-12 11:15:59 +0200 | [diff] [blame] | 1062 | if (r_start >= m_end) |
| 1063 | break; |
| 1064 | /* if the two regions intersect, we're done */ |
| 1065 | if (m_start < r_end) { |
| 1066 | if (out_start) |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1067 | *out_start = |
| 1068 | max(m_start, r_start); |
Tejun Heo | 35fd080 | 2011-07-12 11:15:59 +0200 | [diff] [blame] | 1069 | if (out_end) |
| 1070 | *out_end = min(m_end, r_end); |
| 1071 | if (out_nid) |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1072 | *out_nid = m_nid; |
Tejun Heo | 35fd080 | 2011-07-12 11:15:59 +0200 | [diff] [blame] | 1073 | /* |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1074 | * The region which ends first is |
| 1075 | * advanced for the next iteration. |
Tejun Heo | 35fd080 | 2011-07-12 11:15:59 +0200 | [diff] [blame] | 1076 | */ |
| 1077 | if (m_end <= r_end) |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1078 | idx_a++; |
Tejun Heo | 35fd080 | 2011-07-12 11:15:59 +0200 | [diff] [blame] | 1079 | else |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1080 | idx_b++; |
| 1081 | *idx = (u32)idx_a | (u64)idx_b << 32; |
Tejun Heo | 35fd080 | 2011-07-12 11:15:59 +0200 | [diff] [blame] | 1082 | return; |
| 1083 | } |
| 1084 | } |
| 1085 | } |
| 1086 | |
| 1087 | /* signal end of iteration */ |
| 1088 | *idx = ULLONG_MAX; |
| 1089 | } |
| 1090 | |
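| | /* |
| |  * Example (illustrative sketch): the encoded @idx is normally hidden |
| |  * behind wrappers such as for_each_free_mem_range(), which walks the |
| |  * intersections computed above (memory minus reserved): |
| |  * |
| |  *	phys_addr_t start, end; |
| |  *	u64 i; |
| |  * |
| |  *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, |
| |  *				&start, &end, NULL) |
| |  *		pr_info("free: [%pa-%pa]\n", &start, &end); |
| |  */ |
| | |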
Tejun Heo | 7bd0b0f | 2011-12-08 10:22:09 -0800 | [diff] [blame] | 1091 | /** |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1092 | * __next_mem_range_rev - generic next function for for_each_*_range_rev() |
| 1093 | * |
Tejun Heo | 7bd0b0f | 2011-12-08 10:22:09 -0800 | [diff] [blame] | 1094 | * @idx: pointer to u64 loop variable |
Alexander Kuleshov | ad5ea8c | 2015-09-08 15:04:22 -0700 | [diff] [blame] | 1095 | * @nid: node selector, %NUMA_NO_NODE for all nodes |
Tony Luck | fc6daaf | 2015-06-24 16:58:09 -0700 | [diff] [blame] | 1096 | * @flags: pick from blocks based on memory attributes |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1097 | * @type_a: pointer to memblock_type from where the range is taken |
| 1098 | * @type_b: pointer to memblock_type which excludes memory from being taken |
Wanpeng Li | dad7557 | 2012-06-20 12:53:01 -0700 | [diff] [blame] | 1099 | * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL |
| 1100 | * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL |
| 1101 | * @out_nid: ptr to int for nid of the range, can be %NULL |
Tejun Heo | 7bd0b0f | 2011-12-08 10:22:09 -0800 | [diff] [blame] | 1102 | * |
Mike Rapoport | 47cec44 | 2018-06-30 17:55:02 +0300 | [diff] [blame] | 1103 | * Finds the next range from type_a which is not marked as unsuitable |
| 1104 | * in type_b. |
| 1105 | * |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1106 | * Reverse of __next_mem_range(). |
Tejun Heo | 7bd0b0f | 2011-12-08 10:22:09 -0800 | [diff] [blame] | 1107 | */ |
Mike Rapoport | e1720fe | 2018-06-30 17:55:01 +0300 | [diff] [blame] | 1108 | void __init_memblock __next_mem_range_rev(u64 *idx, int nid, |
| 1109 | enum memblock_flags flags, |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1110 | struct memblock_type *type_a, |
| 1111 | struct memblock_type *type_b, |
| 1112 | phys_addr_t *out_start, |
| 1113 | phys_addr_t *out_end, int *out_nid) |
Tejun Heo | 7bd0b0f | 2011-12-08 10:22:09 -0800 | [diff] [blame] | 1114 | { |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1115 | int idx_a = *idx & 0xffffffff; |
| 1116 | int idx_b = *idx >> 32; |
Grygorii Strashko | b115423 | 2014-01-21 15:50:16 -0800 | [diff] [blame] | 1117 | |
Grygorii Strashko | 560dca27 | 2014-01-21 15:50:55 -0800 | [diff] [blame] | 1118 | if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n")) |
| 1119 | nid = NUMA_NO_NODE; |
Tejun Heo | 7bd0b0f | 2011-12-08 10:22:09 -0800 | [diff] [blame] | 1120 | |
| 1121 | if (*idx == (u64)ULLONG_MAX) { |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1122 | idx_a = type_a->cnt - 1; |
zijun_hu | e47608a | 2016-08-04 15:32:00 -0700 | [diff] [blame] | 1123 | if (type_b != NULL) |
| 1124 | idx_b = type_b->cnt; |
| 1125 | else |
| 1126 | idx_b = 0; |
Tejun Heo | 7bd0b0f | 2011-12-08 10:22:09 -0800 | [diff] [blame] | 1127 | } |
| 1128 | |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1129 | for (; idx_a >= 0; idx_a--) { |
| 1130 | struct memblock_region *m = &type_a->regions[idx_a]; |
| 1131 | |
Tejun Heo | 7bd0b0f | 2011-12-08 10:22:09 -0800 | [diff] [blame] | 1132 | phys_addr_t m_start = m->base; |
| 1133 | phys_addr_t m_end = m->base + m->size; |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1134 | int m_nid = memblock_get_region_node(m); |
Tejun Heo | 7bd0b0f | 2011-12-08 10:22:09 -0800 | [diff] [blame] | 1135 | |
Mike Rapoport | 9f3d5ea | 2020-10-13 16:58:25 -0700 | [diff] [blame] | 1136 | if (should_skip_region(type_a, m, nid, flags)) |
Ard Biesheuvel | bf3d3cc | 2015-11-30 13:28:15 +0100 | [diff] [blame] | 1137 | continue; |
| 1138 | |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1139 | if (!type_b) { |
| 1140 | if (out_start) |
| 1141 | *out_start = m_start; |
| 1142 | if (out_end) |
| 1143 | *out_end = m_end; |
| 1144 | if (out_nid) |
| 1145 | *out_nid = m_nid; |
zijun_hu | fb399b4 | 2016-07-28 15:48:56 -0700 | [diff] [blame] | 1146 | idx_a--; |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1147 | *idx = (u32)idx_a | (u64)idx_b << 32; |
| 1148 | return; |
| 1149 | } |
Tejun Heo | 7bd0b0f | 2011-12-08 10:22:09 -0800 | [diff] [blame] | 1150 | |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1151 | /* scan areas before each reservation */ |
| 1152 | for (; idx_b >= 0; idx_b--) { |
| 1153 | struct memblock_region *r; |
| 1154 | phys_addr_t r_start; |
| 1155 | phys_addr_t r_end; |
| 1156 | |
| 1157 | r = &type_b->regions[idx_b]; |
| 1158 | r_start = idx_b ? r[-1].base + r[-1].size : 0; |
| 1159 | r_end = idx_b < type_b->cnt ? |
Stefan Agner | 1c4bc43 | 2018-06-07 17:06:15 -0700 | [diff] [blame] | 1160 | r->base : PHYS_ADDR_MAX; |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1161 | /* |
| 1162 | * if idx_b advanced past idx_a, |
| 1163 | * break out to advance idx_a |
| 1164 | */ |
Tejun Heo | 7bd0b0f | 2011-12-08 10:22:09 -0800 | [diff] [blame] | 1166 | if (r_end <= m_start) |
| 1167 | break; |
| 1168 | /* if the two regions intersect, we're done */ |
| 1169 | if (m_end > r_start) { |
| 1170 | if (out_start) |
| 1171 | *out_start = max(m_start, r_start); |
| 1172 | if (out_end) |
| 1173 | *out_end = min(m_end, r_end); |
| 1174 | if (out_nid) |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1175 | *out_nid = m_nid; |
Tejun Heo | 7bd0b0f | 2011-12-08 10:22:09 -0800 | [diff] [blame] | 1176 | if (m_start >= r_start) |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1177 | idx_a--; |
Tejun Heo | 7bd0b0f | 2011-12-08 10:22:09 -0800 | [diff] [blame] | 1178 | else |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1179 | idx_b--; |
| 1180 | *idx = (u32)idx_a | (u64)idx_b << 32; |
Tejun Heo | 7bd0b0f | 2011-12-08 10:22:09 -0800 | [diff] [blame] | 1181 | return; |
| 1182 | } |
| 1183 | } |
| 1184 | } |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1185 | /* signal end of iteration */ |
Tejun Heo | 7bd0b0f | 2011-12-08 10:22:09 -0800 | [diff] [blame] | 1186 | *idx = ULLONG_MAX; |
| 1187 | } |
| 1188 | |
Tejun Heo | 7c0caeb | 2011-07-14 11:43:42 +0200 | [diff] [blame] | 1189 | /* |
Chen Chang | 45e7981 | 2018-11-16 15:08:57 -0800 | [diff] [blame] | 1190 | * Common iterator interface used to define for_each_mem_pfn_range(). |
Tejun Heo | 7c0caeb | 2011-07-14 11:43:42 +0200 | [diff] [blame] | 1191 | */ |
| 1192 | void __init_memblock __next_mem_pfn_range(int *idx, int nid, |
| 1193 | unsigned long *out_start_pfn, |
| 1194 | unsigned long *out_end_pfn, int *out_nid) |
| 1195 | { |
| 1196 | struct memblock_type *type = &memblock.memory; |
| 1197 | struct memblock_region *r; |
Mike Rapoport | d622abf | 2020-06-03 15:56:53 -0700 | [diff] [blame] | 1198 | int r_nid; |
Tejun Heo | 7c0caeb | 2011-07-14 11:43:42 +0200 | [diff] [blame] | 1199 | |
| 1200 | while (++*idx < type->cnt) { |
| 1201 | r = &type->regions[*idx]; |
Mike Rapoport | d622abf | 2020-06-03 15:56:53 -0700 | [diff] [blame] | 1202 | r_nid = memblock_get_region_node(r); |
Tejun Heo | 7c0caeb | 2011-07-14 11:43:42 +0200 | [diff] [blame] | 1203 | |
| 1204 | if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size)) |
| 1205 | continue; |
Mike Rapoport | d622abf | 2020-06-03 15:56:53 -0700 | [diff] [blame] | 1206 | if (nid == MAX_NUMNODES || nid == r_nid) |
Tejun Heo | 7c0caeb | 2011-07-14 11:43:42 +0200 | [diff] [blame] | 1207 | break; |
| 1208 | } |
| 1209 | if (*idx >= type->cnt) { |
| 1210 | *idx = -1; |
| 1211 | return; |
| 1212 | } |
| 1213 | |
| 1214 | if (out_start_pfn) |
| 1215 | *out_start_pfn = PFN_UP(r->base); |
| 1216 | if (out_end_pfn) |
| 1217 | *out_end_pfn = PFN_DOWN(r->base + r->size); |
| 1218 | if (out_nid) |
Mike Rapoport | d622abf | 2020-06-03 15:56:53 -0700 | [diff] [blame] | 1219 | *out_nid = r_nid; |
Tejun Heo | 7c0caeb | 2011-07-14 11:43:42 +0200 | [diff] [blame] | 1220 | } |
| 1221 | |
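| | /* |
| |  * Example (illustrative sketch): early page-init code walks the PFN |
| |  * ranges of every memory region with the wrapper built on the |
| |  * iterator above: |
| |  * |
| |  *	unsigned long start_pfn, end_pfn; |
| |  *	int i, nid; |
| |  * |
| |  *	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) |
| |  *		pr_info("pfns %lx-%lx nid %d\n", start_pfn, end_pfn, nid); |
| |  */ |
| | |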
| 1222 | /** |
| 1223 | * memblock_set_node - set node ID on memblock regions |
| 1224 | * @base: base of area to set node ID for |
| 1225 | * @size: size of area to set node ID for |
Tang Chen | e7e8de5 | 2014-01-21 15:49:26 -0800 | [diff] [blame] | 1226 | * @type: memblock type to set node ID for |
Tejun Heo | 7c0caeb | 2011-07-14 11:43:42 +0200 | [diff] [blame] | 1227 | * @nid: node ID to set |
| 1228 | * |
Mike Rapoport | 47cec44 | 2018-06-30 17:55:02 +0300 | [diff] [blame] | 1229 | * Set the nid of memblock @type regions in [@base, @base + @size) to @nid. |
Tejun Heo | 7c0caeb | 2011-07-14 11:43:42 +0200 | [diff] [blame] | 1230 | * Regions which cross the area boundaries are split as necessary. |
| 1231 | * |
Mike Rapoport | 47cec44 | 2018-06-30 17:55:02 +0300 | [diff] [blame] | 1232 | * Return: |
Tejun Heo | 7c0caeb | 2011-07-14 11:43:42 +0200 | [diff] [blame] | 1233 | * 0 on success, -errno on failure. |
| 1234 | */ |
| 1235 | int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size, |
Tang Chen | e7e8de5 | 2014-01-21 15:49:26 -0800 | [diff] [blame] | 1236 | struct memblock_type *type, int nid) |
Tejun Heo | 7c0caeb | 2011-07-14 11:43:42 +0200 | [diff] [blame] | 1237 | { |
Mike Rapoport | a9ee6cf | 2021-06-28 19:43:01 -0700 | [diff] [blame] | 1238 | #ifdef CONFIG_NUMA |
Tejun Heo | 6a9ceb3 | 2011-12-08 10:22:07 -0800 | [diff] [blame] | 1239 | int start_rgn, end_rgn; |
| 1240 | int i, ret; |
Tejun Heo | 7c0caeb | 2011-07-14 11:43:42 +0200 | [diff] [blame] | 1241 | |
Tejun Heo | 6a9ceb3 | 2011-12-08 10:22:07 -0800 | [diff] [blame] | 1242 | ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn); |
| 1243 | if (ret) |
| 1244 | return ret; |
Tejun Heo | 7c0caeb | 2011-07-14 11:43:42 +0200 | [diff] [blame] | 1245 | |
Tejun Heo | 6a9ceb3 | 2011-12-08 10:22:07 -0800 | [diff] [blame] | 1246 | for (i = start_rgn; i < end_rgn; i++) |
Wanpeng Li | e9d24ad | 2012-10-08 16:32:21 -0700 | [diff] [blame] | 1247 | memblock_set_region_node(&type->regions[i], nid); |
Tejun Heo | 7c0caeb | 2011-07-14 11:43:42 +0200 | [diff] [blame] | 1248 | |
| 1249 | memblock_merge_regions(type); |
Mike Rapoport | 3f08a30 | 2020-06-03 15:57:02 -0700 | [diff] [blame] | 1250 | #endif |
Tejun Heo | 7c0caeb | 2011-07-14 11:43:42 +0200 | [diff] [blame] | 1251 | return 0; |
| 1252 | } |
Mike Rapoport | 3f08a30 | 2020-06-03 15:57:02 -0700 | [diff] [blame] | 1253 | |
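| | /* |
| |  * Example (illustrative sketch): NUMA init code assigns each detected |
| |  * node range to its node id; node_base, node_size and nid are |
| |  * hypothetical values from the platform's NUMA description: |
| |  * |
| |  *	memblock_set_node(node_base, node_size, &memblock.memory, nid); |
| |  */ |
| | |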
Alexander Duyck | 837566e | 2019-05-13 17:21:17 -0700 | [diff] [blame] | 1254 | #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT |
| 1255 | /** |
| 1256 | * __next_mem_pfn_range_in_zone - iterator for for_each_*_range_in_zone() |
| 1257 | * |
| 1258 | * @idx: pointer to u64 loop variable |
| 1259 | * @zone: zone in which all of the memory blocks reside |
| 1260 | * @out_spfn: ptr to ulong for start pfn of the range, can be %NULL |
| 1261 | * @out_epfn: ptr to ulong for end pfn of the range, can be %NULL |
| 1262 | * |
| 1263 | * This function is a zone/pfn specific wrapper for the |
| 1264 | * for_each_mem_range type iterators. It is used by the deferred |
| 1265 | * memory init routines, which previously duplicated this |
| 1266 | * zone-clamping logic throughout the code. Centralizing it in one |
| 1267 | * iterator keeps the logic in a single location and gives those |
| 1268 | * routines everything they need. |
| 1269 | */ |
| 1270 | void __init_memblock |
| 1271 | __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone, |
| 1272 | unsigned long *out_spfn, unsigned long *out_epfn) |
| 1273 | { |
| 1274 | int zone_nid = zone_to_nid(zone); |
| 1275 | phys_addr_t spa, epa; |
| 1276 | int nid; |
| 1277 | |
| 1278 | __next_mem_range(idx, zone_nid, MEMBLOCK_NONE, |
| 1279 | &memblock.memory, &memblock.reserved, |
| 1280 | &spa, &epa, &nid); |
| 1281 | |
| 1282 | while (*idx != U64_MAX) { |
| 1283 | unsigned long epfn = PFN_DOWN(epa); |
| 1284 | unsigned long spfn = PFN_UP(spa); |
| 1285 | |
| 1286 | /* |
| 1287 | * Verify the end is at least past the start of the zone and |
| 1288 | * that we have at least one PFN to initialize. |
| 1289 | */ |
| 1290 | if (zone->zone_start_pfn < epfn && spfn < epfn) { |
| 1291 | /* if we went too far just stop searching */ |
| 1292 | if (zone_end_pfn(zone) <= spfn) { |
| 1293 | *idx = U64_MAX; |
| 1294 | break; |
| 1295 | } |
| 1296 | |
| 1297 | if (out_spfn) |
| 1298 | *out_spfn = max(zone->zone_start_pfn, spfn); |
| 1299 | if (out_epfn) |
| 1300 | *out_epfn = min(zone_end_pfn(zone), epfn); |
| 1301 | |
| 1302 | return; |
| 1303 | } |
| 1304 | |
| 1305 | __next_mem_range(idx, zone_nid, MEMBLOCK_NONE, |
| 1306 | &memblock.memory, &memblock.reserved, |
| 1307 | &spa, &epa, &nid); |
| 1308 | } |
| 1309 | |
| 1310 | /* signal end of iteration */ |
| 1311 | if (out_spfn) |
| 1312 | *out_spfn = ULONG_MAX; |
| 1313 | if (out_epfn) |
| 1314 | *out_epfn = 0; |
| 1315 | } |
| 1316 | |
| 1317 | #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ |
Tejun Heo | 7c0caeb | 2011-07-14 11:43:42 +0200 | [diff] [blame] | 1318 | |
Mike Rapoport | 92d12f9 | 2019-03-11 23:29:41 -0700 | [diff] [blame] | 1319 | /** |
| 1320 | * memblock_alloc_range_nid - allocate boot memory block |
| 1321 | * @size: size of memory block to be allocated in bytes |
| 1322 | * @align: alignment of the region and block's size |
| 1323 | * @start: the lower bound of the memory region to allocate (phys address) |
| 1324 | * @end: the upper bound of the memory region to allocate (phys address) |
| 1325 | * @nid: nid of the free area to find, %NUMA_NO_NODE for any node |
Yunfeng Ye | 0ac398b | 2019-11-30 17:56:27 -0800 | [diff] [blame] | 1326 | * @exact_nid: controls whether the allocation may fall back to other nodes |
Mike Rapoport | 92d12f9 | 2019-03-11 23:29:41 -0700 | [diff] [blame] | 1327 | * |
| 1328 | * The allocation is performed from memory region limited by |
Cao jin | 9583066 | 2019-11-30 17:56:24 -0800 | [diff] [blame] | 1329 | * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE. |
Mike Rapoport | 92d12f9 | 2019-03-11 23:29:41 -0700 | [diff] [blame] | 1330 | * |
Yunfeng Ye | 0ac398b | 2019-11-30 17:56:27 -0800 | [diff] [blame] | 1331 | * If the specified node cannot hold the requested memory and @exact_nid |
| 1332 | * is false, the allocation falls back to any node in the system. |
Mike Rapoport | 92d12f9 | 2019-03-11 23:29:41 -0700 | [diff] [blame] | 1333 | * |
| 1334 | * For systems with memory mirroring, the allocation is attempted first |
| 1335 | * from the regions with mirroring enabled and then retried from any |
| 1336 | * memory region. |
| 1337 | * |
| 1338 | * In addition, the function sets the min_count to 0 using kmemleak_alloc_phys() |
| 1339 | * for the allocated boot memory block, so that it is never reported as a leak. |
| 1340 | * |
| 1341 | * Return: |
| 1342 | * Physical address of allocated memory block on success, %0 on failure. |
| 1343 | */ |
Aslan Bakirov | 8676af1 | 2020-04-10 14:32:42 -0700 | [diff] [blame] | 1344 | phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size, |
Akinobu Mita | 2bfc286 | 2014-06-04 16:06:53 -0700 | [diff] [blame] | 1345 | phys_addr_t align, phys_addr_t start, |
Yunfeng Ye | 0ac398b | 2019-11-30 17:56:27 -0800 | [diff] [blame] | 1346 | phys_addr_t end, int nid, |
| 1347 | bool exact_nid) |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 1348 | { |
Mike Rapoport | 92d12f9 | 2019-03-11 23:29:41 -0700 | [diff] [blame] | 1349 | enum memblock_flags flags = choose_memblock_flags(); |
Benjamin Herrenschmidt | 6ed311b | 2010-07-12 14:36:48 +1000 | [diff] [blame] | 1350 | phys_addr_t found; |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 1351 | |
Mike Rapoport | 92d12f9 | 2019-03-11 23:29:41 -0700 | [diff] [blame] | 1352 | if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n")) |
| 1353 | nid = NUMA_NO_NODE; |
| 1354 | |
Mike Rapoport | 2f77080 | 2018-10-30 15:10:01 -0700 | [diff] [blame] | 1355 | if (!align) { |
| 1356 | /* Can't use WARNs this early in boot on powerpc */ |
| 1357 | dump_stack(); |
| 1358 | align = SMP_CACHE_BYTES; |
| 1359 | } |
| 1360 | |
Mike Rapoport | 92d12f9 | 2019-03-11 23:29:41 -0700 | [diff] [blame] | 1361 | again: |
Tony Luck | fc6daaf | 2015-06-24 16:58:09 -0700 | [diff] [blame] | 1362 | found = memblock_find_in_range_node(size, align, start, end, nid, |
| 1363 | flags); |
Mike Rapoport | 92d12f9 | 2019-03-11 23:29:41 -0700 | [diff] [blame] | 1364 | if (found && !memblock_reserve(found, size)) |
| 1365 | goto done; |
| 1366 | |
Yunfeng Ye | 0ac398b | 2019-11-30 17:56:27 -0800 | [diff] [blame] | 1367 | if (nid != NUMA_NO_NODE && !exact_nid) { |
Mike Rapoport | 92d12f9 | 2019-03-11 23:29:41 -0700 | [diff] [blame] | 1368 | found = memblock_find_in_range_node(size, align, start, |
| 1369 | end, NUMA_NO_NODE, |
| 1370 | flags); |
| 1371 | if (found && !memblock_reserve(found, size)) |
| 1372 | goto done; |
| 1373 | } |
| 1374 | |
| 1375 | if (flags & MEMBLOCK_MIRROR) { |
| 1376 | flags &= ~MEMBLOCK_MIRROR; |
| 1377 | pr_warn("Could not allocate %pap bytes of mirrored memory\n", |
| 1378 | &size); |
| 1379 | goto again; |
| 1380 | } |
| 1381 | |
| 1382 | return 0; |
| 1383 | |
| 1384 | done: |
| 1385 | /* Skip kmemleak for kasan_init() due to high volume. */ |
| 1386 | if (end != MEMBLOCK_ALLOC_KASAN) |
Catalin Marinas | aedf95e | 2014-06-06 14:38:20 -0700 | [diff] [blame] | 1387 | /* |
Mike Rapoport | 92d12f9 | 2019-03-11 23:29:41 -0700 | [diff] [blame] | 1388 | * The min_count is set to 0 so that memblock allocated |
| 1389 | * blocks are never reported as leaks. This is because many |
| 1390 | * of these blocks are only referred via the physical |
| 1391 | * address which is not looked up by kmemleak. |
Catalin Marinas | aedf95e | 2014-06-06 14:38:20 -0700 | [diff] [blame] | 1392 | */ |
Catalin Marinas | 9099dae | 2016-10-11 13:55:11 -0700 | [diff] [blame] | 1393 | kmemleak_alloc_phys(found, size, 0, 0); |
Mike Rapoport | 92d12f9 | 2019-03-11 23:29:41 -0700 | [diff] [blame] | 1394 | |
| 1395 | return found; |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 1396 | } |
| 1397 | |
Mike Rapoport | a297413 | 2019-03-11 23:30:54 -0700 | [diff] [blame] | 1398 | /** |
| 1399 | * memblock_phys_alloc_range - allocate a memory block inside specified range |
| 1400 | * @size: size of memory block to be allocated in bytes |
| 1401 | * @align: alignment of the region and block's size |
| 1402 | * @start: the lower bound of the memory region to allocate (physical address) |
| 1403 | * @end: the upper bound of the memory region to allocate (physical address) |
| 1404 | * |
| 1405 | * Allocate @size bytes in the range between @start and @end. |
| 1406 | * |
| 1407 | * Return: physical address of the allocated memory block on success, |
| 1408 | * %0 on failure. |
| 1409 | */ |
Mike Rapoport | 8a770c2 | 2019-03-11 23:29:16 -0700 | [diff] [blame] | 1410 | phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size, |
| 1411 | phys_addr_t align, |
| 1412 | phys_addr_t start, |
| 1413 | phys_addr_t end) |
Akinobu Mita | 2bfc286 | 2014-06-04 16:06:53 -0700 | [diff] [blame] | 1414 | { |
Faiyaz Mohammed | b5cf2d6 | 2020-11-16 10:14:04 +0530 | [diff] [blame] | 1415 | memblock_dbg("%s: %llu bytes align=0x%llx from=%pa max_addr=%pa %pS\n", |
| 1416 | __func__, (u64)size, (u64)align, &start, &end, |
| 1417 | (void *)_RET_IP_); |
Yunfeng Ye | 0ac398b | 2019-11-30 17:56:27 -0800 | [diff] [blame] | 1418 | return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE, |
| 1419 | false); |
Tejun Heo | 7bd0b0f | 2011-12-08 10:22:09 -0800 | [diff] [blame] | 1420 | } |
| 1421 | |
Mike Rapoport | a297413 | 2019-03-11 23:30:54 -0700 | [diff] [blame] | 1422 | /** |
Levi Yun | 17cbe03 | 2021-01-20 21:28:18 +0900 | [diff] [blame] | 1423 | * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node |
Mike Rapoport | a297413 | 2019-03-11 23:30:54 -0700 | [diff] [blame] | 1424 | * @size: size of memory block to be allocated in bytes |
| 1425 | * @align: alignment of the region and block's size |
| 1426 | * @nid: nid of the free area to find, %NUMA_NO_NODE for any node |
| 1427 | * |
| 1428 | * Allocates a memory block from the specified NUMA node. If the node |
| 1429 | * has no available memory, the allocation falls back to any node in |
| 1430 | * the system. |
| 1431 | * |
| 1432 | * Return: physical address of the allocated memory block on success, |
| 1433 | * %0 on failure. |
| 1434 | */ |
Mike Rapoport | 9a8dd70 | 2018-10-30 15:07:59 -0700 | [diff] [blame] | 1435 | phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid) |
Benjamin Herrenschmidt | 9d1e249 | 2010-07-06 15:39:17 -0700 | [diff] [blame] | 1436 | { |
Mike Rapoport | 3375557 | 2019-03-11 23:29:21 -0700 | [diff] [blame] | 1437 | return memblock_alloc_range_nid(size, align, 0, |
Yunfeng Ye | 0ac398b | 2019-11-30 17:56:27 -0800 | [diff] [blame] | 1438 | MEMBLOCK_ALLOC_ACCESSIBLE, nid, false); |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 1439 | } |
| 1440 | |
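| | /* |
| |  * Example (illustrative sketch) for the two physical allocators |
| |  * above: a node-local table, and a buffer that must live below 4 GiB |
| |  * for a device with 32-bit addressing.  The sizes (<linux/sizes.h>) |
| |  * and the device constraint are hypothetical: |
| |  * |
| |  *	phys_addr_t table = memblock_phys_alloc_try_nid(SZ_64K, SZ_4K, nid); |
| |  *	phys_addr_t buf = memblock_phys_alloc_range(SZ_1M, SZ_1M, 0, SZ_4G); |
| |  */ |
| | |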
Santosh Shilimkar | 26f09e9 | 2014-01-21 15:50:19 -0800 | [diff] [blame] | 1441 | /** |
Mike Rapoport | eb31d55 | 2018-10-30 15:08:04 -0700 | [diff] [blame] | 1442 | * memblock_alloc_internal - allocate boot memory block |
Santosh Shilimkar | 26f09e9 | 2014-01-21 15:50:19 -0800 | [diff] [blame] | 1443 | * @size: size of memory block to be allocated in bytes |
| 1444 | * @align: alignment of the region and block's size |
| 1445 | * @min_addr: the lower bound of the memory region to allocate (phys address) |
| 1446 | * @max_addr: the upper bound of the memory region to allocate (phys address) |
| 1447 | * @nid: nid of the free area to find, %NUMA_NO_NODE for any node |
Yunfeng Ye | 0ac398b | 2019-11-30 17:56:27 -0800 | [diff] [blame] | 1448 | * @exact_nid: controls whether the allocation may fall back to other nodes |
Santosh Shilimkar | 26f09e9 | 2014-01-21 15:50:19 -0800 | [diff] [blame] | 1449 | * |
Mike Rapoport | 92d12f9 | 2019-03-11 23:29:41 -0700 | [diff] [blame] | 1450 | * Allocates memory block using memblock_alloc_range_nid() and |
| 1451 | * converts the returned physical address to virtual. |
| 1452 | * |
Santosh Shilimkar | 26f09e9 | 2014-01-21 15:50:19 -0800 | [diff] [blame] | 1453 | * The @min_addr limit is dropped if it cannot be satisfied and the allocation |
Mike Rapoport | 92d12f9 | 2019-03-11 23:29:41 -0700 | [diff] [blame] | 1454 | * will fall back to memory below @min_addr. Other constraints, such |
| 1455 | * as node and mirrored memory, will be handled again in |
| 1456 | * memblock_alloc_range_nid(). |
Santosh Shilimkar | 26f09e9 | 2014-01-21 15:50:19 -0800 | [diff] [blame] | 1457 | * |
Mike Rapoport | 47cec44 | 2018-06-30 17:55:02 +0300 | [diff] [blame] | 1458 | * Return: |
Santosh Shilimkar | 26f09e9 | 2014-01-21 15:50:19 -0800 | [diff] [blame] | 1459 | * Virtual address of allocated memory block on success, NULL on failure. |
| 1460 | */ |
Mike Rapoport | eb31d55 | 2018-10-30 15:08:04 -0700 | [diff] [blame] | 1461 | static void * __init memblock_alloc_internal( |
Santosh Shilimkar | 26f09e9 | 2014-01-21 15:50:19 -0800 | [diff] [blame] | 1462 | phys_addr_t size, phys_addr_t align, |
| 1463 | phys_addr_t min_addr, phys_addr_t max_addr, |
Yunfeng Ye | 0ac398b | 2019-11-30 17:56:27 -0800 | [diff] [blame] | 1464 | int nid, bool exact_nid) |
Santosh Shilimkar | 26f09e9 | 2014-01-21 15:50:19 -0800 | [diff] [blame] | 1465 | { |
| 1466 | phys_addr_t alloc; |
Santosh Shilimkar | 26f09e9 | 2014-01-21 15:50:19 -0800 | [diff] [blame] | 1467 | |
| 1468 | /* |
| 1469 | * Detect any accidental use of these APIs after slab is ready; by then |
| 1470 | * memblock may already be deinitialized and its |
Mike Rapoport | c6ffc5c | 2018-10-30 15:09:30 -0700 | [diff] [blame] | 1471 | * internal data may have been destroyed (after memblock_free_all() has run). |
Santosh Shilimkar | 26f09e9 | 2014-01-21 15:50:19 -0800 | [diff] [blame] | 1472 | */ |
| 1473 | if (WARN_ON_ONCE(slab_is_available())) |
| 1474 | return kzalloc_node(size, GFP_NOWAIT, nid); |
| 1475 | |
Mike Rapoport | f3057ad | 2019-10-18 20:20:01 -0700 | [diff] [blame] | 1476 | if (max_addr > memblock.current_limit) |
| 1477 | max_addr = memblock.current_limit; |
| 1478 | |
Yunfeng Ye | 0ac398b | 2019-11-30 17:56:27 -0800 | [diff] [blame] | 1479 | alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid, |
| 1480 | exact_nid); |
Mike Rapoport | 2f77080 | 2018-10-30 15:10:01 -0700 | [diff] [blame] | 1481 | |
Mike Rapoport | 92d12f9 | 2019-03-11 23:29:41 -0700 | [diff] [blame] | 1482 | /* retry allocation without lower limit */ |
| 1483 | if (!alloc && min_addr) |
Yunfeng Ye | 0ac398b | 2019-11-30 17:56:27 -0800 | [diff] [blame] | 1484 | alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid, |
| 1485 | exact_nid); |
Santosh Shilimkar | 26f09e9 | 2014-01-21 15:50:19 -0800 | [diff] [blame] | 1486 | |
Mike Rapoport | 92d12f9 | 2019-03-11 23:29:41 -0700 | [diff] [blame] | 1487 | if (!alloc) |
| 1488 | return NULL; |
Santosh Shilimkar | 26f09e9 | 2014-01-21 15:50:19 -0800 | [diff] [blame] | 1489 | |
Mike Rapoport | 92d12f9 | 2019-03-11 23:29:41 -0700 | [diff] [blame] | 1490 | return phys_to_virt(alloc); |
Santosh Shilimkar | 26f09e9 | 2014-01-21 15:50:19 -0800 | [diff] [blame] | 1491 | } |
| 1492 | |
| 1493 | /** |
Yunfeng Ye | 0ac398b | 2019-11-30 17:56:27 -0800 | [diff] [blame] | 1494 | * memblock_alloc_exact_nid_raw - allocate boot memory block on the exact node |
| 1495 | * without zeroing memory |
| 1496 | * @size: size of memory block to be allocated in bytes |
| 1497 | * @align: alignment of the region and block's size |
| 1498 | * @min_addr: the lower bound of the memory region from where the allocation |
| 1499 | * is preferred (phys address) |
| 1500 | * @max_addr: the upper bound of the memory region from where the allocation |
| 1501 | * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to |
| 1502 | * allocate only from memory limited by memblock.current_limit value |
| 1503 | * @nid: nid of the free area to find, %NUMA_NO_NODE for any node |
| 1504 | * |
| 1505 | * Public function, provides additional debug information (including caller |
| 1506 | * info), if enabled. Does not zero allocated memory. |
| 1507 | * |
| 1508 | * Return: |
| 1509 | * Virtual address of allocated memory block on success, NULL on failure. |
| 1510 | */ |
| 1511 | void * __init memblock_alloc_exact_nid_raw( |
| 1512 | phys_addr_t size, phys_addr_t align, |
| 1513 | phys_addr_t min_addr, phys_addr_t max_addr, |
| 1514 | int nid) |
| 1515 | { |
Yunfeng Ye | 0ac398b | 2019-11-30 17:56:27 -0800 | [diff] [blame] | 1516 | memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n", |
| 1517 | __func__, (u64)size, (u64)align, nid, &min_addr, |
| 1518 | &max_addr, (void *)_RET_IP_); |
| 1519 | |
Mike Rapoport | 0867880 | 2021-09-02 14:58:05 -0700 | [diff] [blame] | 1520 | return memblock_alloc_internal(size, align, min_addr, max_addr, nid, |
| 1521 | true); |
Yunfeng Ye | 0ac398b | 2019-11-30 17:56:27 -0800 | [diff] [blame] | 1522 | } |
| 1523 | |
| 1524 | /** |
Mike Rapoport | eb31d55 | 2018-10-30 15:08:04 -0700 | [diff] [blame] | 1525 | * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing |
Pavel Tatashin | ea1f5f3 | 2017-11-15 17:36:27 -0800 | [diff] [blame] | 1526 | * memory and without panicking |
| 1527 | * @size: size of memory block to be allocated in bytes |
| 1528 | * @align: alignment of the region and block's size |
| 1529 | * @min_addr: the lower bound of the memory region from where the allocation |
| 1530 | * is preferred (phys address) |
| 1531 | * @max_addr: the upper bound of the memory region from where the allocation |
Mike Rapoport | 97ad108 | 2018-10-30 15:09:44 -0700 | [diff] [blame] | 1532 | * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to |
Pavel Tatashin | ea1f5f3 | 2017-11-15 17:36:27 -0800 | [diff] [blame] | 1533 | * allocate only from memory limited by memblock.current_limit value |
| 1534 | * @nid: nid of the free area to find, %NUMA_NO_NODE for any node |
| 1535 | * |
| 1536 | * Public function, provides additional debug information (including caller |
| 1537 | * info), if enabled. Does not zero allocated memory, does not panic if request |
| 1538 | * cannot be satisfied. |
| 1539 | * |
Mike Rapoport | 47cec44 | 2018-06-30 17:55:02 +0300 | [diff] [blame] | 1540 | * Return: |
Pavel Tatashin | ea1f5f3 | 2017-11-15 17:36:27 -0800 | [diff] [blame] | 1541 | * Virtual address of allocated memory block on success, NULL on failure. |
| 1542 | */ |
Mike Rapoport | eb31d55 | 2018-10-30 15:08:04 -0700 | [diff] [blame] | 1543 | void * __init memblock_alloc_try_nid_raw( |
Pavel Tatashin | ea1f5f3 | 2017-11-15 17:36:27 -0800 | [diff] [blame] | 1544 | phys_addr_t size, phys_addr_t align, |
| 1545 | phys_addr_t min_addr, phys_addr_t max_addr, |
| 1546 | int nid) |
| 1547 | { |
Sakari Ailus | d75f773 | 2019-03-25 21:32:28 +0200 | [diff] [blame] | 1548 | memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n", |
Mike Rapoport | a36aab8 | 2018-08-17 15:47:17 -0700 | [diff] [blame] | 1549 | __func__, (u64)size, (u64)align, nid, &min_addr, |
| 1550 | &max_addr, (void *)_RET_IP_); |
Pavel Tatashin | ea1f5f3 | 2017-11-15 17:36:27 -0800 | [diff] [blame] | 1551 | |
Mike Rapoport | 0867880 | 2021-09-02 14:58:05 -0700 | [diff] [blame] | 1552 | return memblock_alloc_internal(size, align, min_addr, max_addr, nid, |
| 1553 | false); |
Pavel Tatashin | ea1f5f3 | 2017-11-15 17:36:27 -0800 | [diff] [blame] | 1554 | } |
| 1555 | |
| 1556 | /** |
Mike Rapoport | c0dbe82 | 2019-03-11 23:30:37 -0700 | [diff] [blame] | 1557 | * memblock_alloc_try_nid - allocate boot memory block |
Santosh Shilimkar | 26f09e9 | 2014-01-21 15:50:19 -0800 | [diff] [blame] | 1558 | * @size: size of memory block to be allocated in bytes |
| 1559 | * @align: alignment of the region and block's size |
| 1560 | * @min_addr: the lower bound of the memory region from where the allocation |
| 1561 | * is preferred (phys address) |
| 1562 | * @max_addr: the upper bound of the memory region from where the allocation |
Mike Rapoport | 97ad108 | 2018-10-30 15:09:44 -0700 | [diff] [blame] | 1563 | * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to |
Santosh Shilimkar | 26f09e9 | 2014-01-21 15:50:19 -0800 | [diff] [blame] | 1564 | * allocate only from memory limited by memblock.current_limit value |
| 1565 | * @nid: nid of the free area to find, %NUMA_NO_NODE for any node |
| 1566 | * |
Mike Rapoport | c0dbe82 | 2019-03-11 23:30:37 -0700 | [diff] [blame] | 1567 | * Public function, provides additional debug information (including caller |
| 1568 | * info), if enabled. This function zeroes the allocated memory. |
Santosh Shilimkar | 26f09e9 | 2014-01-21 15:50:19 -0800 | [diff] [blame] | 1569 | * |
Mike Rapoport | 47cec44 | 2018-06-30 17:55:02 +0300 | [diff] [blame] | 1570 | * Return: |
Santosh Shilimkar | 26f09e9 | 2014-01-21 15:50:19 -0800 | [diff] [blame] | 1571 | * Virtual address of allocated memory block on success, NULL on failure. |
| 1572 | */ |
Mike Rapoport | eb31d55 | 2018-10-30 15:08:04 -0700 | [diff] [blame] | 1573 | void * __init memblock_alloc_try_nid( |
Santosh Shilimkar | 26f09e9 | 2014-01-21 15:50:19 -0800 | [diff] [blame] | 1574 | phys_addr_t size, phys_addr_t align, |
| 1575 | phys_addr_t min_addr, phys_addr_t max_addr, |
| 1576 | int nid) |
| 1577 | { |
| 1578 | void *ptr; |
| 1579 | |
Sakari Ailus | d75f773 | 2019-03-25 21:32:28 +0200 | [diff] [blame] | 1580 | memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n", |
Mike Rapoport | a36aab8 | 2018-08-17 15:47:17 -0700 | [diff] [blame] | 1581 | __func__, (u64)size, (u64)align, nid, &min_addr, |
| 1582 | &max_addr, (void *)_RET_IP_); |
Mike Rapoport | eb31d55 | 2018-10-30 15:08:04 -0700 | [diff] [blame] | 1583 | ptr = memblock_alloc_internal(size, align, |
Yunfeng Ye | 0ac398b | 2019-11-30 17:56:27 -0800 | [diff] [blame] | 1584 | min_addr, max_addr, nid, false); |
Mike Rapoport | c0dbe82 | 2019-03-11 23:30:37 -0700 | [diff] [blame] | 1585 | if (ptr) |
Pavel Tatashin | ea1f5f3 | 2017-11-15 17:36:27 -0800 | [diff] [blame] | 1586 | memset(ptr, 0, size); |
Santosh Shilimkar | 26f09e9 | 2014-01-21 15:50:19 -0800 | [diff] [blame] | 1587 | |
Mike Rapoport | c0dbe82 | 2019-03-11 23:30:37 -0700 | [diff] [blame] | 1588 | return ptr; |
Santosh Shilimkar | 26f09e9 | 2014-01-21 15:50:19 -0800 | [diff] [blame] | 1589 | } |
| 1590 | |
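| | /* |
| |  * Example (illustrative sketch): most early callers reach this |
| |  * function through the memblock_alloc() wrapper in the header, which |
| |  * passes NUMA_NO_NODE and the full accessible range.  struct my_table |
| |  * is a hypothetical caller-side type; the memory comes back zeroed: |
| |  * |
| |  *	struct my_table *t = memblock_alloc(sizeof(*t), SMP_CACHE_BYTES); |
| |  * |
| |  *	if (!t) |
| |  *		panic("%s: failed to allocate table\n", __func__); |
| |  */ |
| | |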
| 1591 | /** |
Mike Rapoport | 621d973 | 2021-11-05 13:43:16 -0700 | [diff] [blame] | 1592 | * memblock_free_late - free pages directly to buddy allocator |
Mike Rapoport | 48a833c | 2018-06-30 17:55:03 +0300 | [diff] [blame] | 1593 | * @base: phys starting address of the boot memory block |
Santosh Shilimkar | 26f09e9 | 2014-01-21 15:50:19 -0800 | [diff] [blame] | 1594 | * @size: size of the boot memory block in bytes |
| 1595 | * |
Mike Rapoport | a297413 | 2019-03-11 23:30:54 -0700 | [diff] [blame] | 1596 | * This is only useful when the memblock allocator has already been torn |
Santosh Shilimkar | 26f09e9 | 2014-01-21 15:50:19 -0800 | [diff] [blame] | 1597 | * down, but we are still initializing the system. Pages are released directly |
Mike Rapoport | a297413 | 2019-03-11 23:30:54 -0700 | [diff] [blame] | 1598 | * to the buddy allocator. |
Santosh Shilimkar | 26f09e9 | 2014-01-21 15:50:19 -0800 | [diff] [blame] | 1599 | */ |
Mike Rapoport | 621d973 | 2021-11-05 13:43:16 -0700 | [diff] [blame] | 1600 | void __init memblock_free_late(phys_addr_t base, phys_addr_t size) |
Santosh Shilimkar | 26f09e9 | 2014-01-21 15:50:19 -0800 | [diff] [blame] | 1601 | { |
Mike Rapoport | a36aab8 | 2018-08-17 15:47:17 -0700 | [diff] [blame] | 1602 | phys_addr_t cursor, end; |
Santosh Shilimkar | 26f09e9 | 2014-01-21 15:50:19 -0800 | [diff] [blame] | 1603 | |
Mike Rapoport | a36aab8 | 2018-08-17 15:47:17 -0700 | [diff] [blame] | 1604 | end = base + size - 1; |
Sakari Ailus | d75f773 | 2019-03-25 21:32:28 +0200 | [diff] [blame] | 1605 | memblock_dbg("%s: [%pa-%pa] %pS\n", |
Mike Rapoport | a36aab8 | 2018-08-17 15:47:17 -0700 | [diff] [blame] | 1606 | __func__, &base, &end, (void *)_RET_IP_); |
Catalin Marinas | 9099dae | 2016-10-11 13:55:11 -0700 | [diff] [blame] | 1607 | kmemleak_free_part_phys(base, size); |
Santosh Shilimkar | 26f09e9 | 2014-01-21 15:50:19 -0800 | [diff] [blame] | 1608 | cursor = PFN_UP(base); |
| 1609 | end = PFN_DOWN(base + size); |
| 1610 | |
| 1611 | for (; cursor < end; cursor++) { |
Mike Rapoport | 7c2ee34 | 2018-10-30 15:09:36 -0700 | [diff] [blame] | 1612 | memblock_free_pages(pfn_to_page(cursor), cursor, 0); |
Arun KS | ca79b0c | 2018-12-28 00:34:29 -0800 | [diff] [blame] | 1613 | totalram_pages_inc(); |
Santosh Shilimkar | 26f09e9 | 2014-01-21 15:50:19 -0800 | [diff] [blame] | 1614 | } |
| 1615 | } |
Benjamin Herrenschmidt | 9d1e249 | 2010-07-06 15:39:17 -0700 | [diff] [blame] | 1616 | |
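| | /* |
| |  * Example (illustrative sketch): code that kept a boot-time |
| |  * reservation past memblock_free_all(), e.g. an unneeded crash-kernel |
| |  * area, can hand it to the buddy allocator; base and size are |
| |  * hypothetical: |
| |  * |
| |  *	memblock_free_late(base, size); |
| |  */ |
| | |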
| 1617 | /* |
| 1618 | * Remaining API functions |
| 1619 | */ |
| 1620 | |
David Gibson | 1f1ffb8a | 2016-02-05 15:36:19 -0800 | [diff] [blame] | 1621 | phys_addr_t __init_memblock memblock_phys_mem_size(void) |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 1622 | { |
Tejun Heo | 1440c4e | 2011-12-08 10:22:08 -0800 | [diff] [blame] | 1623 | return memblock.memory.total_size; |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 1624 | } |
| 1625 | |
Srikar Dronamraju | 8907de5 | 2016-10-07 16:59:18 -0700 | [diff] [blame] | 1626 | phys_addr_t __init_memblock memblock_reserved_size(void) |
| 1627 | { |
| 1628 | return memblock.reserved.total_size; |
| 1629 | } |
| 1630 | |
Sam Ravnborg | 0a93ebe | 2011-10-31 17:08:16 -0700 | [diff] [blame] | 1631 | /* lowest address */ |
| 1632 | phys_addr_t __init_memblock memblock_start_of_DRAM(void) |
| 1633 | { |
| 1634 | return memblock.memory.regions[0].base; |
| 1635 | } |
| 1636 | |
Yinghai Lu | 10d0643 | 2010-07-28 15:43:02 +1000 | [diff] [blame] | 1637 | phys_addr_t __init_memblock memblock_end_of_DRAM(void) |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 1638 | { |
| 1639 | int idx = memblock.memory.cnt - 1; |
| 1640 | |
Benjamin Herrenschmidt | e3239ff | 2010-08-04 14:06:41 +1000 | [diff] [blame] | 1641 | return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size); |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 1642 | } |
| 1643 | |
Dennis Chen | a571d4e | 2016-07-28 15:48:26 -0700 | [diff] [blame] | 1644 | static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit) |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 1645 | { |
Stefan Agner | 1c4bc43 | 2018-06-07 17:06:15 -0700 | [diff] [blame] | 1646 | phys_addr_t max_addr = PHYS_ADDR_MAX; |
Emil Medve | 136199f | 2014-04-07 15:37:52 -0700 | [diff] [blame] | 1647 | struct memblock_region *r; |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 1648 | |
Dennis Chen | a571d4e | 2016-07-28 15:48:26 -0700 | [diff] [blame] | 1649 | /* |
| 1650 | * Translate the memory @limit size into the max address within one of |
| 1651 | * the memory memblock regions. If @limit exceeds the total size of |
Stefan Agner | 1c4bc43 | 2018-06-07 17:06:15 -0700 | [diff] [blame] | 1652 | * those regions, max_addr keeps its original value of PHYS_ADDR_MAX. |
Dennis Chen | a571d4e | 2016-07-28 15:48:26 -0700 | [diff] [blame] | 1653 | */ |
Mike Rapoport | cc6de16 | 2020-10-13 16:58:30 -0700 | [diff] [blame] | 1654 | for_each_mem_region(r) { |
Tejun Heo | c0ce8fe | 2011-12-08 10:22:07 -0800 | [diff] [blame] | 1655 | if (limit <= r->size) { |
| 1656 | max_addr = r->base + limit; |
| 1657 | break; |
| 1658 | } |
| 1659 | limit -= r->size; |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 1660 | } |
| 1661 | |
Dennis Chen | a571d4e | 2016-07-28 15:48:26 -0700 | [diff] [blame] | 1662 | return max_addr; |
| 1663 | } |
| 1664 | |
| 1665 | void __init memblock_enforce_memory_limit(phys_addr_t limit) |
| 1666 | { |
Colin Ian King | 49aef71 | 2020-04-01 21:11:01 -0700 | [diff] [blame] | 1667 | phys_addr_t max_addr; |
Dennis Chen | a571d4e | 2016-07-28 15:48:26 -0700 | [diff] [blame] | 1668 | |
| 1669 | if (!limit) |
| 1670 | return; |
| 1671 | |
| 1672 | max_addr = __find_max_addr(limit); |
| 1673 | |
| 1674 | /* @limit exceeds the total size of the memory, do nothing */ |
Stefan Agner | 1c4bc43 | 2018-06-07 17:06:15 -0700 | [diff] [blame] | 1675 | if (max_addr == PHYS_ADDR_MAX) |
Dennis Chen | a571d4e | 2016-07-28 15:48:26 -0700 | [diff] [blame] | 1676 | return; |
| 1677 | |
Tejun Heo | c0ce8fe | 2011-12-08 10:22:07 -0800 | [diff] [blame] | 1678 | /* truncate both memory and reserved regions */ |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1679 | memblock_remove_range(&memblock.memory, max_addr, |
Stefan Agner | 1c4bc43 | 2018-06-07 17:06:15 -0700 | [diff] [blame] | 1680 | PHYS_ADDR_MAX); |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1681 | memblock_remove_range(&memblock.reserved, max_addr, |
Stefan Agner | 1c4bc43 | 2018-06-07 17:06:15 -0700 | [diff] [blame] | 1682 | PHYS_ADDR_MAX); |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 1683 | } |
| 1684 | |
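| | /* |
| |  * Worked example (hypothetical layout) for __find_max_addr(): given |
| |  * two 512M regions at 0x40000000 and 0x80000000 and limit = 768M, the |
| |  * loop first consumes region 0 (limit becomes 256M), then finds |
| |  * 256M <= 512M in region 1, so max_addr = 0x80000000 + 256M = |
| |  * 0x90000000.  Everything above that address is then truncated from |
| |  * both memory and reserved. |
| |  */ |
| | |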
AKASHI Takahiro | c9ca9b4 | 2017-04-03 11:23:55 +0900 | [diff] [blame] | 1685 | void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size) |
| 1686 | { |
| 1687 | int start_rgn, end_rgn; |
| 1688 | int i, ret; |
| 1689 | |
| 1690 | if (!size) |
| 1691 | return; |
| 1692 | |
Peng Fan | 5173ed7 | 2021-10-18 15:15:45 -0700 | [diff] [blame] | 1693 | if (!memblock_memory->total_size) { |
Geert Uytterhoeven | e888fa7 | 2021-08-11 10:55:18 +0200 | [diff] [blame] | 1694 | pr_warn("%s: No memory registered yet\n", __func__); |
| 1695 | return; |
| 1696 | } |
| 1697 | |
AKASHI Takahiro | c9ca9b4 | 2017-04-03 11:23:55 +0900 | [diff] [blame] | 1698 | ret = memblock_isolate_range(&memblock.memory, base, size, |
| 1699 | &start_rgn, &end_rgn); |
| 1700 | if (ret) |
| 1701 | return; |
| 1702 | |
| 1703 | /* remove all the mapped (non-NOMAP) regions outside the capped range */ |
| 1704 | for (i = memblock.memory.cnt - 1; i >= end_rgn; i--) |
| 1705 | if (!memblock_is_nomap(&memblock.memory.regions[i])) |
| 1706 | memblock_remove_region(&memblock.memory, i); |
| 1707 | |
| 1708 | for (i = start_rgn - 1; i >= 0; i--) |
| 1709 | if (!memblock_is_nomap(&memblock.memory.regions[i])) |
| 1710 | memblock_remove_region(&memblock.memory, i); |
| 1711 | |
| 1712 | /* truncate the reserved regions */ |
| 1713 | memblock_remove_range(&memblock.reserved, 0, base); |
| 1714 | memblock_remove_range(&memblock.reserved, |
Stefan Agner | 1c4bc43 | 2018-06-07 17:06:15 -0700 | [diff] [blame] | 1715 | base + size, PHYS_ADDR_MAX); |
AKASHI Takahiro | c9ca9b4 | 2017-04-03 11:23:55 +0900 | [diff] [blame] | 1716 | } |
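/*
 * Usage sketch (hypothetical addresses): a crash-capture kernel that may
 * only touch the memory window handed over by the panicked kernel could
 * cap the memory map to that window. NOMAP regions survive the cut, so
 * firmware-reserved ranges stay visible.
 */
#if 0	/* illustrative only */
	/* keep only [0x80000000, 0x80000000 + 256M) as usable RAM */
	memblock_cap_memory_range(0x80000000ULL, SZ_256M);
#endif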
| 1717 | |
Dennis Chen | a571d4e | 2016-07-28 15:48:26 -0700 | [diff] [blame] | 1718 | void __init memblock_mem_limit_remove_map(phys_addr_t limit) |
| 1719 | { |
Dennis Chen | a571d4e | 2016-07-28 15:48:26 -0700 | [diff] [blame] | 1720 | phys_addr_t max_addr; |
Dennis Chen | a571d4e | 2016-07-28 15:48:26 -0700 | [diff] [blame] | 1721 | |
| 1722 | if (!limit) |
| 1723 | return; |
| 1724 | |
| 1725 | max_addr = __find_max_addr(limit); |
| 1726 | |
| 1727 | /* @limit exceeds the total size of the memory, do nothing */ |
Stefan Agner | 1c4bc43 | 2018-06-07 17:06:15 -0700 | [diff] [blame] | 1728 | if (max_addr == PHYS_ADDR_MAX) |
Dennis Chen | a571d4e | 2016-07-28 15:48:26 -0700 | [diff] [blame] | 1729 | return; |
| 1730 | |
AKASHI Takahiro | c9ca9b4 | 2017-04-03 11:23:55 +0900 | [diff] [blame] | 1731 | memblock_cap_memory_range(0, max_addr); |
Dennis Chen | a571d4e | 2016-07-28 15:48:26 -0700 | [diff] [blame] | 1732 | } |
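/*
 * Unlike memblock_enforce_memory_limit(), which truncates both the memory
 * and reserved types unconditionally, the variant above goes through
 * memblock_cap_memory_range() and therefore keeps NOMAP regions beyond
 * the limit in the memory map.
 */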
| 1733 | |
Yinghai Lu | cd79481 | 2010-10-11 12:34:09 -0700 | [diff] [blame] | 1734 | static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr) |
Benjamin Herrenschmidt | 72d4b0b | 2010-08-04 14:38:47 +1000 | [diff] [blame] | 1735 | { |
| 1736 | unsigned int left = 0, right = type->cnt; |
| 1737 | |
| 1738 | do { |
| 1739 | unsigned int mid = (right + left) / 2; |
| 1740 | |
| 1741 | if (addr < type->regions[mid].base) |
| 1742 | right = mid; |
| 1743 | else if (addr >= (type->regions[mid].base + |
| 1744 | type->regions[mid].size)) |
| 1745 | left = mid + 1; |
| 1746 | else |
| 1747 | return mid; |
| 1748 | } while (left < right); |
| 1749 | return -1; |
| 1750 | } |
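/*
 * Worked example for memblock_search(), assuming a type with two sorted,
 * non-overlapping regions [0x1000, 0x2000) and [0x4000, 0x6000):
 *
 *   addr = 0x1800 -> index 0  (inside the first region)
 *   addr = 0x3000 -> -1       (falls in the hole between the regions)
 *   addr = 0x5fff -> index 1  (last byte of the second region)
 *   addr = 0x6000 -> -1       (region ends are exclusive)
 */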
| 1751 | |
Yueyi Li | f5a222d | 2018-12-14 14:17:06 -0800 | [diff] [blame] | 1752 | bool __init_memblock memblock_is_reserved(phys_addr_t addr) |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 1753 | { |
Benjamin Herrenschmidt | 72d4b0b | 2010-08-04 14:38:47 +1000 | [diff] [blame] | 1754 | return memblock_search(&memblock.reserved, addr) != -1; |
| 1755 | } |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 1756 | |
Yaowei Bai | b4ad0c7 | 2016-01-14 15:18:54 -0800 | [diff] [blame] | 1757 | bool __init_memblock memblock_is_memory(phys_addr_t addr) |
Benjamin Herrenschmidt | 72d4b0b | 2010-08-04 14:38:47 +1000 | [diff] [blame] | 1758 | { |
| 1759 | return memblock_search(&memblock.memory, addr) != -1; |
| 1760 | } |
| 1761 | |
Yaowei Bai | 937f0c2 | 2018-02-06 15:41:18 -0800 | [diff] [blame] | 1762 | bool __init_memblock memblock_is_map_memory(phys_addr_t addr) |
Ard Biesheuvel | bf3d3cc | 2015-11-30 13:28:15 +0100 | [diff] [blame] | 1763 | { |
| 1764 | int i = memblock_search(&memblock.memory, addr); |
| 1765 | |
| 1766 | if (i == -1) |
| 1767 | return false; |
| 1768 | return !memblock_is_nomap(&memblock.memory.regions[i]); |
| 1769 | } |
| 1770 | |
Yinghai Lu | e76b63f | 2013-09-11 14:22:17 -0700 | [diff] [blame] | 1771 | int __init_memblock memblock_search_pfn_nid(unsigned long pfn, |
| 1772 | unsigned long *start_pfn, unsigned long *end_pfn) |
| 1773 | { |
| 1774 | struct memblock_type *type = &memblock.memory; |
Fabian Frederick | 1676323 | 2014-04-07 15:37:53 -0700 | [diff] [blame] | 1775 | int mid = memblock_search(type, PFN_PHYS(pfn)); |
Yinghai Lu | e76b63f | 2013-09-11 14:22:17 -0700 | [diff] [blame] | 1776 | |
| 1777 | if (mid == -1) |
| 1778 | return -1; |
| 1779 | |
Fabian Frederick | f7e2f7e | 2014-06-04 16:07:51 -0700 | [diff] [blame] | 1780 | *start_pfn = PFN_DOWN(type->regions[mid].base); |
| 1781 | *end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size); |
Yinghai Lu | e76b63f | 2013-09-11 14:22:17 -0700 | [diff] [blame] | 1782 | |
Mike Rapoport | d622abf | 2020-06-03 15:56:53 -0700 | [diff] [blame] | 1783 | return memblock_get_region_node(&type->regions[mid]); |
Yinghai Lu | e76b63f | 2013-09-11 14:22:17 -0700 | [diff] [blame] | 1784 | } |
Yinghai Lu | e76b63f | 2013-09-11 14:22:17 -0700 | [diff] [blame] | 1785 | |
Stephen Boyd | eab3094 | 2012-05-24 00:45:21 -0700 | [diff] [blame] | 1786 | /** |
| 1787 | * memblock_is_region_memory - check if a region is a subset of memory |
| 1788 | * @base: base of region to check |
| 1789 | * @size: size of region to check |
| 1790 | * |
Mike Rapoport | 47cec44 | 2018-06-30 17:55:02 +0300 | [diff] [blame] | 1791 | * Check if the region [@base, @base + @size) is a subset of a memory block. |
Stephen Boyd | eab3094 | 2012-05-24 00:45:21 -0700 | [diff] [blame] | 1792 | * |
Mike Rapoport | 47cec44 | 2018-06-30 17:55:02 +0300 | [diff] [blame] | 1793 | * Return: |
Stephen Boyd | eab3094 | 2012-05-24 00:45:21 -0700 | [diff] [blame] | 1794 | * true if the region is a subset of a memory block, false otherwise
| 1795 | */ |
Yaowei Bai | 937f0c2 | 2018-02-06 15:41:18 -0800 | [diff] [blame] | 1796 | bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size) |
Benjamin Herrenschmidt | 72d4b0b | 2010-08-04 14:38:47 +1000 | [diff] [blame] | 1797 | { |
Tomi Valkeinen | abb6527 | 2011-01-20 14:44:20 -0800 | [diff] [blame] | 1798 | int idx = memblock_search(&memblock.memory, base); |
Tejun Heo | eb18f1b | 2011-12-08 10:22:07 -0800 | [diff] [blame] | 1799 | phys_addr_t end = base + memblock_cap_size(base, &size); |
Benjamin Herrenschmidt | 72d4b0b | 2010-08-04 14:38:47 +1000 | [diff] [blame] | 1800 | |
| 1801 | if (idx == -1) |
Yaowei Bai | 937f0c2 | 2018-02-06 15:41:18 -0800 | [diff] [blame] | 1802 | return false; |
Wei Yang | ef415ef | 2017-02-22 15:45:04 -0800 | [diff] [blame] | 1803 | return (memblock.memory.regions[idx].base + |
Tejun Heo | eb18f1b | 2011-12-08 10:22:07 -0800 | [diff] [blame] | 1804 | memblock.memory.regions[idx].size) >= end; |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 1805 | } |
| 1806 | |
Stephen Boyd | eab3094 | 2012-05-24 00:45:21 -0700 | [diff] [blame] | 1807 | /** |
| 1808 | * memblock_is_region_reserved - check if a region intersects reserved memory |
| 1809 | * @base: base of region to check |
| 1810 | * @size: size of region to check |
| 1811 | * |
Mike Rapoport | 47cec44 | 2018-06-30 17:55:02 +0300 | [diff] [blame] | 1812 | * Check if the region [@base, @base + @size) intersects a reserved |
| 1813 | * memory block. |
Stephen Boyd | eab3094 | 2012-05-24 00:45:21 -0700 | [diff] [blame] | 1814 | * |
Mike Rapoport | 47cec44 | 2018-06-30 17:55:02 +0300 | [diff] [blame] | 1815 | * Return: |
Tang Chen | c5c5c9d | 2015-09-08 15:02:00 -0700 | [diff] [blame] | 1816 | * True if they intersect, false if not. |
Stephen Boyd | eab3094 | 2012-05-24 00:45:21 -0700 | [diff] [blame] | 1817 | */ |
Tang Chen | c5c5c9d | 2015-09-08 15:02:00 -0700 | [diff] [blame] | 1818 | bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size) |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 1819 | { |
Tang Chen | c5c5c9d | 2015-09-08 15:02:00 -0700 | [diff] [blame] | 1820 | return memblock_overlaps_region(&memblock.reserved, base, size); |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 1821 | } |
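/*
 * Usage sketch: early boot code placing a private buffer often pairs the
 * two predicates above - the range must be backed by RAM and must not
 * clash with anything already reserved. Address and size are hypothetical.
 */
#if 0	/* illustrative only */
	phys_addr_t base = 0x40000000;
	phys_addr_t size = SZ_1M;

	if (memblock_is_region_memory(base, size) &&
	    !memblock_is_region_reserved(base, size))
		memblock_reserve(base, size);
#endif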
| 1822 | |
Yinghai Lu | 6ede1fd | 2012-10-22 16:35:18 -0700 | [diff] [blame] | 1823 | void __init_memblock memblock_trim_memory(phys_addr_t align) |
| 1824 | { |
Yinghai Lu | 6ede1fd | 2012-10-22 16:35:18 -0700 | [diff] [blame] | 1825 | phys_addr_t start, end, orig_start, orig_end; |
Emil Medve | 136199f | 2014-04-07 15:37:52 -0700 | [diff] [blame] | 1826 | struct memblock_region *r; |
Yinghai Lu | 6ede1fd | 2012-10-22 16:35:18 -0700 | [diff] [blame] | 1827 | |
Mike Rapoport | cc6de16 | 2020-10-13 16:58:30 -0700 | [diff] [blame] | 1828 | for_each_mem_region(r) { |
Emil Medve | 136199f | 2014-04-07 15:37:52 -0700 | [diff] [blame] | 1829 | orig_start = r->base; |
| 1830 | orig_end = r->base + r->size; |
Yinghai Lu | 6ede1fd | 2012-10-22 16:35:18 -0700 | [diff] [blame] | 1831 | start = round_up(orig_start, align); |
| 1832 | end = round_down(orig_end, align); |
| 1833 | |
| 1834 | if (start == orig_start && end == orig_end) |
| 1835 | continue; |
| 1836 | |
| 1837 | if (start < end) { |
Emil Medve | 136199f | 2014-04-07 15:37:52 -0700 | [diff] [blame] | 1838 | r->base = start; |
| 1839 | r->size = end - start; |
Yinghai Lu | 6ede1fd | 2012-10-22 16:35:18 -0700 | [diff] [blame] | 1840 | } else { |
Emil Medve | 136199f | 2014-04-07 15:37:52 -0700 | [diff] [blame] | 1841 | memblock_remove_region(&memblock.memory, |
| 1842 | r - memblock.memory.regions); |
| 1843 | r--; |
Yinghai Lu | 6ede1fd | 2012-10-22 16:35:18 -0700 | [diff] [blame] | 1844 | } |
| 1845 | } |
| 1846 | } |
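/*
 * Worked example (hypothetical bank): with align = SZ_2M, a region
 * [0x01f00000, 0x05e80000) is trimmed to [0x02000000, 0x05e00000);
 * a region smaller than one aligned chunk is removed entirely.
 */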
Benjamin Herrenschmidt | e63075a | 2010-07-06 15:39:01 -0700 | [diff] [blame] | 1847 | |
Yinghai Lu | 3661ca6 | 2010-09-15 13:05:29 -0700 | [diff] [blame] | 1848 | void __init_memblock memblock_set_current_limit(phys_addr_t limit) |
Benjamin Herrenschmidt | e63075a | 2010-07-06 15:39:01 -0700 | [diff] [blame] | 1849 | { |
| 1850 | memblock.current_limit = limit; |
| 1851 | } |
| 1852 | |
Laura Abbott | fec5101 | 2014-02-27 01:23:43 +0100 | [diff] [blame] | 1853 | phys_addr_t __init_memblock memblock_get_current_limit(void) |
| 1854 | { |
| 1855 | return memblock.current_limit; |
| 1856 | } |
| 1857 | |
Heiko Carstens | 0262d9c | 2017-02-24 14:55:59 -0800 | [diff] [blame] | 1858 | static void __init_memblock memblock_dump(struct memblock_type *type) |
Benjamin Herrenschmidt | 6ed311b | 2010-07-12 14:36:48 +1000 | [diff] [blame] | 1859 | { |
Miles Chen | 5d63f81 | 2017-02-22 15:46:42 -0800 | [diff] [blame] | 1860 | phys_addr_t base, end, size; |
Mike Rapoport | e1720fe | 2018-06-30 17:55:01 +0300 | [diff] [blame] | 1861 | enum memblock_flags flags; |
Alexander Kuleshov | 8c9c170 | 2016-01-14 15:20:42 -0800 | [diff] [blame] | 1862 | int idx; |
| 1863 | struct memblock_region *rgn; |
Benjamin Herrenschmidt | 6ed311b | 2010-07-12 14:36:48 +1000 | [diff] [blame] | 1864 | |
Heiko Carstens | 0262d9c | 2017-02-24 14:55:59 -0800 | [diff] [blame] | 1865 | pr_info(" %s.cnt = 0x%lx\n", type->name, type->cnt); |
Benjamin Herrenschmidt | 6ed311b | 2010-07-12 14:36:48 +1000 | [diff] [blame] | 1866 | |
Gioh Kim | 66e8b43 | 2017-11-15 17:33:42 -0800 | [diff] [blame] | 1867 | for_each_memblock_type(idx, type, rgn) { |
Tejun Heo | 7c0caeb | 2011-07-14 11:43:42 +0200 | [diff] [blame] | 1868 | char nid_buf[32] = ""; |
Benjamin Herrenschmidt | 6ed311b | 2010-07-12 14:36:48 +1000 | [diff] [blame] | 1869 | |
Tejun Heo | 7c0caeb | 2011-07-14 11:43:42 +0200 | [diff] [blame] | 1870 | base = rgn->base; |
| 1871 | size = rgn->size; |
Miles Chen | 5d63f81 | 2017-02-22 15:46:42 -0800 | [diff] [blame] | 1872 | end = base + size - 1; |
Tang Chen | 66a2075 | 2014-01-21 15:49:20 -0800 | [diff] [blame] | 1873 | flags = rgn->flags; |
Mike Rapoport | a9ee6cf | 2021-06-28 19:43:01 -0700 | [diff] [blame] | 1874 | #ifdef CONFIG_NUMA |
Tejun Heo | 7c0caeb | 2011-07-14 11:43:42 +0200 | [diff] [blame] | 1875 | if (memblock_get_region_node(rgn) != MAX_NUMNODES) |
| 1876 | snprintf(nid_buf, sizeof(nid_buf), " on node %d", |
| 1877 | memblock_get_region_node(rgn)); |
| 1878 | #endif |
Mike Rapoport | e1720fe | 2018-06-30 17:55:01 +0300 | [diff] [blame] | 1879 | pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n", |
Heiko Carstens | 0262d9c | 2017-02-24 14:55:59 -0800 | [diff] [blame] | 1880 | type->name, idx, &base, &end, &size, nid_buf, flags); |
Benjamin Herrenschmidt | 6ed311b | 2010-07-12 14:36:48 +1000 | [diff] [blame] | 1881 | } |
| 1882 | } |
| 1883 | |
Mike Rapoport | 87c5587 | 2020-10-13 16:57:54 -0700 | [diff] [blame] | 1884 | static void __init_memblock __memblock_dump_all(void) |
Benjamin Herrenschmidt | 6ed311b | 2010-07-12 14:36:48 +1000 | [diff] [blame] | 1885 | { |
Benjamin Herrenschmidt | 6ed311b | 2010-07-12 14:36:48 +1000 | [diff] [blame] | 1886 | pr_info("MEMBLOCK configuration:\n"); |
Miles Chen | 5d63f81 | 2017-02-22 15:46:42 -0800 | [diff] [blame] | 1887 | pr_info(" memory size = %pa reserved size = %pa\n", |
| 1888 | &memblock.memory.total_size, |
| 1889 | &memblock.reserved.total_size); |
Benjamin Herrenschmidt | 6ed311b | 2010-07-12 14:36:48 +1000 | [diff] [blame] | 1890 | |
Heiko Carstens | 0262d9c | 2017-02-24 14:55:59 -0800 | [diff] [blame] | 1891 | memblock_dump(&memblock.memory); |
| 1892 | memblock_dump(&memblock.reserved); |
Heiko Carstens | 409efd4 | 2017-02-24 14:55:56 -0800 | [diff] [blame] | 1893 | #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP |
David Hildenbrand | 7764990 | 2020-07-01 16:18:29 +0200 | [diff] [blame] | 1894 | memblock_dump(&physmem); |
Heiko Carstens | 409efd4 | 2017-02-24 14:55:56 -0800 | [diff] [blame] | 1895 | #endif |
Benjamin Herrenschmidt | 6ed311b | 2010-07-12 14:36:48 +1000 | [diff] [blame] | 1896 | } |
| 1897 | |
Mike Rapoport | 87c5587 | 2020-10-13 16:57:54 -0700 | [diff] [blame] | 1898 | void __init_memblock memblock_dump_all(void) |
| 1899 | { |
| 1900 | if (memblock_debug) |
| 1901 | __memblock_dump_all(); |
| 1902 | } |
| 1903 | |
Tejun Heo | 1aadc05 | 2011-12-08 10:22:08 -0800 | [diff] [blame] | 1904 | void __init memblock_allow_resize(void) |
Benjamin Herrenschmidt | 6ed311b | 2010-07-12 14:36:48 +1000 | [diff] [blame] | 1905 | { |
Benjamin Herrenschmidt | 142b45a | 2010-07-06 15:39:13 -0700 | [diff] [blame] | 1906 | memblock_can_resize = 1; |
Benjamin Herrenschmidt | 6ed311b | 2010-07-12 14:36:48 +1000 | [diff] [blame] | 1907 | } |
| 1908 | |
Benjamin Herrenschmidt | 6ed311b | 2010-07-12 14:36:48 +1000 | [diff] [blame] | 1909 | static int __init early_memblock(char *p) |
| 1910 | { |
| 1911 | if (p && strstr(p, "debug")) |
| 1912 | memblock_debug = 1; |
| 1913 | return 0; |
| 1914 | } |
| 1915 | early_param("memblock", early_memblock); |
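/*
 * Booting with "memblock=debug" on the kernel command line sets
 * memblock_debug above, which enables memblock_dbg() tracing and lets
 * memblock_dump_all() print the full region lists.
 */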
| 1916 | |
Mike Rapoport | 4f5b0c1 | 2020-12-14 19:09:59 -0800 | [diff] [blame] | 1917 | static void __init free_memmap(unsigned long start_pfn, unsigned long end_pfn) |
| 1918 | { |
| 1919 | struct page *start_pg, *end_pg; |
| 1920 | phys_addr_t pg, pgend; |
| 1921 | |
| 1922 | /* |
| 1923 | * Convert start_pfn/end_pfn to struct page pointers.
| 1924 | */ |
| 1925 | start_pg = pfn_to_page(start_pfn - 1) + 1; |
| 1926 | end_pg = pfn_to_page(end_pfn - 1) + 1; |
| 1927 | |
| 1928 | /* |
| 1929 | * Convert to physical addresses, and round start upwards and end |
| 1930 | * downwards. |
| 1931 | */ |
| 1932 | pg = PAGE_ALIGN(__pa(start_pg)); |
| 1933 | pgend = __pa(end_pg) & PAGE_MASK; |
| 1934 | |
| 1935 | /* |
| 1936 | * If there are free pages between these, free the section of the |
| 1937 | * memmap array. |
| 1938 | */ |
| 1939 | if (pg < pgend) |
Mike Rapoport | 3ecc683 | 2021-11-05 13:43:19 -0700 | [diff] [blame^] | 1940 | memblock_phys_free(pg, pgend - pg); |
Mike Rapoport | 4f5b0c1 | 2020-12-14 19:09:59 -0800 | [diff] [blame] | 1941 | } |
| 1942 | |
| 1943 | /* |
| 1944 | * The mem_map array can get very big. Free the unused area of the memory map. |
| 1945 | */ |
| 1946 | static void __init free_unused_memmap(void) |
| 1947 | { |
| 1948 | unsigned long start, end, prev_end = 0; |
| 1949 | int i; |
| 1950 | |
| 1951 | if (!IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) || |
| 1952 | IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP)) |
| 1953 | return; |
| 1954 | |
| 1955 | /* |
| 1956 | * This relies on each bank being in address order. |
| 1957 | * The banks are sorted previously in bootmem_init(). |
| 1958 | */ |
| 1959 | for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) { |
| 1960 | #ifdef CONFIG_SPARSEMEM |
| 1961 | /* |
| 1962 | * Take care not to free memmap entries that don't exist |
| 1963 | * due to SPARSEMEM sections which aren't present. |
| 1964 | */ |
| 1965 | start = min(start, ALIGN(prev_end, PAGES_PER_SECTION)); |
Mike Rapoport | 4f5b0c1 | 2020-12-14 19:09:59 -0800 | [diff] [blame] | 1966 | #endif |
Mike Rapoport | 4f5b0c1 | 2020-12-14 19:09:59 -0800 | [diff] [blame] | 1967 | /* |
Mike Rapoport | e2a8680 | 2021-05-17 21:15:15 +0300 | [diff] [blame] | 1968 | * Align down here since many operations in the VM subsystem
| 1969 | * presume that there are no holes in the memory map inside |
| 1970 | * a pageblock |
Mike Rapoport | 4f5b0c1 | 2020-12-14 19:09:59 -0800 | [diff] [blame] | 1971 | */ |
Mike Rapoport | e2a8680 | 2021-05-17 21:15:15 +0300 | [diff] [blame] | 1972 | start = round_down(start, pageblock_nr_pages); |
Mike Rapoport | 4f5b0c1 | 2020-12-14 19:09:59 -0800 | [diff] [blame] | 1973 | |
| 1974 | /* |
| 1975 | * If we had a previous bank, and there is a space |
| 1976 | * between the current bank and the previous, free it. |
| 1977 | */ |
| 1978 | if (prev_end && prev_end < start) |
| 1979 | free_memmap(prev_end, start); |
| 1980 | |
| 1981 | /* |
Mike Rapoport | e2a8680 | 2021-05-17 21:15:15 +0300 | [diff] [blame] | 1982 | * Align up here since many operations in the VM subsystem
| 1983 | * presume that there are no holes in the memory map inside |
| 1984 | * a pageblock |
Mike Rapoport | 4f5b0c1 | 2020-12-14 19:09:59 -0800 | [diff] [blame] | 1985 | */ |
Mike Rapoport | e2a8680 | 2021-05-17 21:15:15 +0300 | [diff] [blame] | 1986 | prev_end = ALIGN(end, pageblock_nr_pages); |
Mike Rapoport | 4f5b0c1 | 2020-12-14 19:09:59 -0800 | [diff] [blame] | 1987 | } |
| 1988 | |
| 1989 | #ifdef CONFIG_SPARSEMEM |
Mike Rapoport | f921f53 | 2021-05-17 21:31:59 +0300 | [diff] [blame] | 1990 | if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) { |
| 1991 | prev_end = ALIGN(end, pageblock_nr_pages); |
Mike Rapoport | 4f5b0c1 | 2020-12-14 19:09:59 -0800 | [diff] [blame] | 1992 | free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION)); |
Mike Rapoport | f921f53 | 2021-05-17 21:31:59 +0300 | [diff] [blame] | 1993 | } |
Mike Rapoport | 4f5b0c1 | 2020-12-14 19:09:59 -0800 | [diff] [blame] | 1994 | #endif |
| 1995 | } |
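/*
 * Worked example (hypothetical layout): with pageblock_nr_pages = 0x200,
 * PAGES_PER_SECTION = 0x8000 and banks covering pfns [0, 0x10100) and
 * [0x18000, 0x20000), the first iteration leaves prev_end =
 * ALIGN(0x10100, 0x200) = 0x10200 and the second iteration calls
 * free_memmap(0x10200, 0x18000), releasing the struct pages that
 * describe the hole between the banks.
 */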
| 1996 | |
Mike Rapoport | bda49a8 | 2018-10-30 15:09:40 -0700 | [diff] [blame] | 1997 | static void __init __free_pages_memory(unsigned long start, unsigned long end) |
| 1998 | { |
| 1999 | int order; |
| 2000 | |
| 2001 | while (start < end) { |
| 2002 | order = min(MAX_ORDER - 1UL, __ffs(start)); |
| 2003 | |
| 2004 | while (start + (1UL << order) > end) |
| 2005 | order--; |
| 2006 | |
| 2007 | memblock_free_pages(pfn_to_page(start), start, order); |
| 2008 | |
| 2009 | start += (1UL << order); |
| 2010 | } |
| 2011 | } |
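/*
 * Worked example: for start = 5 and end = 23 the loop above frees the
 * range as the largest naturally aligned power-of-two blocks:
 *
 *   pfn 5         order 0   (__ffs(5) = 0)
 *   pfn 6..7      order 1
 *   pfn 8..15     order 3
 *   pfn 16..19    order 2   (higher orders would run past end)
 *   pfn 20..21    order 1
 *   pfn 22        order 0
 */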
| 2012 | |
| 2013 | static unsigned long __init __free_memory_core(phys_addr_t start, |
| 2014 | phys_addr_t end) |
| 2015 | { |
| 2016 | unsigned long start_pfn = PFN_UP(start); |
| 2017 | unsigned long end_pfn = min_t(unsigned long, |
| 2018 | PFN_DOWN(end), max_low_pfn); |
| 2019 | |
| 2020 | if (start_pfn >= end_pfn) |
| 2021 | return 0; |
| 2022 | |
| 2023 | __free_pages_memory(start_pfn, end_pfn); |
| 2024 | |
| 2025 | return end_pfn - start_pfn; |
| 2026 | } |
| 2027 | |
Mike Rapoport | 9092d4f | 2021-06-30 18:51:16 -0700 | [diff] [blame] | 2028 | static void __init memmap_init_reserved_pages(void) |
| 2029 | { |
| 2030 | struct memblock_region *region; |
| 2031 | phys_addr_t start, end; |
| 2032 | u64 i; |
| 2033 | |
| 2034 | /* initialize struct pages for the reserved regions */ |
| 2035 | for_each_reserved_mem_range(i, &start, &end) |
| 2036 | reserve_bootmem_region(start, end); |
| 2037 | |
| 2038 | /* and also treat struct pages for the NOMAP regions as PageReserved */ |
| 2039 | for_each_mem_region(region) { |
| 2040 | if (memblock_is_nomap(region)) { |
| 2041 | start = region->base; |
| 2042 | end = start + region->size; |
| 2043 | reserve_bootmem_region(start, end); |
| 2044 | } |
| 2045 | } |
| 2046 | } |
| 2047 | |
Mike Rapoport | bda49a8 | 2018-10-30 15:09:40 -0700 | [diff] [blame] | 2048 | static unsigned long __init free_low_memory_core_early(void) |
| 2049 | { |
| 2050 | unsigned long count = 0; |
| 2051 | phys_addr_t start, end; |
| 2052 | u64 i; |
| 2053 | |
| 2054 | memblock_clear_hotplug(0, -1); |
| 2055 | |
Mike Rapoport | 9092d4f | 2021-06-30 18:51:16 -0700 | [diff] [blame] | 2056 | memmap_init_reserved_pages(); |
Mike Rapoport | bda49a8 | 2018-10-30 15:09:40 -0700 | [diff] [blame] | 2057 | |
| 2058 | /* |
| 2059 | * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id |
| 2060 | * because in some cases, e.g. when node 0 has no RAM installed,
| 2061 | * low memory will end up on node 1
| 2062 | */ |
| 2063 | for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, |
| 2064 | NULL) |
| 2065 | count += __free_memory_core(start, end); |
| 2066 | |
| 2067 | return count; |
| 2068 | } |
| 2069 | |
| 2070 | static int reset_managed_pages_done __initdata; |
| 2071 | |
| 2072 | void reset_node_managed_pages(pg_data_t *pgdat) |
| 2073 | { |
| 2074 | struct zone *z; |
| 2075 | |
| 2076 | for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) |
Arun KS | 9705bea | 2018-12-28 00:34:24 -0800 | [diff] [blame] | 2077 | atomic_long_set(&z->managed_pages, 0); |
Mike Rapoport | bda49a8 | 2018-10-30 15:09:40 -0700 | [diff] [blame] | 2078 | } |
| 2079 | |
| 2080 | void __init reset_all_zones_managed_pages(void) |
| 2081 | { |
| 2082 | struct pglist_data *pgdat; |
| 2083 | |
| 2084 | if (reset_managed_pages_done) |
| 2085 | return; |
| 2086 | |
| 2087 | for_each_online_pgdat(pgdat) |
| 2088 | reset_node_managed_pages(pgdat); |
| 2089 | |
| 2090 | reset_managed_pages_done = 1; |
| 2091 | } |
| 2092 | |
| 2093 | /** |
| 2094 | * memblock_free_all - release free pages to the buddy allocator |
Mike Rapoport | bda49a8 | 2018-10-30 15:09:40 -0700 | [diff] [blame] | 2095 | */ |
Daeseok Youn | 097d43d | 2021-01-14 16:08:17 +0900 | [diff] [blame] | 2096 | void __init memblock_free_all(void) |
Mike Rapoport | bda49a8 | 2018-10-30 15:09:40 -0700 | [diff] [blame] | 2097 | { |
| 2098 | unsigned long pages; |
| 2099 | |
Mike Rapoport | 4f5b0c1 | 2020-12-14 19:09:59 -0800 | [diff] [blame] | 2100 | free_unused_memmap(); |
Mike Rapoport | bda49a8 | 2018-10-30 15:09:40 -0700 | [diff] [blame] | 2101 | reset_all_zones_managed_pages(); |
| 2102 | |
| 2103 | pages = free_low_memory_core_early(); |
Arun KS | ca79b0c | 2018-12-28 00:34:29 -0800 | [diff] [blame] | 2104 | totalram_pages_add(pages); |
Mike Rapoport | bda49a8 | 2018-10-30 15:09:40 -0700 | [diff] [blame] | 2105 | } |
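/*
 * Usage sketch: memblock_free_all() runs once during boot, after the
 * zones and the memory map have been initialized - typically from the
 * architecture's mem_init(). A minimal caller might look like this
 * hypothetical sketch.
 */
#if 0	/* illustrative only */
void __init mem_init(void)
{
	/* ... architecture specific setup and reporting ... */

	/* hand everything that is still free over to the buddy allocator */
	memblock_free_all();
}
#endif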
| 2106 | |
Mike Rapoport | 350e88b | 2019-05-13 17:22:59 -0700 | [diff] [blame] | 2107 | #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK) |
Benjamin Herrenschmidt | 6d03b88 | 2010-07-06 15:39:19 -0700 | [diff] [blame] | 2108 | |
| 2109 | static int memblock_debug_show(struct seq_file *m, void *private) |
| 2110 | { |
| 2111 | struct memblock_type *type = m->private; |
| 2112 | struct memblock_region *reg; |
| 2113 | int i; |
Miles Chen | 5d63f81 | 2017-02-22 15:46:42 -0800 | [diff] [blame] | 2114 | phys_addr_t end; |
Benjamin Herrenschmidt | 6d03b88 | 2010-07-06 15:39:19 -0700 | [diff] [blame] | 2115 | |
| 2116 | for (i = 0; i < type->cnt; i++) { |
| 2117 | reg = &type->regions[i]; |
Miles Chen | 5d63f81 | 2017-02-22 15:46:42 -0800 | [diff] [blame] | 2118 | end = reg->base + reg->size - 1; |
Benjamin Herrenschmidt | 6d03b88 | 2010-07-06 15:39:19 -0700 | [diff] [blame] | 2119 | |
Miles Chen | 5d63f81 | 2017-02-22 15:46:42 -0800 | [diff] [blame] | 2120 | seq_printf(m, "%4d: ", i); |
| 2121 | seq_printf(m, "%pa..%pa\n", ®->base, &end); |
Benjamin Herrenschmidt | 6d03b88 | 2010-07-06 15:39:19 -0700 | [diff] [blame] | 2122 | } |
| 2123 | return 0; |
| 2124 | } |
Andy Shevchenko | 5ad3509 | 2018-04-05 16:23:16 -0700 | [diff] [blame] | 2125 | DEFINE_SHOW_ATTRIBUTE(memblock_debug); |
Benjamin Herrenschmidt | 6d03b88 | 2010-07-06 15:39:19 -0700 | [diff] [blame] | 2126 | |
| 2127 | static int __init memblock_init_debugfs(void) |
| 2128 | { |
| 2129 | struct dentry *root = debugfs_create_dir("memblock", NULL); |
Greg Kroah-Hartman | d9f7979 | 2019-03-05 15:46:09 -0800 | [diff] [blame] | 2130 | |
Joe Perches | 0825a6f | 2018-06-14 15:27:58 -0700 | [diff] [blame] | 2131 | debugfs_create_file("memory", 0444, root, |
| 2132 | &memblock.memory, &memblock_debug_fops); |
| 2133 | debugfs_create_file("reserved", 0444, root, |
| 2134 | &memblock.reserved, &memblock_debug_fops); |
Philipp Hachtmann | 70210ed | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 2135 | #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP |
David Hildenbrand | 7764990 | 2020-07-01 16:18:29 +0200 | [diff] [blame] | 2136 | debugfs_create_file("physmem", 0444, root, &physmem, |
| 2137 | &memblock_debug_fops); |
Philipp Hachtmann | 70210ed | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 2138 | #endif |
Benjamin Herrenschmidt | 6d03b88 | 2010-07-06 15:39:19 -0700 | [diff] [blame] | 2139 | |
| 2140 | return 0; |
| 2141 | } |
| 2142 | __initcall(memblock_init_debugfs); |
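/*
 * Usage sketch: with CONFIG_DEBUG_FS and CONFIG_ARCH_KEEP_MEMBLOCK set,
 * the files created above can be inspected at runtime, e.g.:
 *
 *   # cat /sys/kernel/debug/memblock/memory
 *      0: 0x0000000040000000..0x00000000bfffffff
 *
 * Each line is produced by memblock_debug_show() as "index: base..end"
 * with an inclusive end address; the sample values are illustrative.
 */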
| 2143 | |
| 2144 | #endif /* CONFIG_DEBUG_FS && CONFIG_ARCH_KEEP_MEMBLOCK */