/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/bug.h>

struct page;
struct zone;
struct pglist_data;
struct mem_section;
struct memory_block;
struct resource;
struct vmem_altmap;

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Return the page for a valid pfn only if the page is online. All pfn
 * walkers which rely on fully initialized page->flags (and others)
 * should use this rather than pfn_valid && pfn_to_page.
 */
#define pfn_to_online_page(pfn)					   \
({								   \
	struct page *___page = NULL;				   \
	unsigned long ___pfn = pfn;				   \
	unsigned long ___nr = pfn_to_section_nr(___pfn);	   \
								   \
	if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr) && \
	    pfn_valid_within(___pfn))				   \
		___page = pfn_to_page(___pfn);			   \
	___page;						   \
})
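
/*
 * Example (a sketch, not built as part of this header): walk a pfn
 * range and only touch pages that are known to be online.  The caller
 * "dump_online_range" is hypothetical.
 *
 *	static void dump_online_range(unsigned long start_pfn,
 *				      unsigned long end_pfn)
 *	{
 *		unsigned long pfn;
 *
 *		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 *			struct page *page = pfn_to_online_page(pfn);
 *
 *			if (!page)
 *				continue;
 *			...page->flags is fully initialized here...
 *		}
 *	}
 */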

/*
 * Types for free bootmem stored in page->lru.next. These have to be in
 * some random range in unsigned long space for debugging purposes.
 */
enum {
	MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
	SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
	MIX_SECTION_INFO,
	NODE_INFO,
	MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
};

/* Types for controlling the zone type of onlined and offlined memory */
enum {
	MMOP_OFFLINE = -1,
	MMOP_ONLINE_KEEP,
	MMOP_ONLINE_KERNEL,
	MMOP_ONLINE_MOVABLE,
};
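
/*
 * Sketch of how a user-visible memory-block "state" string might map
 * onto these values (the real parsing lives in drivers/base/memory.c;
 * the helper name here is hypothetical):
 *
 *	static int online_type_from_str(const char *str)
 *	{
 *		if (sysfs_streq(str, "online"))
 *			return MMOP_ONLINE_KEEP;
 *		if (sysfs_streq(str, "online_kernel"))
 *			return MMOP_ONLINE_KERNEL;
 *		if (sysfs_streq(str, "online_movable"))
 *			return MMOP_ONLINE_MOVABLE;
 *		if (sysfs_streq(str, "offline"))
 *			return MMOP_OFFLINE;
 *		return -EINVAL;
 *	}
 */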

/*
 * Zone resizing functions
 *
 * Note: any attempt to resize a zone should have pgdat_resize_lock()
 * and zone_span_writelock() both held.  This ensures the size of a
 * zone can't be changed while pgdat_resize_lock() is held.
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
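
/*
 * Reader-side pattern (a sketch; the helper name is illustrative):
 * sample the zone span under the seqlock and retry if a concurrent
 * resize raced with the read.
 *
 *	static bool pfn_in_zone_span(struct zone *zone, unsigned long pfn)
 *	{
 *		unsigned seq;
 *		bool ret;
 *
 *		do {
 *			seq = zone_span_seqbegin(zone);
 *			ret = pfn >= zone->zone_start_pfn &&
 *			      pfn < zone_end_pfn(zone);
 *		} while (zone_span_seqretry(zone, seq));
 *
 *		return ret;
 *	}
 */
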
extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
/* VM interface that may be used by firmware interface */
extern int online_pages(unsigned long, unsigned long, int);
extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
	unsigned long *valid_start, unsigned long *valid_end);
extern void __offline_isolated_pages(unsigned long, unsigned long);

typedef void (*online_page_callback_t)(struct page *page, unsigned int order);

extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);

extern void __online_page_set_limits(struct page *page);
extern void __online_page_increment_counters(struct page *page);
extern void __online_page_free(struct page *page);
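
/*
 * Sketch of a driver intercepting page onlining (balloon drivers do
 * something like this; "my_online_page" is hypothetical and only
 * handles the order == 0 case):
 *
 *	static void my_online_page(struct page *page, unsigned int order)
 *	{
 *		__online_page_set_limits(page);
 *		__online_page_increment_counters(page);
 *		__online_page_free(page);
 *	}
 *
 *	set_online_page_callback(&my_online_page);
 *	...pages onlined from now on are routed to my_online_page()...
 *	restore_online_page_callback(&my_online_page);
 */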

extern int try_online_node(int nid);

extern bool memhp_auto_online;
/* If the movable_node boot option was specified */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
{
	return movable_node_enabled;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
extern int arch_remove_memory(int nid, u64 start, u64 size,
			      struct vmem_altmap *altmap);
extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
	unsigned long nr_pages, struct vmem_altmap *altmap);
#endif /* CONFIG_MEMORY_HOTREMOVE */

/* reasonably generic interface to expand the physical pages */
extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap, bool want_memblock);

#ifndef CONFIG_ARCH_HAS_ADD_PAGES
static inline int add_pages(int nid, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap,
		bool want_memblock)
{
	return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
}
#else /* ARCH_HAS_ADD_PAGES */
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
	      struct vmem_altmap *altmap, bool want_memblock);
#endif /* ARCH_HAS_ADD_PAGES */
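
/*
 * Sketch of the usual relationship between arch_add_memory() and
 * add_pages() (details vary per architecture; the page-table setup
 * step here is illustrative only):
 *
 *	int arch_add_memory(int nid, u64 start, u64 size,
 *			    struct vmem_altmap *altmap, bool want_memblock)
 *	{
 *		unsigned long start_pfn = start >> PAGE_SHIFT;
 *		unsigned long nr_pages = size >> PAGE_SHIFT;
 *
 *		...map the range in the kernel page tables...
 *		return add_pages(nid, start_pfn, nr_pages, altmap,
 *				 want_memblock);
 *	}
 */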

#ifdef CONFIG_NUMA
extern int memory_add_physaddr_to_nid(u64 start);
#else
static inline int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
#endif

#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
 * To support node hot-add, we have to allocate a new pgdat.
 *
 * If an arch has a generic style NODE_DATA(),
 * node_data[nid] = kzalloc() works well.  But it depends on the
 * architecture.
 *
 * In general, generic_alloc_nodedata() is used.
 * arch_free_nodedata() is currently only defined for the error path of
 * node hot-add.
 */
extern pg_data_t *arch_alloc_nodedata(int nid);
extern void arch_free_nodedata(pg_data_t *pgdat);
extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);

#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#define arch_alloc_nodedata(nid)	generic_alloc_nodedata(nid)
#define arch_free_nodedata(pgdat)	generic_free_nodedata(pgdat)

#ifdef CONFIG_NUMA
/*
 * If CONFIG_HAVE_ARCH_NODEDATA_EXTENSION=n, this function is used to
 * allocate a pgdat.
 * XXX: kmalloc_node() cannot be used here to get memory from the new
 * node, because the pgdat for the new node has not itself been
 * allocated/initialized yet.  Using the new node's own memory will
 * need more consideration.
 */
#define generic_alloc_nodedata(nid)				\
({								\
	kzalloc(sizeof(pg_data_t), GFP_KERNEL);			\
})
/*
 * This definition is just for the error path in node hot-add.
 * For node hot-remove, we have to replace this.
 */
#define generic_free_nodedata(pgdat)	kfree(pgdat)

extern pg_data_t *node_data[];
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
	node_data[nid] = pgdat;
}
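
/*
 * Sketch of how node hot-add is expected to use these helpers (the
 * surrounding function is hypothetical; the real caller is the node
 * hot-add path in mm/memory_hotplug.c):
 *
 *	pg_data_t *pgdat = arch_alloc_nodedata(nid);
 *
 *	if (!pgdat)
 *		return -ENOMEM;
 *	arch_refresh_nodedata(nid, pgdat);
 *	...initialize the pgdat; on failure:
 *	arch_free_nodedata(pgdat);
 */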

#else /* !CONFIG_NUMA */

/* never called */
static inline pg_data_t *generic_alloc_nodedata(int nid)
{
	BUG();
	return NULL;
}
static inline void generic_free_nodedata(pg_data_t *pgdat)
{
}
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
extern void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
#else
static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}
#endif
extern void put_page_bootmem(struct page *page);
extern void get_page_bootmem(unsigned long info, struct page *page,
			     unsigned long type);
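
/*
 * Sketch of the intended pairing (as when registering bootmem info
 * for a section; the section_nr variable is illustrative):
 *
 *	get_page_bootmem(section_nr, page, SECTION_INFO);
 *	...
 *	put_page_bootmem(page);
 */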

void get_online_mems(void);
void put_online_mems(void);

void mem_hotplug_begin(void);
void mem_hotplug_done(void);
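
/*
 * Typical reader-side bracketing (a sketch): keep memory from being
 * hot-added/removed while walking it.
 *
 *	get_online_mems();
 *	...walk zones/sections that must not change under us...
 *	put_online_mems();
 *
 * Writers (the hotplug paths themselves) use mem_hotplug_begin() /
 * mem_hotplug_done() instead.
 */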

extern void set_zone_contiguous(struct zone *zone);
extern void clear_zone_contiguous(struct zone *zone);

#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn)			\
({						\
	struct page *___page = NULL;		\
	if (pfn_valid(pfn))			\
		___page = pfn_to_page(pfn);	\
	___page;				\
})

static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

static inline int mhp_notimplemented(const char *func)
{
	printk(KERN_WARNING "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func);
	dump_stack();
	return -ENOSYS;
}

static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}

static inline int try_online_node(int nid)
{
	return 0;
}

static inline void get_online_mems(void) {}
static inline void put_online_mems(void) {}

static inline void mem_hotplug_begin(void) {}
static inline void mem_hotplug_done(void) {}

static inline bool movable_node_is_enabled(void)
{
	return false;
}
#endif /* ! CONFIG_MEMORY_HOTPLUG */

#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
 * pgdat resizing functions
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
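
/*
 * Usage sketch: resizing a node span takes the pgdat resize lock, and
 * (per the note near the zone resizing functions above) a zone resize
 * additionally takes the zone span write side.
 *
 *	unsigned long flags;
 *
 *	pgdat_resize_lock(pgdat, &flags);
 *	zone_span_writelock(zone);
 *	...update zone->zone_start_pfn / spanned_pages...
 *	zone_span_writeunlock(zone);
 *	pgdat_resize_unlock(pgdat, &flags);
 */
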
#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */

#ifdef CONFIG_MEMORY_HOTREMOVE

extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern void remove_memory(int nid, u64 start, u64 size);
extern void __remove_memory(int nid, u64 start, u64 size);
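
/*
 * Hot-remove flow in outline (a sketch; error handling elided, and
 * note that __remove_memory() expects the caller to hold the device
 * hotplug lock, while remove_memory() takes it itself):
 *
 *	if (offline_pages(start_pfn, nr_pages))
 *		...range could not be offlined, give up...
 *	remove_memory(nid, start, size);
 */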
Badari Pulavarty | 5c755e9 | 2008-07-23 21:28:19 -0700 | [diff] [blame] | 307 | |
| 308 | #else |
Yaowei Bai | c98940f | 2016-05-19 17:11:26 -0700 | [diff] [blame] | 309 | static inline bool is_mem_section_removable(unsigned long pfn, |
Badari Pulavarty | 5c755e9 | 2008-07-23 21:28:19 -0700 | [diff] [blame] | 310 | unsigned long nr_pages) |
| 311 | { |
Yaowei Bai | c98940f | 2016-05-19 17:11:26 -0700 | [diff] [blame] | 312 | return false; |
Badari Pulavarty | 5c755e9 | 2008-07-23 21:28:19 -0700 | [diff] [blame] | 313 | } |
Wen Congyang | 90b30cd | 2013-02-22 16:33:27 -0800 | [diff] [blame] | 314 | |
| 315 | static inline void try_offline_node(int nid) {} |
Rafael J. Wysocki | aba6efc | 2013-06-01 22:24:07 +0200 | [diff] [blame] | 316 | |
| 317 | static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages) |
| 318 | { |
| 319 | return -EINVAL; |
| 320 | } |
| 321 | |
| 322 | static inline void remove_memory(int nid, u64 start, u64 size) {} |
David Hildenbrand | d15e592 | 2018-10-30 15:10:18 -0700 | [diff] [blame] | 323 | static inline void __remove_memory(int nid, u64 start, u64 size) {} |
Badari Pulavarty | 5c755e9 | 2008-07-23 21:28:19 -0700 | [diff] [blame] | 324 | #endif /* CONFIG_MEMORY_HOTREMOVE */ |
| 325 | |
extern void __ref free_area_init_core_hotplug(int nid);
extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
		void *arg, int (*func)(struct memory_block *, void *));
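
/*
 * Sketch of a walk_memory_range() callback (the callback name is
 * illustrative): check that every memory block in a range is offline.
 *
 *	static int check_offline_cb(struct memory_block *mem, void *arg)
 *	{
 *		if (!is_memblock_offlined(mem))
 *			return -EBUSY;
 *		return 0;
 *	}
 *
 *	ret = walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1),
 *				NULL, check_offline_cb);
 */
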
extern int __add_memory(int nid, u64 start, u64 size);
extern int add_memory(int nid, u64 start, u64 size);
extern int add_memory_resource(int nid, struct resource *resource);
extern int arch_add_memory(int nid, u64 start, u64 size,
		struct vmem_altmap *altmap, bool want_memblock);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap);
extern bool is_memblock_offlined(struct memory_block *mem);
extern int sparse_add_one_section(int nid, unsigned long start_pfn,
				  struct vmem_altmap *altmap);
extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
		unsigned long map_offset, struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
					  unsigned long pnum);
extern bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages,
		int online_type);
extern struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
		unsigned long nr_pages);

#endif /* __LINUX_MEMORY_HOTPLUG_H */