/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/bug.h>

struct page;
struct zone;
struct pglist_data;
struct mem_section;
struct memory_block;
struct resource;
struct vmem_altmap;

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Return the page for a valid pfn only if the page is online. All pfn
 * walkers which rely on a fully initialized page->flags (and anything
 * else initialized during onlining) should use this rather than
 * pfn_valid() && pfn_to_page().
 */
#define pfn_to_online_page(pfn)                                            \
({                                                                         \
        struct page *___page = NULL;                                       \
        unsigned long ___pfn = pfn;                                        \
        unsigned long ___nr = pfn_to_section_nr(___pfn);                   \
                                                                           \
        if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr) &&        \
            pfn_valid_within(___pfn))                                      \
                ___page = pfn_to_page(___pfn);                             \
        ___page;                                                           \
})
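/*
 * Example (illustrative sketch only, not part of this header): a pfn walker
 * that only touches online memory. The [start_pfn, end_pfn) range below is a
 * hypothetical input supplied by the caller.
 *
 *      unsigned long pfn;
 *
 *      for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 *              struct page *page = pfn_to_online_page(pfn);
 *
 *              if (!page)
 *                      continue;       // hole or offline section
 *              // page->flags is fully initialized here
 *      }
 */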

/*
 * Types for free bootmem stored in page->lru.next. These have to lie in
 * some arbitrary range of the unsigned long space for debugging purposes.
 */
enum {
        MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
        SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
        MIX_SECTION_INFO,
        NODE_INFO,
        MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
};

/* Types to control the zone type of onlined and offlined memory */
enum {
        /* Offline the memory. */
        MMOP_OFFLINE = 0,
        /* Online the memory. Zone depends, see default_zone_for_pfn(). */
        MMOP_ONLINE,
        /* Online the memory to ZONE_NORMAL. */
        MMOP_ONLINE_KERNEL,
        /* Online the memory to ZONE_MOVABLE. */
        MMOP_ONLINE_MOVABLE,
};

/* Flags for add_memory() and friends to specify memory hotplug details. */
typedef int __bitwise mhp_t;

/* No special request */
#define MHP_NONE                ((__force mhp_t)0)
/*
 * Allow merging of the added System RAM resource with adjacent,
 * mergeable resources. After a successful call to add_memory_resource()
 * with this flag set, the resource pointer must no longer be used as it
 * might be stale, or the resource might have changed.
 */
#define MEMHP_MERGE_RESOURCE    ((__force mhp_t)BIT(0))

/*
 * Extended parameters for memory hotplug:
 * altmap: alternative allocator for memmap array (optional)
 * pgprot: page protection flags to apply to newly created page tables
 *	(required)
 */
struct mhp_params {
        struct vmem_altmap *altmap;
        pgprot_t pgprot;
};
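/*
 * Example (illustrative sketch only): how a caller might fill in
 * struct mhp_params before adding pages. The nid/start/size values and the
 * surrounding code are hypothetical; arch_add_memory() is normally reached
 * via add_memory_resource() rather than called directly like this.
 *
 *      struct mhp_params params = {
 *              .altmap = NULL,                 // use regular memmap allocation
 *              .pgprot = PAGE_KERNEL,          // required
 *      };
 *
 *      rc = arch_add_memory(nid, start, size, &params);
 */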

/*
 * Zone resizing functions
 *
 * Note: any attempt to resize a zone should have pgdat_resize_lock() and
 * zone_span_writelock() both held. This ensures the size of a zone can't
 * be changed while pgdat_resize_lock() is held.
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
        return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
        return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
        write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
        write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
        seqlock_init(&zone->span_seqlock);
}
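/*
 * Example (illustrative sketch only): a reader that needs a consistent
 * snapshot of a zone's span while it may be resized concurrently.
 * zone_start and zone_pages are hypothetical local variables.
 *
 *      unsigned seq;
 *
 *      do {
 *              seq = zone_span_seqbegin(zone);
 *              zone_start = zone->zone_start_pfn;
 *              zone_pages = zone->spanned_pages;
 *      } while (zone_span_seqretry(zone, seq));
 */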
extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
/* VM interface that may be used by firmware interface */
extern int online_pages(unsigned long pfn, unsigned long nr_pages,
			int online_type, int nid);
extern struct zone *test_pages_in_a_zone(unsigned long start_pfn,
					 unsigned long end_pfn);
extern void __offline_isolated_pages(unsigned long start_pfn,
				     unsigned long end_pfn);

typedef void (*online_page_callback_t)(struct page *page, unsigned int order);

extern void generic_online_page(struct page *page, unsigned int order);
extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);
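/*
 * Example (illustrative sketch only): a driver (e.g. a memory balloon) may
 * intercept page onlining instead of handing every page straight to the page
 * allocator. my_online_page() is a hypothetical callback.
 *
 *      static void my_online_page(struct page *page, unsigned int order)
 *      {
 *              // decide whether to keep the range or expose it
 *              generic_online_page(page, order);
 *      }
 *
 *      rc = set_online_page_callback(&my_online_page);
 *      ...
 *      rc = restore_online_page_callback(&my_online_page);
 */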

extern int try_online_node(int nid);

extern int arch_add_memory(int nid, u64 start, u64 size,
			   struct mhp_params *params);
extern u64 max_mem_size;

extern int memhp_online_type_from_str(const char *str);

/* Default online_type (MMOP_*) when new memory blocks are added. */
extern int memhp_default_online_type;
/* If movable_node boot option specified */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
{
        return movable_node_enabled;
}

extern void arch_remove_memory(int nid, u64 start, u64 size,
			       struct vmem_altmap *altmap);
extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
			   struct vmem_altmap *altmap);

/* reasonably generic interface to expand the physical pages */
extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		       struct mhp_params *params);

#ifndef CONFIG_ARCH_HAS_ADD_PAGES
static inline int add_pages(int nid, unsigned long start_pfn,
		unsigned long nr_pages, struct mhp_params *params)
{
        return __add_pages(nid, start_pfn, nr_pages, params);
}
#else /* ARCH_HAS_ADD_PAGES */
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
	      struct mhp_params *params);
#endif /* ARCH_HAS_ADD_PAGES */

#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
 * To support node hot-add, we have to allocate a new pgdat.
 *
 * If an arch has generic-style NODE_DATA(),
 * node_data[nid] = kzalloc() works well, but it depends on the architecture.
 *
 * In general, generic_alloc_nodedata() is used.
 * arch_free_nodedata() is currently only defined for the error path of
 * node hot-add.
 */
extern pg_data_t *arch_alloc_nodedata(int nid);
extern void arch_free_nodedata(pg_data_t *pgdat);
extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);

#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#define arch_alloc_nodedata(nid)	generic_alloc_nodedata(nid)
#define arch_free_nodedata(pgdat)	generic_free_nodedata(pgdat)

#ifdef CONFIG_NUMA
/*
 * If CONFIG_HAVE_ARCH_NODEDATA_EXTENSION=n, this function is used to
 * allocate the pgdat.
 * XXX: kmalloc_node() cannot be used here to allocate from the new node's
 * memory, because the pgdat for the new node is not allocated/initialized
 * yet itself. Using the new node's memory would need more consideration.
 */
#define generic_alloc_nodedata(nid)				\
({								\
	kzalloc(sizeof(pg_data_t), GFP_KERNEL);			\
})
/*
 * This definition is just for the error path of node hot-add.
 * For node hot-remove, we have to replace this.
 */
#define generic_free_nodedata(pgdat)	kfree(pgdat)

extern pg_data_t *node_data[];
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
        node_data[nid] = pgdat;
}

#else /* !CONFIG_NUMA */

/* never called */
static inline pg_data_t *generic_alloc_nodedata(int nid)
{
        BUG();
        return NULL;
}
static inline void generic_free_nodedata(pg_data_t *pgdat)
{
}
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
extern void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
#else
static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}
#endif
extern void put_page_bootmem(struct page *page);
extern void get_page_bootmem(unsigned long info, struct page *page,
			     unsigned long type);

void get_online_mems(void);
void put_online_mems(void);
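/*
 * Example (illustrative sketch only): code that walks memory which must not
 * be offlined underneath it can bracket the walk with the read-side pair
 * get_online_mems()/put_online_mems(), while hotplug itself runs under
 * mem_hotplug_begin()/mem_hotplug_done(). walk_memory() is hypothetical.
 *
 *      get_online_mems();
 *      walk_memory(...);
 *      put_online_mems();
 */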

void mem_hotplug_begin(void);
void mem_hotplug_done(void);

#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn)			\
({						\
	struct page *___page = NULL;		\
	if (pfn_valid(pfn))			\
		___page = pfn_to_page(pfn);	\
	___page;				\
})

static inline unsigned zone_span_seqbegin(struct zone *zone)
{
        return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
        return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}

static inline int try_online_node(int nid)
{
        return 0;
}

static inline void get_online_mems(void) {}
static inline void put_online_mems(void) {}

static inline void mem_hotplug_begin(void) {}
static inline void mem_hotplug_done(void) {}

static inline bool movable_node_is_enabled(void)
{
        return false;
}
#endif /* ! CONFIG_MEMORY_HOTPLUG */

#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
 * pgdat resizing functions
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
        spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
        spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
        spin_lock_init(&pgdat->node_size_lock);
}
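/*
 * Example (illustrative sketch only): a writer resizing a node span. The
 * writer takes pgdat_resize_lock() (and, for zone spans, the seqlock writer
 * side above) so readers see a consistent range.
 *
 *      unsigned long flags;
 *
 *      pgdat_resize_lock(pgdat, &flags);
 *      // update pgdat->node_start_pfn / pgdat->node_spanned_pages
 *      pgdat_resize_unlock(pgdat, &flags);
 */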
#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */

#ifdef CONFIG_MEMORY_HOTREMOVE

extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern int remove_memory(int nid, u64 start, u64 size);
extern void __remove_memory(int nid, u64 start, u64 size);
extern int offline_and_remove_memory(int nid, u64 start, u64 size);
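/*
 * Example (illustrative sketch only): a driver (such as a paravirtualized
 * memory driver) giving back a memory block it added earlier. The
 * nid/start/size values are hypothetical and must cover whole memory blocks
 * owned by the caller.
 *
 *      rc = offline_and_remove_memory(nid, start, size);
 *      if (rc)
 *              pr_warn("failed to offline and remove memory: %d\n", rc);
 */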

#else
static inline void try_offline_node(int nid) {}

static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
        return -EINVAL;
}

static inline int remove_memory(int nid, u64 start, u64 size)
{
        return -EBUSY;
}

static inline void __remove_memory(int nid, u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */

extern void set_zone_contiguous(struct zone *zone);
extern void clear_zone_contiguous(struct zone *zone);

#ifdef CONFIG_MEMORY_HOTPLUG
extern void __ref free_area_init_core_hotplug(int nid);
extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory_resource(int nid, struct resource *resource,
			       mhp_t mhp_flags);
extern int add_memory_driver_managed(int nid, u64 start, u64 size,
				     const char *resource_name,
				     mhp_t mhp_flags);
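/*
 * Example (illustrative sketch only): a driver hot-adding memory it manages
 * itself, asking for the new resource to be merged with a mergeable
 * neighbour via the MEMHP_MERGE_RESOURCE flag defined above. The nid, start,
 * size and resource name shown are hypothetical.
 *
 *      rc = add_memory_driver_managed(nid, start, size,
 *                                     "System RAM (example)",
 *                                     MEMHP_MERGE_RESOURCE);
 */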
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
				   unsigned long nr_pages,
				   struct vmem_altmap *altmap, int migratetype);
extern void remove_pfn_range_from_zone(struct zone *zone,
				       unsigned long start_pfn,
				       unsigned long nr_pages);
extern bool is_memblock_offlined(struct memory_block *mem);
extern int sparse_add_section(int nid, unsigned long pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap);
extern void sparse_remove_section(struct mem_section *ms,
		unsigned long pfn, unsigned long nr_pages,
		unsigned long map_offset, struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
					  unsigned long pnum);
extern struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
		unsigned long nr_pages);
#endif /* CONFIG_MEMORY_HOTPLUG */

#endif /* __LINUX_MEMORY_HOTPLUG_H */