/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/bug.h>

struct page;
struct zone;
struct pglist_data;
struct mem_section;
struct memory_block;
struct resource;
struct vmem_altmap;

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Return the page for a valid pfn only if the page is online. All pfn
 * walkers which rely on fully initialized page->flags (and other fields)
 * should use this rather than pfn_valid && pfn_to_page.
 */
#define pfn_to_online_page(pfn)					    \
({								    \
	struct page *___page = NULL;				    \
	unsigned long ___pfn = pfn;				    \
	unsigned long ___nr = pfn_to_section_nr(___pfn);	    \
								    \
	if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr) && \
	    pfn_valid_within(___pfn))				    \
		___page = pfn_to_page(___pfn);			    \
	___page;						    \
})
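
/*
 * Usage sketch (illustrative, not part of this interface): a pfn walker
 * that must not touch not-yet-initialized struct pages would do
 *
 *	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 *		struct page *page = pfn_to_online_page(pfn);
 *
 *		if (!page)
 *			continue;
 *		... use page; page->flags is fully initialized here ...
 *	}
 *
 * instead of open-coding pfn_valid(pfn) && pfn_to_page(pfn).
 */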

/*
 * Types for free bootmem stored in page->lru.next. These have to be in
 * some random range in unsigned long space for debugging purposes.
 */
enum {
	MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
	SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
	MIX_SECTION_INFO,
	NODE_INFO,
	MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
};

/* Types for controlling the zone type of onlined and offlined memory */
enum {
	MMOP_OFFLINE = -1,
	MMOP_ONLINE_KEEP,
	MMOP_ONLINE_KERNEL,
	MMOP_ONLINE_MOVABLE,
};

/*
 * Zone resizing functions
 *
 * Note: any attempt to resize a zone should have pgdat_resize_lock()
 * and zone_span_writelock() both held. This ensures the size of a zone
 * can't be changed while pgdat_resize_lock() is held.
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
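
/*
 * Reader-side sketch (illustrative): sampling a zone's span consistently
 * against a concurrent resize, using the helpers above:
 *
 *	unsigned seq;
 *	unsigned long start_pfn, nr_pages;
 *
 *	do {
 *		seq = zone_span_seqbegin(zone);
 *		start_pfn = zone->zone_start_pfn;
 *		nr_pages = zone->spanned_pages;
 *	} while (zone_span_seqretry(zone, seq));
 *
 * Writers must instead hold pgdat_resize_lock() and zone_span_writelock(),
 * as noted in the comment above.
 */
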
extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
/* VM interface that may be used by firmware interface */
extern int online_pages(unsigned long, unsigned long, int);
extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
	unsigned long *valid_start, unsigned long *valid_end);
extern void __offline_isolated_pages(unsigned long, unsigned long);

typedef void (*online_page_callback_t)(struct page *page, unsigned int order);

extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);

extern void __online_page_set_limits(struct page *page);
extern void __online_page_increment_counters(struct page *page);
extern void __online_page_free(struct page *page);
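
/*
 * Sketch (illustrative): a driver, e.g. a balloon driver, that wants to
 * intercept newly onlined pages rather than release them to the page
 * allocator could install a callback roughly as follows; the names
 * my_online_page()/my_claim_pages() are hypothetical:
 *
 *	static void my_online_page(struct page *page, unsigned int order)
 *	{
 *		__online_page_set_limits(page);
 *		my_claim_pages(page, order);
 *	}
 *
 *	...
 *	set_online_page_callback(&my_online_page);
 *	...
 *	restore_online_page_callback(&my_online_page);
 */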

extern int try_online_node(int nid);

extern bool memhp_auto_online;
/* If movable_node boot option specified */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
{
	return movable_node_enabled;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
extern int arch_remove_memory(int nid, u64 start, u64 size,
			      struct vmem_altmap *altmap);
extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
	unsigned long nr_pages, struct vmem_altmap *altmap);
#endif /* CONFIG_MEMORY_HOTREMOVE */

/* reasonably generic interface to expand the physical pages */
extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap, bool want_memblock);

#ifndef CONFIG_ARCH_HAS_ADD_PAGES
static inline int add_pages(int nid, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap,
		bool want_memblock)
{
	return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
}
#else /* ARCH_HAS_ADD_PAGES */
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
	struct vmem_altmap *altmap, bool want_memblock);
#endif /* ARCH_HAS_ADD_PAGES */

#ifdef CONFIG_NUMA
extern int memory_add_physaddr_to_nid(u64 start);
#else
static inline int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
#endif

#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
 * To support node hot-add, we have to allocate a new pgdat.
 *
 * If an arch has a generic style NODE_DATA(),
 * node_data[nid] = kzalloc() works well, but it depends on the architecture.
 *
 * In general, generic_alloc_nodedata() is used.
 * Currently, arch_free_nodedata() is only defined for the error path of
 * node_hot_add.
 */
extern pg_data_t *arch_alloc_nodedata(int nid);
extern void arch_free_nodedata(pg_data_t *pgdat);
extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
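
/*
 * Sketch (illustrative) of the intended calling sequence during node
 * hot-add, including the error path that arch_free_nodedata() exists for
 * (the failure condition shown is hypothetical):
 *
 *	pg_data_t *pgdat = arch_alloc_nodedata(nid);
 *
 *	if (!pgdat)
 *		return -ENOMEM;
 *	if (node_init_failed) {
 *		arch_free_nodedata(pgdat);
 *		return -ENOMEM;
 *	}
 *	arch_refresh_nodedata(nid, pgdat);
 */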

#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#define arch_alloc_nodedata(nid)	generic_alloc_nodedata(nid)
#define arch_free_nodedata(pgdat)	generic_free_nodedata(pgdat)

#ifdef CONFIG_NUMA
/*
 * If ARCH_HAS_NODEDATA_EXTENSION=n, this function is used to allocate the
 * pgdat.
 * XXX: kmalloc_node() cannot be used to get the new node's memory here,
 * because the pgdat for the new node is not itself allocated/initialized
 * yet. Using the new node's memory will require more consideration.
 */
#define generic_alloc_nodedata(nid)				\
({								\
	kzalloc(sizeof(pg_data_t), GFP_KERNEL);			\
})
/*
 * This definition is just for the error path in node hotadd.
 * For node hotremove, we have to replace it.
 */
#define generic_free_nodedata(pgdat)	kfree(pgdat)

extern pg_data_t *node_data[];
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
	node_data[nid] = pgdat;
}

#else /* !CONFIG_NUMA */

/* never called */
static inline pg_data_t *generic_alloc_nodedata(int nid)
{
	BUG();
	return NULL;
}
static inline void generic_free_nodedata(pg_data_t *pgdat)
{
}
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
extern void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
#else
static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}
#endif
extern void put_page_bootmem(struct page *page);
extern void get_page_bootmem(unsigned long info, struct page *page,
			     unsigned long type);

void get_online_mems(void);
void put_online_mems(void);

void mem_hotplug_begin(void);
void mem_hotplug_done(void);
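
/*
 * Sketch (illustrative): readers that need the set of online memory to
 * stay stable while they work bracket the work with get_online_mems()/
 * put_online_mems(); code that itself changes the online state runs
 * between mem_hotplug_begin() and mem_hotplug_done():
 *
 *	get_online_mems();
 *	... walk pfns, inspect struct pages, etc. ...
 *	put_online_mems();
 */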

extern void set_zone_contiguous(struct zone *zone);
extern void clear_zone_contiguous(struct zone *zone);

#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn)			\
({						\
	struct page *___page = NULL;		\
	if (pfn_valid(pfn))			\
		___page = pfn_to_page(pfn);	\
	___page;				\
})

static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

static inline int mhp_notimplemented(const char *func)
{
	printk(KERN_WARNING "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func);
	dump_stack();
	return -ENOSYS;
}

static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}

static inline int try_online_node(int nid)
{
	return 0;
}

static inline void get_online_mems(void) {}
static inline void put_online_mems(void) {}

static inline void mem_hotplug_begin(void) {}
static inline void mem_hotplug_done(void) {}

static inline bool movable_node_is_enabled(void)
{
	return false;
}
#endif /* ! CONFIG_MEMORY_HOTPLUG */

#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
 * pgdat resizing functions
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
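
/*
 * Writer-side sketch (illustrative), tying this in with the zone span
 * seqlock above: a zone resize takes both locks so that readers using
 * zone_span_seqbegin()/zone_span_seqretry() observe a consistent span:
 *
 *	unsigned long flags;
 *
 *	pgdat_resize_lock(zone->zone_pgdat, &flags);
 *	zone_span_writelock(zone);
 *	... update zone->zone_start_pfn / zone->spanned_pages ...
 *	zone_span_writeunlock(zone);
 *	pgdat_resize_unlock(zone->zone_pgdat, &flags);
 */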
#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */

#ifdef CONFIG_MEMORY_HOTREMOVE

extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern void remove_memory(int nid, u64 start, u64 size);
extern void __remove_memory(int nid, u64 start, u64 size);

#else
static inline bool is_mem_section_removable(unsigned long pfn,
					unsigned long nr_pages)
{
	return false;
}

static inline void try_offline_node(int nid) {}

static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
	return -EINVAL;
}

static inline void remove_memory(int nid, u64 start, u64 size) {}
static inline void __remove_memory(int nid, u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */

extern void __ref free_area_init_core_hotplug(int nid);
extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
		void *arg, int (*func)(struct memory_block *, void *));
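
/*
 * Sketch (illustrative): walk_memory_range() invokes func on each memory
 * block in [start_pfn, end_pfn) and stops at the first nonzero return
 * value. A caller verifying that a range is fully offline might use a
 * callback like this (the name check_block_offline is hypothetical):
 *
 *	static int check_block_offline(struct memory_block *mem, void *arg)
 *	{
 *		return is_memblock_offlined(mem) ? 0 : -EBUSY;
 *	}
 *
 *	ret = walk_memory_range(start_pfn, end_pfn, NULL,
 *				check_block_offline);
 */
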
extern int __add_memory(int nid, u64 start, u64 size);
extern int add_memory(int nid, u64 start, u64 size);
extern int add_memory_resource(int nid, struct resource *resource);
extern int arch_add_memory(int nid, u64 start, u64 size,
		struct vmem_altmap *altmap, bool want_memblock);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap);
extern bool is_memblock_offlined(struct memory_block *mem);
extern int sparse_add_one_section(int nid, unsigned long start_pfn,
				  struct vmem_altmap *altmap);
extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
		unsigned long map_offset, struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
					  unsigned long pnum);
extern bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages,
		int online_type);
extern struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
		unsigned long nr_pages);
#endif /* __LINUX_MEMORY_HOTPLUG_H */