/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <asm/dma.h>

extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;

/*
 * highest page
 */
extern unsigned long max_pfn;
/*
 * highest possible page
 */
extern unsigned long long max_possible_pfn;

/**
 * enum memblock_flags - definition of memory region attributes
 * @MEMBLOCK_NONE: no special request
 * @MEMBLOCK_HOTPLUG: hotpluggable region
 * @MEMBLOCK_MIRROR: mirrored region
 * @MEMBLOCK_NOMAP: don't add to kernel direct mapping
 */
enum memblock_flags {
	MEMBLOCK_NONE		= 0x0,	/* No special request */
	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
	MEMBLOCK_NOMAP		= 0x4,	/* don't add to kernel direct mapping */
};

/**
 * struct memblock_region - represents a memory region
 * @base: physical address of the region
 * @size: size of the region
 * @flags: memory region attributes
 * @nid: NUMA node id
 */
struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
	enum memblock_flags flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	int nid;
#endif
};

/**
 * struct memblock_type - collection of memory regions of certain type
 * @cnt: number of regions
 * @max: size of the allocated array
 * @total_size: size of all regions
 * @regions: array of regions
 * @name: the memory type symbolic name
 */
struct memblock_type {
	unsigned long cnt;
	unsigned long max;
	phys_addr_t total_size;
	struct memblock_region *regions;
	char *name;
};

/**
 * struct memblock - memblock allocator metadata
 * @bottom_up: is bottom up direction?
 * @current_limit: physical address of the current allocation limit
 * @memory: usable memory regions
 * @reserved: reserved memory regions
 * @physmem: all physical memory
 */
struct memblock {
	bool bottom_up;  /* is bottom up direction? */
	phys_addr_t current_limit;
	struct memblock_type memory;
	struct memblock_type reserved;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	struct memblock_type physmem;
#endif
};

extern struct memblock memblock;
extern int memblock_debug;

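/*
 * Illustrative sketch (not part of the kernel API; the helper name is
 * hypothetical): the global 'memblock' instance can be inspected directly
 * during early boot.
 */
static inline void memblock_example_report_counts(void)
{
	/* memblock.memory tracks usable RAM, memblock.reserved allocations */
	pr_info("memblock: %lu memory region(s), %lu reserved region(s)\n",
		memblock.memory.cnt, memblock.reserved.cnt);
}
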
#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
static inline void memblock_discard(void) {}
#endif

#define memblock_dbg(fmt, ...) \
	if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
				   phys_addr_t size, phys_addr_t align);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
#endif
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
			      phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);

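/*
 * Illustrative sketch (hypothetical helper name and addresses): a typical
 * early-boot sequence registers discovered RAM, then carves out ranges the
 * allocator must never hand out.
 */
static inline void __init memblock_example_early_setup(void)
{
	/* Register 1 GiB of RAM at 1 GiB (made-up layout). */
	memblock_add(0x40000000, 0x40000000);
	/* Keep a 1 MiB firmware-owned window out of normal allocations. */
	memblock_reserve(0x40000000, 0x100000);
	/* Exclude a 64 KiB range from the kernel direct mapping. */
	memblock_mark_nomap(0x40100000, 0x10000);
}
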
unsigned long memblock_free_all(void);
void reset_node_managed_pages(pg_data_t *pgdat);
void reset_all_zones_managed_pages(void);

/* Low level functions */
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
			  struct memblock_type *type_a,
			  struct memblock_type *type_b, phys_addr_t *out_start,
			  phys_addr_t *out_end, int *out_nid);

void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
				phys_addr_t *out_end);

void __memblock_free_late(phys_addr_t base, phys_addr_t size);

/**
 * for_each_mem_range - iterate through memblock areas from type_a that
 * are not included in type_b, or just type_a if type_b is %NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range(i, type_a, type_b, nid, flags,		\
			   p_start, p_end, p_nid)			\
	for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,	\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range(&i, nid, flags, type_a, type_b,		\
			      p_start, p_end, p_nid))

/**
 * for_each_mem_range_rev - reverse iterate through memblock areas from
 * type_a that are not included in type_b, or just type_a if type_b is %NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, type_a, type_b, nid, flags,		\
			       p_start, p_end, p_nid)			\
	for (i = (u64)ULLONG_MAX,					\
		     __next_mem_range_rev(&i, nid, flags, type_a, type_b, \
					  p_start, p_end, p_nid);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid))

/**
 * for_each_reserved_mem_region - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_region(i, p_start, p_end)			\
	for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_reserved_mem_region(&i, p_start, p_end))

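/*
 * Illustrative sketch (hypothetical helper name): dump all reserved
 * ranges; %pa is the printk format for a phys_addr_t passed by pointer.
 */
static inline void memblock_example_dump_reserved(void)
{
	phys_addr_t start, end;
	u64 i;

	for_each_reserved_mem_region(i, &start, &end)
		pr_info("reserved: [%pa-%pa]\n", &start, &end);
}
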
static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool memblock_is_mirror(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_NOMAP;
}

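/*
 * Illustrative sketch (hypothetical helper name): walk the raw region
 * array and count regions carrying a given attribute, here MEMBLOCK_NOMAP.
 */
static inline unsigned long memblock_example_count_nomap(void)
{
	unsigned long i, nr = 0;

	for (i = 0; i < memblock.memory.cnt; i++)
		if (memblock_is_nomap(&memblock.memory.regions[i]))
			nr++;
	return nr;
}
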
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
			    unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
			  unsigned long *out_end_pfn, int *out_nid);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
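
/*
 * Illustrative sketch (hypothetical helper name): sum the pages registered
 * for one NUMA node with the early pfn range iterator.
 */
static inline unsigned long memblock_example_node_pages(int nid)
{
	unsigned long start_pfn, end_pfn, pages = 0;
	int i;

	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL)
		pages += end_pfn - start_pfn;
	return pages;
}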
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
				  unsigned long *out_spfn,
				  unsigned long *out_epfn);
/**
 * for_each_free_mem_pfn_range_in_zone - iterate through zone specific
 * free memblock areas
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone. Available once memblock and an empty zone are initialized. The main
 * assumption is that the zone start, end, and pgdat have been associated.
 * This way we can use the zone to determine NUMA node, and whether a given
 * part of the memblock is valid for the zone.
 */
#define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end)	\
	for (i = 0,							\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end);	\
	     i != U64_MAX;						\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))

/**
 * for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific
 * free memblock areas from a given point
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone, continuing from the current position. Available as soon as memblock
 * is initialized.
 */
#define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \
	for (; i != U64_MAX;						\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock. Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)	\
	for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
			   nid, flags, p_start, p_end, p_nid)

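/*
 * Illustrative sketch (hypothetical helper name): find the size of the
 * largest free range, e.g. to size an early allocation.
 */
static inline phys_addr_t memblock_example_largest_free(void)
{
	phys_addr_t start, end, best = 0;
	u64 i;

	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
				&start, &end, NULL)
		if (end - start > best)
			best = end - start;
	return best;
}
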
/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order. Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,	\
					p_nid)				\
	for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved,	\
			       nid, flags, p_start, p_end, p_nid)

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_set_node(phys_addr_t base, phys_addr_t size,
		      struct memblock_type *type, int nid);

static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
	r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0
#define MEMBLOCK_ALLOC_KASAN		1

/* We are using top down, so it is safe to use 0 here */
#define MEMBLOCK_LOW_LIMIT 0

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif

phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align,
				      phys_addr_t start, phys_addr_t end);
phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
					      phys_addr_t align)
{
	return memblock_phys_alloc_range(size, align, 0,
					 MEMBLOCK_ALLOC_ACCESSIBLE);
}

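/*
 * Illustrative sketch (hypothetical helper name): physical-address
 * allocation; a return value of 0 means the request could not be satisfied.
 */
static inline phys_addr_t memblock_example_phys_page(void)
{
	phys_addr_t pa = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);

	if (!pa)
		pr_warn("memblock: no page-sized range available\n");
	return pa;
}
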
void *memblock_alloc_exact_nid_raw(phys_addr_t size, phys_addr_t align,
				   phys_addr_t min_addr, phys_addr_t max_addr,
				   int nid);
void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
				 phys_addr_t min_addr, phys_addr_t max_addr,
				 int nid);
void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
			     phys_addr_t min_addr, phys_addr_t max_addr,
			     int nid);

static inline void * __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

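/*
 * Illustrative sketch (hypothetical helper name): the common boot-time
 * allocation pattern. memblock_alloc() returns zeroed memory at a virtual
 * address, or NULL on failure; the _raw variants skip the zeroing.
 */
static inline u64 * __init memblock_example_alloc_table(unsigned long entries)
{
	u64 *table = memblock_alloc(entries * sizeof(*table), sizeof(*table));

	if (!table)
		pr_warn("memblock: failed to allocate %lu-entry table\n",
			entries);
	return table;
}
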
static inline void * __init memblock_alloc_raw(phys_addr_t size,
					       phys_addr_t align)
{
	return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
					  MEMBLOCK_ALLOC_ACCESSIBLE,
					  NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_from(phys_addr_t size,
						phys_addr_t align,
						phys_addr_t min_addr)
{
	return memblock_alloc_try_nid(size, align, min_addr,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_low(phys_addr_t size,
					       phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_node(phys_addr_t size,
						phys_addr_t align, int nid)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

static inline void __init memblock_free_early(phys_addr_t base,
					      phys_addr_t size)
{
	memblock_free(base, size);
}

static inline void __init memblock_free_early_nid(phys_addr_t base,
						  phys_addr_t size, int nid)
{
	memblock_free(base, size);
}

static inline void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	__memblock_free_late(base, size);
}

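/*
 * Illustrative sketch (hypothetical helper name): memory given back before
 * the page allocator is up goes through memblock_free_early(); once the
 * buddy allocator is live, memblock_free_late() also fixes up the managed
 * page counters.
 */
static inline void __init memblock_example_release(phys_addr_t base,
						   phys_addr_t size,
						   bool buddy_up)
{
	if (buddy_up)
		memblock_free_late(base, size);
	else
		memblock_free_early(base, size);
}
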
/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline void __init memblock_set_bottom_up(bool enable)
{
	memblock.bottom_up = enable;
}

/*
 * Check if the allocation direction is bottom-up. If this returns true,
 * memblock allocates memory in the bottom-up direction.
 */
static inline bool memblock_bottom_up(void)
{
	return memblock.bottom_up;
}

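/*
 * Illustrative sketch (hypothetical helper name): temporarily switch to
 * bottom-up mode so early allocations land in low memory, as done e.g.
 * when movable_node wants allocations near the kernel image.
 */
static inline void __init memblock_example_alloc_low_first(void)
{
	bool saved = memblock_bottom_up();

	memblock_set_bottom_up(true);
	/* ... perform early allocations here ... */
	memblock_set_bottom_up(saved);
}
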
phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_mem_size(unsigned long limit_pfn);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
bool memblock_is_map_memory(phys_addr_t addr);
bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

extern void __memblock_dump_all(void);

static inline void memblock_dump_all(void)
{
	if (memblock_debug)
		__memblock_dump_all();
}

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                              limiting allocations to what is currently
 *                              accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);


phys_addr_t memblock_get_current_limit(void);

/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the memory region
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - get the end pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the memory region
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the reserved region
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the reserved region
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base + reg->size);
}

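/*
 * Illustrative sketch (hypothetical helper name): whole pages covered by a
 * memory region, using the conservative round-up/round-down accessors.
 */
static inline unsigned long
memblock_example_region_pages(const struct memblock_region *reg)
{
	return memblock_region_memory_end_pfn(reg) -
	       memblock_region_memory_base_pfn(reg);
}
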
#define for_each_memblock(memblock_type, region)			\
	for (region = memblock.memblock_type.regions;			\
	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \
	     region++)

#define for_each_memblock_type(i, memblock_type, rgn)			\
	for (i = 0, rgn = &memblock_type->regions[0];			\
	     i < memblock_type->cnt;					\
	     i++, rgn = &memblock_type->regions[i])

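/*
 * Illustrative sketch (hypothetical helper name): for_each_memblock()
 * pastes the type name textually, so 'memory' below means memblock.memory.
 * The sum is roughly what memblock_phys_mem_size() reports.
 */
static inline phys_addr_t memblock_example_total_memory(void)
{
	struct memblock_region *region;
	phys_addr_t total = 0;

	for_each_memblock(memory, region)
		total += region->size;
	return total;
}
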
extern void *alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long low_limit,
				     unsigned long high_limit);

#define HASH_EARLY	0x00000001	/* Allocating during early boot? */
#define HASH_SMALL	0x00000002	/* sub-page allocation allowed, min
					 * shift passed via *_hash_shift */
#define HASH_ZERO	0x00000004	/* Zero allocated hash table */

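/*
 * Illustrative sketch (hypothetical name and parameters): how subsystems
 * typically size a large boot-time hash table, cf. the inode and dentry
 * cache setup. numentries == 0 lets the function derive a size from the
 * amount of memory; scale picks roughly one entry per 2^scale bytes.
 */
static inline void *memblock_example_hash_table(unsigned int *shift)
{
	unsigned int mask;

	return alloc_large_system_hash("example", sizeof(void *), 0, 17,
				       HASH_EARLY | HASH_ZERO, shift, &mask,
				       0, 0);
}
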
/* Only NUMA needs hash distribution. 64bit NUMA architectures have
 * sufficient vmalloc space.
 */
#ifdef CONFIG_NUMA
#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
extern int hashdist;		/* Distribute hashes across NUMA nodes? */
#else
#define hashdist (0)
#endif

#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif

#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */