/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __CMA_H__
#define __CMA_H__

#include <linux/init.h>
#include <linux/types.h>
#include <linux/numa.h>

/*
 * There is always at least the global CMA area, plus a few optional
 * areas configured via the kernel .config.
 */
#ifdef CONFIG_CMA_AREAS
#define MAX_CMA_AREAS	(1 + CONFIG_CMA_AREAS)
#else
#define MAX_CMA_AREAS	(0)
#endif
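/*
 * Worked example: with CONFIG_CMA_AREAS=7, MAX_CMA_AREAS evaluates
 * to 8: the global area plus seven optional ones.
 */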

struct cma;

extern unsigned long totalcma_pages;
extern phys_addr_t cma_get_base(const struct cma *cma);
extern unsigned long cma_get_size(const struct cma *cma);
extern const char *cma_get_name(const struct cma *cma);

extern int __init cma_declare_contiguous_nid(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma,
			int nid);
static inline int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma)
{
	return cma_declare_contiguous_nid(base, size, limit, alignment,
			order_per_bit, fixed, name, res_cma, NUMA_NO_NODE);
}
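/*
 * Illustrative sketch (not from the kernel tree): early arch setup code
 * might reserve a region through the wrapper above. The area name
 * "example", the SZ_64M size (from <linux/sizes.h>), and the function
 * example_reserve() are assumptions for illustration only. A base and
 * limit of 0 let the allocator choose the placement; alignment and
 * order_per_bit of 0 select the defaults.
 *
 *	static struct cma *example_cma;
 *
 *	static void __init example_reserve(void)
 *	{
 *		int ret = cma_declare_contiguous(0, SZ_64M, 0, 0, 0, false,
 *						 "example", &example_cma);
 *		if (ret)
 *			pr_warn("example: CMA reservation failed: %d\n", ret);
 *	}
 */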
extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
					unsigned int order_per_bit,
					const char *name,
					struct cma **res_cma);
extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
			      bool no_warn);
extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
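/*
 * Illustrative sketch: allocating and releasing a run of pages from a
 * previously declared area. Here count is 16 pages and align is 0
 * (2^0-page, i.e. page, alignment); example_cma and the counts are
 * assumptions for illustration only. cma_alloc() returns NULL on
 * failure, and cma_release() must be passed the same count.
 *
 *	struct page *pages;
 *
 *	pages = cma_alloc(example_cma, 16, 0, false);
 *	if (!pages)
 *		return -ENOMEM;
 *	...
 *	cma_release(example_cma, pages, 16);
 */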

extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
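/*
 * Illustrative sketch: cma_for_each_area() invokes the callback once per
 * registered area and stops early if it returns non-zero. The callback
 * name dump_cma_area is an assumption for illustration only.
 *
 *	static int dump_cma_area(struct cma *cma, void *data)
 *	{
 *		phys_addr_t base = cma_get_base(cma);
 *
 *		pr_info("%s: base %pa size %lu\n",
 *			cma_get_name(cma), &base, cma_get_size(cma));
 *		return 0;
 *	}
 *
 *	cma_for_each_area(dump_cma_area, NULL);
 */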
#endif