/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface. Uses for this include on-device special
 * memory, uncached memory etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks. This
 * is implemented by using atomic operations and retries on any
 * conflicts. The disadvantage is that there may be livelocks in
 * extreme cases. For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available. If new memory is added to the pool, a lock still has to
 * be taken. So any user relying on locklessness has to ensure that
 * sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in NMI handlers. So code that uses the
 * allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

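/*
 * Illustrative usage sketch only, assuming the preallocation rule above:
 * memory is added to the pool at init time (which may take the pool lock)
 * and only gen_pool_alloc()/gen_pool_free() are used from atomic or NMI
 * context. The pool parameters, "buf" and the sizes below are hypothetical.
 *
 *	// init time, may sleep and take locks:
 *	struct gen_pool *pool = gen_pool_create(ilog2(64), -1);
 *	gen_pool_add(pool, (unsigned long)buf, buf_size, -1);
 *
 *	// later, e.g. from an NMI handler (requires
 *	// CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG):
 *	unsigned long addr = gen_pool_alloc(pool, 64);
 *	if (addr)
 *		gen_pool_free(pool, addr, 64);
 */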
#ifndef __GENALLOC_H__
#define __GENALLOC_H__

#include <linux/types.h>
#include <linux/spinlock_types.h>
#include <linux/atomic.h>

struct device;
struct device_node;
struct gen_pool;

/**
 * typedef genpool_algo_t: Allocation callback function type definition
 * @map: Pointer to bitmap
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: optional additional data used by the callback
 * @pool: the pool being allocated from
 * @start_addr: start address of the memory chunk being searched
 */
typedef unsigned long (*genpool_algo_t)(unsigned long *map,
			unsigned long size,
			unsigned long start,
			unsigned int nr,
			void *data, struct gen_pool *pool,
			unsigned long start_addr);

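/*
 * A minimal sketch of a custom genpool_algo_t callback, for illustration
 * only; "my_first_fit" is a hypothetical name and simply mirrors the
 * default first-fit behaviour by scanning the chunk bitmap for @nr
 * contiguous zero bits. See gen_pool_set_algo() below for installing it.
 *
 *	static unsigned long my_first_fit(unsigned long *map,
 *			unsigned long size, unsigned long start,
 *			unsigned int nr, void *data,
 *			struct gen_pool *pool, unsigned long start_addr)
 *	{
 *		return bitmap_find_next_zero_area(map, size, start, nr, 0);
 *	}
 */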
/*
 *  General purpose special memory pool descriptor.
 */
struct gen_pool {
	spinlock_t lock;
	struct list_head chunks;	/* list of chunks in this pool */
	int min_alloc_order;		/* minimum allocation order */

	genpool_algo_t algo;		/* allocation function */
	void *data;

	const char *name;
};

/*
 *  General purpose special memory pool chunk descriptor.
 */
struct gen_pool_chunk {
	struct list_head next_chunk;	/* next chunk in pool */
	atomic_long_t avail;
	phys_addr_t phys_addr;		/* physical starting address of memory chunk */
	void *owner;			/* private data to retrieve at alloc time */
	unsigned long start_addr;	/* start address of memory chunk */
	unsigned long end_addr;		/* end address of memory chunk (inclusive) */
	unsigned long bits[0];		/* bitmap for allocating memory chunk */
};

/*
 *  gen_pool data descriptor for gen_pool_first_fit_align.
 */
struct genpool_data_align {
	int align;		/* alignment by bytes for starting address */
};

/*
 *  gen_pool data descriptor for gen_pool_fixed_alloc.
 */
struct genpool_data_fixed {
	unsigned long offset;		/* The offset of the specific region */
};

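/*
 * Illustrative sketch only of feeding these descriptors to the matching
 * algorithms; "pool", the alignment and the sizes are hypothetical. See
 * gen_pool_set_algo() and gen_pool_alloc_algo() later in this header.
 *
 *	struct genpool_data_align align_data = { .align = 64 };
 *	struct genpool_data_fixed fixed_data = { .offset = 0x100 };
 *
 *	// All later gen_pool_alloc() calls return 64-byte aligned blocks:
 *	gen_pool_set_algo(pool, gen_pool_first_fit_align, &align_data);
 *
 *	// One-off allocation at a fixed offset within the pool:
 *	addr = gen_pool_alloc_algo(pool, 256, gen_pool_fixed_alloc,
 *				   &fixed_data);
 */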
extern struct gen_pool *gen_pool_create(int, int);
extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long);
extern int gen_pool_add_owner(struct gen_pool *, unsigned long, phys_addr_t,
			     size_t, int, void *);

static inline int gen_pool_add_virt(struct gen_pool *pool, unsigned long addr,
		phys_addr_t phys, size_t size, int nid)
{
	return gen_pool_add_owner(pool, addr, phys, size, nid, NULL);
}

/**
 * gen_pool_add - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @addr: starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a -ve errno on failure.
 */
static inline int gen_pool_add(struct gen_pool *pool, unsigned long addr,
			       size_t size, int nid)
{
	return gen_pool_add_virt(pool, addr, -1, size, nid);
}
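/*
 * Illustrative setup sketch only; "vaddr" and the sizes are hypothetical
 * and would typically come from mapping a device-specific region. Use
 * gen_pool_add_virt() instead when the physical address is also known.
 *
 *	struct gen_pool *pool;
 *	int ret;
 *
 *	pool = gen_pool_create(ilog2(32), -1);	// 32-byte minimum allocation
 *	if (!pool)
 *		return -ENOMEM;
 *	ret = gen_pool_add(pool, (unsigned long)vaddr, region_size, -1);
 *	if (ret) {
 *		gen_pool_destroy(pool);
 *		return ret;
 *	}
 */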
extern void gen_pool_destroy(struct gen_pool *);
unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
		genpool_algo_t algo, void *data, void **owner);

static inline unsigned long gen_pool_alloc_owner(struct gen_pool *pool,
		size_t size, void **owner)
{
	return gen_pool_alloc_algo_owner(pool, size, pool->algo, pool->data,
			owner);
}

static inline unsigned long gen_pool_alloc_algo(struct gen_pool *pool,
		size_t size, genpool_algo_t algo, void *data)
{
	return gen_pool_alloc_algo_owner(pool, size, algo, data, NULL);
}

/**
 * gen_pool_alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in NMI handlers on architectures without an
 * NMI-safe cmpxchg implementation.
 */
static inline unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
	return gen_pool_alloc_algo(pool, size, pool->algo, pool->data);
}
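/*
 * Illustrative alloc/free sketch only ("pool" and the size are
 * hypothetical); note that gen_pool_alloc() returns 0 on failure, not a
 * NULL pointer or an errno value:
 *
 *	unsigned long addr = gen_pool_alloc(pool, 256);
 *
 *	if (!addr)
 *		return -ENOMEM;
 *	...
 *	gen_pool_free(pool, addr, 256);
 */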

extern void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size,
		dma_addr_t *dma);
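/*
 * Illustrative DMA sketch only; "pool" here would typically be a
 * device-private SRAM pool and the size is hypothetical:
 *
 *	dma_addr_t dma;
 *	void *va = gen_pool_dma_alloc(pool, 512, &dma);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	// "va" is the CPU address, "dma" the matching bus address.
 *	...
 *	gen_pool_free(pool, (unsigned long)va, 512);
 */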
extern void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr,
		size_t size, void **owner);
static inline void gen_pool_free(struct gen_pool *pool, unsigned long addr,
		size_t size)
{
	gen_pool_free_owner(pool, addr, size, NULL);
}

extern void gen_pool_for_each_chunk(struct gen_pool *,
	void (*)(struct gen_pool *, struct gen_pool_chunk *, void *), void *);
extern size_t gen_pool_avail(struct gen_pool *);
extern size_t gen_pool_size(struct gen_pool *);

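/*
 * Illustrative chunk-walk sketch only; "show_chunk" is a hypothetical
 * callback matching the gen_pool_for_each_chunk() signature above:
 *
 *	static void show_chunk(struct gen_pool *pool,
 *			       struct gen_pool_chunk *chunk, void *data)
 *	{
 *		pr_info("chunk %lx-%lx\n", chunk->start_addr,
 *			chunk->end_addr);
 *	}
 *
 *	gen_pool_for_each_chunk(pool, show_chunk, NULL);
 */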
extern void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo,
		void *data);

extern unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr);

extern unsigned long gen_pool_fixed_alloc(unsigned long *map,
		unsigned long size, unsigned long start, unsigned int nr,
		void *data, struct gen_pool *pool, unsigned long start_addr);

extern unsigned long gen_pool_first_fit_align(unsigned long *map,
		unsigned long size, unsigned long start, unsigned int nr,
		void *data, struct gen_pool *pool, unsigned long start_addr);

extern unsigned long gen_pool_first_fit_order_align(unsigned long *map,
		unsigned long size, unsigned long start, unsigned int nr,
		void *data, struct gen_pool *pool, unsigned long start_addr);

extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr);

extern struct gen_pool *devm_gen_pool_create(struct device *dev,
		int min_alloc_order, int nid, const char *name);
extern struct gen_pool *gen_pool_get(struct device *dev, const char *name);

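/*
 * Illustrative device-managed sketch only ("pdev" and the pool name are
 * hypothetical); the pool is released automatically on driver detach:
 *
 *	struct gen_pool *pool;
 *
 *	pool = devm_gen_pool_create(&pdev->dev, ilog2(32), NUMA_NO_NODE,
 *				    "my-sram");
 *	if (IS_ERR_OR_NULL(pool))
 *		return pool ? PTR_ERR(pool) : -ENOMEM;
 *
 *	// Other code can later look the pool up by device and name:
 *	pool = gen_pool_get(&pdev->dev, "my-sram");
 */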
bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
			size_t size);

#ifdef CONFIG_OF
extern struct gen_pool *of_gen_pool_get(struct device_node *np,
	const char *propname, int index);
#else
static inline struct gen_pool *of_gen_pool_get(struct device_node *np,
	const char *propname, int index)
{
	return NULL;
}
#endif
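/*
 * Illustrative OF lookup sketch only; "sram" names a hypothetical phandle
 * property in the consumer's device tree node, and deferring probe on a
 * missing pool is just one possible policy:
 *
 *	pool = of_gen_pool_get(pdev->dev.of_node, "sram", 0);
 *	if (!pool)
 *		return -EPROBE_DEFER;
 */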
#endif /* __GENALLOC_H__ */