/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name ? cma->name : "(undefined)";
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in order_per_bits.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}
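
/*
 * Worked example for the two helpers above (illustrative values, not
 * taken from any real configuration): with base_pfn = 0x2f860,
 * align_order = 8 and order_per_bit = 1, the mask is
 * (1UL << (8 - 1)) - 1 = 127 bitmap bits, and the offset is
 * (0x2f860 & 0xff) >> 1 = 0x60 >> 1 = 48 bitmap bits.
 */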

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap) {
		cma->count = 0;
		return -ENOMEM;
	}

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto not_in_zone;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return 0;

not_in_zone:
	pr_err("CMA area %s could not be activated\n", cma->name);
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	if (name) {
		cma->name = name;
	} else {
		cma->name = kasprintf(GFP_KERNEL, "cma%d", cma_area_count);
		if (!cma->name)
			return -ENOMEM;
	}
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}
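
/*
 * Usage sketch (a hypothetical early-boot caller, not part of this file):
 * hand a block that was previously memblock_reserve()d over to CMA.
 *
 *	struct cma *cma;
 *
 *	if (cma_init_reserved_mem(base, SZ_64M, 0, "my-cma", &cma))
 *		pr_warn("my-cma: handover failed\n");
 */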

/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any)
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be a power of 2 or zero
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem()
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. It allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable-migratetype pageblocks by the page allocator's buddy
	 * algorithm; in that case the area could no longer hand out
	 * contiguous memory, which is not what we want.
	 */
	alignment = max(alignment,  (phys_addr_t)PAGE_SIZE <<
			  max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
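	/*
	 * For example (illustrative, configuration-dependent numbers): with
	 * 4 KiB pages and the common MAX_ORDER of 11, this floors the
	 * alignment at PAGE_SIZE << 10 = 4 MiB.
	 */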
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the requested region must not cross the
	 * low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_phys_alloc_range(size, alignment,
							 highmem_start, limit);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_phys_alloc_range(size, alignment, base,
							 limit);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped and accessible
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto free_mem;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

free_mem:
	memblock_free(base, size);
err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
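
/*
 * Usage sketch (a hypothetical arch early-boot caller, not part of this
 * file): carve out 16 MiB anywhere in DRAM and remember the area.
 *
 *	static struct cma *my_cma;
 *
 *	if (cma_declare_contiguous(0, SZ_16M, 0, 0, 0, false,
 *				   "my-cma", &my_cma))
 *		pr_warn("my-cma: reservation failed\n");
 */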

#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit, nr_zero;
	unsigned long start = 0;
	unsigned long nr_part, nr_total = 0;
	unsigned long nbits = cma_bitmap_maxno(cma);

	mutex_lock(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
		if (next_zero_bit >= nbits)
			break;
		next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		nr_part = nr_zero << cma->order_per_bit;
		pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
			next_zero_bit);
		nr_total += nr_part;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
	mutex_unlock(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif
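
/*
 * With CONFIG_CMA_DEBUG, cma_debug_show_areas() prints each free run as
 * "pages@bit-offset", joined by '+'. For a hypothetical bitmap the line
 * might read:
 *
 *	cma: number of available pages: 16@0+8@64=> 24 free of 1024 total pages
 */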

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma: Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing message about failed allocation
 *
 * This function allocates part of the contiguous memory from the
 * specified contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
		       bool no_warn)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	size_t i;
	struct page *page = NULL;
	int ret = -ENOMEM;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		return NULL;

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
				 GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc(pfn, page, count, align);

	/*
	 * CMA can allocate multiple page blocks, which results in different
	 * blocks being marked with different tags. Reset the tags to ignore
	 * those page blocks.
	 */
	if (page) {
		for (i = 0; i < count; i++)
			page_kasan_tag_reset(page + i);
	}

	if (ret && !no_warn) {
		pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n",
			__func__, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}
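
/*
 * Usage sketch (a hypothetical driver, not part of this file): grab 16
 * pages aligned to an order-4 boundary, then hand them back with
 * cma_release() below.
 *
 *	struct page *page = cma_alloc(my_cma, 16, 4, false);
 *
 *	if (page) {
 *		...
 *		cma_release(my_cma, page, 16);
 *	}
 */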

/**
 * cma_release() - release allocated pages
 * @cma: Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the
 * contiguous area, and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}

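/**
 * cma_for_each_area() - iterate over all registered CMA areas
 * @it: Callback invoked for each registered area, in registration order.
 * @data: Opaque pointer passed through to @it.
 *
 * Returns the first non-zero value returned by @it (stopping the walk
 * early), or 0 once all areas have been visited.
 */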
int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}
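
/*
 * Usage sketch (a hypothetical caller, not part of this file): count the
 * registered areas with the iterator above.
 *
 *	static int count_one(struct cma *cma, void *data)
 *	{
 *		(*(unsigned *)data)++;
 *		return 0;
 *	}
 *
 *	unsigned n = 0;
 *	cma_for_each_area(count_one, &n);
 */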