// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <trace/events/cma.h>

#include "cma.h"

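/*
 * Every CMA region lives in this statically sized array; regions are
 * registered during early boot and activated later from a core_initcall.
 * The global cma_mutex serialises the alloc_contig_range() calls made on
 * behalf of cma_alloc().
 */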
struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

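/* Simple accessors for a region's physical base address, size in bytes and name. */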
phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name;
}
EXPORT_SYMBOL_GPL(cma_get_name);

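/*
 * Convert an allocation alignment, given as a page order, into a mask of
 * low bitmap-index bits that must be zero for the allocation to start on a
 * suitably aligned page. Alignments no larger than the per-bit granularity
 * impose no extra constraint.
 */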
static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in order_per_bits.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}

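/*
 * Round a page count up to the region's per-bit granularity and return the
 * number of bitmap bits needed to cover it.
 */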
static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}

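/*
 * Give the bits covering [pfn, pfn + count) back to the allocation bitmap,
 * either after free_contig_range() or when a contiguous allocation attempt
 * fails.
 */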
static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

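/*
 * Activate a single reserved region: allocate its allocation bitmap, check
 * that the whole physical range lies in one zone, and hand every pageblock
 * to the page allocator as a CMA pageblock. On failure the region is
 * disabled by zeroing its page count.
 */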
static void __init cma_activate_area(struct cma *cma)
{
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
	if (!cma->bitmap)
		goto out_error;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto not_in_zone;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return;

not_in_zone:
	bitmap_free(cma->bitmap);
out_error:
	cma->count = 0;
	pr_err("CMA area %s could not be activated\n", cma->name);
	return;
}

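/*
 * Activate every region registered in cma_areas[] once the core mm and the
 * slab allocator are up.
 */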
static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++)
		cma_activate_area(&cma_areas[i]);

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create a custom contiguous area from reserved memory
 * @base: Base address of the reserved area.
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
		max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];

	if (name)
		snprintf(cma->name, CMA_MAX_NAME, name);
	else
		snprintf(cma->name, CMA_MAX_NAME, "cma%d\n", cma_area_count);

	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}

/**
 * cma_declare_contiguous_nid() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be a power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem()
 * @res_cma: Pointer to store the created cma region.
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve a contiguous area at exactly @base. If false,
 * reserve in the range from @base to @limit.
 */
int __init cma_declare_contiguous_nid(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma,
			int nid)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable-migratetype pages by the page allocator's buddy algorithm.
	 * In that case you could not get contiguous memory, which is not what
	 * we want.
	 */
	alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
	if (fixed && base & (alignment - 1)) {
		ret = -EINVAL;
		pr_err("Region at %pa must be aligned to %pa bytes\n",
			&base, &alignment);
		goto err;
	}
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base, the requested region must not cross
	 * the low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	if (base + size > limit) {
		ret = -EINVAL;
		pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
			&size, &base, &limit);
		goto err;
	}

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range_nid(size, alignment,
					highmem_start, limit, nid, true);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range_nid(size, alignment, base,
					limit, nid, true);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped and accessible
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto free_mem;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

free_mem:
	memblock_free(base, size);
err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}

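/*
 * With CONFIG_CMA_DEBUG enabled, dump every free run in the region's bitmap
 * as "<pages>@<bit offset>" together with a free/total page summary;
 * otherwise this helper compiles away to nothing.
 */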
#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit, nr_zero;
	unsigned long start = 0;
	unsigned long nr_part, nr_total = 0;
	unsigned long nbits = cma_bitmap_maxno(cma);

	mutex_lock(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
		if (next_zero_bit >= nbits)
			break;
		next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		nr_part = nr_zero << cma->order_per_bit;
		pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
			next_zero_bit);
		nr_total += nr_part;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
	mutex_unlock(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma: Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @gfp_mask: GFP mask to use during the CMA allocation.
 *
 * This function allocates part of the contiguous memory from the specified
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
		       gfp_t gfp_mask)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	size_t i;
	struct page *page = NULL;
	int ret = -ENOMEM;
	int num_attempts = 0;
	int max_retries = 5;

	if (!cma || !cma->count || !cma->bitmap)
		return NULL;

	pr_debug("%s(cma %p, count %zu, align %d gfp_mask 0x%x)\n", __func__,
		 (void *)cma, count, align, gfp_mask);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		return NULL;

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			if ((num_attempts < max_retries) && (ret == -EBUSY)) {
				mutex_unlock(&cma->lock);

				if (fatal_signal_pending(current))
					break;

				/*
				 * Page may be momentarily pinned by some other
				 * process which has been scheduled out, e.g.
				 * in exit path, during unmap call, or process
				 * fork and so cannot be freed there. Sleep
				 * for 100ms and retry the allocation.
				 */
				start = 0;
				ret = -ENOMEM;
				schedule_timeout_killable(msecs_to_jiffies(100));
				num_attempts++;
				continue;
			} else {
				mutex_unlock(&cma->lock);
				break;
			}
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA, gfp_mask);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc(pfn, page, count, align);

	/*
	 * CMA can allocate multiple page blocks, which results in different
	 * blocks being marked with different tags. Reset the tags to ignore
	 * those page blocks.
	 */
	if (page) {
		for (i = 0; i < count; i++)
			page_kasan_tag_reset(page + i);
	}

	if (ret && !(gfp_mask & __GFP_NOWARN)) {
		pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n",
			__func__, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}
EXPORT_SYMBOL_GPL(cma_alloc);

/**
 * cma_release() - release allocated pages
 * @cma: Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}
EXPORT_SYMBOL_GPL(cma_release);

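/*
 * Invoke @it for every registered CMA region, stopping early and returning
 * its value if the callback returns non-zero.
 */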
int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cma_for_each_area);