// SPDX-License-Identifier: GPL-2.0+
/*
 * Contiguous Memory Allocator for DMA mapping framework
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *
 * Contiguous Memory Allocator
 *
 * The Contiguous Memory Allocator (CMA) makes it possible to
 * allocate big contiguous chunks of memory after the system has
 * booted.
 *
 * Why is it needed?
 *
 * Various devices on embedded systems have no scatter-gather and/or
 * IO map support and require contiguous blocks of memory to
 * operate. They include devices such as cameras, hardware video
 * coders, etc.
 *
 * Such devices often require big memory buffers (a full HD frame
 * is, for instance, more than 2 megapixels large, i.e. more than 6
 * MB of memory), which makes mechanisms such as kmalloc() or
 * alloc_page() ineffective.
 *
 * At the same time, a solution where a big memory region is
 * reserved for a device is suboptimal since often more memory is
 * reserved than strictly required and, moreover, the memory is
 * inaccessible to the page allocator even if the device drivers
 * don't use it.
 *
 * CMA tries to solve this issue by operating on memory regions
 * where only movable pages can be allocated from. This way, the
 * kernel can use the memory for pagecache and, when a device
 * driver requests it, the allocated pages can be migrated.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif

#include <asm/page.h>

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/sizes.h>
#include <linux/dma-map-ops.h>
#include <linux/cma.h>

#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
#define CMA_SIZE_MBYTES 0
#endif

struct cma *dma_contiguous_default_area;

/*
 * Default global CMA area size can be defined in the kernel's .config.
 * This is useful mainly for distro maintainers to create a kernel
 * that works correctly for most supported systems.
 * The size can be set in bytes or as a percentage of the total memory
 * in the system.
 *
 * Users who want to set the size of the global CMA area for their
 * system should use the cma= kernel parameter.
 */
static const phys_addr_t size_bytes __initconst =
	(phys_addr_t)CMA_SIZE_MBYTES * SZ_1M;
static phys_addr_t size_cmdline __initdata = -1;
static phys_addr_t base_cmdline __initdata;
static phys_addr_t limit_cmdline __initdata;

static int __init early_cma(char *p)
{
	if (!p) {
		pr_err("Config string not provided\n");
		return -EINVAL;
	}

	size_cmdline = memparse(p, &p);
	if (*p != '@')
		return 0;
	base_cmdline = memparse(p + 1, &p);
	if (*p != '-') {
		limit_cmdline = base_cmdline + size_cmdline;
		return 0;
	}
	limit_cmdline = memparse(p + 1, &p);

	return 0;
}
early_param("cma", early_cma);
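
/*
 * Illustrative command-line examples (hypothetical values), following
 * the size[@base[-limit]] syntax parsed above; memparse() accepts the
 * usual K/M/G suffixes:
 *
 *	cma=64M			64 MiB placed anywhere
 *	cma=64M@0x40000000	64 MiB fixed at physical address 1 GiB
 *	cma=64M@0x40000000-0x80000000
 *				64 MiB placed between 1 GiB and 2 GiB
 */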

#ifdef CONFIG_DMA_PERNUMA_CMA

static struct cma *dma_contiguous_pernuma_area[MAX_NUMNODES];
static phys_addr_t pernuma_size_bytes __initdata;

static int __init early_cma_pernuma(char *p)
{
	if (!p) {
		pr_err("Config string not provided\n");
		return -EINVAL;
	}

	pernuma_size_bytes = memparse(p, &p);
	return 0;
}
early_param("cma_pernuma", early_cma_pernuma);
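
/*
 * Illustrative (hypothetical) example: cma_pernuma=16M reserves a
 * separate 16 MiB CMA area on each online NUMA node.
 */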
#endif

#ifdef CONFIG_CMA_SIZE_PERCENTAGE

static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
{
	unsigned long total_pages = PHYS_PFN(memblock_phys_mem_size());

	return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
}
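
/*
 * Worked example (assuming 4 KiB pages): with 2 GiB of physical memory
 * and CONFIG_CMA_SIZE_PERCENTAGE=10, total_pages is 524288, so this
 * returns 52428 pages shifted by PAGE_SHIFT, i.e. roughly 204 MiB.
 */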

#else

static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
{
	return 0;
}

#endif

#ifdef CONFIG_DMA_PERNUMA_CMA
void __init dma_pernuma_cma_reserve(void)
{
	int nid;

	if (!pernuma_size_bytes)
		return;

	for_each_online_node(nid) {
		int ret;
		char name[CMA_MAX_NAME];
		struct cma **cma = &dma_contiguous_pernuma_area[nid];

		snprintf(name, sizeof(name), "pernuma%d", nid);
		ret = cma_declare_contiguous_nid(0, pernuma_size_bytes, 0, 0,
						 0, false, name, cma, nid);
		if (ret) {
			pr_warn("%s: reservation failed: err %d, node %d\n",
				__func__, ret, nid);
			continue;
		}

		pr_debug("%s: reserved %llu MiB on node %d\n", __func__,
			 (unsigned long long)pernuma_size_bytes / SZ_1M, nid);
	}
}
#endif

/**
 * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
	phys_addr_t selected_size = 0;
	phys_addr_t selected_base = 0;
	phys_addr_t selected_limit = limit;
	bool fixed = false;

	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

	if (size_cmdline != -1) {
		selected_size = size_cmdline;
		selected_base = base_cmdline;
		selected_limit = min_not_zero(limit_cmdline, limit);
		if (base_cmdline + size_cmdline == limit_cmdline)
			fixed = true;
	} else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
		selected_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
		selected_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
		selected_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
		selected_size = max(size_bytes, cma_early_percent_memory());
#endif
	}

	if (selected_size && !dma_contiguous_default_area) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);

		dma_contiguous_reserve_area(selected_size, selected_base,
					    selected_limit,
					    &dma_contiguous_default_area,
					    fixed);
	}
}

void __weak
dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
}

/**
 * dma_contiguous_reserve_area() - reserve custom contiguous area
 * @size: Size of the reserved area (in bytes).
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @res_cma: Pointer to store the created cma region.
 * @fixed: hint about where to place the reserved area
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows creation of custom reserved areas for
 * specific devices.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 */
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
				       phys_addr_t limit, struct cma **res_cma,
				       bool fixed)
{
	int ret;

	ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed,
				     "reserved", res_cma);
	if (ret)
		return ret;

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(cma_get_base(*res_cma),
				   cma_get_size(*res_cma));

	return 0;
}
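
/*
 * Illustrative sketch (not from this file): arch setup code giving a
 * device its own 32 MiB pool below 4 GiB might reserve it like this;
 * "my_cma" is a hypothetical placeholder.
 *
 *	static struct cma *my_cma;
 *
 *	dma_contiguous_reserve_area(SZ_32M, 0, SZ_4G, &my_cma, false);
 */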

/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev:   Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing message about failed allocation.
 *
 * This function allocates a memory buffer for the specified device. It uses
 * the device-specific contiguous memory area if available, or the default
 * global one. Requires the architecture specific dev_get_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
				       unsigned int align, bool no_warn)
{
	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	return cma_alloc(dev_get_cma_area(dev), count, align, no_warn);
}

/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev:   Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when the provided pages do not belong to a contiguous
 * area, and true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	return cma_release(dev_get_cma_area(dev), pages, count);
}
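
/*
 * Illustrative sketch (hypothetical driver code): allocate a 1 MiB
 * buffer and later release it, falling back to the buddy allocator
 * when the pages did not come from a CMA area.
 *
 *	size_t count = SZ_1M >> PAGE_SHIFT;
 *	struct page *page;
 *
 *	page = dma_alloc_from_contiguous(dev, count, get_order(SZ_1M), false);
 *	...
 *	if (!dma_release_from_contiguous(dev, page, count))
 *		__free_pages(page, get_order(SZ_1M));
 */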

static struct page *cma_alloc_aligned(struct cma *cma, size_t size, gfp_t gfp)
{
	unsigned int align = min(get_order(size), CONFIG_CMA_ALIGNMENT);

	return cma_alloc(cma, size >> PAGE_SHIFT, align, gfp & __GFP_NOWARN);
}

/**
 * dma_alloc_contiguous() - allocate contiguous pages
 * @dev:   Pointer to device for which the allocation is performed.
 * @size:  Requested allocation size.
 * @gfp:   Allocation flags.
 *
 * Tries to use the device-specific contiguous memory area if available,
 * then the per-NUMA CMA area; if that allocation fails, it falls back to
 * the default global area.
 *
 * Note that single-page allocations bypass the per-NUMA and global areas,
 * as the addresses within one page are always contiguous, so there is no
 * need to waste CMA pages on them; skipping them also helps reduce
 * fragmentation.
 */
struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
{
#ifdef CONFIG_DMA_PERNUMA_CMA
	int nid = dev_to_node(dev);
#endif

	/* CMA can be used only in the context which permits sleeping */
	if (!gfpflags_allow_blocking(gfp))
		return NULL;
	if (dev->cma_area)
		return cma_alloc_aligned(dev->cma_area, size, gfp);
	if (size <= PAGE_SIZE)
		return NULL;

#ifdef CONFIG_DMA_PERNUMA_CMA
	if (nid != NUMA_NO_NODE && !(gfp & (GFP_DMA | GFP_DMA32))) {
		struct cma *cma = dma_contiguous_pernuma_area[nid];
		struct page *page;

		if (cma) {
			page = cma_alloc_aligned(cma, size, gfp);
			if (page)
				return page;
		}
	}
#endif
	if (!dma_contiguous_default_area)
		return NULL;

	return cma_alloc_aligned(dma_contiguous_default_area, size, gfp);
}

/**
 * dma_free_contiguous() - release allocated pages
 * @dev:   Pointer to device for which the pages were allocated.
 * @page:  Pointer to the allocated pages.
 * @size:  Size of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_contiguous(). Since
 * cma_release() returns false when the provided pages do not belong to a
 * contiguous area, and true otherwise, this function falls back to
 * __free_pages() upon a false return.
 */
void dma_free_contiguous(struct device *dev, struct page *page, size_t size)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	/* if dev has its own cma, free page from there */
	if (dev->cma_area) {
		if (cma_release(dev->cma_area, page, count))
			return;
	} else {
		/*
		 * otherwise, page is from either per-numa cma or default cma
		 */
#ifdef CONFIG_DMA_PERNUMA_CMA
		if (cma_release(dma_contiguous_pernuma_area[page_to_nid(page)],
				page, count))
			return;
#endif
		if (cma_release(dma_contiguous_default_area, page, count))
			return;
	}

	/* not in any cma, free from buddy */
	__free_pages(page, get_order(size));
}
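
/*
 * Illustrative sketch (hypothetical caller): the two helpers above are
 * meant to be used as a pair; callers typically fall back to plain
 * alloc_pages() when CMA declines the request, since
 * dma_free_contiguous() handles non-CMA pages as well.
 *
 *	struct page *page = dma_alloc_contiguous(dev, size, GFP_KERNEL);
 *
 *	if (!page)
 *		page = alloc_pages(GFP_KERNEL, get_order(size));
 *	...
 *	dma_free_contiguous(dev, page, size);
 */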

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

#undef pr_fmt
#define pr_fmt(fmt) fmt

static int rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	dev->cma_area = rmem->priv;
	return 0;
}

static void rmem_cma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	dev->cma_area = NULL;
}

static const struct reserved_mem_ops rmem_cma_ops = {
	.device_init	= rmem_cma_device_init,
	.device_release = rmem_cma_device_release,
};

static int __init rmem_cma_setup(struct reserved_mem *rmem)
{
	phys_addr_t align = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
	phys_addr_t mask = align - 1;
	unsigned long node = rmem->fdt_node;
	bool default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
	struct cma *cma;
	int err;

	if (size_cmdline != -1 && default_cma) {
		pr_info("Reserved memory: bypass %s node, using cmdline CMA params instead\n",
			rmem->name);
		return -EBUSY;
	}

	if (!of_get_flat_dt_prop(node, "reusable", NULL) ||
	    of_get_flat_dt_prop(node, "no-map", NULL))
		return -EINVAL;

	if ((rmem->base & mask) || (rmem->size & mask)) {
		pr_err("Reserved memory: incorrect alignment of CMA region\n");
		return -EINVAL;
	}

	err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma);
	if (err) {
		pr_err("Reserved memory: unable to setup CMA region\n");
		return err;
	}
	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(rmem->base, rmem->size);

	if (default_cma)
		dma_contiguous_default_area = cma;

	rmem->ops = &rmem_cma_ops;
	rmem->priv = cma;

	pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);

	return 0;
}
RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);
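
/*
 * Illustrative (hypothetical) device-tree snippet matched by the setup
 * handler above; property names follow the reserved-memory binding:
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		linux,cma {
 *			compatible = "shared-dma-pool";
 *			reusable;
 *			size = <0x4000000>;
 *			linux,cma-default;
 *		};
 *	};
 */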
#endif