// SPDX-License-Identifier: GPL-2.0
/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>

struct dma_coherent_mem {
	void		*virt_base;
	dma_addr_t	device_base;
	unsigned long	pfn_base;
	int		size;
	unsigned long	*bitmap;
	spinlock_t	spinlock;
	bool		use_dev_dma_pfn_offset;
};

static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
{
	if (dev && dev->dma_mem)
		return dev->dma_mem;
	return NULL;
}

static inline dma_addr_t dma_get_device_base(struct device *dev,
					     struct dma_coherent_mem * mem)
{
	if (mem->use_dev_dma_pfn_offset)
		return phys_to_dma(dev, PFN_PHYS(mem->pfn_base));
	return mem->device_base;
}

static struct dma_coherent_mem *dma_init_coherent_memory(phys_addr_t phys_addr,
		dma_addr_t device_addr, size_t size, bool use_dma_pfn_offset)
{
	struct dma_coherent_mem *dma_mem;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
	void *mem_base;

	if (!size)
		return ERR_PTR(-EINVAL);

	mem_base = memremap(phys_addr, size, MEMREMAP_WC);
	if (!mem_base)
		return ERR_PTR(-EINVAL);

	dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dma_mem)
		goto out_unmap_membase;
	dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dma_mem->bitmap)
		goto out_free_dma_mem;

	dma_mem->virt_base = mem_base;
	dma_mem->device_base = device_addr;
	dma_mem->pfn_base = PFN_DOWN(phys_addr);
	dma_mem->size = pages;
	dma_mem->use_dev_dma_pfn_offset = use_dma_pfn_offset;
	spin_lock_init(&dma_mem->spinlock);

	return dma_mem;

out_free_dma_mem:
	kfree(dma_mem);
out_unmap_membase:
	memunmap(mem_base);
	pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %zd MiB\n",
		&phys_addr, size / SZ_1M);
	return ERR_PTR(-ENOMEM);
}

static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
	if (!mem)
		return;

	memunmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}

static int dma_assign_coherent_memory(struct device *dev,
				      struct dma_coherent_mem *mem)
{
	if (!dev)
		return -ENODEV;

	if (dev->dma_mem)
		return -EBUSY;

	dev->dma_mem = mem;
	return 0;
}

/*
 * Declare a region of memory to be handed out by dma_alloc_coherent() when it
 * is asked for coherent memory for this device. This shall only be used
 * from platform code, usually based on the device tree description.
 *
 * phys_addr is the CPU physical address to which the memory is currently
 * assigned (this will be ioremapped so the CPU can access the region).
 *
 * device_addr is the DMA address the device needs to be programmed with to
 * actually address this memory (this will be handed out as the dma_addr_t in
 * dma_alloc_coherent()).
 *
 * size is the size of the area (must be a multiple of PAGE_SIZE).
 *
 * As a simplification for the platforms, only *one* such region of memory may
 * be declared per device.
 */
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem;
	int ret;

	mem = dma_init_coherent_memory(phys_addr, device_addr, size, false);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	ret = dma_assign_coherent_memory(dev, mem);
	if (ret)
		dma_release_coherent_memory(mem);
	return ret;
}
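
/*
 * Illustrative sketch (not part of this file): platform code that owns a
 * hypothetical device-local SRAM could hand it to the DMA core roughly like
 * this, so that later dma_alloc_coherent() calls for the device are served
 * from that region. The base address, size and function name below are made
 * up for the example.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		ret = dma_declare_coherent_memory(&pdev->dev, 0x90000000,
 *						  0x90000000, SZ_1M);
 *		if (ret)
 *			return ret;
 *		...
 *	}
 */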

static void *__dma_alloc_from_coherent(struct device *dev,
				       struct dma_coherent_mem *mem,
				       ssize_t size, dma_addr_t *dma_handle)
{
	int order = get_order(size);
	unsigned long flags;
	int pageno;
	void *ret;

	spin_lock_irqsave(&mem->spinlock, flags);

	if (unlikely(size > ((dma_addr_t)mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the coherent area.
	 */
	*dma_handle = dma_get_device_base(dev, mem) +
			((dma_addr_t)pageno << PAGE_SHIFT);
	ret = mem->virt_base + ((dma_addr_t)pageno << PAGE_SHIFT);
	spin_unlock_irqrestore(&mem->spinlock, flags);
	memset(ret, 0, size);
	return ret;
err:
	spin_unlock_irqrestore(&mem->spinlock, flags);
	return NULL;
}

/**
 * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
 * @dev:	device from which we allocate memory
 * @size:	size of requested memory area
 * @dma_handle:	This will be filled with the correct dma handle
 * @ret:	This pointer will be filled with the virtual address
 *		to allocated area.
 *
 * This function should be only called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	if (!mem)
		return 0;

	*ret = __dma_alloc_from_coherent(dev, mem, size, dma_handle);
	return 1;
}
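
/*
 * Sketch of the intended call site (simplified, not verbatim kernel code):
 * the generic allocation path asks the per-device pool first and only falls
 * back to the normal allocators when this helper returns 0.
 *
 *	void *cpu_addr;
 *
 *	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
 *		return cpu_addr;	// may be NULL if the pool is exhausted
 *	// otherwise continue with the generic allocators
 */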

static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
				       int order, void *vaddr)
{
	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long flags;

		spin_lock_irqsave(&mem->spinlock, flags);
		bitmap_release_region(mem->bitmap, page, order);
		spin_unlock_irqrestore(&mem->spinlock, flags);
		return 1;
	}
	return 0;
}

/**
 * dma_release_from_dev_coherent() - free memory to device coherent memory pool
 * @dev:	device from which the memory was allocated
 * @order:	the order of pages allocated
 * @vaddr:	virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller should
 * proceed with releasing memory from generic pools.
 */
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_release_from_coherent(mem, order, vaddr);
}
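
/*
 * Sketch of the matching free path (simplified, not verbatim kernel code):
 * the generic dma_free_coherent() path tries the per-device pool first and
 * only releases through the generic allocators when this helper returns 0.
 *
 *	int order = get_order(size);
 *
 *	if (dma_release_from_dev_coherent(dev, order, cpu_addr))
 *		return;
 *	// otherwise free through the generic path
 */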

static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
		struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
{
	if (mem && vaddr >= mem->virt_base && vaddr + size <=
		   (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
		unsigned long off = vma->vm_pgoff;
		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long user_count = vma_pages(vma);
		int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		*ret = -ENXIO;
		if (off < count && user_count <= count - off) {
			unsigned long pfn = mem->pfn_base + start + off;
			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
					       user_count << PAGE_SHIFT,
					       vma->vm_page_prot);
		}
		return 1;
	}
	return 0;
}

/**
 * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
 * @dev:	device from which the memory was allocated
 * @vma:	vm_area for the userspace memory
 * @vaddr:	cpu address returned by dma_alloc_from_dev_coherent
 * @size:	size of the memory buffer allocated
 * @ret:	result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if @vaddr belongs to the device coherent pool and the caller
 * should return @ret, or 0 if they should proceed with mapping memory from
 * generic areas.
 */
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			   void *vaddr, size_t size, int *ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
}
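
/*
 * Sketch of the mmap call site (simplified, not verbatim kernel code): a
 * dma_mmap_*() implementation lets the per-device pool claim the vma before
 * applying its generic mapping logic.
 *
 *	int ret;
 *
 *	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 *		return ret;
 *	// otherwise map from the generic areas
 */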

#ifdef CONFIG_DMA_GLOBAL_POOL
static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;

void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle)
{
	if (!dma_coherent_default_memory)
		return NULL;

	return __dma_alloc_from_coherent(dev, dma_coherent_default_memory, size,
			dma_handle);
}

int dma_release_from_global_coherent(int order, void *vaddr)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_release_from_coherent(dma_coherent_default_memory, order,
			vaddr);
}

int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
				   size_t size, int *ret)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
			vaddr, size, ret);
}

int dma_init_global_coherent(phys_addr_t phys_addr, size_t size)
{
	struct dma_coherent_mem *mem;

	mem = dma_init_coherent_memory(phys_addr, phys_addr, size, true);
	if (IS_ERR(mem))
		return PTR_ERR(mem);
	dma_coherent_default_memory = mem;
	pr_info("DMA: default coherent area is set\n");
	return 0;
}
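
/*
 * Illustrative sketch: architecture or platform setup code that has already
 * carved out a suitable region (the address and size below are made up) can
 * register it as the global pool; allocations for devices without their own
 * pool are then served via dma_alloc_from_global_coherent().
 *
 *	if (dma_init_global_coherent(0x80000000, SZ_4M))
 *		pr_warn("failed to set up global coherent pool\n");
 */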
#endif /* CONFIG_DMA_GLOBAL_POOL */

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

#ifdef CONFIG_DMA_GLOBAL_POOL
static struct reserved_mem *dma_reserved_default_memory __initdata;
#endif

static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	if (!rmem->priv) {
		struct dma_coherent_mem *mem;

		mem = dma_init_coherent_memory(rmem->base, rmem->base,
					       rmem->size, true);
		if (IS_ERR(mem))
			return PTR_ERR(mem);
		rmem->priv = mem;
	}
	dma_assign_coherent_memory(dev, rmem->priv);
	return 0;
}

static void rmem_dma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	if (dev)
		dev->dma_mem = NULL;
}

static const struct reserved_mem_ops rmem_dma_ops = {
	.device_init	= rmem_dma_device_init,
	.device_release	= rmem_dma_device_release,
};

static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL))
		return -EINVAL;

#ifdef CONFIG_ARM
	if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
		pr_err("Reserved memory: regions without no-map are not yet supported\n");
		return -EINVAL;
	}
#endif

#ifdef CONFIG_DMA_GLOBAL_POOL
	if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
		WARN(dma_reserved_default_memory,
		     "Reserved memory: region for default DMA coherent area is redefined\n");
		dma_reserved_default_memory = rmem;
	}
#endif

	rmem->ops = &rmem_dma_ops;
	pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}

#ifdef CONFIG_DMA_GLOBAL_POOL
static int __init dma_init_reserved_memory(void)
{
	if (!dma_reserved_default_memory)
		return -ENOMEM;
	return dma_init_global_coherent(dma_reserved_default_memory->base,
			dma_reserved_default_memory->size);
}
core_initcall(dma_init_reserved_memory);
#endif /* CONFIG_DMA_GLOBAL_POOL */

RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
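
/*
 * Illustrative device tree fragment (addresses, sizes and node/label names
 * are made up): a "shared-dma-pool" reserved-memory node is picked up by
 * rmem_dma_setup() above, and a device attaches to it with "memory-region";
 * adding "linux,dma-default" instead would feed the global coherent pool.
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		coherent_pool: dma-pool@78000000 {
 *			compatible = "shared-dma-pool";
 *			reg = <0x78000000 0x100000>;
 *			no-map;
 *		};
 *	};
 *
 *	some_device: device@40000000 {
 *		...
 *		memory-region = <&coherent_pool>;
 *	};
 */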
#endif