// SPDX-License-Identifier: GPL-2.0
/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>

struct dma_coherent_mem {
        void *virt_base;
        dma_addr_t device_base;
        unsigned long pfn_base;
        int size;
        unsigned long *bitmap;
        spinlock_t spinlock;
        bool use_dev_dma_pfn_offset;
};

static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;

static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
{
        if (dev && dev->dma_mem)
                return dev->dma_mem;
        return NULL;
}

static inline dma_addr_t dma_get_device_base(struct device *dev,
                                             struct dma_coherent_mem *mem)
{
        if (mem->use_dev_dma_pfn_offset)
                return phys_to_dma(dev, PFN_PHYS(mem->pfn_base));
        return mem->device_base;
}

static int dma_init_coherent_memory(phys_addr_t phys_addr,
                dma_addr_t device_addr, size_t size,
                struct dma_coherent_mem **mem)
{
        struct dma_coherent_mem *dma_mem = NULL;
        void *mem_base = NULL;
        int pages = size >> PAGE_SHIFT;
        int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
        int ret;

        if (!size) {
                ret = -EINVAL;
                goto out;
        }

        mem_base = memremap(phys_addr, size, MEMREMAP_WC);
        if (!mem_base) {
                ret = -EINVAL;
                goto out;
        }
        dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
        if (!dma_mem) {
                ret = -ENOMEM;
                goto out;
        }
        dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
        if (!dma_mem->bitmap) {
                ret = -ENOMEM;
                goto out;
        }

        dma_mem->virt_base = mem_base;
        dma_mem->device_base = device_addr;
        dma_mem->pfn_base = PFN_DOWN(phys_addr);
        dma_mem->size = pages;
        spin_lock_init(&dma_mem->spinlock);

        *mem = dma_mem;
        return 0;

out:
        kfree(dma_mem);
        if (mem_base)
                memunmap(mem_base);
        return ret;
}

static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
        if (!mem)
                return;

        memunmap(mem->virt_base);
        kfree(mem->bitmap);
        kfree(mem);
}

static int dma_assign_coherent_memory(struct device *dev,
                                      struct dma_coherent_mem *mem)
{
        if (!dev)
                return -ENODEV;

        if (dev->dma_mem)
                return -EBUSY;

        dev->dma_mem = mem;
        return 0;
}
/*
 * Declare a region of memory to be handed out by dma_alloc_coherent() when it
 * is asked for coherent memory for this device. This shall only be used
 * from platform code, usually based on the device tree description.
 *
 * phys_addr is the CPU physical address to which the memory is currently
 * assigned (this will be remapped with memremap() so the CPU can access the
 * region).
 *
 * device_addr is the DMA address the device needs to be programmed with to
 * actually address this memory (this will be handed out as the dma_addr_t in
 * dma_alloc_coherent()).
 *
 * size is the size of the area (must be a multiple of PAGE_SIZE).
 *
 * As a simplification for the platforms, only *one* such region of memory may
 * be declared per device.
 */
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                                dma_addr_t device_addr, size_t size)
{
        struct dma_coherent_mem *mem;
        int ret;

        ret = dma_init_coherent_memory(phys_addr, device_addr, size, &mem);
        if (ret)
                return ret;

        ret = dma_assign_coherent_memory(dev, mem);
        if (ret)
                dma_release_coherent_memory(mem);
        return ret;
}
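
/*
 * Example (illustrative only, not part of this file): platform code for a
 * hypothetical device could hand it a dedicated 1 MiB pool like this; the
 * probe function, address and size below are made-up values.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		return dma_declare_coherent_memory(&pdev->dev,
 *						   0x30000000,	// CPU physical
 *						   0x30000000,	// device DMA address
 *						   SZ_1M);
 *	}
 */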

static void *__dma_alloc_from_coherent(struct device *dev,
                                       struct dma_coherent_mem *mem,
                                       ssize_t size, dma_addr_t *dma_handle)
{
        int order = get_order(size);
        unsigned long flags;
        int pageno;
        void *ret;

        spin_lock_irqsave(&mem->spinlock, flags);

        if (unlikely(size > ((dma_addr_t)mem->size << PAGE_SHIFT)))
                goto err;

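        /*
         * Worked example of the order arithmetic (illustrative, assuming
         * 4 KiB pages): a 10 KiB request gives get_order(10240) == 2, so
         * the search below reserves a naturally aligned four-page (16 KiB)
         * region and pageno indexes its first page.
         */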
        pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
        if (unlikely(pageno < 0))
                goto err;

        /*
         * Memory was found in the coherent area.
         */
        *dma_handle = dma_get_device_base(dev, mem) +
                        ((dma_addr_t)pageno << PAGE_SHIFT);
        ret = mem->virt_base + ((dma_addr_t)pageno << PAGE_SHIFT);
        spin_unlock_irqrestore(&mem->spinlock, flags);
        memset(ret, 0, size);
        return ret;
err:
        spin_unlock_irqrestore(&mem->spinlock, flags);
        return NULL;
}

/**
 * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
 * @dev: device from which we allocate memory
 * @size: size of requested memory area
 * @dma_handle: This will be filled with the correct dma handle
 * @ret: This pointer will be filled with the virtual address of the
 *	 allocated area.
 *
 * This function should only be called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
                dma_addr_t *dma_handle, void **ret)
{
        struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

        if (!mem)
                return 0;

        *ret = __dma_alloc_from_coherent(dev, mem, size, dma_handle);
        return 1;
}
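
/*
 * Illustrative caller (a sketch, not code from this file): an allocation
 * path that consults the per-device pool before a generic allocator would
 * look roughly like this. Note that a non-zero return only means the device
 * has a pool; *ret may still be NULL if the pool was exhausted.
 *
 *	void *vaddr;
 *
 *	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &vaddr))
 *		return vaddr;
 *	// fall back to the generic allocator here
 */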

void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
                dma_addr_t *dma_handle)
{
        if (!dma_coherent_default_memory)
                return NULL;

        return __dma_alloc_from_coherent(dev, dma_coherent_default_memory, size,
                        dma_handle);
}

static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
                                       int order, void *vaddr)
{
        if (mem && vaddr >= mem->virt_base && vaddr <
                   (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
                int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
                unsigned long flags;

                spin_lock_irqsave(&mem->spinlock, flags);
                bitmap_release_region(mem->bitmap, page, order);
                spin_unlock_irqrestore(&mem->spinlock, flags);
                return 1;
        }
        return 0;
}

/**
 * dma_release_from_dev_coherent() - free memory to device coherent memory pool
 * @dev: device from which the memory was allocated
 * @order: the order of pages allocated
 * @vaddr: virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller should
 * proceed with releasing memory from generic pools.
 */
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
{
        struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

        return __dma_release_from_coherent(mem, order, vaddr);
}
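
/*
 * Illustrative caller (a sketch, not code from this file): the matching
 * free path first offers the buffer back to the per-device pool and only
 * falls back to the generic release path when it did not come from there.
 *
 *	int order = get_order(size);
 *
 *	if (dma_release_from_dev_coherent(dev, order, vaddr))
 *		return;
 *	// release through the generic path here
 */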

int dma_release_from_global_coherent(int order, void *vaddr)
{
        if (!dma_coherent_default_memory)
                return 0;

        return __dma_release_from_coherent(dma_coherent_default_memory, order,
                        vaddr);
}

static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
                struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
{
        if (mem && vaddr >= mem->virt_base && vaddr + size <=
                   (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
                unsigned long off = vma->vm_pgoff;
                int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
                unsigned long user_count = vma_pages(vma);
                int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

                *ret = -ENXIO;
                if (off < count && user_count <= count - off) {
                        unsigned long pfn = mem->pfn_base + start + off;
                        *ret = remap_pfn_range(vma, vma->vm_start, pfn,
                                               user_count << PAGE_SHIFT,
                                               vma->vm_page_prot);
                }
                return 1;
        }
        return 0;
}

/**
 * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
 * @dev: device from which the memory was allocated
 * @vma: vm_area for the userspace memory
 * @vaddr: cpu address returned by dma_alloc_from_dev_coherent
 * @size: size of the memory buffer allocated
 * @ret: result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if @vaddr belongs to the device coherent pool and the caller
 * should return @ret, or 0 if they should proceed with mapping memory from
 * generic areas.
 */
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
                               void *vaddr, size_t size, int *ret)
{
        struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

        return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
}
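
/*
 * Illustrative caller (a sketch, not code from this file): an arch or
 * driver mmap handler tries the per-device pool first and, when the buffer
 * belongs to it, returns the remap_pfn_range() result collected in ret.
 *
 *	int ret;
 *
 *	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 *		return ret;
 *	// map from the generic area here
 */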

int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
                                   size_t size, int *ret)
{
        if (!dma_coherent_default_memory)
                return 0;

        return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
                                        vaddr, size, ret);
}

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

static struct reserved_mem *dma_reserved_default_memory __initdata;

static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
        struct dma_coherent_mem *mem = rmem->priv;
        int ret;

        if (!mem) {
                ret = dma_init_coherent_memory(rmem->base, rmem->base,
                                               rmem->size, &mem);
                if (ret) {
                        pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
                               &rmem->base, (unsigned long)rmem->size / SZ_1M);
                        return ret;
                }
        }
        mem->use_dev_dma_pfn_offset = true;
        rmem->priv = mem;
        dma_assign_coherent_memory(dev, mem);
        return 0;
}

static void rmem_dma_device_release(struct reserved_mem *rmem,
                                    struct device *dev)
{
        if (dev)
                dev->dma_mem = NULL;
}

static const struct reserved_mem_ops rmem_dma_ops = {
        .device_init    = rmem_dma_device_init,
        .device_release = rmem_dma_device_release,
};

static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
        unsigned long node = rmem->fdt_node;

        if (of_get_flat_dt_prop(node, "reusable", NULL))
                return -EINVAL;

#ifdef CONFIG_ARM
        if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
                pr_err("Reserved memory: regions without no-map are not yet supported\n");
                return -EINVAL;
        }

        if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
                WARN(dma_reserved_default_memory,
                     "Reserved memory: region for default DMA coherent area is redefined\n");
                dma_reserved_default_memory = rmem;
        }
#endif

        rmem->ops = &rmem_dma_ops;
        pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
                &rmem->base, (unsigned long)rmem->size / SZ_1M);
        return 0;
}
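
/*
 * Example device tree fragment (illustrative; the label, unit address and
 * size are made up) that this setup hook would match via the
 * "shared-dma-pool" compatible:
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		coherent_pool: dma-pool@40000000 {
 *			compatible = "shared-dma-pool";
 *			reg = <0x40000000 0x100000>;
 *			no-map;
 *		};
 *	};
 *
 * A device node then references the pool with
 * "memory-region = <&coherent_pool>;", and the region is assigned to that
 * device through rmem_dma_device_init().
 */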

static int __init dma_init_reserved_memory(void)
{
        const struct reserved_mem_ops *ops;
        int ret;

        if (!dma_reserved_default_memory)
                return -ENOMEM;

        ops = dma_reserved_default_memory->ops;

        /*
         * We rely on rmem_dma_device_init() not propagating the error from
         * dma_assign_coherent_memory() when it is called with a NULL device.
         */
        ret = ops->device_init(dma_reserved_default_memory, NULL);

        if (!ret) {
                dma_coherent_default_memory = dma_reserved_default_memory->priv;
                pr_info("DMA: default coherent area is set\n");
        }

        return ret;
}

core_initcall(dma_init_reserved_memory);

RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
#endif