/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

struct dma_coherent_mem {
        void            *virt_base;     /* kernel mapping of the pool */
        dma_addr_t      device_base;    /* bus/DMA address of the pool */
        unsigned long   pfn_base;       /* first PFN backing the pool */
        int             size;           /* pool size in pages */
        int             flags;
        unsigned long   *bitmap;        /* one bit per page; set = in use */
        spinlock_t      spinlock;       /* protects the bitmap */
        bool            use_dev_dma_pfn_offset; /* honour dev->dma_pfn_offset */
};

static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;

static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
{
        if (dev && dev->dma_mem)
                return dev->dma_mem;
        return NULL;
}

static inline dma_addr_t dma_get_device_base(struct device *dev,
                                             struct dma_coherent_mem *mem)
{
        if (mem->use_dev_dma_pfn_offset)
                return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT;
        else
                return mem->device_base;
}
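
/*
 * Worked example (hypothetical numbers, for illustration only): with
 * pfn_base = 0x80000, dev->dma_pfn_offset = 0x10000 and 4 KiB pages,
 * the pool appears on the device's bus at
 * (0x80000 - 0x10000) << 12 == 0x70000000.
 */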

static int dma_init_coherent_memory(
        phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
        struct dma_coherent_mem **mem)
{
        struct dma_coherent_mem *dma_mem = NULL;
        void *mem_base = NULL; /* memremap() returns a plain pointer, not __iomem */
        int pages = size >> PAGE_SHIFT;
        int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
        int ret;

        if (!size) {
                ret = -EINVAL;
                goto out;
        }

        mem_base = memremap(phys_addr, size, MEMREMAP_WC);
        if (!mem_base) {
                ret = -EINVAL;
                goto out;
        }
        dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
        if (!dma_mem) {
                ret = -ENOMEM;
                goto out;
        }
        dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
        if (!dma_mem->bitmap) {
                ret = -ENOMEM;
                goto out;
        }

        dma_mem->virt_base = mem_base;
        dma_mem->device_base = device_addr;
        dma_mem->pfn_base = PFN_DOWN(phys_addr);
        dma_mem->size = pages;
        dma_mem->flags = flags;
        spin_lock_init(&dma_mem->spinlock);

        *mem = dma_mem;
        return 0;

out:
        kfree(dma_mem);
        if (mem_base)
                memunmap(mem_base);
        return ret;
}

static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
        if (!mem)
                return;

        memunmap(mem->virt_base);
        kfree(mem->bitmap);
        kfree(mem);
}

static int dma_assign_coherent_memory(struct device *dev,
                                      struct dma_coherent_mem *mem)
{
        if (!dev)
                return -ENODEV;

        if (dev->dma_mem)
                return -EBUSY;

        dev->dma_mem = mem;
        return 0;
}

int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                                dma_addr_t device_addr, size_t size, int flags)
{
        struct dma_coherent_mem *mem;
        int ret;

        ret = dma_init_coherent_memory(phys_addr, device_addr, size, flags, &mem);
        if (ret)
                return ret;

        ret = dma_assign_coherent_memory(dev, mem);
        if (ret)
                dma_release_coherent_memory(mem);
        return ret;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
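
/*
 * Minimal usage sketch (hypothetical driver and addresses, not part of
 * this file): a platform driver with a dedicated on-chip SRAM window can
 * route its coherent allocations through that window:
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              int ret;
 *
 *              ret = dma_declare_coherent_memory(&pdev->dev, 0x40000000,
 *                                                0x40000000, SZ_1M,
 *                                                DMA_MEMORY_EXCLUSIVE);
 *              if (ret)
 *                      return ret;
 *              ...
 *      }
 */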

void dma_release_declared_memory(struct device *dev)
{
        struct dma_coherent_mem *mem = dev->dma_mem;

        if (!mem)
                return;
        dma_release_coherent_memory(mem);
        dev->dma_mem = NULL;
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
                                        dma_addr_t device_addr, size_t size)
{
        struct dma_coherent_mem *mem = dev->dma_mem;
        unsigned long flags;
        int pos, err;

        size += device_addr & ~PAGE_MASK;

        if (!mem)
                return ERR_PTR(-EINVAL);

        spin_lock_irqsave(&mem->spinlock, flags);
        pos = PFN_DOWN(device_addr - dma_get_device_base(dev, mem));
        err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
        spin_unlock_irqrestore(&mem->spinlock, flags);

        if (err != 0)
                return ERR_PTR(err);
        return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
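
/*
 * Sketch (hypothetical addresses): a driver that must keep a firmware
 * mailbox at a fixed bus address inside the declared pool can pin that
 * region up front so later allocations never hand it out:
 *
 *      void *mbox = dma_mark_declared_memory_occupied(&pdev->dev,
 *                                                     0x40000000 + SZ_64K,
 *                                                     SZ_4K);
 *      if (IS_ERR(mbox))
 *              return PTR_ERR(mbox);
 */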

static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
                                       ssize_t size, dma_addr_t *dma_handle)
{
        int order = get_order(size);
        unsigned long flags;
        int pageno;
        void *ret;

        spin_lock_irqsave(&mem->spinlock, flags);

        if (unlikely(size > (mem->size << PAGE_SHIFT)))
                goto err;

        pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
        if (unlikely(pageno < 0))
                goto err;

        /*
         * Memory was found in the coherent area.
         */
        *dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
        ret = mem->virt_base + (pageno << PAGE_SHIFT);
        spin_unlock_irqrestore(&mem->spinlock, flags);
        memset(ret, 0, size);
        return ret;
err:
        spin_unlock_irqrestore(&mem->spinlock, flags);
        return NULL;
}

/**
 * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
 * @dev: device from which we allocate memory
 * @size: size of requested memory area
 * @dma_handle: This will be filled with the correct dma handle
 * @ret: This pointer will be filled with the virtual address
 *       of the allocated area.
 *
 * This function should only be called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
                dma_addr_t *dma_handle, void **ret)
{
        struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

        if (!mem)
                return 0;

        *ret = __dma_alloc_from_coherent(mem, size, dma_handle);
        if (*ret)
                return 1;

        /*
         * In the case where the allocation cannot be satisfied from the
         * per-device area, try to fall back to generic memory if the
         * constraints allow it.
         */
        return mem->flags & DMA_MEMORY_EXCLUSIVE;
}
EXPORT_SYMBOL(dma_alloc_from_dev_coherent);
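
/*
 * Sketch of how the generic allocation path consumes this helper
 * (modelled on the dma_alloc_attrs() wrapper in <linux/dma-mapping.h>,
 * simplified here):
 *
 *      void *cpu_addr;
 *
 *      if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
 *              return cpu_addr;
 *      ... otherwise fall back to the generic allocator
 */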

void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
{
        if (!dma_coherent_default_memory)
                return NULL;

        return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
                                         dma_handle);
}

static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
                                       int order, void *vaddr)
{
        if (mem && vaddr >= mem->virt_base && vaddr <
            (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
                unsigned long flags;

                spin_lock_irqsave(&mem->spinlock, flags);
                bitmap_release_region(mem->bitmap, page, order);
                spin_unlock_irqrestore(&mem->spinlock, flags);
                return 1;
        }
        return 0;
}

/**
 * dma_release_from_dev_coherent() - free memory to device coherent memory pool
 * @dev: device from which the memory was allocated
 * @order: the order of pages allocated
 * @vaddr: virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller should
 * proceed with releasing memory from generic pools.
 */
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
{
        struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

        return __dma_release_from_coherent(mem, order, vaddr);
}
EXPORT_SYMBOL(dma_release_from_dev_coherent);
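
/*
 * Matching sketch for the free path (modelled on the generic
 * dma_free_attrs() wrapper, simplified):
 *
 *      if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
 *              return;
 *      ... otherwise free through the generic allocator
 */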

int dma_release_from_global_coherent(int order, void *vaddr)
{
        if (!dma_coherent_default_memory)
                return 0;

        return __dma_release_from_coherent(dma_coherent_default_memory, order,
                                           vaddr);
}

static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
                struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
{
        if (mem && vaddr >= mem->virt_base && vaddr + size <=
            (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                unsigned long off = vma->vm_pgoff;
                int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
                int user_count = vma_pages(vma);
                int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

                *ret = -ENXIO;
                if (off < count && user_count <= count - off) {
                        unsigned long pfn = mem->pfn_base + start + off;
                        *ret = remap_pfn_range(vma, vma->vm_start, pfn,
                                               user_count << PAGE_SHIFT,
                                               vma->vm_page_prot);
                }
                return 1;
        }
        return 0;
}

/**
 * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
 * @dev: device from which the memory was allocated
 * @vma: vm_area for the userspace memory
 * @vaddr: cpu address returned by dma_alloc_from_dev_coherent
 * @size: size of the memory buffer allocated
 * @ret: result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if we correctly mapped the memory, or 0 if the caller should
 * proceed with mapping memory from generic pools.
 */
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
                               void *vaddr, size_t size, int *ret)
{
        struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

        return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
}
EXPORT_SYMBOL(dma_mmap_from_dev_coherent);
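
/*
 * Sketch for the mmap path (modelled on the generic dma_mmap_attrs()
 * wrapper, simplified):
 *
 *      int ret;
 *
 *      if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 *              return ret;
 *      ... otherwise map through the generic implementation
 */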

int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
                                  size_t size, int *ret)
{
        if (!dma_coherent_default_memory)
                return 0;

        return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
                                        vaddr, size, ret);
}

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

static struct reserved_mem *dma_reserved_default_memory __initdata;

static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
        struct dma_coherent_mem *mem = rmem->priv;
        int ret;

        if (!mem) {
                ret = dma_init_coherent_memory(rmem->base, rmem->base,
                                               rmem->size,
                                               DMA_MEMORY_EXCLUSIVE, &mem);
                if (ret) {
                        pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
                               &rmem->base, (unsigned long)rmem->size / SZ_1M);
                        return ret;
                }
        }
        mem->use_dev_dma_pfn_offset = true;
        rmem->priv = mem;
        dma_assign_coherent_memory(dev, mem);
        return 0;
}

static void rmem_dma_device_release(struct reserved_mem *rmem,
                                    struct device *dev)
{
        if (dev)
                dev->dma_mem = NULL;
}

static const struct reserved_mem_ops rmem_dma_ops = {
        .device_init    = rmem_dma_device_init,
        .device_release = rmem_dma_device_release,
};

static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
        unsigned long node = rmem->fdt_node;

        if (of_get_flat_dt_prop(node, "reusable", NULL))
                return -EINVAL;

#ifdef CONFIG_ARM
        if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
                pr_err("Reserved memory: regions without no-map are not yet supported\n");
                return -EINVAL;
        }

        if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
                WARN(dma_reserved_default_memory,
                     "Reserved memory: region for default DMA coherent area is redefined\n");
                dma_reserved_default_memory = rmem;
        }
#endif

        rmem->ops = &rmem_dma_ops;
        pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
                &rmem->base, (unsigned long)rmem->size / SZ_1M);
        return 0;
}

static int __init dma_init_reserved_memory(void)
{
        const struct reserved_mem_ops *ops;
        int ret;

        if (!dma_reserved_default_memory)
                return -ENOMEM;

        ops = dma_reserved_default_memory->ops;

        /*
         * We rely on rmem_dma_device_init() not propagating an error from
         * dma_assign_coherent_memory() when the device is NULL.
         */
        ret = ops->device_init(dma_reserved_default_memory, NULL);

        if (!ret) {
                dma_coherent_default_memory = dma_reserved_default_memory->priv;
                pr_info("DMA: default coherent area is set\n");
        }

        return ret;
}

core_initcall(dma_init_reserved_memory);

RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
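
/*
 * Illustrative device-tree snippet (hypothetical node name and addresses)
 * for a region this setup hook would bind to:
 *
 *      reserved-memory {
 *              #address-cells = <1>;
 *              #size-cells = <1>;
 *              ranges;
 *
 *              dma_pool: dma-pool@78000000 {
 *                      compatible = "shared-dma-pool";
 *                      reg = <0x78000000 0x800000>;
 *                      no-map;
 *              };
 *      };
 *
 * A device then claims it with "memory-region = <&dma_pool>;".
 */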
#endif