// SPDX-License-Identifier: GPL-2.0
/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

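/*
 * Describes one device-coherent memory area: a kernel mapping of the
 * backing region plus a page-granular allocation bitmap protected by
 * the spinlock.
 */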
struct dma_coherent_mem {
	void		*virt_base;
	dma_addr_t	device_base;
	unsigned long	pfn_base;
	int		size;
	int		flags;
	unsigned long	*bitmap;
	spinlock_t	spinlock;
	bool		use_dev_dma_pfn_offset;
};

static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;

static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
{
	if (dev && dev->dma_mem)
		return dev->dma_mem;
	return NULL;
}

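/*
 * Return the base DMA address of @mem as seen by @dev. For regions
 * declared from a device-tree reserved-memory node, the address is
 * derived from the physical PFN and the device's dma_pfn_offset.
 */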
static inline dma_addr_t dma_get_device_base(struct device *dev,
					     struct dma_coherent_mem *mem)
{
	if (mem->use_dev_dma_pfn_offset)
		return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT;
	else
		return mem->device_base;
}

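/*
 * Map the region write-combined and set up the allocation bitmap;
 * one bitmap bit tracks one page of the area.
 */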
static int dma_init_coherent_memory(
	phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
	struct dma_coherent_mem **mem)
{
	struct dma_coherent_mem *dma_mem = NULL;
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
	int ret;

	if (!size) {
		ret = -EINVAL;
		goto out;
	}

	mem_base = memremap(phys_addr, size, MEMREMAP_WC);
	if (!mem_base) {
		ret = -EINVAL;
		goto out;
	}
	dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dma_mem) {
		ret = -ENOMEM;
		goto out;
	}
	dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dma_mem->bitmap) {
		ret = -ENOMEM;
		goto out;
	}

	dma_mem->virt_base = mem_base;
	dma_mem->device_base = device_addr;
	dma_mem->pfn_base = PFN_DOWN(phys_addr);
	dma_mem->size = pages;
	dma_mem->flags = flags;
	spin_lock_init(&dma_mem->spinlock);

	*mem = dma_mem;
	return 0;

out:
	kfree(dma_mem);
	if (mem_base)
		memunmap(mem_base);
	return ret;
}

static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
	if (!mem)
		return;

	memunmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}

static int dma_assign_coherent_memory(struct device *dev,
				      struct dma_coherent_mem *mem)
{
	if (!dev)
		return -ENODEV;

	if (dev->dma_mem)
		return -EBUSY;

	dev->dma_mem = mem;
	return 0;
}

int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	struct dma_coherent_mem *mem;
	int ret;

	ret = dma_init_coherent_memory(phys_addr, device_addr, size, flags, &mem);
	if (ret)
		return ret;

	ret = dma_assign_coherent_memory(dev, mem);
	if (ret)
		dma_release_coherent_memory(mem);
	return ret;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
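
/*
 * Illustrative use only (device and addresses are hypothetical): a driver
 * whose device has local memory at bus address 0x20000000, visible to the
 * CPU at the same physical address, could declare a 1 MiB exclusive pool:
 *
 *	ret = dma_declare_coherent_memory(&pdev->dev, 0x20000000,
 *					  0x20000000, SZ_1M,
 *					  DMA_MEMORY_EXCLUSIVE);
 *
 * dma_alloc_coherent() calls for that device are then satisfied from
 * this pool.
 */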

void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dma_release_coherent_memory(mem);
	dev->dma_mem = NULL;
}
EXPORT_SYMBOL(dma_release_declared_memory);

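/*
 * Reserve a specific range of a declared area so the allocator will not
 * hand it out; returns the kernel virtual address of the reserved range.
 */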
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	unsigned long flags;
	int pos, err;

	size += device_addr & ~PAGE_MASK;

	if (!mem)
		return ERR_PTR(-EINVAL);

	spin_lock_irqsave(&mem->spinlock, flags);
	pos = PFN_DOWN(device_addr - dma_get_device_base(dev, mem));
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
	spin_unlock_irqrestore(&mem->spinlock, flags);

	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);

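/*
 * Common allocator for the per-device and global pools: find a free
 * power-of-two run of pages in the bitmap and return its zeroed kernel
 * virtual address, with the device address in *dma_handle.
 */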
static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
		ssize_t size, dma_addr_t *dma_handle)
{
	int order = get_order(size);
	unsigned long flags;
	int pageno;
	void *ret;

	spin_lock_irqsave(&mem->spinlock, flags);

	if (unlikely(size > (mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the coherent area.
	 */
	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
	ret = mem->virt_base + (pageno << PAGE_SHIFT);
	spin_unlock_irqrestore(&mem->spinlock, flags);
	memset(ret, 0, size);
	return ret;
err:
	spin_unlock_irqrestore(&mem->spinlock, flags);
	return NULL;
}

/**
 * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
 * @dev:	device from which we allocate memory
 * @size:	size of requested memory area
 * @dma_handle:	This will be filled with the correct dma handle
 * @ret:	This pointer will be filled with the virtual address
 *		of the allocated area.
 *
 * This function should only be called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	if (!mem)
		return 0;

	*ret = __dma_alloc_from_coherent(mem, size, dma_handle);
	if (*ret)
		return 1;

	/*
	 * In the case where the allocation cannot be satisfied from the
	 * per-device area, try to fall back to generic memory if the
	 * constraints allow it.
	 */
	return mem->flags & DMA_MEMORY_EXCLUSIVE;
}
EXPORT_SYMBOL(dma_alloc_from_dev_coherent);
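
/*
 * A minimal sketch (not taken from any particular architecture) of how a
 * per-arch allocator is expected to use this helper:
 *
 *	void *ret;
 *
 *	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &ret))
 *		return ret;
 *	// otherwise fall back to the generic allocator
 */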

void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
{
	if (!dma_coherent_default_memory)
		return NULL;

	return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
			dma_handle);
}

static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
				       int order, void *vaddr)
{
	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long flags;

		spin_lock_irqsave(&mem->spinlock, flags);
		bitmap_release_region(mem->bitmap, page, order);
		spin_unlock_irqrestore(&mem->spinlock, flags);
		return 1;
	}
	return 0;
}

/**
 * dma_release_from_dev_coherent() - free memory to device coherent memory pool
 * @dev:	device from which the memory was allocated
 * @order:	the order of pages allocated
 * @vaddr:	virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller should
 * proceed with releasing memory from generic pools.
 */
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_release_from_coherent(mem, order, vaddr);
}
EXPORT_SYMBOL(dma_release_from_dev_coherent);

int dma_release_from_global_coherent(int order, void *vaddr)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_release_from_coherent(dma_coherent_default_memory, order,
			vaddr);
}

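/*
 * Common mmap helper: if @vaddr lies inside @mem, remap the matching
 * pages into @vma (honouring vma->vm_pgoff) and report the
 * remap_pfn_range() result through @ret.
 */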
static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
		struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
{
	if (mem && vaddr >= mem->virt_base && vaddr + size <=
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		unsigned long off = vma->vm_pgoff;
		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		int user_count = vma_pages(vma);
		int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		*ret = -ENXIO;
		if (off < count && user_count <= count - off) {
			unsigned long pfn = mem->pfn_base + start + off;
			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
					       user_count << PAGE_SHIFT,
					       vma->vm_page_prot);
		}
		return 1;
	}
	return 0;
}

/**
 * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
 * @dev:	device from which the memory was allocated
 * @vma:	vm_area for the userspace memory
 * @vaddr:	cpu address returned by dma_alloc_from_dev_coherent
 * @size:	size of the memory buffer allocated
 * @ret:	result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if we correctly mapped the memory, or 0 if the caller should
 * proceed with mapping memory from generic pools.
 */
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			       void *vaddr, size_t size, int *ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
}
EXPORT_SYMBOL(dma_mmap_from_dev_coherent);
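
/*
 * As with allocation, per-arch mmap implementations try the per-device
 * pool first, e.g. (sketch only):
 *
 *	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 *		return ret;
 */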

int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
				  size_t size, int *ret)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
					vaddr, size, ret);
}

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

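/* Region tagged "linux,dma-default" in the device tree, if any. */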
static struct reserved_mem *dma_reserved_default_memory __initdata;

static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	struct dma_coherent_mem *mem = rmem->priv;
	int ret;

	if (!mem) {
		ret = dma_init_coherent_memory(rmem->base, rmem->base,
					       rmem->size,
					       DMA_MEMORY_EXCLUSIVE, &mem);
		if (ret) {
			pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
				&rmem->base, (unsigned long)rmem->size / SZ_1M);
			return ret;
		}
	}
	mem->use_dev_dma_pfn_offset = true;
	rmem->priv = mem;
	dma_assign_coherent_memory(dev, mem);
	return 0;
}

static void rmem_dma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	if (dev)
		dev->dma_mem = NULL;
}

static const struct reserved_mem_ops rmem_dma_ops = {
	.device_init	= rmem_dma_device_init,
	.device_release	= rmem_dma_device_release,
};

static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL))
		return -EINVAL;

#ifdef CONFIG_ARM
	if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
		pr_err("Reserved memory: regions without no-map are not yet supported\n");
		return -EINVAL;
	}

	if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
		WARN(dma_reserved_default_memory,
		     "Reserved memory: region for default DMA coherent area is redefined\n");
		dma_reserved_default_memory = rmem;
	}
#endif

	rmem->ops = &rmem_dma_ops;
	pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}
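
/*
 * Illustrative device-tree node (values are hypothetical) that this setup
 * handler would match:
 *
 *	reserved-memory {
 *		dma_pool: shared-dma-pool@60000000 {
 *			compatible = "shared-dma-pool";
 *			reg = <0x60000000 0x100000>;
 *			no-map;
 *		};
 *	};
 *
 * A device node then references the pool with "memory-region = <&dma_pool>;".
 */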

static int __init dma_init_reserved_memory(void)
{
	const struct reserved_mem_ops *ops;
	int ret;

	if (!dma_reserved_default_memory)
		return -ENOMEM;

	ops = dma_reserved_default_memory->ops;

	/*
	 * We rely on rmem_dma_device_init() not propagating the error from
	 * dma_assign_coherent_memory() when it is called for a NULL device.
	 */
	ret = ops->device_init(dma_reserved_default_memory, NULL);

	if (!ret) {
		dma_coherent_default_memory = dma_reserved_default_memory->priv;
		pr_info("DMA: default coherent area is set\n");
	}

	return ret;
}

core_initcall(dma_init_reserved_memory);

RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
#endif