/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

struct dma_coherent_mem {
	void		*virt_base;
	dma_addr_t	device_base;
	unsigned long	pfn_base;
	int		size;
	int		flags;
	unsigned long	*bitmap;
	spinlock_t	spinlock;
	bool		use_dev_dma_pfn_offset;
};

static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;

static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
{
	if (dev && dev->dma_mem)
		return dev->dma_mem;
	return NULL;
}

static inline dma_addr_t dma_get_device_base(struct device *dev,
					     struct dma_coherent_mem *mem)
{
	if (mem->use_dev_dma_pfn_offset)
		return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT;
	else
		return mem->device_base;
}
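
/*
 * Worked example (illustrative values, not from any real platform): with
 * mem->pfn_base = 0x80000 and a device whose dma_pfn_offset is 0x10000 on a
 * 4 KiB-page system, the CPU physical base 0x80000000 is reported to the
 * device as bus address (0x80000 - 0x10000) << PAGE_SHIFT = 0x70000000,
 * i.e. the pool base translated into the device's DMA address space.
 */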

static bool dma_init_coherent_memory(
	phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
	struct dma_coherent_mem **mem)
{
	struct dma_coherent_mem *dma_mem = NULL;
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;

	if (flags & DMA_MEMORY_MAP)
		mem_base = memremap(phys_addr, size, MEMREMAP_WC);
	else
		mem_base = ioremap(phys_addr, size);
	if (!mem_base)
		goto out;

	dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dma_mem)
		goto out;
	dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dma_mem->bitmap)
		goto out;

	dma_mem->virt_base = mem_base;
	dma_mem->device_base = device_addr;
	dma_mem->pfn_base = PFN_DOWN(phys_addr);
	dma_mem->size = pages;
	dma_mem->flags = flags;
	spin_lock_init(&dma_mem->spinlock);

	*mem = dma_mem;
	return true;

out:
	kfree(dma_mem);
	if (mem_base) {
		if (flags & DMA_MEMORY_MAP)
			memunmap(mem_base);
		else
			iounmap(mem_base);
	}
	return false;
}

static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
	if (!mem)
		return;

	if (mem->flags & DMA_MEMORY_MAP)
		memunmap(mem->virt_base);
	else
		iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}

static int dma_assign_coherent_memory(struct device *dev,
				      struct dma_coherent_mem *mem)
{
	if (!dev)
		return -ENODEV;

	if (dev->dma_mem)
		return -EBUSY;

	dev->dma_mem = mem;
	return 0;
}

int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	struct dma_coherent_mem *mem;

	if (!dma_init_coherent_memory(phys_addr, device_addr, size, flags,
				      &mem))
		return 0;

	if (dma_assign_coherent_memory(dev, mem) == 0)
		return flags & DMA_MEMORY_MAP ? DMA_MEMORY_MAP : DMA_MEMORY_IO;

	dma_release_coherent_memory(mem);
	return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
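
/*
 * Example usage (a hedged sketch, not taken from an in-tree driver): a
 * platform driver with a dedicated SRAM window could carve it out as a
 * per-device coherent pool from its probe() routine. The addresses and the
 * 1 MiB size below are made up for illustration:
 *
 *	if (!dma_declare_coherent_memory(&pdev->dev, 0x58000000, 0x58000000,
 *					 SZ_1M,
 *					 DMA_MEMORY_MAP |
 *					 DMA_MEMORY_EXCLUSIVE))
 *		return -ENOMEM;
 *
 * Subsequent dma_alloc_coherent() calls for this device are then satisfied
 * from that window; DMA_MEMORY_EXCLUSIVE forbids falling back to generic
 * memory when the pool is exhausted.
 */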

void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dma_release_coherent_memory(mem);
	dev->dma_mem = NULL;
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	unsigned long flags;
	int pos, err;

	size += device_addr & ~PAGE_MASK;

	if (!mem)
		return ERR_PTR(-EINVAL);

	spin_lock_irqsave(&mem->spinlock, flags);
	pos = PFN_DOWN(device_addr - dma_get_device_base(dev, mem));
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
	spin_unlock_irqrestore(&mem->spinlock, flags);

	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
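
/*
 * Example (hedged sketch): a driver that must keep a firmware-defined buffer
 * at a fixed bus address inside its declared pool could pin it up front.
 * FW_BUF_BUS_ADDR and FW_BUF_SIZE are hypothetical constants:
 *
 *	void *fw_buf = dma_mark_declared_memory_occupied(&pdev->dev,
 *							 FW_BUF_BUS_ADDR,
 *							 FW_BUF_SIZE);
 *	if (IS_ERR(fw_buf))
 *		return PTR_ERR(fw_buf);
 *
 * The call reserves the covered pages in the pool bitmap so the allocator
 * never hands them out, and returns their CPU address.
 */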

static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
				       ssize_t size, dma_addr_t *dma_handle)
{
	int order = get_order(size);
	unsigned long flags;
	int pageno;
	int dma_memory_map;
	void *ret;

	spin_lock_irqsave(&mem->spinlock, flags);

	if (unlikely(size > (mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the coherent area.
	 */
	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
	ret = mem->virt_base + (pageno << PAGE_SHIFT);
	dma_memory_map = (mem->flags & DMA_MEMORY_MAP);
	spin_unlock_irqrestore(&mem->spinlock, flags);
	if (dma_memory_map)
		memset(ret, 0, size);
	else
		memset_io(ret, 0, size);

	return ret;

err:
	spin_unlock_irqrestore(&mem->spinlock, flags);
	return NULL;
}

/**
 * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
 * @dev: device from which we allocate memory
 * @size: size of requested memory area
 * @dma_handle: This will be filled with the correct dma handle
 * @ret: This pointer will be filled with the virtual address
 *	 of the allocated area.
 *
 * This function should only be called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	if (!mem)
		return 0;

	*ret = __dma_alloc_from_coherent(mem, size, dma_handle);
	if (*ret)
		return 1;

	/*
	 * In the case where the allocation can not be satisfied from the
	 * per-device area, try to fall back to generic memory if the
	 * constraints allow it.
	 */
	return mem->flags & DMA_MEMORY_EXCLUSIVE;
}
EXPORT_SYMBOL(dma_alloc_from_dev_coherent);
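
/*
 * Example caller (a hedged sketch of the expected arch-side pattern; the
 * surrounding function name and the fallback helper are illustrative, not a
 * real in-tree implementation):
 *
 *	void *arch_dma_alloc(struct device *dev, size_t size,
 *			     dma_addr_t *handle, gfp_t gfp)
 *	{
 *		void *ret;
 *
 *		if (dma_alloc_from_dev_coherent(dev, size, handle, &ret))
 *			return ret;
 *
 *		return alloc_from_generic_pools(dev, size, handle, gfp);
 *	}
 *
 * Note that a nonzero return with *ret == NULL (an exhausted exclusive pool)
 * correctly propagates the allocation failure to the caller.
 */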

void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
{
	if (!dma_coherent_default_memory)
		return NULL;

	return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
					 dma_handle);
}

static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
				       int order, void *vaddr)
{
	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long flags;

		spin_lock_irqsave(&mem->spinlock, flags);
		bitmap_release_region(mem->bitmap, page, order);
		spin_unlock_irqrestore(&mem->spinlock, flags);
		return 1;
	}
	return 0;
}

/**
 * dma_release_from_dev_coherent() - free memory to device coherent memory pool
 * @dev: device from which the memory was allocated
 * @order: the order of pages allocated
 * @vaddr: virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller should
 * proceed with releasing memory from generic pools.
 */
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_release_from_coherent(mem, order, vaddr);
}
EXPORT_SYMBOL(dma_release_from_dev_coherent);
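
/*
 * Example caller (hedged sketch mirroring the allocation side): an arch
 * dma_free_coherent() implementation is expected to try the per-device pool
 * first and only then fall back to its generic release path:
 *
 *	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
 *		return;
 *	release_to_generic_pools(dev, size, cpu_addr, handle);
 *
 * release_to_generic_pools() is a stand-in for the arch's fallback path.
 */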

int dma_release_from_global_coherent(int order, void *vaddr)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_release_from_coherent(dma_coherent_default_memory, order,
					   vaddr);
}

static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
		struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
{
	if (mem && vaddr >= mem->virt_base && vaddr + size <=
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		unsigned long off = vma->vm_pgoff;
		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		int user_count = vma_pages(vma);
		int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		*ret = -ENXIO;
		if (off < count && user_count <= count - off) {
			unsigned long pfn = mem->pfn_base + start + off;
			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
					       user_count << PAGE_SHIFT,
					       vma->vm_page_prot);
		}
		return 1;
	}
	return 0;
}

/**
 * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
 * @dev: device from which the memory was allocated
 * @vma: vm_area for the userspace memory
 * @vaddr: cpu address returned by dma_alloc_from_dev_coherent
 * @size: size of the memory buffer allocated
 * @ret: result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if we correctly mapped the memory, or 0 if the caller should
 * proceed with mapping memory from generic pools.
 */
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			       void *vaddr, size_t size, int *ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
}
EXPORT_SYMBOL(dma_mmap_from_dev_coherent);
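
/*
 * Example caller (hedged sketch): a dma_mmap_attrs()-style handler is
 * expected to consult the per-device pool before doing its own mapping:
 *
 *	int err;
 *
 *	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &err))
 *		return err;
 *	return generic_dma_mmap(dev, vma, cpu_addr, handle, size);
 *
 * generic_dma_mmap() is a stand-in for the fallback mapping path; when the
 * hook returns 1, err already holds the remap_pfn_range() result.
 */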

int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
				  size_t size, int *ret)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
					vaddr, size, ret);
}

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

static struct reserved_mem *dma_reserved_default_memory __initdata;

static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	struct dma_coherent_mem *mem = rmem->priv;

	if (!mem &&
	    !dma_init_coherent_memory(rmem->base, rmem->base, rmem->size,
				      DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE,
				      &mem)) {
		pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
			&rmem->base, (unsigned long)rmem->size / SZ_1M);
		return -ENODEV;
	}
	mem->use_dev_dma_pfn_offset = true;
	rmem->priv = mem;
	dma_assign_coherent_memory(dev, mem);
	return 0;
}

static void rmem_dma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	if (dev)
		dev->dma_mem = NULL;
}

static const struct reserved_mem_ops rmem_dma_ops = {
	.device_init	= rmem_dma_device_init,
	.device_release	= rmem_dma_device_release,
};

static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL))
		return -EINVAL;

#ifdef CONFIG_ARM
	if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
		pr_err("Reserved memory: regions without no-map are not yet supported\n");
		return -EINVAL;
	}

	if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
		WARN(dma_reserved_default_memory,
		     "Reserved memory: region for default DMA coherent area is redefined\n");
		dma_reserved_default_memory = rmem;
	}
#endif

	rmem->ops = &rmem_dma_ops;
	pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}
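
/*
 * Example device tree usage (a hedged sketch; the node name, label and
 * addresses are illustrative, following the shared-dma-pool binding):
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		coherent_dma: dma-pool@58000000 {
 *			compatible = "shared-dma-pool";
 *			no-map;
 *			reg = <0x58000000 0x100000>;
 *		};
 *	};
 *
 * A device node then references the region with
 * memory-region = <&coherent_dma>; and rmem_dma_device_init() wires the
 * pool up when the device is bound.
 */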

static int __init dma_init_reserved_memory(void)
{
	const struct reserved_mem_ops *ops;
	int ret;

	if (!dma_reserved_default_memory)
		return -ENOMEM;

	ops = dma_reserved_default_memory->ops;

	/*
	 * We rely on rmem_dma_device_init() not propagating the error from
	 * dma_assign_coherent_memory() when it is called with a NULL device.
	 */
	ret = ops->device_init(dma_reserved_default_memory, NULL);

	if (!ret) {
		dma_coherent_default_memory = dma_reserved_default_memory->priv;
		pr_info("DMA: default coherent area is set\n");
	}

	return ret;
}

core_initcall(dma_init_reserved_memory);

RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
#endif