// SPDX-License-Identifier: GPL-2.0
/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

struct dma_coherent_mem {
	void		*virt_base;
	dma_addr_t	device_base;
	unsigned long	pfn_base;
	int		size;
	unsigned long	*bitmap;
	spinlock_t	spinlock;
	bool		use_dev_dma_pfn_offset;
};

static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;

static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
{
	if (dev && dev->dma_mem)
		return dev->dma_mem;
	return NULL;
}

static inline dma_addr_t dma_get_device_base(struct device *dev,
					     struct dma_coherent_mem *mem)
{
	if (mem->use_dev_dma_pfn_offset)
		return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT;
	else
		return mem->device_base;
}

static int dma_init_coherent_memory(phys_addr_t phys_addr,
		dma_addr_t device_addr, size_t size,
		struct dma_coherent_mem **mem)
{
	struct dma_coherent_mem *dma_mem = NULL;
	void *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
	int ret;

	if (!size) {
		ret = -EINVAL;
		goto out;
	}

	mem_base = memremap(phys_addr, size, MEMREMAP_WC);
	if (!mem_base) {
		ret = -EINVAL;
		goto out;
	}
	dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dma_mem) {
		ret = -ENOMEM;
		goto out;
	}
	dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dma_mem->bitmap) {
		ret = -ENOMEM;
		goto out;
	}

	dma_mem->virt_base = mem_base;
	dma_mem->device_base = device_addr;
	dma_mem->pfn_base = PFN_DOWN(phys_addr);
	dma_mem->size = pages;
	spin_lock_init(&dma_mem->spinlock);

	*mem = dma_mem;
	return 0;

out:
	kfree(dma_mem);
	if (mem_base)
		memunmap(mem_base);
	return ret;
}

static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
	if (!mem)
		return;

	memunmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}

static int dma_assign_coherent_memory(struct device *dev,
				      struct dma_coherent_mem *mem)
{
	if (!dev)
		return -ENODEV;

	if (dev->dma_mem)
		return -EBUSY;

	dev->dma_mem = mem;
	return 0;
}

int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem;
	int ret;

	ret = dma_init_coherent_memory(phys_addr, device_addr, size, &mem);
	if (ret)
		return ret;

	ret = dma_assign_coherent_memory(dev, mem);
	if (ret)
		dma_release_coherent_memory(mem);
	return ret;
}
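
/*
 * Example (an illustrative sketch only -- the device, bus address, and
 * pool size below are invented for documentation): a platform driver
 * could carve out a per-device pool from its probe routine:
 *
 *	ret = dma_declare_coherent_memory(&pdev->dev, 0x90000000,
 *					  0x90000000, SZ_1M);
 *	if (ret)
 *		return ret;
 *
 * Subsequent dma_alloc_coherent() calls for &pdev->dev would then be
 * satisfied from this pool rather than from generic memory.
 */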

static void *__dma_alloc_from_coherent(struct device *dev,
				       struct dma_coherent_mem *mem,
				       ssize_t size, dma_addr_t *dma_handle)
{
	int order = get_order(size);
	unsigned long flags;
	int pageno;
	void *ret;

	spin_lock_irqsave(&mem->spinlock, flags);

	if (unlikely(size > ((dma_addr_t)mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the coherent area.
	 */
	*dma_handle = dma_get_device_base(dev, mem) +
			((dma_addr_t)pageno << PAGE_SHIFT);
	ret = mem->virt_base + ((dma_addr_t)pageno << PAGE_SHIFT);
	spin_unlock_irqrestore(&mem->spinlock, flags);
	memset(ret, 0, size);
	return ret;
err:
	spin_unlock_irqrestore(&mem->spinlock, flags);
	return NULL;
}

/**
 * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
 * @dev:	device from which we allocate memory
 * @size:	size of requested memory area
 * @dma_handle:	this will be filled with the correct DMA handle
 * @ret:	this pointer will be filled with the virtual address
 *		of the allocated area
 *
 * This function should only be called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	if (!mem)
		return 0;

	*ret = __dma_alloc_from_coherent(dev, mem, size, dma_handle);
	return 1;
}
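
/*
 * A sketch of the expected caller pattern (illustrative only, not lifted
 * from a particular architecture):
 *
 *	void *vaddr;
 *
 *	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &vaddr))
 *		return vaddr;	// may be NULL if the pool is exhausted
 *
 * When the function returns 0, the caller falls through to allocating
 * from generic memory instead.
 */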

void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
				     dma_addr_t *dma_handle)
{
	if (!dma_coherent_default_memory)
		return NULL;

	return __dma_alloc_from_coherent(dev, dma_coherent_default_memory, size,
					 dma_handle);
}

static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
				       int order, void *vaddr)
{
	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long flags;

		spin_lock_irqsave(&mem->spinlock, flags);
		bitmap_release_region(mem->bitmap, page, order);
		spin_unlock_irqrestore(&mem->spinlock, flags);
		return 1;
	}
	return 0;
}

/**
 * dma_release_from_dev_coherent() - free memory to device coherent memory pool
 * @dev:	device from which the memory was allocated
 * @order:	the order of pages allocated
 * @vaddr:	virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller should
 * proceed with releasing memory from generic pools.
 */
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_release_from_coherent(mem, order, vaddr);
}
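
/*
 * The matching free path mirrors the allocation sketch above
 * (illustrative only):
 *
 *	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
 *		return;		// buffer belonged to the device pool
 *
 * A return value of 0 means the buffer must instead be released through
 * the generic allocator.
 */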

int dma_release_from_global_coherent(int order, void *vaddr)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_release_from_coherent(dma_coherent_default_memory, order,
			vaddr);
}

static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
		struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
{
	if (mem && vaddr >= mem->virt_base && vaddr + size <=
		   (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
		unsigned long off = vma->vm_pgoff;
		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long user_count = vma_pages(vma);
		int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		*ret = -ENXIO;
		if (off < count && user_count <= count - off) {
			unsigned long pfn = mem->pfn_base + start + off;
			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
					       user_count << PAGE_SHIFT,
					       vma->vm_page_prot);
		}
		return 1;
	}
	return 0;
}

/**
 * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
 * @dev:	device from which the memory was allocated
 * @vma:	vm_area for the userspace memory
 * @vaddr:	cpu address returned by dma_alloc_from_dev_coherent
 * @size:	size of the memory buffer allocated
 * @ret:	result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if @vaddr belongs to the device coherent pool and the caller
 * should return @ret, or 0 if they should proceed with mapping memory from
 * generic areas.
 */
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			       void *vaddr, size_t size, int *ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
}
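
/*
 * Typical use from an arch-level mmap hook (a hedged sketch; the calling
 * convention is exactly as documented above):
 *
 *	int ret;
 *
 *	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 *		return ret;	// handled (ret may still be -ENXIO)
 *
 * A return of 0 means the vma should be mapped from generic memory.
 */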

int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
				  size_t size, int *ret)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
					vaddr, size, ret);
}

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

static struct reserved_mem *dma_reserved_default_memory __initdata;

static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	struct dma_coherent_mem *mem = rmem->priv;
	int ret;

	if (!mem) {
		ret = dma_init_coherent_memory(rmem->base, rmem->base,
					       rmem->size, &mem);
		if (ret) {
			pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
			       &rmem->base, (unsigned long)rmem->size / SZ_1M);
			return ret;
		}
	}
	mem->use_dev_dma_pfn_offset = true;
	rmem->priv = mem;
	dma_assign_coherent_memory(dev, mem);
	return 0;
}

static void rmem_dma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	if (dev)
		dev->dma_mem = NULL;
}

static const struct reserved_mem_ops rmem_dma_ops = {
	.device_init	= rmem_dma_device_init,
	.device_release	= rmem_dma_device_release,
};

static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL))
		return -EINVAL;

#ifdef CONFIG_ARM
	if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
		pr_err("Reserved memory: regions without no-map are not yet supported\n");
		return -EINVAL;
	}

	if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
		WARN(dma_reserved_default_memory,
		     "Reserved memory: region for default DMA coherent area is redefined\n");
		dma_reserved_default_memory = rmem;
	}
#endif

	rmem->ops = &rmem_dma_ops;
	pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}
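
/*
 * A matching device-tree node would look roughly like the following
 * (an illustrative example; the node name, addresses, and size are
 * invented):
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		multimedia_pool: multimedia@77000000 {
 *			compatible = "shared-dma-pool";
 *			no-map;
 *			reg = <0x77000000 0x4000000>;
 *		};
 *	};
 *
 * A device references the pool with a phandle, e.g.
 * "memory-region = <&multimedia_pool>;", and its driver can then attach
 * the pool via of_reserved_mem_device_init(), which ends up calling
 * rmem_dma_device_init() above.
 */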

static int __init dma_init_reserved_memory(void)
{
	const struct reserved_mem_ops *ops;
	int ret;

	if (!dma_reserved_default_memory)
		return -ENOMEM;

	ops = dma_reserved_default_memory->ops;

	/*
	 * We rely on rmem_dma_device_init() not propagating an error from
	 * dma_assign_coherent_memory() when it is called for a NULL device.
	 */
	ret = ops->device_init(dma_reserved_default_memory, NULL);

	if (!ret) {
		dma_coherent_default_memory = dma_reserved_default_memory->priv;
		pr_info("DMA: default coherent area is set\n");
	}

	return ret;
}

core_initcall(dma_init_reserved_memory);

RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
#endif