// SPDX-License-Identifier: GPL-2.0
/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>

struct dma_coherent_mem {
	void		*virt_base;
	dma_addr_t	device_base;
	unsigned long	pfn_base;
	int		size;
	unsigned long	*bitmap;
	spinlock_t	spinlock;
	bool		use_dev_dma_pfn_offset;
};

static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
{
	if (dev && dev->dma_mem)
		return dev->dma_mem;
	return NULL;
}

static inline dma_addr_t dma_get_device_base(struct device *dev,
					     struct dma_coherent_mem * mem)
{
	if (mem->use_dev_dma_pfn_offset)
		return phys_to_dma(dev, PFN_PHYS(mem->pfn_base));
	return mem->device_base;
}

static struct dma_coherent_mem *dma_init_coherent_memory(phys_addr_t phys_addr,
		dma_addr_t device_addr, size_t size, bool use_dma_pfn_offset)
{
	struct dma_coherent_mem *dma_mem;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
	void *mem_base;

	if (!size)
		return ERR_PTR(-EINVAL);

	mem_base = memremap(phys_addr, size, MEMREMAP_WC);
	if (!mem_base)
		return ERR_PTR(-EINVAL);

	dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dma_mem)
		goto out_unmap_membase;
	dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dma_mem->bitmap)
		goto out_free_dma_mem;

	dma_mem->virt_base = mem_base;
	dma_mem->device_base = device_addr;
	dma_mem->pfn_base = PFN_DOWN(phys_addr);
	dma_mem->size = pages;
	dma_mem->use_dev_dma_pfn_offset = use_dma_pfn_offset;
	spin_lock_init(&dma_mem->spinlock);

	return dma_mem;

out_free_dma_mem:
	kfree(dma_mem);
out_unmap_membase:
	memunmap(mem_base);
	pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %zd MiB\n",
		&phys_addr, size / SZ_1M);
	return ERR_PTR(-ENOMEM);
}

static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
	if (!mem)
		return;

	memunmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}

static int dma_assign_coherent_memory(struct device *dev,
				      struct dma_coherent_mem *mem)
{
	if (!dev)
		return -ENODEV;

	if (dev->dma_mem)
		return -EBUSY;

	dev->dma_mem = mem;
	return 0;
}

/*
 * Declare a region of memory to be handed out by dma_alloc_coherent() when it
 * is asked for coherent memory for this device.  This shall only be used
 * from platform code, usually based on the device tree description.
 *
 * phys_addr is the CPU physical address to which the memory is currently
 * assigned (this will be ioremapped so the CPU can access the region).
 *
 * device_addr is the DMA address the device needs to be programmed with to
 * actually address this memory (this will be handed out as the dma_addr_t in
 * dma_alloc_coherent()).
 *
 * size is the size of the area (must be a multiple of PAGE_SIZE).
 *
 * As a simplification for the platforms, only *one* such region of memory may
 * be declared per device.
 */
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem;
	int ret;

	mem = dma_init_coherent_memory(phys_addr, device_addr, size, false);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	ret = dma_assign_coherent_memory(dev, mem);
	if (ret)
		dma_release_coherent_memory(mem);
	return ret;
}
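
/*
 * Illustrative sketch (not part of this file's build): platform code with a
 * device-local memory window might declare it roughly as below.  The probe
 * function name, addresses and size are assumptions made for the example.
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		// 1 MiB of device memory, CPU-visible at 0x90000000, seen by
 *		// the device itself at bus address 0x00000000.
 *		ret = dma_declare_coherent_memory(&pdev->dev, 0x90000000,
 *						  0x00000000, SZ_1M);
 *		if (ret)
 *			return ret;
 *		// dma_alloc_coherent(&pdev->dev, ...) is now served from
 *		// this pool.
 *		return 0;
 *	}
 */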

static void *__dma_alloc_from_coherent(struct device *dev,
				       struct dma_coherent_mem *mem,
				       ssize_t size, dma_addr_t *dma_handle)
{
	int order = get_order(size);
	unsigned long flags;
	int pageno;
	void *ret;

	spin_lock_irqsave(&mem->spinlock, flags);

	if (unlikely(size > ((dma_addr_t)mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the coherent area.
	 */
	*dma_handle = dma_get_device_base(dev, mem) +
			((dma_addr_t)pageno << PAGE_SHIFT);
	ret = mem->virt_base + ((dma_addr_t)pageno << PAGE_SHIFT);
	spin_unlock_irqrestore(&mem->spinlock, flags);
	memset(ret, 0, size);
	return ret;
err:
	spin_unlock_irqrestore(&mem->spinlock, flags);
	return NULL;
}

/**
 * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
 * @dev:	device from which we allocate memory
 * @size:	size of requested memory area
 * @dma_handle:	This will be filled with the correct dma handle
 * @ret:	This pointer will be filled with the virtual address
 *		to allocated area.
 *
 * This function should be only called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	if (!mem)
		return 0;

	*ret = __dma_alloc_from_coherent(dev, mem, size, dma_handle);
	return 1;
}
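
/*
 * Illustrative caller pattern (a sketch, not code from this file): the
 * generic allocation path consults the per-device pool first and only falls
 * back to the normal coherent allocator when no pool is assigned.
 *
 *	void *vaddr;
 *
 *	if (dma_alloc_from_dev_coherent(dev, size, &dma_handle, &vaddr))
 *		return vaddr;	// may be NULL if the pool is exhausted
 *	// otherwise continue with the generic coherent allocator
 */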

static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
				       int order, void *vaddr)
{
	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long flags;

		spin_lock_irqsave(&mem->spinlock, flags);
		bitmap_release_region(mem->bitmap, page, order);
		spin_unlock_irqrestore(&mem->spinlock, flags);
		return 1;
	}
	return 0;
}

/**
 * dma_release_from_dev_coherent() - free memory to device coherent memory pool
 * @dev:	device from which the memory was allocated
 * @order:	the order of pages allocated
 * @vaddr:	virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller should
 * proceed with releasing memory from generic pools.
 */
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_release_from_coherent(mem, order, vaddr);
}

static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
		struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
{
	if (mem && vaddr >= mem->virt_base && vaddr + size <=
		   (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
		unsigned long off = vma->vm_pgoff;
		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long user_count = vma_pages(vma);
		int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		*ret = -ENXIO;
		if (off < count && user_count <= count - off) {
			unsigned long pfn = mem->pfn_base + start + off;
			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
					       user_count << PAGE_SHIFT,
					       vma->vm_page_prot);
		}
		return 1;
	}
	return 0;
}

/**
 * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
 * @dev:	device from which the memory was allocated
 * @vma:	vm_area for the userspace memory
 * @vaddr:	cpu address returned by dma_alloc_from_dev_coherent
 * @size:	size of the memory buffer allocated
 * @ret:	result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if @vaddr belongs to the device coherent pool and the caller
 * should return @ret, or 0 if they should proceed with mapping memory from
 * generic areas.
 */
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			   void *vaddr, size_t size, int *ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
}
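
/*
 * Illustrative mmap fallback pattern (a sketch under the same assumptions as
 * the allocation example above): an mmap helper tries the per-device pool
 * first and only maps generic memory when 0 is returned.
 *
 *	int ret;
 *
 *	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 *		return ret;	// buffer came from the device pool
 *	// otherwise map the generic coherent buffer as usual
 */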

#ifdef CONFIG_DMA_GLOBAL_POOL
static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;

void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
				     dma_addr_t *dma_handle)
{
	if (!dma_coherent_default_memory)
		return NULL;

	return __dma_alloc_from_coherent(dev, dma_coherent_default_memory, size,
			dma_handle);
}

int dma_release_from_global_coherent(int order, void *vaddr)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_release_from_coherent(dma_coherent_default_memory, order,
			vaddr);
}

int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
				   size_t size, int *ret)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
			vaddr, size, ret);
}

int dma_init_global_coherent(phys_addr_t phys_addr, size_t size)
{
	struct dma_coherent_mem *mem;

	mem = dma_init_coherent_memory(phys_addr, phys_addr, size, true);
	if (IS_ERR(mem))
		return PTR_ERR(mem);
	dma_coherent_default_memory = mem;
	pr_info("DMA: default coherent area is set\n");
	return 0;
}
#endif /* CONFIG_DMA_GLOBAL_POOL */

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

#ifdef CONFIG_DMA_GLOBAL_POOL
static struct reserved_mem *dma_reserved_default_memory __initdata;
#endif

static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	if (!rmem->priv) {
		struct dma_coherent_mem *mem;

		mem = dma_init_coherent_memory(rmem->base, rmem->base,
					       rmem->size, true);
		if (IS_ERR(mem))
			return PTR_ERR(mem);
		rmem->priv = mem;
	}
	dma_assign_coherent_memory(dev, rmem->priv);
	return 0;
}

static void rmem_dma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	if (dev)
		dev->dma_mem = NULL;
}

static const struct reserved_mem_ops rmem_dma_ops = {
	.device_init	= rmem_dma_device_init,
	.device_release	= rmem_dma_device_release,
};

static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL))
		return -EINVAL;

#ifdef CONFIG_ARM
	if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
		pr_err("Reserved memory: regions without no-map are not yet supported\n");
		return -EINVAL;
	}
#endif

#ifdef CONFIG_DMA_GLOBAL_POOL
	if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
		WARN(dma_reserved_default_memory,
		     "Reserved memory: region for default DMA coherent area is redefined\n");
		dma_reserved_default_memory = rmem;
	}
#endif

	rmem->ops = &rmem_dma_ops;
	pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}
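
/*
 * Device tree sketch (illustrative only; node names, addresses and sizes are
 * assumptions): a "shared-dma-pool" reserved-memory region handled by
 * rmem_dma_setup() typically looks like this, with a device referencing it
 * through memory-region.
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		coherent_pool: linux,dma@78000000 {
 *			compatible = "shared-dma-pool";
 *			reg = <0x78000000 0x800000>;
 *			no-map;
 *		};
 *	};
 *
 *	some_device: device@80000000 {
 *		memory-region = <&coherent_pool>;
 *	};
 */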

#ifdef CONFIG_DMA_GLOBAL_POOL
static int __init dma_init_reserved_memory(void)
{
	if (!dma_reserved_default_memory)
		return -ENOMEM;
	return dma_init_global_coherent(dma_reserved_default_memory->base,
					dma_reserved_default_memory->size);
}
core_initcall(dma_init_reserved_memory);
#endif /* CONFIG_DMA_GLOBAL_POOL */

RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
#endif