// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
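
/*
 * A minimal usage sketch (illustrative only, not part of this file): a
 * driver typically creates one pool per fixed-size DMA object, allocates
 * blocks from it, and tears everything down on remove.  The device pointer,
 * sizes, and names below are assumptions for the example.
 *
 *	struct dma_pool *pool;
 *	dma_addr_t dma;
 *	void *cpu;
 *
 *	pool = dma_pool_create("my_desc", dev, 64, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *
 *	cpu = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (!cpu) {
 *		dma_pool_destroy(pool);
 *		return -ENOMEM;
 *	}
 *
 *	... program the device with 'dma', touch the block via 'cpu' ...
 *
 *	dma_pool_free(pool, cpu, dma);
 *	dma_pool_destroy(pool);
 */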

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG	1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

static ssize_t pools_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned int temp;
	unsigned int size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned int pages = 0;
		unsigned int blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR_RO(pools);

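/*
 * Reading the resulting sysfs file yields one line per pool: name, blocks
 * currently in use, total blocks, block size, and page count.  A plausible
 * (purely illustrative) read, with made-up pool names and numbers:
 *
 *	$ cat /sys/devices/.../pools
 *	poolinfo - 0.1
 *	ep0-128             3   64  128  2
 *	hw-desc             0  128   32  1
 */
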
/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: not in_interrupt()
 *
 * Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 *
 * Return: a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;
	bool empty = false;

	if (align == 0)
		align = 1;
	else if (align & (align - 1))
		return NULL;

	if (size == 0)
		return NULL;
	else if (size < 4)
		size = 4;

	size = ALIGN(size, align);
	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary)
		boundary = allocation;
	else if ((boundary < size) || (boundary & (boundary - 1)))
		return NULL;

	retval = kmalloc(sizeof(*retval), GFP_KERNEL);
	if (!retval)
		return retval;

	strscpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;

	INIT_LIST_HEAD(&retval->pools);

	/*
	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
	 * pools_reg_lock ensures that there is not a race between
	 * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
	 * when the first invocation of dma_pool_create() failed on
	 * device_create_file() and the second assumes that it has been done (I
	 * know it is a short window).
	 */
	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	if (list_empty(&dev->dma_pools))
		empty = true;
	list_add(&retval->pools, &dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty) {
		int err;

		err = device_create_file(dev, &dev_attr_pools);
		if (err) {
			mutex_lock(&pools_lock);
			list_del(&retval->pools);
			mutex_unlock(&pools_lock);
			mutex_unlock(&pools_reg_lock);
			kfree(retval);
			return NULL;
		}
	}
	mutex_unlock(&pools_reg_lock);
	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
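
/*
 * Illustrative only: a pool of 32-byte descriptors that a (hypothetical)
 * controller requires to be 32-byte aligned and to never straddle a 4KiB
 * address boundary would be created as:
 *
 *	pool = dma_pool_create("hw-desc", dev, 32, 32, 4096);
 *
 * Passing 0 for @boundary instead lets blocks fall anywhere inside an
 * allocation.
 */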

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;

		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}

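/*
 * Worked example of the free-list encoding above (the numbers are
 * assumptions, not constants from this file): with size = 64 and
 * boundary = allocation = 4096, the loop stores 64 at offset 0, 128 at
 * offset 64, and so on, chaining each free block to the next.  A stored
 * value >= pool->allocation terminates the chain: dma_pool_alloc() treats
 * page->offset >= pool->allocation as "page full".  With a smaller
 * boundary, the chain skips ahead to the next boundary whenever the
 * following block would otherwise cross it.
 */
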
static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef	DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline bool is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef	DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	struct dma_page *page, *tmp;
	bool empty = false;

	if (unlikely(!pool))
		return;

	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		empty = true;
	mutex_unlock(&pools_lock);
	if (empty)
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_reg_lock);

	list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev, "%s %s, %p busy\n", __func__,
					pool->name, page->vaddr);
			else
				pr_err("%s %s, %p busy\n", __func__,
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * Return: the kernel virtual address of a currently unused block; the
 * block's dma address is reported through @handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_alloc(mem_flags);

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}

	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
	spin_unlock_irqrestore(&pool->lock, flags);

	page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
	if (!page)
		return NULL;

	spin_lock_irqsave(&pool->lock, flags);

	list_add(&page->page_list, &pool->page_list);
 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	DMAPOOL_DEBUG
	{
		int i;
		u8 *data = retval;
		/* page->offset is stored in first 4 bytes */
		for (i = sizeof(page->offset); i < pool->size; i++) {
			if (data[i] == POOL_POISON_FREED)
				continue;
			if (pool->dev)
				dev_err(pool->dev, "%s %s, %p (corrupted)\n",
					__func__, pool->name, retval);
			else
				pr_err("%s %s, %p (corrupted)\n",
				       __func__, pool->name, retval);

			/*
			 * Dump the first 4 bytes even if they are not
			 * POOL_POISON_FREED
			 */
			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
				       data, pool->size, 1);
			break;
		}
	}
	if (!(mem_flags & __GFP_ZERO))
		memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
	spin_unlock_irqrestore(&pool->lock, flags);

	if (want_init_on_alloc(mem_flags))
		memset(retval, 0, pool->size);

	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
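
/*
 * Callers that need zeroed blocks can pass __GFP_ZERO; the
 * dma_pool_zalloc() wrapper in <linux/dmapool.h> does exactly that.
 * A sketch of typical hot-path use (variable names are the caller's,
 * assumed for the example):
 *
 *	desc = dma_pool_zalloc(pool, GFP_ATOMIC, &desc_dma);
 *	if (!desc)
 *		return -ENOMEM;
 *
 * GFP_ATOMIC is fine here because the slow path only sleeps when the
 * gfp mask allows it (see might_alloc() above).
 */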

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if ((dma - page->dma) < pool->allocation)
			return page;
	}
	return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
				__func__, pool->name, vaddr, &dma);
		else
			pr_err("%s %s, %p/%pad (bad dma)\n",
			       __func__, pool->name, vaddr, &dma);
		return;
	}

	offset = vaddr - page->vaddr;
	if (want_init_on_free())
		memset(vaddr, 0, pool->size);
#ifdef	DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
				__func__, pool->name, vaddr, &dma);
		else
			pr_err("%s %s, %p (bad vaddr)/%pad\n",
			       __func__, pool->name, vaddr, &dma);
		return;
	}
	{
		unsigned int chain = page->offset;

		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			if (pool->dev)
				dev_err(pool->dev, "%s %s, dma %pad already free\n",
					__func__, pool->name, &dma);
			else
				pr_err("%s %s, dma %pad already free\n",
				       __func__, pool->name, &dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
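
/*
 * Because this path only takes the pool spinlock with spin_lock_irqsave(),
 * blocks may be freed from interrupt context.  An illustrative completion
 * handler fragment (the handler, structures, and field names are
 * assumptions, not part of this file):
 *
 *	static void my_complete_irq(struct my_dev *md, struct my_desc *d)
 *	{
 *		dma_pool_free(md->desc_pool, d->cpu_addr, d->dma_addr);
 *	}
 */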

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  A DMA pool created with this function is
 * automatically destroyed on driver detach.
 *
 * Return: a managed dma allocation pool with the requested
 * characteristics, or %NULL if one can't be created.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
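
/*
 * Illustrative managed usage (the driver structure and names are
 * assumptions): the pool is tied to the device's devres list, so the
 * driver's remove path needs no explicit dma_pool_destroy().
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct my_dev *md = ...;
 *
 *		md->desc_pool = dmam_pool_create("my-desc", &pdev->dev,
 *						 sizeof(struct my_desc), 8, 0);
 *		if (!md->desc_pool)
 *			return -ENOMEM;
 *		...
 *	}
 */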

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);