// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 * Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
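
/*
 * A minimal usage sketch (illustrative only; the device pointer, pool
 * name, block size and error handling below are assumptions, not taken
 * from this file):
 *
 *	struct dma_pool *pool;
 *	dma_addr_t dma;
 *	void *vaddr;
 *
 *	pool = dma_pool_create("buf-64", dev, 64, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (vaddr) {
 *		... hand 'dma' to the device, access the block through 'vaddr' ...
 *		dma_pool_free(pool, vaddr, dma);
 *	}
 *	dma_pool_destroy(pool);
 */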

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

static ssize_t pools_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR_RO(pools);
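
/*
 * Reading the "pools" sysfs attribute yields one line per pool in the
 * format built above: pool name, blocks in use, total blocks, block size
 * and page count.  A hypothetical pool might show up as (values are
 * purely illustrative):
 *
 *	poolinfo - 0.1
 *	buffer-2048        12   32 2048  2
 */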

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for DMA.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: not in_interrupt()
 *
 * Given one of these pools, dma_pool_alloc() may be used to allocate memory.
 * Such memory will all have "consistent" DMA mappings, accessible by the
 * device and its driver without using cache flushing primitives.  The actual
 * size of blocks allocated may be larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * 4 KiB boundaries.
 *
 * Return: a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;
	bool empty = false;

	if (align == 0)
		align = 1;
	else if (align & (align - 1))
		return NULL;

	if (size == 0)
		return NULL;
	else if (size < 4)
		size = 4;

	size = ALIGN(size, align);
	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary)
		boundary = allocation;
	else if ((boundary < size) || (boundary & (boundary - 1)))
		return NULL;

	retval = kmalloc(sizeof(*retval), GFP_KERNEL);
	if (!retval)
		return retval;

	strscpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;

	INIT_LIST_HEAD(&retval->pools);

	/*
	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
	 * pools_reg_lock ensures that there is not a race between
	 * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
	 * when the first invocation of dma_pool_create() failed on
	 * device_create_file() and the second assumes that it has been done (I
	 * know it is a short window).
	 */
	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	if (list_empty(&dev->dma_pools))
		empty = true;
	list_add(&retval->pools, &dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty) {
		int err;

		err = device_create_file(dev, &dev_attr_pools);
		if (err) {
			mutex_lock(&pools_lock);
			list_del(&retval->pools);
			mutex_unlock(&pools_lock);
			mutex_unlock(&pools_reg_lock);
			kfree(retval);
			return NULL;
		}
	}
	mutex_unlock(&pools_reg_lock);
	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
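
/*
 * Example of the @boundary parameter: a hypothetical controller whose
 * 32-byte descriptors must not cross a 4 KiB boundary could create its
 * pool as below (the name and sizes are assumptions for illustration):
 *
 *	pool = dma_pool_create("hcd-desc", dev, 32, 32, 4096);
 *
 * Every block handed out by dma_pool_alloc() then lies entirely within
 * one 4 KiB region.
 */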

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}
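
/*
 * pool_initialise_page() threads the in-page free list: the first four
 * bytes of each free block hold the offset of the next free block, and
 * the chain ends at an offset >= pool->allocation (e.g. 0 -> 64 -> 128
 * -> ... for a 64-byte pool).  page->offset is the head of this chain,
 * so dma_pool_alloc() pops a block in O(1) and dma_pool_free() pushes
 * the freed block back onto the head.
 */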

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline bool is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	struct dma_page *page, *tmp;
	bool empty = false;

	if (unlikely(!pool))
		return;

	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		empty = true;
	mutex_unlock(&pools_lock);
	if (empty)
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_reg_lock);

	list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev, "%s %s, %p busy\n", __func__,
					pool->name, page->vaddr);
			else
				pr_err("%s %s, %p busy\n", __func__,
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * Return: the kernel virtual address of a currently unused block; the
 * block's DMA address is reported through @handle.  If no such memory
 * block can be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_alloc(mem_flags);

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}

	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
	spin_unlock_irqrestore(&pool->lock, flags);

	page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
	if (!page)
		return NULL;

	spin_lock_irqsave(&pool->lock, flags);

	list_add(&page->page_list, &pool->page_list);
 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef DMAPOOL_DEBUG
	{
		int i;
		u8 *data = retval;
		/* page->offset is stored in first 4 bytes */
		for (i = sizeof(page->offset); i < pool->size; i++) {
			if (data[i] == POOL_POISON_FREED)
				continue;
			if (pool->dev)
				dev_err(pool->dev, "%s %s, %p (corrupted)\n",
					__func__, pool->name, retval);
			else
				pr_err("%s %s, %p (corrupted)\n",
					__func__, pool->name, retval);

			/*
			 * Dump the first 4 bytes even if they are not
			 * POOL_POISON_FREED
			 */
			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
					data, pool->size, 1);
			break;
		}
	}
	if (!(mem_flags & __GFP_ZERO))
		memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
	spin_unlock_irqrestore(&pool->lock, flags);

	if (want_init_on_alloc(mem_flags))
		memset(retval, 0, pool->size);

	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
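
/*
 * A sketch of a typical call site (struct my_desc, the ring array and
 * the field names are hypothetical, not defined in this file):
 *
 *	dma_addr_t dma;
 *	struct my_desc *desc;
 *
 *	desc = dma_pool_alloc(pool, GFP_ATOMIC, &dma);
 *	if (!desc)
 *		return -ENOMEM;
 *	desc->len = cpu_to_le32(buf_len);	(CPU writes through the kernel address)
 *	ring[i] = cpu_to_le64(dma);		(the device is given the DMA address)
 *	...
 *	dma_pool_free(pool, desc, dma);		(both addresses identify the block)
 */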

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if ((dma - page->dma) < pool->allocation)
			return page;
	}
	return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
				__func__, pool->name, vaddr, &dma);
		else
			pr_err("%s %s, %p/%pad (bad dma)\n",
			       __func__, pool->name, vaddr, &dma);
		return;
	}

	offset = vaddr - page->vaddr;
	if (want_init_on_free())
		memset(vaddr, 0, pool->size);
#ifdef DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
				__func__, pool->name, vaddr, &dma);
		else
			pr_err("%s %s, %p (bad vaddr)/%pad\n",
			       __func__, pool->name, vaddr, &dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			if (pool->dev)
				dev_err(pool->dev, "%s %s, dma %pad already free\n",
					__func__, pool->name, &dma);
			else
				pr_err("%s %s, dma %pad already free\n",
				       __func__, pool->name, &dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 *
 * Return: a managed dma allocation pool with the requested
 * characteristics, or %NULL if one can't be created.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
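
/*
 * Sketch of dmam_pool_create() in a driver's probe path (the driver
 * structure, names and sizes are assumptions for illustration):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo *priv;
 *
 *		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 *		if (!priv)
 *			return -ENOMEM;
 *		priv->desc_pool = dmam_pool_create("foo-desc", &pdev->dev,
 *						   64, 8, 0);
 *		if (!priv->desc_pool)
 *			return -ENOMEM;
 *		...
 *		return 0;
 *	}
 *
 * No explicit dma_pool_destroy() is needed in the error paths or in the
 * remove() callback; devres tears the pool down when the driver detaches.
 */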

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);