/*
 * Copyright © 2006-2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include <linux/iova.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/cpu.h>

static bool iova_rcache_insert(struct iova_domain *iovad,
			       unsigned long pfn,
			       unsigned long size);
static unsigned long iova_rcache_get(struct iova_domain *iovad,
				     unsigned long size,
				     unsigned long limit_pfn);
static void init_iova_rcaches(struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);

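/*
 * Initialise an iova_domain: set up the rbtree and its lock, record the
 * allocation granule and pfn limits, and initialise the per-CPU rcaches.
 */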
void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	unsigned long start_pfn, unsigned long pfn_32bit)
{
	/*
	 * IOVA granularity will normally be equal to the smallest
	 * supported IOMMU page size; both *must* be capable of
	 * representing individual CPU pages exactly.
	 */
	BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));

	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached32_node = NULL;
	iovad->granule = granule;
	iovad->start_pfn = start_pfn;
	iovad->dma_32bit_pfn = pfn_32bit + 1;
	init_iova_rcaches(iovad);
}
EXPORT_SYMBOL_GPL(init_iova_domain);

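/*
 * Pick the starting point for a top-down search of the rbtree: the cached
 * 32-bit node when the request fits below dma_32bit_pfn, otherwise the
 * right-most node.  May lower *limit_pfn to the cached node's pfn_lo.
 */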
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
	if ((*limit_pfn > iovad->dma_32bit_pfn) ||
		(iovad->cached32_node == NULL))
		return rb_last(&iovad->rbroot);
	else {
		struct rb_node *prev_node = rb_prev(iovad->cached32_node);
		struct iova *curr_iova =
			rb_entry(iovad->cached32_node, struct iova, node);
		*limit_pfn = curr_iova->pfn_lo;
		return prev_node;
	}
}

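/*
 * Only allocations made right at the 32-bit boundary update the cache;
 * the new node becomes the starting point for later 32-bit searches.
 */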
static void
__cached_rbnode_insert_update(struct iova_domain *iovad,
	unsigned long limit_pfn, struct iova *new)
{
	if (limit_pfn != iovad->dma_32bit_pfn)
		return;
	iovad->cached32_node = &new->node;
}

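/*
 * When an iova at or above the cached node is freed, advance the cache to
 * the next node so that a later search can reuse the freed space, but only
 * if that node still lies below the 32-bit boundary.
 */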
static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;
	struct rb_node *curr;

	if (!iovad->cached32_node)
		return;
	curr = iovad->cached32_node;
	cached_iova = rb_entry(curr, struct iova, node);

	if (free->pfn_lo >= cached_iova->pfn_lo) {
		struct rb_node *node = rb_next(&free->node);
		struct iova *iova = rb_entry(node, struct iova, node);

		/* only cache if it's below 32bit pfn */
		if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
			iovad->cached32_node = node;
		else
			iovad->cached32_node = NULL;
	}
}

/* Insert the iova into domain rbtree by holding writer lock */
static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova,
		   struct rb_node *start)
{
	struct rb_node **new, *parent = NULL;

	new = (start) ? &start : &(root->rb_node);
	/* Figure out where to put new node */
	while (*new) {
		struct iova *this = rb_entry(*new, struct iova, node);

		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else {
			WARN_ON(1); /* this should not happen */
			return;
		}
	}
	/* Add new node and rebalance tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}

/*
 * Computes the padding size required to make the start address
 * naturally aligned on the power-of-two order of its size.
 */
static unsigned int
iova_get_pad_size(unsigned int size, unsigned int limit_pfn)
{
	return (limit_pfn - size) & (__roundup_pow_of_two(size) - 1);
}

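/*
 * Walk the rbtree top-down from limit_pfn looking for a gap that can hold
 * 'size' pfns (plus any alignment padding), then insert the new iova and
 * update the 32-bit search cache.  Returns -ENOMEM if no gap is found.
 */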
static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
		unsigned long size, unsigned long limit_pfn,
			struct iova *new, bool size_aligned)
{
	struct rb_node *prev, *curr = NULL;
	unsigned long flags;
	unsigned long saved_pfn;
	unsigned int pad_size = 0;

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	saved_pfn = limit_pfn;
	curr = __get_cached_rbnode(iovad, &limit_pfn);
	prev = curr;
	while (curr) {
		struct iova *curr_iova = rb_entry(curr, struct iova, node);

		if (limit_pfn <= curr_iova->pfn_lo) {
			goto move_left;
		} else if (limit_pfn > curr_iova->pfn_hi) {
			if (size_aligned)
				pad_size = iova_get_pad_size(size, limit_pfn);
			if ((curr_iova->pfn_hi + size + pad_size) < limit_pfn)
				break;	/* found a free slot */
		}
		limit_pfn = curr_iova->pfn_lo;
move_left:
		prev = curr;
		curr = rb_prev(curr);
	}

	if (!curr) {
		if (size_aligned)
			pad_size = iova_get_pad_size(size, limit_pfn);
		if ((iovad->start_pfn + size + pad_size) > limit_pfn) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			return -ENOMEM;
		}
	}

	/* pfn_lo will point to size aligned address if size_aligned is set */
	new->pfn_lo = limit_pfn - (size + pad_size);
	new->pfn_hi = new->pfn_lo + size - 1;

	/* If we have 'prev', it's a valid place to start the insertion. */
	iova_insert_rbtree(&iovad->rbroot, new, prev);
	__cached_rbnode_insert_update(iovad, saved_pfn, new);

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return 0;
}

static struct kmem_cache *iova_cache;
static unsigned int iova_cache_users;
static DEFINE_MUTEX(iova_cache_mutex);

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iova_cache, GFP_ATOMIC);
}
EXPORT_SYMBOL(alloc_iova_mem);

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iova_cache, iova);
}
EXPORT_SYMBOL(free_iova_mem);

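/*
 * The iova kmem cache is shared by all users and reference counted: it is
 * created on the first iova_cache_get() and destroyed again once the last
 * user has called iova_cache_put().
 */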
int iova_cache_get(void)
{
	mutex_lock(&iova_cache_mutex);
	if (!iova_cache_users) {
		iova_cache = kmem_cache_create(
			"iommu_iova", sizeof(struct iova), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!iova_cache) {
			mutex_unlock(&iova_cache_mutex);
			printk(KERN_ERR "Couldn't create iova cache\n");
			return -ENOMEM;
		}
	}

	iova_cache_users++;
	mutex_unlock(&iova_cache_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(iova_cache_get);

void iova_cache_put(void)
{
	mutex_lock(&iova_cache_mutex);
	if (WARN_ON(!iova_cache_users)) {
		mutex_unlock(&iova_cache_mutex);
		return;
	}
	iova_cache_users--;
	if (!iova_cache_users)
		kmem_cache_destroy(iova_cache);
	mutex_unlock(&iova_cache_mutex);
}
EXPORT_SYMBOL_GPL(iova_cache_put);

/**
 * alloc_iova - allocates an iova
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * @size_aligned: - set if size_aligned address range is required
 * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
 * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
 * flag is set then the allocated address iova->pfn_lo will be naturally
 * aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned)
{
	struct iova *new_iova;
	int ret;

	new_iova = alloc_iova_mem();
	if (!new_iova)
		return NULL;

	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1,
			new_iova, size_aligned);

	if (ret) {
		free_iova_mem(new_iova);
		return NULL;
	}

	return new_iova;
}
EXPORT_SYMBOL_GPL(alloc_iova);

static struct iova *
private_find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct rb_node *node = iovad->rbroot.rb_node;

	assert_spin_locked(&iovad->iova_rbtree_lock);

	while (node) {
		struct iova *iova = rb_entry(node, struct iova, node);

		/* If pfn falls within iova's range, return iova */
		if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
			return iova;
		}

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_lo)
			node = node->rb_right;
	}

	return NULL;
}

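/* Remove an iova from the rbtree and free it; the rbtree lock must be held. */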
static void private_free_iova(struct iova_domain *iovad, struct iova *iova)
{
	assert_spin_locked(&iovad->iova_rbtree_lock);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
	free_iova_mem(iova);
}

/**
 * find_iova - finds an iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct iova *iova;

	/* Take the lock so that no other thread is manipulating the rbtree */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	iova = private_find_iova(iovad, pfn);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
EXPORT_SYMBOL_GPL(find_iova);

/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	private_free_iova(iovad, iova);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(__free_iova);

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct iova *iova = find_iova(iovad, pfn);

	if (iova)
		__free_iova(iovad, iova);
}
EXPORT_SYMBOL_GPL(free_iova);

/**
 * alloc_iova_fast - allocates an iova from rcache
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * This function tries to satisfy an iova allocation from the rcache,
 * and falls back to regular allocation on failure.
 */
unsigned long
alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
		unsigned long limit_pfn)
{
	bool flushed_rcache = false;
	unsigned long iova_pfn;
	struct iova *new_iova;

	iova_pfn = iova_rcache_get(iovad, size, limit_pfn);
	if (iova_pfn)
		return iova_pfn;

retry:
	new_iova = alloc_iova(iovad, size, limit_pfn, true);
	if (!new_iova) {
		unsigned int cpu;

		if (flushed_rcache)
			return 0;

		/* Try replenishing IOVAs by flushing rcache. */
		flushed_rcache = true;
		for_each_online_cpu(cpu)
			free_cpu_cached_iovas(cpu, iovad);
		goto retry;
	}

	return new_iova->pfn_lo;
}
EXPORT_SYMBOL_GPL(alloc_iova_fast);

/**
 * free_iova_fast - free iova pfn range into rcache
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * @size: - # of pages in range
 * This function frees an iova range by trying to put it into the rcache,
 * falling back to regular iova deallocation via free_iova() if this fails.
 */
void
free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
{
	if (iova_rcache_insert(iovad, pfn, size))
		return;

	free_iova(iovad, pfn);
}
EXPORT_SYMBOL_GPL(free_iova_fast);

/**
 * put_iova_domain - destroys the iova domain
 * @iovad: - iova domain in question.
 * All the iova's in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
	struct rb_node *node;
	unsigned long flags;

	free_iova_rcaches(iovad);
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = rb_first(&iovad->rbroot);
	while (node) {
		struct iova *iova = rb_entry(node, struct iova, node);

		rb_erase(node, &iovad->rbroot);
		free_iova_mem(iova);
		node = rb_first(&iovad->rbroot);
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(put_iova_domain);

static int
__is_range_overlap(struct rb_node *node,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova = rb_entry(node, struct iova, node);

	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
		return 1;
	return 0;
}

static inline struct iova *
alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_iova_mem();
	if (iova) {
		iova->pfn_lo = pfn_lo;
		iova->pfn_hi = pfn_hi;
	}

	return iova;
}

static struct iova *
__insert_new_range(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_and_init_iova(pfn_lo, pfn_hi);
	if (iova)
		iova_insert_rbtree(&iovad->rbroot, iova, NULL);

	return iova;
}

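/*
 * Grow an existing reservation downwards to cover *pfn_lo if needed, and
 * advance *pfn_lo past the node so that only the remaining, non-overlapping
 * part of the requested range still needs to be inserted.
 */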
static void
__adjust_overlap_range(struct iova *iova,
	unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < iova->pfn_lo)
		iova->pfn_lo = *pfn_lo;
	if (*pfn_hi > iova->pfn_hi)
		*pfn_lo = iova->pfn_hi + 1;
}

/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: - iova domain pointer
 * @pfn_lo: - lower page frame address
 * @pfn_hi: - higher pfn address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that this address is not dished out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;
	unsigned int overlap = 0;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
			iova = rb_entry(node, struct iova, node);
			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
			if ((pfn_lo >= iova->pfn_lo) &&
				(pfn_hi <= iova->pfn_hi))
				goto finish;
			overlap = 1;

		} else if (overlap)
			break;
	}

	/* We are here either because this is the first node to reserve
	 * or because we still need to insert the remaining non-overlapping
	 * address range.
	 */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
EXPORT_SYMBOL_GPL(reserve_iova);

/**
 * copy_reserved_iova - copies the reserved iovas between domains
 * @from: - source domain from where to copy
 * @to: - destination domain where to copy
 * This function copies reserved iovas from one domain to
 * another.
 */
void
copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
{
	unsigned long flags;
	struct rb_node *node;

	spin_lock_irqsave(&from->iova_rbtree_lock, flags);
	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
		struct iova *iova = rb_entry(node, struct iova, node);
		struct iova *new_iova;

		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
		if (!new_iova)
			printk(KERN_ERR "Reserve iova range %lx-%lx failed\n",
				iova->pfn_lo, iova->pfn_hi);
	}
	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(copy_reserved_iova);

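/*
 * Carve [pfn_lo, pfn_hi] out of an existing iova: any pieces below and above
 * the range are re-inserted as separate nodes, while the original iova is
 * erased from the tree, trimmed to the requested range and returned.
 */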
struct iova *
split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
		      unsigned long pfn_lo, unsigned long pfn_hi)
{
	unsigned long flags;
	struct iova *prev = NULL, *next = NULL;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	if (iova->pfn_lo < pfn_lo) {
		prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
		if (prev == NULL)
			goto error;
	}
	if (iova->pfn_hi > pfn_hi) {
		next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
		if (next == NULL)
			goto error;
	}

	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);

	if (prev) {
		iova_insert_rbtree(&iovad->rbroot, prev, NULL);
		iova->pfn_lo = pfn_lo;
	}
	if (next) {
		iova_insert_rbtree(&iovad->rbroot, next, NULL);
		iova->pfn_hi = pfn_hi;
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return iova;

error:
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	if (prev)
		free_iova_mem(prev);
	return NULL;
}

/*
 * Magazine caches for IOVA ranges.  For an introduction to magazines,
 * see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab
 * Allocator to Many CPUs and Arbitrary Resources" by Bonwick and Adams.
 * For simplicity, we use a static magazine size and don't implement the
 * dynamic size tuning described in the paper.
 */

#define IOVA_MAG_SIZE 128

struct iova_magazine {
	unsigned long size;
	unsigned long pfns[IOVA_MAG_SIZE];
};

struct iova_cpu_rcache {
	spinlock_t lock;
	struct iova_magazine *loaded;
	struct iova_magazine *prev;
};

static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
{
	return kzalloc(sizeof(struct iova_magazine), flags);
}

static void iova_magazine_free(struct iova_magazine *mag)
{
	kfree(mag);
}

static void
iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
{
	unsigned long flags;
	int i;

	if (!mag)
		return;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);

	for (i = 0; i < mag->size; ++i) {
		struct iova *iova = private_find_iova(iovad, mag->pfns[i]);

		BUG_ON(!iova);
		private_free_iova(iovad, iova);
	}

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	mag->size = 0;
}

static bool iova_magazine_full(struct iova_magazine *mag)
{
	return (mag && mag->size == IOVA_MAG_SIZE);
}

static bool iova_magazine_empty(struct iova_magazine *mag)
{
	return (!mag || mag->size == 0);
}

static unsigned long iova_magazine_pop(struct iova_magazine *mag,
				       unsigned long limit_pfn)
{
	BUG_ON(iova_magazine_empty(mag));

	if (mag->pfns[mag->size - 1] >= limit_pfn)
		return 0;

	return mag->pfns[--mag->size];
}

static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
{
	BUG_ON(iova_magazine_full(mag));

	mag->pfns[mag->size++] = pfn;
}

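/*
 * Set up one rcache per allocation order: an empty depot plus a pair of
 * per-CPU magazines ('loaded' and 'prev') for each possible CPU.
 */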
static void init_iova_rcaches(struct iova_domain *iovad)
{
	struct iova_cpu_rcache *cpu_rcache;
	struct iova_rcache *rcache;
	unsigned int cpu;
	int i;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		spin_lock_init(&rcache->lock);
		rcache->depot_size = 0;
		rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache), cache_line_size());
		if (WARN_ON(!rcache->cpu_rcaches))
			continue;
		for_each_possible_cpu(cpu) {
			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
			spin_lock_init(&cpu_rcache->lock);
			cpu_rcache->loaded = iova_magazine_alloc(GFP_KERNEL);
			cpu_rcache->prev = iova_magazine_alloc(GFP_KERNEL);
		}
	}
}

/*
 * Try inserting IOVA range starting with 'iova_pfn' into 'rcache', and
 * return true on success.  Can fail if rcache is full and we can't free
 * space, and the caller (free_iova_fast()) will then return the IOVA
 * range to the rbtree via free_iova() instead.
 */
static bool __iova_rcache_insert(struct iova_domain *iovad,
				 struct iova_rcache *rcache,
				 unsigned long iova_pfn)
{
	struct iova_magazine *mag_to_free = NULL;
	struct iova_cpu_rcache *cpu_rcache;
	bool can_insert = false;
	unsigned long flags;

	cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
	spin_lock_irqsave(&cpu_rcache->lock, flags);

	if (!iova_magazine_full(cpu_rcache->loaded)) {
		can_insert = true;
	} else if (!iova_magazine_full(cpu_rcache->prev)) {
		swap(cpu_rcache->prev, cpu_rcache->loaded);
		can_insert = true;
	} else {
		struct iova_magazine *new_mag = iova_magazine_alloc(GFP_ATOMIC);

		if (new_mag) {
			spin_lock(&rcache->lock);
			if (rcache->depot_size < MAX_GLOBAL_MAGS) {
				rcache->depot[rcache->depot_size++] =
						cpu_rcache->loaded;
			} else {
				mag_to_free = cpu_rcache->loaded;
			}
			spin_unlock(&rcache->lock);

			cpu_rcache->loaded = new_mag;
			can_insert = true;
		}
	}

	if (can_insert)
		iova_magazine_push(cpu_rcache->loaded, iova_pfn);

	spin_unlock_irqrestore(&cpu_rcache->lock, flags);

	if (mag_to_free) {
		iova_magazine_free_pfns(mag_to_free, iovad);
		iova_magazine_free(mag_to_free);
	}

	return can_insert;
}

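/*
 * IOVA ranges are cached by allocation order; anything larger than
 * IOVA_RANGE_CACHE_MAX_SIZE orders bypasses the rcache entirely.
 */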
static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn,
			       unsigned long size)
{
	unsigned int log_size = order_base_2(size);

	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
		return false;

	return __iova_rcache_insert(iovad, &iovad->rcaches[log_size], pfn);
}

/*
 * Caller wants to allocate a new IOVA range from 'rcache'.  If we can
 * satisfy the request, return a matching non-zero pfn and remove the
 * range from the 'rcache'.
 */
static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
				       unsigned long limit_pfn)
{
	struct iova_cpu_rcache *cpu_rcache;
	unsigned long iova_pfn = 0;
	bool has_pfn = false;
	unsigned long flags;

	cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
	spin_lock_irqsave(&cpu_rcache->lock, flags);

	if (!iova_magazine_empty(cpu_rcache->loaded)) {
		has_pfn = true;
	} else if (!iova_magazine_empty(cpu_rcache->prev)) {
		swap(cpu_rcache->prev, cpu_rcache->loaded);
		has_pfn = true;
	} else {
		spin_lock(&rcache->lock);
		if (rcache->depot_size > 0) {
			iova_magazine_free(cpu_rcache->loaded);
			cpu_rcache->loaded = rcache->depot[--rcache->depot_size];
			has_pfn = true;
		}
		spin_unlock(&rcache->lock);
	}

	if (has_pfn)
		iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);

	spin_unlock_irqrestore(&cpu_rcache->lock, flags);

	return iova_pfn;
}

/*
 * Try to satisfy IOVA allocation range from rcache.  Fail if requested
 * size is too big or the DMA limit we are given isn't satisfied by the
 * top element in the magazine.
 */
static unsigned long iova_rcache_get(struct iova_domain *iovad,
				     unsigned long size,
				     unsigned long limit_pfn)
{
	unsigned int log_size = order_base_2(size);

	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
		return 0;

	return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn);
}

/*
 * Free a cpu's rcache.
 */
static void free_cpu_iova_rcache(unsigned int cpu, struct iova_domain *iovad,
				 struct iova_rcache *rcache)
{
	struct iova_cpu_rcache *cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
	unsigned long flags;

	spin_lock_irqsave(&cpu_rcache->lock, flags);

	iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
	iova_magazine_free(cpu_rcache->loaded);

	iova_magazine_free_pfns(cpu_rcache->prev, iovad);
	iova_magazine_free(cpu_rcache->prev);

	spin_unlock_irqrestore(&cpu_rcache->lock, flags);
}

/*
 * free rcache data structures.
 */
static void free_iova_rcaches(struct iova_domain *iovad)
{
	struct iova_rcache *rcache;
	unsigned long flags;
	unsigned int cpu;
	int i, j;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		for_each_possible_cpu(cpu)
			free_cpu_iova_rcache(cpu, iovad, rcache);
		spin_lock_irqsave(&rcache->lock, flags);
		free_percpu(rcache->cpu_rcaches);
		for (j = 0; j < rcache->depot_size; ++j) {
			iova_magazine_free_pfns(rcache->depot[j], iovad);
			iova_magazine_free(rcache->depot[j]);
		}
		spin_unlock_irqrestore(&rcache->lock, flags);
	}
}

/*
 * free all the IOVA ranges cached by a cpu (used when cpu is unplugged)
 */
void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
{
	struct iova_cpu_rcache *cpu_rcache;
	struct iova_rcache *rcache;
	unsigned long flags;
	int i;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
		spin_lock_irqsave(&cpu_rcache->lock, flags);
		iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
		iova_magazine_free_pfns(cpu_rcache->prev, iovad);
		spin_unlock_irqrestore(&cpu_rcache->lock, flags);
	}
}

MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
MODULE_LICENSE("GPL");