/*
 * Copyright © 2006-2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include <linux/iova.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/cpu.h>

static bool iova_rcache_insert(struct iova_domain *iovad,
			       unsigned long pfn,
			       unsigned long size);
static unsigned long iova_rcache_get(struct iova_domain *iovad,
				     unsigned long size,
				     unsigned long limit_pfn);
static void init_iova_rcaches(struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);

void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	unsigned long start_pfn, unsigned long pfn_32bit)
{
	/*
	 * IOVA granularity will normally be equal to the smallest
	 * supported IOMMU page size; both *must* be capable of
	 * representing individual CPU pages exactly.
	 */
	BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));

	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached32_node = NULL;
	iovad->granule = granule;
	iovad->start_pfn = start_pfn;
	iovad->dma_32bit_pfn = pfn_32bit;
	init_iova_rcaches(iovad);
}
EXPORT_SYMBOL_GPL(init_iova_domain);
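
/*
 * Illustrative setup (a sketch, not taken from this file): a caller such
 * as an IOMMU driver typically sizes the domain from its minimum page
 * size and the device's 32-bit DMA boundary.  'iovad' is a hypothetical
 * name, and the shift by PAGE_SHIFT assumes granule == PAGE_SIZE.
 *
 *	struct iova_domain iovad;
 *
 *	init_iova_domain(&iovad, PAGE_SIZE, 1,
 *			 DMA_BIT_MASK(32) >> PAGE_SHIFT);
 */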

static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
	if ((*limit_pfn > iovad->dma_32bit_pfn) ||
		(iovad->cached32_node == NULL))
		return rb_last(&iovad->rbroot);
	else {
		struct rb_node *prev_node = rb_prev(iovad->cached32_node);
		struct iova *curr_iova =
			rb_entry(iovad->cached32_node, struct iova, node);
		*limit_pfn = curr_iova->pfn_lo - 1;
		return prev_node;
	}
}

static void
__cached_rbnode_insert_update(struct iova_domain *iovad,
	unsigned long limit_pfn, struct iova *new)
{
	if (limit_pfn != iovad->dma_32bit_pfn)
		return;
	iovad->cached32_node = &new->node;
}

static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;
	struct rb_node *curr;

	if (!iovad->cached32_node)
		return;
	curr = iovad->cached32_node;
	cached_iova = rb_entry(curr, struct iova, node);

	if (free->pfn_lo >= cached_iova->pfn_lo) {
		struct rb_node *node = rb_next(&free->node);
		struct iova *iova = rb_entry(node, struct iova, node);

		/* only cache if it's below 32bit pfn */
		if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
			iovad->cached32_node = node;
		else
			iovad->cached32_node = NULL;
	}
}

/* Insert the iova into the domain rbtree; the caller must hold the writer lock */
static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova,
		   struct rb_node *start)
{
	struct rb_node **new, *parent = NULL;

	new = (start) ? &start : &(root->rb_node);
	/* Figure out where to put the new node */
	while (*new) {
		struct iova *this = rb_entry(*new, struct iova, node);

		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else {
			WARN_ON(1); /* this should not happen */
			return;
		}
	}
	/* Add new node and rebalance tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}

/*
 * Computes the padding size required to make the start address
 * naturally aligned on the power-of-two order of its size.
 */
static unsigned int
iova_get_pad_size(unsigned int size, unsigned int limit_pfn)
{
	return (limit_pfn + 1 - size) & (__roundup_pow_of_two(size) - 1);
}
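
/*
 * Worked example (illustrative): for size = 5 and limit_pfn = 0x1f,
 * __roundup_pow_of_two(5) = 8 and pad_size = (0x20 - 5) & 7 = 3, so the
 * allocator below picks pfn_lo = limit_pfn - (size + pad_size) + 1 =
 * 0x18, which is aligned on the rounded-up order of 8 as intended.
 */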

static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
		unsigned long size, unsigned long limit_pfn,
		struct iova *new, bool size_aligned)
{
	struct rb_node *prev, *curr = NULL;
	unsigned long flags;
	unsigned long saved_pfn;
	unsigned int pad_size = 0;

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	saved_pfn = limit_pfn;
	curr = __get_cached_rbnode(iovad, &limit_pfn);
	prev = curr;
	while (curr) {
		struct iova *curr_iova = rb_entry(curr, struct iova, node);

		if (limit_pfn < curr_iova->pfn_lo)
			goto move_left;
		else if (limit_pfn < curr_iova->pfn_hi)
			goto adjust_limit_pfn;
		else {
			if (size_aligned)
				pad_size = iova_get_pad_size(size, limit_pfn);
			if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
				break;	/* found a free slot */
		}
adjust_limit_pfn:
		limit_pfn = curr_iova->pfn_lo ? (curr_iova->pfn_lo - 1) : 0;
move_left:
		prev = curr;
		curr = rb_prev(curr);
	}

	if (!curr) {
		if (size_aligned)
			pad_size = iova_get_pad_size(size, limit_pfn);
		if ((iovad->start_pfn + size + pad_size) > limit_pfn) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			return -ENOMEM;
		}
	}

	/* pfn_lo will point to a size-aligned address if size_aligned is set */
	new->pfn_lo = limit_pfn - (size + pad_size) + 1;
	new->pfn_hi = new->pfn_lo + size - 1;

	/* If we have 'prev', it's a valid place to start the insertion. */
	iova_insert_rbtree(&iovad->rbroot, new, prev);
	__cached_rbnode_insert_update(iovad, saved_pfn, new);

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return 0;
}

static struct kmem_cache *iova_cache;
static unsigned int iova_cache_users;
static DEFINE_MUTEX(iova_cache_mutex);

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iova_cache, GFP_ATOMIC);
}
EXPORT_SYMBOL(alloc_iova_mem);

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iova_cache, iova);
}
EXPORT_SYMBOL(free_iova_mem);

int iova_cache_get(void)
{
	mutex_lock(&iova_cache_mutex);
	if (!iova_cache_users) {
		iova_cache = kmem_cache_create(
			"iommu_iova", sizeof(struct iova), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!iova_cache) {
			mutex_unlock(&iova_cache_mutex);
			printk(KERN_ERR "Couldn't create iova cache\n");
			return -ENOMEM;
		}
	}

	iova_cache_users++;
	mutex_unlock(&iova_cache_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(iova_cache_get);

void iova_cache_put(void)
{
	mutex_lock(&iova_cache_mutex);
	if (WARN_ON(!iova_cache_users)) {
		mutex_unlock(&iova_cache_mutex);
		return;
	}
	iova_cache_users--;
	if (!iova_cache_users)
		kmem_cache_destroy(iova_cache);
	mutex_unlock(&iova_cache_mutex);
}
EXPORT_SYMBOL_GPL(iova_cache_put);
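
/*
 * Illustrative lifecycle (a sketch): a subsystem that creates iova
 * domains takes a reference on the shared kmem cache first and drops
 * it once all of its domains are gone.
 *
 *	int ret = iova_cache_get();
 *
 *	if (ret)
 *		return ret;
 *	...
 *	iova_cache_put();
 */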

/**
 * alloc_iova - allocates an iova
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * @size_aligned: - set if size_aligned address range is required
 * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
 * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
 * flag is set then the allocated address iova->pfn_lo will be naturally
 * aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned)
{
	struct iova *new_iova;
	int ret;

	new_iova = alloc_iova_mem();
	if (!new_iova)
		return NULL;

	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
			new_iova, size_aligned);

	if (ret) {
		free_iova_mem(new_iova);
		return NULL;
	}

	return new_iova;
}
EXPORT_SYMBOL_GPL(alloc_iova);
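
/*
 * Illustrative use (a sketch; 'iovad' is an already-initialized domain):
 * allocate a size-aligned range of 8 granules below the 32-bit
 * boundary, then release it.
 *
 *	struct iova *iova;
 *
 *	iova = alloc_iova(&iovad, 8, iovad.dma_32bit_pfn, true);
 *	if (!iova)
 *		return -ENOMEM;
 *	...
 *	__free_iova(&iovad, iova);
 */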

static struct iova *
private_find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct rb_node *node = iovad->rbroot.rb_node;

	assert_spin_locked(&iovad->iova_rbtree_lock);

	while (node) {
		struct iova *iova = rb_entry(node, struct iova, node);

		/* If pfn falls within iova's range, return iova */
		if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
			return iova;
		}

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_lo)
			node = node->rb_right;
	}

	return NULL;
}

static void private_free_iova(struct iova_domain *iovad, struct iova *iova)
{
	assert_spin_locked(&iovad->iova_rbtree_lock);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
	free_iova_mem(iova);
}

/**
 * find_iova - finds an iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct iova *iova;

	/* Take the lock so that no other thread is manipulating the rbtree */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	iova = private_find_iova(iovad, pfn);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
EXPORT_SYMBOL_GPL(find_iova);

/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	private_free_iova(iovad, iova);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(__free_iova);

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct iova *iova = find_iova(iovad, pfn);

	if (iova)
		__free_iova(iovad, iova);
}
EXPORT_SYMBOL_GPL(free_iova);

/**
 * alloc_iova_fast - allocates an iova from rcache
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * This function tries to satisfy an iova allocation from the rcache,
 * and falls back to regular allocation on failure.
 */
unsigned long
alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
		unsigned long limit_pfn)
{
	bool flushed_rcache = false;
	unsigned long iova_pfn;
	struct iova *new_iova;

	iova_pfn = iova_rcache_get(iovad, size, limit_pfn);
	if (iova_pfn)
		return iova_pfn;

retry:
	new_iova = alloc_iova(iovad, size, limit_pfn, true);
	if (!new_iova) {
		unsigned int cpu;

		if (flushed_rcache)
			return 0;

		/* Try replenishing IOVAs by flushing rcache. */
		flushed_rcache = true;
		for_each_online_cpu(cpu)
			free_cpu_cached_iovas(cpu, iovad);
		goto retry;
	}

	return new_iova->pfn_lo;
}
EXPORT_SYMBOL_GPL(alloc_iova_fast);

/**
 * free_iova_fast - free iova pfn range into rcache
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * @size: - # of pages in range
 * This function frees an iova range by trying to put it into the rcache,
 * falling back to regular iova deallocation via free_iova() if this fails.
 */
void
free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
{
	if (iova_rcache_insert(iovad, pfn, size))
		return;

	free_iova(iovad, pfn);
}
EXPORT_SYMBOL_GPL(free_iova_fast);
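
/*
 * Illustrative fast-path pairing (a sketch; 'size' and 'limit_pfn' are
 * assumed caller-supplied): allocate and free by pfn so that ranges can
 * bounce through the per-CPU rcaches without touching the rbtree in the
 * common case.
 *
 *	unsigned long pfn;
 *
 *	pfn = alloc_iova_fast(&iovad, size, limit_pfn);
 *	if (!pfn)
 *		return -ENOMEM;
 *	...
 *	free_iova_fast(&iovad, pfn, size);
 */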

/**
 * put_iova_domain - destroys the iova domain
 * @iovad: - iova domain in question.
 * All the iova's in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
	struct rb_node *node;
	unsigned long flags;

	free_iova_rcaches(iovad);
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = rb_first(&iovad->rbroot);
	while (node) {
		struct iova *iova = rb_entry(node, struct iova, node);

		rb_erase(node, &iovad->rbroot);
		free_iova_mem(iova);
		node = rb_first(&iovad->rbroot);
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(put_iova_domain);

static int
__is_range_overlap(struct rb_node *node,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova = rb_entry(node, struct iova, node);

	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
		return 1;
	return 0;
}

static inline struct iova *
alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_iova_mem();
	if (iova) {
		iova->pfn_lo = pfn_lo;
		iova->pfn_hi = pfn_hi;
	}

	return iova;
}

static struct iova *
__insert_new_range(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_and_init_iova(pfn_lo, pfn_hi);
	if (iova)
		iova_insert_rbtree(&iovad->rbroot, iova, NULL);

	return iova;
}

static void
__adjust_overlap_range(struct iova *iova,
	unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < iova->pfn_lo)
		iova->pfn_lo = *pfn_lo;
	if (*pfn_hi > iova->pfn_hi)
		*pfn_lo = iova->pfn_hi + 1;
}

/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: - iova domain pointer
 * @pfn_lo: - lower page frame address
 * @pfn_hi: - higher pfn address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that this address is not dished out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;
	unsigned int overlap = 0;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
			iova = rb_entry(node, struct iova, node);
			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
			if ((pfn_lo >= iova->pfn_lo) &&
				(pfn_hi <= iova->pfn_hi))
				goto finish;
			overlap = 1;

		} else if (overlap)
			break;
	}

	/* We are here either because this is the first reserved node
	 * or we need to insert the remaining non-overlapping address range
	 */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
EXPORT_SYMBOL_GPL(reserve_iova);
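
/*
 * Illustrative use (a sketch with x86 pfns): carve the MSI window
 * 0xfee00000-0xfeefffff (pfns 0xfee00-0xfeeff with a 4K granule) out of
 * the allocatable space so alloc_iova() can never hand it out.
 *
 *	if (!reserve_iova(&iovad, 0xfee00, 0xfeeff))
 *		pr_err("Failed to reserve MSI window\n");
 */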

/**
 * copy_reserved_iova - copies the reserved iovas between domains
 * @from: - source domain from where to copy
 * @to: - destination domain where to copy
 * This function copies reserved iovas from one domain to
 * the other.
 */
void
copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
{
	unsigned long flags;
	struct rb_node *node;

	spin_lock_irqsave(&from->iova_rbtree_lock, flags);
	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
		struct iova *iova = rb_entry(node, struct iova, node);
		struct iova *new_iova;

		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
		if (!new_iova)
			printk(KERN_ERR "Reserve iova range %lx-%lx failed\n",
				iova->pfn_lo, iova->pfn_hi);
	}
	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(copy_reserved_iova);

struct iova *
split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
		unsigned long pfn_lo, unsigned long pfn_hi)
{
	unsigned long flags;
	struct iova *prev = NULL, *next = NULL;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	if (iova->pfn_lo < pfn_lo) {
		prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
		if (prev == NULL)
			goto error;
	}
	if (iova->pfn_hi > pfn_hi) {
		next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
		if (next == NULL)
			goto error;
	}

	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);

	if (prev) {
		iova_insert_rbtree(&iovad->rbroot, prev, NULL);
		iova->pfn_lo = pfn_lo;
	}
	if (next) {
		iova_insert_rbtree(&iovad->rbroot, next, NULL);
		iova->pfn_hi = pfn_hi;
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return iova;

error:
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	if (prev)
		free_iova_mem(prev);
	return NULL;
}

/*
 * Magazine caches for IOVA ranges.  For an introduction to magazines,
 * see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab
 * Allocator to Many CPUs and Arbitrary Resources" by Bonwick and Adams.
 * For simplicity, we use a static magazine size and don't implement the
 * dynamic size tuning described in the paper.
 */
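
/*
 * Shape of the cache hierarchy built below: per size class, each CPU
 * owns two magazines ('loaded' and 'prev') and all CPUs share a small
 * depot of full magazines.  Frees push into 'loaded' and allocations
 * pop from it, with 'prev' absorbing bursts in either direction; only
 * when both magazines overflow or run dry does a depot (and eventually
 * rbtree) round trip happen.
 */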

#define IOVA_MAG_SIZE 128

struct iova_magazine {
	unsigned long size;
	unsigned long pfns[IOVA_MAG_SIZE];
};

struct iova_cpu_rcache {
	spinlock_t lock;
	struct iova_magazine *loaded;
	struct iova_magazine *prev;
};

static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
{
	return kzalloc(sizeof(struct iova_magazine), flags);
}

static void iova_magazine_free(struct iova_magazine *mag)
{
	kfree(mag);
}

static void
iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
{
	unsigned long flags;
	int i;

	if (!mag)
		return;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);

	for (i = 0; i < mag->size; ++i) {
		struct iova *iova = private_find_iova(iovad, mag->pfns[i]);

		BUG_ON(!iova);
		private_free_iova(iovad, iova);
	}

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	mag->size = 0;
}

static bool iova_magazine_full(struct iova_magazine *mag)
{
	return (mag && mag->size == IOVA_MAG_SIZE);
}

static bool iova_magazine_empty(struct iova_magazine *mag)
{
	return (!mag || mag->size == 0);
}

static unsigned long iova_magazine_pop(struct iova_magazine *mag,
				       unsigned long limit_pfn)
{
	BUG_ON(iova_magazine_empty(mag));

	if (mag->pfns[mag->size - 1] >= limit_pfn)
		return 0;

	return mag->pfns[--mag->size];
}

static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
{
	BUG_ON(iova_magazine_full(mag));

	mag->pfns[mag->size++] = pfn;
}

static void init_iova_rcaches(struct iova_domain *iovad)
{
	struct iova_cpu_rcache *cpu_rcache;
	struct iova_rcache *rcache;
	unsigned int cpu;
	int i;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		spin_lock_init(&rcache->lock);
		rcache->depot_size = 0;
		rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache), cache_line_size());
		if (WARN_ON(!rcache->cpu_rcaches))
			continue;
		for_each_possible_cpu(cpu) {
			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
			spin_lock_init(&cpu_rcache->lock);
			cpu_rcache->loaded = iova_magazine_alloc(GFP_KERNEL);
			cpu_rcache->prev = iova_magazine_alloc(GFP_KERNEL);
		}
	}
}

/*
 * Try inserting IOVA range starting with 'iova_pfn' into 'rcache', and
 * return true on success.  Can fail if rcache is full and we can't free
 * space, and free_iova_fast() (our only caller) will then return the
 * IOVA range to the rbtree instead.
 */
static bool __iova_rcache_insert(struct iova_domain *iovad,
				 struct iova_rcache *rcache,
				 unsigned long iova_pfn)
{
	struct iova_magazine *mag_to_free = NULL;
	struct iova_cpu_rcache *cpu_rcache;
	bool can_insert = false;
	unsigned long flags;

	cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
	spin_lock_irqsave(&cpu_rcache->lock, flags);

	if (!iova_magazine_full(cpu_rcache->loaded)) {
		can_insert = true;
	} else if (!iova_magazine_full(cpu_rcache->prev)) {
		swap(cpu_rcache->prev, cpu_rcache->loaded);
		can_insert = true;
	} else {
		struct iova_magazine *new_mag = iova_magazine_alloc(GFP_ATOMIC);

		if (new_mag) {
			spin_lock(&rcache->lock);
			if (rcache->depot_size < MAX_GLOBAL_MAGS) {
				rcache->depot[rcache->depot_size++] =
						cpu_rcache->loaded;
			} else {
				mag_to_free = cpu_rcache->loaded;
			}
			spin_unlock(&rcache->lock);

			cpu_rcache->loaded = new_mag;
			can_insert = true;
		}
	}

	if (can_insert)
		iova_magazine_push(cpu_rcache->loaded, iova_pfn);

	spin_unlock_irqrestore(&cpu_rcache->lock, flags);

	if (mag_to_free) {
		iova_magazine_free_pfns(mag_to_free, iovad);
		iova_magazine_free(mag_to_free);
	}

	return can_insert;
}

static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn,
			       unsigned long size)
{
	unsigned int log_size = order_base_2(size);

	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
		return false;

	return __iova_rcache_insert(iovad, &iovad->rcaches[log_size], pfn);
}
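
/*
 * Size-class mapping used by both the insert path above and the lookup
 * path below (worked example): order_base_2() rounds the request up to
 * the next power of two, so sizes 1, 2, 3-4, 5-8, ... map to classes
 * 0, 1, 2, 3, ... and any request larger than
 * 1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1) pages bypasses the rcaches.
 */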

/*
 * Caller wants to allocate a new IOVA range from 'rcache'.  If we can
 * satisfy the request, return a matching non-NULL range and remove
 * it from the 'rcache'.
 */
static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
				       unsigned long limit_pfn)
{
	struct iova_cpu_rcache *cpu_rcache;
	unsigned long iova_pfn = 0;
	bool has_pfn = false;
	unsigned long flags;

	cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
	spin_lock_irqsave(&cpu_rcache->lock, flags);

	if (!iova_magazine_empty(cpu_rcache->loaded)) {
		has_pfn = true;
	} else if (!iova_magazine_empty(cpu_rcache->prev)) {
		swap(cpu_rcache->prev, cpu_rcache->loaded);
		has_pfn = true;
	} else {
		spin_lock(&rcache->lock);
		if (rcache->depot_size > 0) {
			iova_magazine_free(cpu_rcache->loaded);
			cpu_rcache->loaded = rcache->depot[--rcache->depot_size];
			has_pfn = true;
		}
		spin_unlock(&rcache->lock);
	}

	if (has_pfn)
		iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);

	spin_unlock_irqrestore(&cpu_rcache->lock, flags);

	return iova_pfn;
}

/*
 * Try to satisfy IOVA allocation range from rcache.  Fail if requested
 * size is too big or the DMA limit we are given isn't satisfied by the
 * top element in the magazine.
 */
static unsigned long iova_rcache_get(struct iova_domain *iovad,
				     unsigned long size,
				     unsigned long limit_pfn)
{
	unsigned int log_size = order_base_2(size);

	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
		return 0;

	return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn);
}

/*
 * Free a cpu's rcache.
 */
static void free_cpu_iova_rcache(unsigned int cpu, struct iova_domain *iovad,
				 struct iova_rcache *rcache)
{
	struct iova_cpu_rcache *cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
	unsigned long flags;

	spin_lock_irqsave(&cpu_rcache->lock, flags);

	iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
	iova_magazine_free(cpu_rcache->loaded);

	iova_magazine_free_pfns(cpu_rcache->prev, iovad);
	iova_magazine_free(cpu_rcache->prev);

	spin_unlock_irqrestore(&cpu_rcache->lock, flags);
}

/*
 * Free rcache data structures.
 */
static void free_iova_rcaches(struct iova_domain *iovad)
{
	struct iova_rcache *rcache;
	unsigned long flags;
	unsigned int cpu;
	int i, j;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		for_each_possible_cpu(cpu)
			free_cpu_iova_rcache(cpu, iovad, rcache);
		spin_lock_irqsave(&rcache->lock, flags);
		free_percpu(rcache->cpu_rcaches);
		for (j = 0; j < rcache->depot_size; ++j) {
			iova_magazine_free_pfns(rcache->depot[j], iovad);
			iova_magazine_free(rcache->depot[j]);
		}
		spin_unlock_irqrestore(&rcache->lock, flags);
	}
}

/*
 * Free all the IOVA ranges cached by a cpu (used when cpu is unplugged).
 */
void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
{
	struct iova_cpu_rcache *cpu_rcache;
	struct iova_rcache *rcache;
	unsigned long flags;
	int i;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
		spin_lock_irqsave(&cpu_rcache->lock, flags);
		iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
		iova_magazine_free_pfns(cpu_rcache->prev, iovad);
		spin_unlock_irqrestore(&cpu_rcache->lock, flags);
	}
}

MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
MODULE_LICENSE("GPL");