// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2006-2009, Intel Corporation.
 *
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include <linux/iova.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/cpu.h>

/* The anchor node sits above the top of the usable address space */
#define IOVA_ANCHOR	~0UL

static bool iova_rcache_insert(struct iova_domain *iovad,
			       unsigned long pfn,
			       unsigned long size);
static unsigned long iova_rcache_get(struct iova_domain *iovad,
				     unsigned long size,
				     unsigned long limit_pfn);
static void init_iova_rcaches(struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);
static void fq_destroy_all_entries(struct iova_domain *iovad);
static void fq_flush_timeout(struct timer_list *t);

void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	unsigned long start_pfn)
{
	/*
	 * IOVA granularity will normally be equal to the smallest
	 * supported IOMMU page size; both *must* be capable of
	 * representing individual CPU pages exactly.
	 */
	BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));

	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached_node = &iovad->anchor.node;
	iovad->cached32_node = &iovad->anchor.node;
	iovad->granule = granule;
	iovad->start_pfn = start_pfn;
	iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
	iovad->max32_alloc_size = iovad->dma_32bit_pfn;
	iovad->flush_cb = NULL;
	iovad->fq = NULL;
	iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
	rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
	rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
	init_iova_rcaches(iovad);
}
EXPORT_SYMBOL_GPL(init_iova_domain);

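/*
 * Example (an illustrative sketch, not part of this driver): a typical
 * caller initialises the domain with the IOMMU page size as the granule
 * and a non-zero start pfn so that iova 0 is never handed out. The name
 * "iovad" and the SZ_4K granule (from <linux/sizes.h>) are assumptions
 * for the example only:
 *
 *	struct iova_domain iovad;
 *
 *	init_iova_domain(&iovad, SZ_4K, 1);	// 4K granule, skip pfn 0
 */
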
bool has_iova_flush_queue(struct iova_domain *iovad)
{
	return !!iovad->fq;
}

static void free_iova_flush_queue(struct iova_domain *iovad)
{
	if (!has_iova_flush_queue(iovad))
		return;

	if (timer_pending(&iovad->fq_timer))
		del_timer(&iovad->fq_timer);

	fq_destroy_all_entries(iovad);

	free_percpu(iovad->fq);

	iovad->fq = NULL;
	iovad->flush_cb = NULL;
	iovad->entry_dtor = NULL;
}

int init_iova_flush_queue(struct iova_domain *iovad,
			  iova_flush_cb flush_cb, iova_entry_dtor entry_dtor)
{
	struct iova_fq __percpu *queue;
	int cpu;

	atomic64_set(&iovad->fq_flush_start_cnt, 0);
	atomic64_set(&iovad->fq_flush_finish_cnt, 0);

	queue = alloc_percpu(struct iova_fq);
	if (!queue)
		return -ENOMEM;

	iovad->flush_cb = flush_cb;
	iovad->entry_dtor = entry_dtor;

	for_each_possible_cpu(cpu) {
		struct iova_fq *fq;

		fq = per_cpu_ptr(queue, cpu);
		fq->head = 0;
		fq->tail = 0;

		spin_lock_init(&fq->lock);
	}

	/* Publish the fully initialised queues before setting iovad->fq */
	smp_wmb();

	iovad->fq = queue;

	timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
	atomic_set(&iovad->fq_timer_on, 0);

	return 0;
}
EXPORT_SYMBOL_GPL(init_iova_flush_queue);

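/*
 * Example (an illustrative sketch, not part of this driver): a driver
 * opting in to deferred invalidation registers a flush callback here;
 * "my_flush_iotlb" is a hypothetical name standing in for the driver's
 * own IOTLB flush routine:
 *
 *	static void my_flush_iotlb(struct iova_domain *iovad)
 *	{
 *		// invalidate the hardware IOTLB for the domain owning iovad
 *	}
 *
 *	if (init_iova_flush_queue(&iovad, my_flush_iotlb, NULL))
 *		pr_warn("no flush queue, using strict invalidation\n");
 */
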
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long limit_pfn)
{
	if (limit_pfn <= iovad->dma_32bit_pfn)
		return iovad->cached32_node;

	return iovad->cached_node;
}

static void
__cached_rbnode_insert_update(struct iova_domain *iovad, struct iova *new)
{
	if (new->pfn_hi < iovad->dma_32bit_pfn)
		iovad->cached32_node = &new->node;
	else
		iovad->cached_node = &new->node;
}

static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;

	cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
	if (free == cached_iova ||
	    (free->pfn_hi < iovad->dma_32bit_pfn &&
	     free->pfn_lo >= cached_iova->pfn_lo)) {
		iovad->cached32_node = rb_next(&free->node);
		iovad->max32_alloc_size = iovad->dma_32bit_pfn;
	}

	cached_iova = rb_entry(iovad->cached_node, struct iova, node);
	if (free->pfn_lo >= cached_iova->pfn_lo)
		iovad->cached_node = rb_next(&free->node);
}

/* Insert the iova into domain rbtree by holding writer lock */
static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova,
		   struct rb_node *start)
{
	struct rb_node **new, *parent = NULL;

	new = (start) ? &start : &(root->rb_node);
	/* Figure out where to put the new node */
	while (*new) {
		struct iova *this = rb_entry(*new, struct iova, node);

		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else {
			WARN_ON(1); /* this should not happen */
			return;
		}
	}
	/* Add new node and rebalance tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}

static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
		unsigned long size, unsigned long limit_pfn,
		struct iova *new, bool size_aligned)
{
	struct rb_node *curr, *prev;
	struct iova *curr_iova;
	unsigned long flags;
	unsigned long new_pfn, retry_pfn;
	unsigned long align_mask = ~0UL;
	unsigned long high_pfn = limit_pfn, low_pfn = iovad->start_pfn;

	if (size_aligned)
		align_mask <<= fls_long(size - 1);

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	if (limit_pfn <= iovad->dma_32bit_pfn &&
			size >= iovad->max32_alloc_size)
		goto iova32_full;

	curr = __get_cached_rbnode(iovad, limit_pfn);
	curr_iova = rb_entry(curr, struct iova, node);
	retry_pfn = curr_iova->pfn_hi + 1;

retry:
	do {
		high_pfn = min(high_pfn, curr_iova->pfn_lo);
		new_pfn = (high_pfn - size) & align_mask;
		prev = curr;
		curr = rb_prev(curr);
		curr_iova = rb_entry(curr, struct iova, node);
	} while (curr && new_pfn <= curr_iova->pfn_hi && new_pfn >= low_pfn);

	if (high_pfn < size || new_pfn < low_pfn) {
		if (low_pfn == iovad->start_pfn && retry_pfn < limit_pfn) {
			high_pfn = limit_pfn;
			low_pfn = retry_pfn;
			curr = &iovad->anchor.node;
			curr_iova = rb_entry(curr, struct iova, node);
			goto retry;
		}
		iovad->max32_alloc_size = size;
		goto iova32_full;
	}

	/* pfn_lo will point to size aligned address if size_aligned is set */
	new->pfn_lo = new_pfn;
	new->pfn_hi = new->pfn_lo + size - 1;

	/* If we have 'prev', it's a valid place to start the insertion. */
	iova_insert_rbtree(&iovad->rbroot, new, prev);
	__cached_rbnode_insert_update(iovad, new);

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return 0;

iova32_full:
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return -ENOMEM;
}

static struct kmem_cache *iova_cache;
static unsigned int iova_cache_users;
static DEFINE_MUTEX(iova_cache_mutex);

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_zalloc(iova_cache, GFP_ATOMIC | __GFP_NOWARN);
}
EXPORT_SYMBOL(alloc_iova_mem);

void free_iova_mem(struct iova *iova)
{
	if (iova->pfn_lo != IOVA_ANCHOR)
		kmem_cache_free(iova_cache, iova);
}
EXPORT_SYMBOL(free_iova_mem);

int iova_cache_get(void)
{
	mutex_lock(&iova_cache_mutex);
	if (!iova_cache_users) {
		iova_cache = kmem_cache_create(
			"iommu_iova", sizeof(struct iova), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!iova_cache) {
			mutex_unlock(&iova_cache_mutex);
			pr_err("Couldn't create iova cache\n");
			return -ENOMEM;
		}
	}

	iova_cache_users++;
	mutex_unlock(&iova_cache_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(iova_cache_get);

void iova_cache_put(void)
{
	mutex_lock(&iova_cache_mutex);
	if (WARN_ON(!iova_cache_users)) {
		mutex_unlock(&iova_cache_mutex);
		return;
	}
	iova_cache_users--;
	if (!iova_cache_users)
		kmem_cache_destroy(iova_cache);
	mutex_unlock(&iova_cache_mutex);
}
EXPORT_SYMBOL_GPL(iova_cache_put);

/**
 * alloc_iova - allocates an iova
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * @size_aligned: - set if size_aligned address range is required
 * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
 * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
 * flag is set then the allocated address iova->pfn_lo will be naturally
 * aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned)
{
	struct iova *new_iova;
	int ret;

	new_iova = alloc_iova_mem();
	if (!new_iova)
		return NULL;

	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1,
			new_iova, size_aligned);

	if (ret) {
		free_iova_mem(new_iova);
		return NULL;
	}

	return new_iova;
}
EXPORT_SYMBOL_GPL(alloc_iova);

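/*
 * Example (an illustrative sketch, not part of this driver): allocating
 * eight naturally aligned page frames below the 32-bit boundary; note
 * that limit_pfn is inclusive, hence the "- 1". The range is returned
 * with __free_iova():
 *
 *	struct iova *iova;
 *
 *	iova = alloc_iova(&iovad, 8, iovad.dma_32bit_pfn - 1, true);
 *	if (iova) {
 *		// DMA-map pages at iova->pfn_lo .. iova->pfn_hi, then:
 *		__free_iova(&iovad, iova);
 *	}
 */
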
static struct iova *
private_find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct rb_node *node = iovad->rbroot.rb_node;

	assert_spin_locked(&iovad->iova_rbtree_lock);

	while (node) {
		struct iova *iova = rb_entry(node, struct iova, node);

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_hi)
			node = node->rb_right;
		else
			return iova;	/* pfn falls within iova's range */
	}

	return NULL;
}

static void private_free_iova(struct iova_domain *iovad, struct iova *iova)
{
	assert_spin_locked(&iovad->iova_rbtree_lock);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
	free_iova_mem(iova);
}

/**
 * find_iova - finds an iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct iova *iova;

	/* Take the lock so that no other thread is manipulating the rbtree */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	iova = private_find_iova(iovad, pfn);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
EXPORT_SYMBOL_GPL(find_iova);

/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	private_free_iova(iovad, iova);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(__free_iova);

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct iova *iova = find_iova(iovad, pfn);

	if (iova)
		__free_iova(iovad, iova);
}
EXPORT_SYMBOL_GPL(free_iova);

/**
 * alloc_iova_fast - allocates an iova from rcache
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * @flush_rcache: - set to flush rcache on regular allocation failure
 * This function tries to satisfy an iova allocation from the rcache,
 * and falls back to regular allocation on failure. If regular allocation
 * fails too and the flush_rcache flag is set then the rcache will be flushed.
 */
unsigned long
alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
		unsigned long limit_pfn, bool flush_rcache)
{
	unsigned long iova_pfn;
	struct iova *new_iova;

	iova_pfn = iova_rcache_get(iovad, size, limit_pfn + 1);
	if (iova_pfn)
		return iova_pfn;

retry:
	new_iova = alloc_iova(iovad, size, limit_pfn, true);
	if (!new_iova) {
		unsigned int cpu;

		if (!flush_rcache)
			return 0;

		/* Try replenishing IOVAs by flushing rcache. */
		flush_rcache = false;
		for_each_online_cpu(cpu)
			free_cpu_cached_iovas(cpu, iovad);
		goto retry;
	}

	return new_iova->pfn_lo;
}
EXPORT_SYMBOL_GPL(alloc_iova_fast);

/**
 * free_iova_fast - free iova pfn range into rcache
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * @size: - # of pages in range
 * This function frees an iova range by trying to put it into the rcache,
 * falling back to regular iova deallocation via free_iova() if this fails.
 */
void
free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
{
	if (iova_rcache_insert(iovad, pfn, size))
		return;

	free_iova(iovad, pfn);
}
EXPORT_SYMBOL_GPL(free_iova_fast);

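/*
 * Example (an illustrative sketch, not part of this driver): the _fast
 * pair is intended for the DMA-API hot path, where freed ranges are
 * recycled through the per-CPU rcaches instead of the rbtree:
 *
 *	unsigned long pfn;
 *
 *	pfn = alloc_iova_fast(&iovad, 1, iovad.dma_32bit_pfn - 1, true);
 *	if (pfn) {
 *		// map and later unmap one page frame at pfn, then:
 *		free_iova_fast(&iovad, pfn, 1);
 *	}
 */
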
#define fq_ring_for_each(i, fq) \
	for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)

static inline bool fq_full(struct iova_fq *fq)
{
	assert_spin_locked(&fq->lock);
	return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
}

static inline unsigned fq_ring_add(struct iova_fq *fq)
{
	unsigned idx = fq->tail;

	assert_spin_locked(&fq->lock);

	fq->tail = (idx + 1) % IOVA_FQ_SIZE;

	return idx;
}

static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
{
	u64 counter = atomic64_read(&iovad->fq_flush_finish_cnt);
	unsigned idx;

	assert_spin_locked(&fq->lock);

	fq_ring_for_each(idx, fq) {

		if (fq->entries[idx].counter >= counter)
			break;

		if (iovad->entry_dtor)
			iovad->entry_dtor(fq->entries[idx].data);

		free_iova_fast(iovad,
			       fq->entries[idx].iova_pfn,
			       fq->entries[idx].pages);

		fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
	}
}

static void iova_domain_flush(struct iova_domain *iovad)
{
	atomic64_inc(&iovad->fq_flush_start_cnt);
	iovad->flush_cb(iovad);
	atomic64_inc(&iovad->fq_flush_finish_cnt);
}

static void fq_destroy_all_entries(struct iova_domain *iovad)
{
	int cpu;

	/*
	 * This code runs when the iova_domain is being destroyed, so don't
	 * bother to free iovas, just call the entry_dtor on all remaining
	 * entries.
	 */
	if (!iovad->entry_dtor)
		return;

	for_each_possible_cpu(cpu) {
		struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu);
		int idx;

		fq_ring_for_each(idx, fq)
			iovad->entry_dtor(fq->entries[idx].data);
	}
}

static void fq_flush_timeout(struct timer_list *t)
{
	struct iova_domain *iovad = from_timer(iovad, t, fq_timer);
	int cpu;

	atomic_set(&iovad->fq_timer_on, 0);
	iova_domain_flush(iovad);

	for_each_possible_cpu(cpu) {
		unsigned long flags;
		struct iova_fq *fq;

		fq = per_cpu_ptr(iovad->fq, cpu);
		spin_lock_irqsave(&fq->lock, flags);
		fq_ring_free(iovad, fq);
		spin_unlock_irqrestore(&fq->lock, flags);
	}
}

void queue_iova(struct iova_domain *iovad,
		unsigned long pfn, unsigned long pages,
		unsigned long data)
{
	struct iova_fq *fq = raw_cpu_ptr(iovad->fq);
	unsigned long flags;
	unsigned idx;

	spin_lock_irqsave(&fq->lock, flags);

	/*
	 * First remove all entries from the flush queue that have already been
	 * flushed out on another CPU. This makes the fq_full() check below less
	 * likely to be true.
	 */
	fq_ring_free(iovad, fq);

	if (fq_full(fq)) {
		iova_domain_flush(iovad);
		fq_ring_free(iovad, fq);
	}

	idx = fq_ring_add(fq);

	fq->entries[idx].iova_pfn = pfn;
	fq->entries[idx].pages = pages;
	fq->entries[idx].data = data;
	fq->entries[idx].counter = atomic64_read(&iovad->fq_flush_start_cnt);

	spin_unlock_irqrestore(&fq->lock, flags);

	/* Avoid false sharing as much as possible. */
	if (!atomic_read(&iovad->fq_timer_on) &&
	    !atomic_xchg(&iovad->fq_timer_on, 1))
		mod_timer(&iovad->fq_timer,
			  jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
}
EXPORT_SYMBOL_GPL(queue_iova);

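/*
 * Example (an illustrative sketch, not part of this driver): on the
 * flush-queue path a caller unmaps the range from the IOMMU page tables
 * but defers the iova free; the range becomes reusable only after
 * fq_flush_timeout() or a full ring has flushed the IOTLB. Passing 0 as
 * data assumes no entry_dtor was registered:
 *
 *	// after unmapping 'pages' page frames starting at 'pfn':
 *	queue_iova(&iovad, pfn, pages, 0);
 */
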
/**
 * put_iova_domain - destroys the iova domain
 * @iovad: - iova domain in question.
 * All the iova's in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
	struct iova *iova, *tmp;

	free_iova_flush_queue(iovad);
	free_iova_rcaches(iovad);
	rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
		free_iova_mem(iova);
}
EXPORT_SYMBOL_GPL(put_iova_domain);

static int
__is_range_overlap(struct rb_node *node,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova = rb_entry(node, struct iova, node);

	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
		return 1;
	return 0;
}

static inline struct iova *
alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_iova_mem();
	if (iova) {
		iova->pfn_lo = pfn_lo;
		iova->pfn_hi = pfn_hi;
	}

	return iova;
}

static struct iova *
__insert_new_range(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_and_init_iova(pfn_lo, pfn_hi);
	if (iova)
		iova_insert_rbtree(&iovad->rbroot, iova, NULL);

	return iova;
}

static void
__adjust_overlap_range(struct iova *iova,
	unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < iova->pfn_lo)
		iova->pfn_lo = *pfn_lo;
	if (*pfn_hi > iova->pfn_hi)
		*pfn_lo = iova->pfn_hi + 1;
}

/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: - iova domain pointer
 * @pfn_lo: - lower page frame address
 * @pfn_hi: - higher pfn address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that this address is not dished out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;
	unsigned int overlap = 0;

	/* Don't allow nonsensical pfns */
	if (WARN_ON((pfn_hi | pfn_lo) > (ULLONG_MAX >> iova_shift(iovad))))
		return NULL;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
			iova = rb_entry(node, struct iova, node);
			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
			if ((pfn_lo >= iova->pfn_lo) &&
				(pfn_hi <= iova->pfn_hi))
				goto finish;
			overlap = 1;

		} else if (overlap)
			break;
	}

	/* We are here either because this is the first reserved range
	 * or because we need to insert the remaining non-overlapping
	 * address range.
	 */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
EXPORT_SYMBOL_GPL(reserve_iova);

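/*
 * Example (an illustrative sketch, not part of this driver): carving a
 * hypothetical 1MiB MSI doorbell window out of the allocatable space so
 * that alloc_iova() can never hand it out; the base address below is
 * made up for the example:
 *
 *	dma_addr_t msi_base = 0xfee00000;
 *
 *	if (!reserve_iova(&iovad, iova_pfn(&iovad, msi_base),
 *			  iova_pfn(&iovad, msi_base + SZ_1M - 1)))
 *		pr_warn("failed to reserve MSI window\n");
 */
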
/**
 * copy_reserved_iova - copies the reserved ranges between domains
 * @from: - source domain from where to copy
 * @to: - destination domain where to copy
 * This function copies reserved iova's from one domain to
 * another.
 */
void
copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
{
	unsigned long flags;
	struct rb_node *node;

	spin_lock_irqsave(&from->iova_rbtree_lock, flags);
	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
		struct iova *iova = rb_entry(node, struct iova, node);
		struct iova *new_iova;

		if (iova->pfn_lo == IOVA_ANCHOR)
			continue;

		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
		if (!new_iova)
			pr_err("Reserve iova range %lx-%lx failed\n",
			       iova->pfn_lo, iova->pfn_hi);
	}
	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(copy_reserved_iova);

struct iova *
split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	unsigned long flags;
	struct iova *prev = NULL, *next = NULL;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	if (iova->pfn_lo < pfn_lo) {
		prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
		if (prev == NULL)
			goto error;
	}
	if (iova->pfn_hi > pfn_hi) {
		next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
		if (next == NULL)
			goto error;
	}

	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);

	if (prev) {
		iova_insert_rbtree(&iovad->rbroot, prev, NULL);
		iova->pfn_lo = pfn_lo;
	}
	if (next) {
		iova_insert_rbtree(&iovad->rbroot, next, NULL);
		iova->pfn_hi = pfn_hi;
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return iova;

error:
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	if (prev)
		free_iova_mem(prev);
	return NULL;
}

/*
 * Magazine caches for IOVA ranges. For an introduction to magazines,
 * see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab
 * Allocator to Many CPUs and Arbitrary Resources" by Bonwick and Adams.
 * For simplicity, we use a static magazine size and don't implement the
 * dynamic size tuning described in the paper.
 */

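/*
 * With IOVA_MAG_SIZE of 128 (below), each CPU can hold up to 2 * 128
 * recently freed ranges per size class (the "loaded" plus the "prev"
 * magazine), so a cache hit costs only a per-CPU spinlock and an array
 * pop rather than a walk of the rbtree under the global lock.
 */
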
#define IOVA_MAG_SIZE 128

struct iova_magazine {
	unsigned long size;
	unsigned long pfns[IOVA_MAG_SIZE];
};

struct iova_cpu_rcache {
	spinlock_t lock;
	struct iova_magazine *loaded;
	struct iova_magazine *prev;
};

static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
{
	return kzalloc(sizeof(struct iova_magazine), flags);
}

static void iova_magazine_free(struct iova_magazine *mag)
{
	kfree(mag);
}

static void
iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
{
	unsigned long flags;
	int i;

	if (!mag)
		return;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);

	for (i = 0; i < mag->size; ++i) {
		struct iova *iova = private_find_iova(iovad, mag->pfns[i]);

		if (WARN_ON(!iova))
			continue;

		private_free_iova(iovad, iova);
	}

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	mag->size = 0;
}

static bool iova_magazine_full(struct iova_magazine *mag)
{
	return (mag && mag->size == IOVA_MAG_SIZE);
}

static bool iova_magazine_empty(struct iova_magazine *mag)
{
	return (!mag || mag->size == 0);
}

static unsigned long iova_magazine_pop(struct iova_magazine *mag,
				       unsigned long limit_pfn)
{
	int i;
	unsigned long pfn;

	BUG_ON(iova_magazine_empty(mag));

	/* Only fall back to the rbtree if we have no suitable pfns at all */
	for (i = mag->size - 1; mag->pfns[i] > limit_pfn; i--)
		if (i == 0)
			return 0;

	/* Swap it to pop it */
	pfn = mag->pfns[i];
	mag->pfns[i] = mag->pfns[--mag->size];

	return pfn;
}

static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
{
	BUG_ON(iova_magazine_full(mag));

	mag->pfns[mag->size++] = pfn;
}

static void init_iova_rcaches(struct iova_domain *iovad)
{
	struct iova_cpu_rcache *cpu_rcache;
	struct iova_rcache *rcache;
	unsigned int cpu;
	int i;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		spin_lock_init(&rcache->lock);
		rcache->depot_size = 0;
		rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache), cache_line_size());
		if (WARN_ON(!rcache->cpu_rcaches))
			continue;
		for_each_possible_cpu(cpu) {
			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
			spin_lock_init(&cpu_rcache->lock);
			cpu_rcache->loaded = iova_magazine_alloc(GFP_KERNEL);
			cpu_rcache->prev = iova_magazine_alloc(GFP_KERNEL);
		}
	}
}

/*
 * Try inserting IOVA range starting with 'iova_pfn' into 'rcache', and
 * return true on success. Can fail if rcache is full and we can't free
 * space, and free_iova_fast() (our only caller) will then return the
 * IOVA range to the rbtree instead.
 */
static bool __iova_rcache_insert(struct iova_domain *iovad,
				 struct iova_rcache *rcache,
				 unsigned long iova_pfn)
{
	struct iova_magazine *mag_to_free = NULL;
	struct iova_cpu_rcache *cpu_rcache;
	bool can_insert = false;
	unsigned long flags;

	cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
	spin_lock_irqsave(&cpu_rcache->lock, flags);

	if (!iova_magazine_full(cpu_rcache->loaded)) {
		can_insert = true;
	} else if (!iova_magazine_full(cpu_rcache->prev)) {
		swap(cpu_rcache->prev, cpu_rcache->loaded);
		can_insert = true;
	} else {
		struct iova_magazine *new_mag = iova_magazine_alloc(GFP_ATOMIC);

		if (new_mag) {
			spin_lock(&rcache->lock);
			if (rcache->depot_size < MAX_GLOBAL_MAGS) {
				rcache->depot[rcache->depot_size++] =
						cpu_rcache->loaded;
			} else {
				mag_to_free = cpu_rcache->loaded;
			}
			spin_unlock(&rcache->lock);

			cpu_rcache->loaded = new_mag;
			can_insert = true;
		}
	}

	if (can_insert)
		iova_magazine_push(cpu_rcache->loaded, iova_pfn);

	spin_unlock_irqrestore(&cpu_rcache->lock, flags);

	if (mag_to_free) {
		iova_magazine_free_pfns(mag_to_free, iovad);
		iova_magazine_free(mag_to_free);
	}

	return can_insert;
}

static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn,
			       unsigned long size)
{
	unsigned int log_size = order_base_2(size);

	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
		return false;

	return __iova_rcache_insert(iovad, &iovad->rcaches[log_size], pfn);
}

/*
 * Caller wants to allocate a new IOVA range from 'rcache'. If we can
 * satisfy the request, return a matching non-NULL range and remove
 * it from the 'rcache'.
 */
static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
				       unsigned long limit_pfn)
{
	struct iova_cpu_rcache *cpu_rcache;
	unsigned long iova_pfn = 0;
	bool has_pfn = false;
	unsigned long flags;

	cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
	spin_lock_irqsave(&cpu_rcache->lock, flags);

	if (!iova_magazine_empty(cpu_rcache->loaded)) {
		has_pfn = true;
	} else if (!iova_magazine_empty(cpu_rcache->prev)) {
		swap(cpu_rcache->prev, cpu_rcache->loaded);
		has_pfn = true;
	} else {
		spin_lock(&rcache->lock);
		if (rcache->depot_size > 0) {
			iova_magazine_free(cpu_rcache->loaded);
			cpu_rcache->loaded = rcache->depot[--rcache->depot_size];
			has_pfn = true;
		}
		spin_unlock(&rcache->lock);
	}

	if (has_pfn)
		iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);

	spin_unlock_irqrestore(&cpu_rcache->lock, flags);

	return iova_pfn;
}

/*
 * Try to satisfy IOVA allocation range from rcache. Fail if requested
 * size is too big or the DMA limit we are given isn't satisfied by the
 * top element in the magazine.
 */
static unsigned long iova_rcache_get(struct iova_domain *iovad,
				     unsigned long size,
				     unsigned long limit_pfn)
{
	unsigned int log_size = order_base_2(size);

	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
		return 0;

	return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size);
}

/*
 * Free rcache data structures.
 */
static void free_iova_rcaches(struct iova_domain *iovad)
{
	struct iova_rcache *rcache;
	struct iova_cpu_rcache *cpu_rcache;
	unsigned int cpu;
	int i, j;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		for_each_possible_cpu(cpu) {
			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
			iova_magazine_free(cpu_rcache->loaded);
			iova_magazine_free(cpu_rcache->prev);
		}
		free_percpu(rcache->cpu_rcaches);
		for (j = 0; j < rcache->depot_size; ++j)
			iova_magazine_free(rcache->depot[j]);
	}
}

/*
 * free all the IOVA ranges cached by a cpu (used when cpu is unplugged)
 */
void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
{
	struct iova_cpu_rcache *cpu_rcache;
	struct iova_rcache *rcache;
	unsigned long flags;
	int i;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
		spin_lock_irqsave(&cpu_rcache->lock, flags);
		iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
		iova_magazine_free_pfns(cpu_rcache->prev, iovad);
		spin_unlock_irqrestore(&cpu_rcache->lock, flags);
	}
}

MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
MODULE_LICENSE("GPL");