// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2006-2009, Intel Corporation.
 *
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include <linux/iova.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/cpu.h>

/* The anchor node sits above the top of the usable address space */
#define IOVA_ANCHOR	~0UL

static bool iova_rcache_insert(struct iova_domain *iovad,
			       unsigned long pfn,
			       unsigned long size);
static unsigned long iova_rcache_get(struct iova_domain *iovad,
				     unsigned long size,
				     unsigned long limit_pfn);
static void init_iova_rcaches(struct iova_domain *iovad);
static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);
static void fq_destroy_all_entries(struct iova_domain *iovad);
static void fq_flush_timeout(struct timer_list *t);

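/* CPU hotplug 'dead' callback: drain the IOVA rcaches of the CPU that went offline. */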
static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
{
	struct iova_domain *iovad;

	iovad = hlist_entry_safe(node, struct iova_domain, cpuhp_dead);

	free_cpu_cached_iovas(cpu, iovad);
	return 0;
}

static void free_global_cached_iovas(struct iova_domain *iovad);

static struct iova *to_iova(struct rb_node *node)
{
	return rb_entry(node, struct iova, node);
}

void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	unsigned long start_pfn)
{
	/*
	 * IOVA granularity will normally be equal to the smallest
	 * supported IOMMU page size; both *must* be capable of
	 * representing individual CPU pages exactly.
	 */
	BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));

	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached_node = &iovad->anchor.node;
	iovad->cached32_node = &iovad->anchor.node;
	iovad->granule = granule;
	iovad->start_pfn = start_pfn;
	iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
	iovad->max32_alloc_size = iovad->dma_32bit_pfn;
	iovad->flush_cb = NULL;
	iovad->fq = NULL;
	iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
	rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
	rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
	cpuhp_state_add_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD, &iovad->cpuhp_dead);
	init_iova_rcaches(iovad);
}
EXPORT_SYMBOL_GPL(init_iova_domain);

static bool has_iova_flush_queue(struct iova_domain *iovad)
{
	return !!iovad->fq;
}

static void free_iova_flush_queue(struct iova_domain *iovad)
{
	if (!has_iova_flush_queue(iovad))
		return;

	if (timer_pending(&iovad->fq_timer))
		del_timer(&iovad->fq_timer);

	fq_destroy_all_entries(iovad);

	free_percpu(iovad->fq);

	iovad->fq = NULL;
	iovad->flush_cb = NULL;
	iovad->entry_dtor = NULL;
}

int init_iova_flush_queue(struct iova_domain *iovad,
			  iova_flush_cb flush_cb, iova_entry_dtor entry_dtor)
{
	struct iova_fq __percpu *queue;
	int cpu;

	atomic64_set(&iovad->fq_flush_start_cnt, 0);
	atomic64_set(&iovad->fq_flush_finish_cnt, 0);

	queue = alloc_percpu(struct iova_fq);
	if (!queue)
		return -ENOMEM;

	iovad->flush_cb = flush_cb;
	iovad->entry_dtor = entry_dtor;

	for_each_possible_cpu(cpu) {
		struct iova_fq *fq;

		fq = per_cpu_ptr(queue, cpu);
		fq->head = 0;
		fq->tail = 0;

		spin_lock_init(&fq->lock);
	}

	smp_wmb();

	iovad->fq = queue;

	timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
	atomic_set(&iovad->fq_timer_on, 0);

	return 0;
}

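/*
 * Pick the starting point for a top-down allocation walk: the cached
 * 32-bit node for DMA32 requests, otherwise the general cached node.
 */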
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long limit_pfn)
{
	if (limit_pfn <= iovad->dma_32bit_pfn)
		return iovad->cached32_node;

	return iovad->cached_node;
}

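/* Remember the most recently allocated node as the next search hint. */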
static void
__cached_rbnode_insert_update(struct iova_domain *iovad, struct iova *new)
{
	if (new->pfn_hi < iovad->dma_32bit_pfn)
		iovad->cached32_node = &new->node;
	else
		iovad->cached_node = &new->node;
}

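/*
 * If the freed range is at or above a cached search hint (or is the hint
 * itself), advance the hint past it; freeing in the 32-bit space also
 * resets the max32_alloc_size failure estimate.
 */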
static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;

	cached_iova = to_iova(iovad->cached32_node);
	if (free == cached_iova ||
	    (free->pfn_hi < iovad->dma_32bit_pfn &&
	     free->pfn_lo >= cached_iova->pfn_lo)) {
		iovad->cached32_node = rb_next(&free->node);
		iovad->max32_alloc_size = iovad->dma_32bit_pfn;
	}

	cached_iova = to_iova(iovad->cached_node);
	if (free->pfn_lo >= cached_iova->pfn_lo)
		iovad->cached_node = rb_next(&free->node);
}

static struct rb_node *iova_find_limit(struct iova_domain *iovad, unsigned long limit_pfn)
{
	struct rb_node *node, *next;
	/*
	 * Ideally what we'd like to judge here is whether limit_pfn is close
	 * enough to the highest-allocated IOVA that starting the allocation
	 * walk from the anchor node will be quicker than this initial work to
	 * find an exact starting point (especially if that ends up being the
	 * anchor node anyway). This is an incredibly crude approximation which
	 * only really helps the most likely case, but is at least trivially easy.
	 */
	if (limit_pfn > iovad->dma_32bit_pfn)
		return &iovad->anchor.node;

	node = iovad->rbroot.rb_node;
	while (to_iova(node)->pfn_hi < limit_pfn)
		node = node->rb_right;

search_left:
	while (node->rb_left && to_iova(node->rb_left)->pfn_lo >= limit_pfn)
		node = node->rb_left;

	if (!node->rb_left)
		return node;

	next = node->rb_left;
	while (next->rb_right) {
		next = next->rb_right;
		if (to_iova(next)->pfn_lo >= limit_pfn) {
			node = next;
			goto search_left;
		}
	}

	return node;
}

/* Insert the iova into domain rbtree by holding writer lock */
static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova,
		   struct rb_node *start)
{
	struct rb_node **new, *parent = NULL;

	new = (start) ? &start : &(root->rb_node);
	/* Figure out where to put new node */
	while (*new) {
		struct iova *this = to_iova(*new);

		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else {
			WARN_ON(1); /* this should not happen */
			return;
		}
	}
	/* Add new node and rebalance tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}

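/*
 * Allocate 'size' pfns just below 'limit_pfn', walking the rbtree backwards
 * from the cached hint and inserting the new range under the rbtree lock.
 * If nothing fits below the hint, retry once in the space between the hint
 * and limit_pfn before recording the failing size in max32_alloc_size.
 */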
static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
		unsigned long size, unsigned long limit_pfn,
			struct iova *new, bool size_aligned)
{
	struct rb_node *curr, *prev;
	struct iova *curr_iova;
	unsigned long flags;
	unsigned long new_pfn, retry_pfn;
	unsigned long align_mask = ~0UL;
	unsigned long high_pfn = limit_pfn, low_pfn = iovad->start_pfn;

	if (size_aligned)
		align_mask <<= fls_long(size - 1);

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	if (limit_pfn <= iovad->dma_32bit_pfn &&
			size >= iovad->max32_alloc_size)
		goto iova32_full;

	curr = __get_cached_rbnode(iovad, limit_pfn);
	curr_iova = to_iova(curr);
	retry_pfn = curr_iova->pfn_hi + 1;

retry:
	do {
		high_pfn = min(high_pfn, curr_iova->pfn_lo);
		new_pfn = (high_pfn - size) & align_mask;
		prev = curr;
		curr = rb_prev(curr);
		curr_iova = to_iova(curr);
	} while (curr && new_pfn <= curr_iova->pfn_hi && new_pfn >= low_pfn);

	if (high_pfn < size || new_pfn < low_pfn) {
		if (low_pfn == iovad->start_pfn && retry_pfn < limit_pfn) {
			high_pfn = limit_pfn;
			low_pfn = retry_pfn;
			curr = iova_find_limit(iovad, limit_pfn);
			curr_iova = to_iova(curr);
			goto retry;
		}
		iovad->max32_alloc_size = size;
		goto iova32_full;
	}

	/* pfn_lo will point to size aligned address if size_aligned is set */
	new->pfn_lo = new_pfn;
	new->pfn_hi = new->pfn_lo + size - 1;

	/* If we have 'prev', it's a valid place to start the insertion. */
	iova_insert_rbtree(&iovad->rbroot, new, prev);
	__cached_rbnode_insert_update(iovad, new);

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return 0;

iova32_full:
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return -ENOMEM;
}

static struct kmem_cache *iova_cache;
static unsigned int iova_cache_users;
static DEFINE_MUTEX(iova_cache_mutex);

static struct iova *alloc_iova_mem(void)
{
	return kmem_cache_zalloc(iova_cache, GFP_ATOMIC | __GFP_NOWARN);
}

static void free_iova_mem(struct iova *iova)
{
	if (iova->pfn_lo != IOVA_ANCHOR)
		kmem_cache_free(iova_cache, iova);
}

int iova_cache_get(void)
{
	mutex_lock(&iova_cache_mutex);
	if (!iova_cache_users) {
		int ret;

		ret = cpuhp_setup_state_multi(CPUHP_IOMMU_IOVA_DEAD, "iommu/iova:dead", NULL,
					iova_cpuhp_dead);
		if (ret) {
			mutex_unlock(&iova_cache_mutex);
			pr_err("Couldn't register cpuhp handler\n");
			return ret;
		}

		iova_cache = kmem_cache_create(
			"iommu_iova", sizeof(struct iova), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!iova_cache) {
			cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);
			mutex_unlock(&iova_cache_mutex);
			pr_err("Couldn't create iova cache\n");
			return -ENOMEM;
		}
	}

	iova_cache_users++;
	mutex_unlock(&iova_cache_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(iova_cache_get);

void iova_cache_put(void)
{
	mutex_lock(&iova_cache_mutex);
	if (WARN_ON(!iova_cache_users)) {
		mutex_unlock(&iova_cache_mutex);
		return;
	}
	iova_cache_users--;
	if (!iova_cache_users) {
		cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);
		kmem_cache_destroy(iova_cache);
	}
	mutex_unlock(&iova_cache_mutex);
}
EXPORT_SYMBOL_GPL(iova_cache_put);

/**
 * alloc_iova - allocates an iova
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * @size_aligned: - set if size_aligned address range is required
 * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
 * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
 * flag is set then the allocated address iova->pfn_lo will be naturally
 * aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned)
{
	struct iova *new_iova;
	int ret;

	new_iova = alloc_iova_mem();
	if (!new_iova)
		return NULL;

	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1,
			new_iova, size_aligned);

	if (ret) {
		free_iova_mem(new_iova);
		return NULL;
	}

	return new_iova;
}
EXPORT_SYMBOL_GPL(alloc_iova);

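/* Look up the iova covering 'pfn' in the rbtree; caller must hold the rbtree lock. */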
static struct iova *
private_find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct rb_node *node = iovad->rbroot.rb_node;

	assert_spin_locked(&iovad->iova_rbtree_lock);

	while (node) {
		struct iova *iova = to_iova(node);

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_hi)
			node = node->rb_right;
		else
			return iova;	/* pfn falls within iova's range */
	}

	return NULL;
}

static void private_free_iova(struct iova_domain *iovad, struct iova *iova)
{
	assert_spin_locked(&iovad->iova_rbtree_lock);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
	free_iova_mem(iova);
}

/**
 * find_iova - finds an iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct iova *iova;

	/* Take the lock so that no other thread is manipulating the rbtree */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	iova = private_find_iova(iovad, pfn);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
EXPORT_SYMBOL_GPL(find_iova);

/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain.
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	private_free_iova(iovad, iova);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(__free_iova);

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct iova *iova;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	iova = private_find_iova(iovad, pfn);
	if (iova)
		private_free_iova(iovad, iova);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(free_iova);

/**
 * alloc_iova_fast - allocates an iova from rcache
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * @flush_rcache: - set to flush rcache on regular allocation failure
 * This function tries to satisfy an iova allocation from the rcache,
 * and falls back to regular allocation on failure. If regular allocation
 * fails too and the flush_rcache flag is set then the rcache will be flushed.
 */
unsigned long
alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
		unsigned long limit_pfn, bool flush_rcache)
{
	unsigned long iova_pfn;
	struct iova *new_iova;

	iova_pfn = iova_rcache_get(iovad, size, limit_pfn + 1);
	if (iova_pfn)
		return iova_pfn;

retry:
	new_iova = alloc_iova(iovad, size, limit_pfn, true);
	if (!new_iova) {
		unsigned int cpu;

		if (!flush_rcache)
			return 0;

		/* Try replenishing IOVAs by flushing rcache. */
		flush_rcache = false;
		for_each_online_cpu(cpu)
			free_cpu_cached_iovas(cpu, iovad);
		free_global_cached_iovas(iovad);
		goto retry;
	}

	return new_iova->pfn_lo;
}

/**
 * free_iova_fast - free iova pfn range into rcache
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * @size: - # of pages in range
 * This function frees an iova range by trying to put it into the rcache,
 * falling back to regular iova deallocation via free_iova() if this fails.
 */
void
free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
{
	if (iova_rcache_insert(iovad, pfn, size))
		return;

	free_iova(iovad, pfn);
}
EXPORT_SYMBOL_GPL(free_iova_fast);

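/*
 * Flush-queue ring helpers: each CPU keeps a fixed-size ring of deferred
 * frees; entries become freeable once the domain-wide flush counter has
 * passed the counter recorded when they were queued.
 */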
#define fq_ring_for_each(i, fq) \
	for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)

static inline bool fq_full(struct iova_fq *fq)
{
	assert_spin_locked(&fq->lock);
	return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
}

static inline unsigned fq_ring_add(struct iova_fq *fq)
{
	unsigned idx = fq->tail;

	assert_spin_locked(&fq->lock);

	fq->tail = (idx + 1) % IOVA_FQ_SIZE;

	return idx;
}

static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
{
	u64 counter = atomic64_read(&iovad->fq_flush_finish_cnt);
	unsigned idx;

	assert_spin_locked(&fq->lock);

	fq_ring_for_each(idx, fq) {

		if (fq->entries[idx].counter >= counter)
			break;

		if (iovad->entry_dtor)
			iovad->entry_dtor(fq->entries[idx].data);

		free_iova_fast(iovad,
			       fq->entries[idx].iova_pfn,
			       fq->entries[idx].pages);

		fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
	}
}

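/* Invoke the domain's flush callback, bracketed by the start/finish flush counters. */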
static void iova_domain_flush(struct iova_domain *iovad)
{
	atomic64_inc(&iovad->fq_flush_start_cnt);
	iovad->flush_cb(iovad);
	atomic64_inc(&iovad->fq_flush_finish_cnt);
}

static void fq_destroy_all_entries(struct iova_domain *iovad)
{
	int cpu;

	/*
	 * This code runs when the iova_domain is being destroyed, so don't
	 * bother to free iovas, just call the entry_dtor on all remaining
	 * entries.
	 */
	if (!iovad->entry_dtor)
		return;

	for_each_possible_cpu(cpu) {
		struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu);
		int idx;

		fq_ring_for_each(idx, fq)
			iovad->entry_dtor(fq->entries[idx].data);
	}
}

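/* Timer callback: flush the domain and reap the deferred entries on every CPU's queue. */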
static void fq_flush_timeout(struct timer_list *t)
{
	struct iova_domain *iovad = from_timer(iovad, t, fq_timer);
	int cpu;

	atomic_set(&iovad->fq_timer_on, 0);
	iova_domain_flush(iovad);

	for_each_possible_cpu(cpu) {
		unsigned long flags;
		struct iova_fq *fq;

		fq = per_cpu_ptr(iovad->fq, cpu);
		spin_lock_irqsave(&fq->lock, flags);
		fq_ring_free(iovad, fq);
		spin_unlock_irqrestore(&fq->lock, flags);
	}
}

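/*
 * Defer freeing of an IOVA range: put it on this CPU's flush queue and arm
 * the flush timer if it is not already running.
 */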
void queue_iova(struct iova_domain *iovad,
		unsigned long pfn, unsigned long pages,
		unsigned long data)
{
	struct iova_fq *fq = raw_cpu_ptr(iovad->fq);
	unsigned long flags;
	unsigned idx;

	spin_lock_irqsave(&fq->lock, flags);

	/*
	 * First remove all entries from the flush queue that have already been
	 * flushed out on another CPU. This makes the fq_full() check below less
	 * likely to be true.
	 */
	fq_ring_free(iovad, fq);

	if (fq_full(fq)) {
		iova_domain_flush(iovad);
		fq_ring_free(iovad, fq);
	}

	idx = fq_ring_add(fq);

	fq->entries[idx].iova_pfn = pfn;
	fq->entries[idx].pages    = pages;
	fq->entries[idx].data     = data;
	fq->entries[idx].counter  = atomic64_read(&iovad->fq_flush_start_cnt);

	spin_unlock_irqrestore(&fq->lock, flags);

	/* Avoid false sharing as much as possible. */
	if (!atomic_read(&iovad->fq_timer_on) &&
	    !atomic_xchg(&iovad->fq_timer_on, 1))
		mod_timer(&iovad->fq_timer,
			  jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
}

/**
 * put_iova_domain - destroys the iova domain
 * @iovad: - iova domain in question.
 * All the iova's in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
	struct iova *iova, *tmp;

	cpuhp_state_remove_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
					    &iovad->cpuhp_dead);

	free_iova_flush_queue(iovad);
	free_iova_rcaches(iovad);
	rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
		free_iova_mem(iova);
}
EXPORT_SYMBOL_GPL(put_iova_domain);

static int
__is_range_overlap(struct rb_node *node,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova = to_iova(node);

	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
		return 1;
	return 0;
}

static inline struct iova *
alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_iova_mem();
	if (iova) {
		iova->pfn_lo = pfn_lo;
		iova->pfn_hi = pfn_hi;
	}

	return iova;
}

static struct iova *
__insert_new_range(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_and_init_iova(pfn_lo, pfn_hi);
	if (iova)
		iova_insert_rbtree(&iovad->rbroot, iova, NULL);

	return iova;
}

static void
__adjust_overlap_range(struct iova *iova,
	unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < iova->pfn_lo)
		iova->pfn_lo = *pfn_lo;
	if (*pfn_hi > iova->pfn_hi)
		*pfn_lo = iova->pfn_hi + 1;
}

/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: - iova domain pointer
 * @pfn_lo: - lower page frame address
 * @pfn_hi: - higher pfn address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that this address range is not dished out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;
	unsigned int overlap = 0;

	/* Don't allow nonsensical pfns */
	if (WARN_ON((pfn_hi | pfn_lo) > (ULLONG_MAX >> iova_shift(iovad))))
		return NULL;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
			iova = to_iova(node);
			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
			if ((pfn_lo >= iova->pfn_lo) &&
				(pfn_hi <= iova->pfn_hi))
				goto finish;
			overlap = 1;

		} else if (overlap)
				break;
	}

	/* We are here either because this is the first reserved node
	 * or we need to insert the remaining non-overlapping address range
	 */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
EXPORT_SYMBOL_GPL(reserve_iova);

/*
 * Magazine caches for IOVA ranges.  For an introduction to magazines,
 * see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab
 * Allocator to Many CPUs and Arbitrary Resources" by Bonwick and Adams.
 * For simplicity, we use a static magazine size and don't implement the
 * dynamic size tuning described in the paper.
 */

#define IOVA_MAG_SIZE 128

struct iova_magazine {
	unsigned long size;
	unsigned long pfns[IOVA_MAG_SIZE];
};

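/* Per-CPU pair of magazines: allocations pop from 'loaded', with 'prev' as a spare. */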
struct iova_cpu_rcache {
	spinlock_t lock;
	struct iova_magazine *loaded;
	struct iova_magazine *prev;
};

static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
{
	return kzalloc(sizeof(struct iova_magazine), flags);
}

static void iova_magazine_free(struct iova_magazine *mag)
{
	kfree(mag);
}

static void
iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
{
	unsigned long flags;
	int i;

	if (!mag)
		return;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);

	for (i = 0 ; i < mag->size; ++i) {
		struct iova *iova = private_find_iova(iovad, mag->pfns[i]);

		if (WARN_ON(!iova))
			continue;

		private_free_iova(iovad, iova);
	}

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	mag->size = 0;
}

static bool iova_magazine_full(struct iova_magazine *mag)
{
	return (mag && mag->size == IOVA_MAG_SIZE);
}

static bool iova_magazine_empty(struct iova_magazine *mag)
{
	return (!mag || mag->size == 0);
}

static unsigned long iova_magazine_pop(struct iova_magazine *mag,
				       unsigned long limit_pfn)
{
	int i;
	unsigned long pfn;

	BUG_ON(iova_magazine_empty(mag));

	/* Only fall back to the rbtree if we have no suitable pfns at all */
	for (i = mag->size - 1; mag->pfns[i] > limit_pfn; i--)
		if (i == 0)
			return 0;

	/* Swap it to pop it */
	pfn = mag->pfns[i];
	mag->pfns[i] = mag->pfns[--mag->size];

	return pfn;
}

static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
{
	BUG_ON(iova_magazine_full(mag));

	mag->pfns[mag->size++] = pfn;
}

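/* Set up one rcache (global depot plus per-CPU magazines) per allocation order. */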
static void init_iova_rcaches(struct iova_domain *iovad)
{
	struct iova_cpu_rcache *cpu_rcache;
	struct iova_rcache *rcache;
	unsigned int cpu;
	int i;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		spin_lock_init(&rcache->lock);
		rcache->depot_size = 0;
		rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache), cache_line_size());
		if (WARN_ON(!rcache->cpu_rcaches))
			continue;
		for_each_possible_cpu(cpu) {
			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
			spin_lock_init(&cpu_rcache->lock);
			cpu_rcache->loaded = iova_magazine_alloc(GFP_KERNEL);
			cpu_rcache->prev = iova_magazine_alloc(GFP_KERNEL);
		}
	}
}

/*
 * Try inserting IOVA range starting with 'iova_pfn' into 'rcache', and
 * return true on success.  Can fail if rcache is full and we can't free
 * space, and free_iova() (our only caller) will then return the IOVA
 * range to the rbtree instead.
 */
static bool __iova_rcache_insert(struct iova_domain *iovad,
				 struct iova_rcache *rcache,
				 unsigned long iova_pfn)
{
	struct iova_magazine *mag_to_free = NULL;
	struct iova_cpu_rcache *cpu_rcache;
	bool can_insert = false;
	unsigned long flags;

	cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
	spin_lock_irqsave(&cpu_rcache->lock, flags);

	if (!iova_magazine_full(cpu_rcache->loaded)) {
		can_insert = true;
	} else if (!iova_magazine_full(cpu_rcache->prev)) {
		swap(cpu_rcache->prev, cpu_rcache->loaded);
		can_insert = true;
	} else {
		struct iova_magazine *new_mag = iova_magazine_alloc(GFP_ATOMIC);

		if (new_mag) {
			spin_lock(&rcache->lock);
			if (rcache->depot_size < MAX_GLOBAL_MAGS) {
				rcache->depot[rcache->depot_size++] =
						cpu_rcache->loaded;
			} else {
				mag_to_free = cpu_rcache->loaded;
			}
			spin_unlock(&rcache->lock);

			cpu_rcache->loaded = new_mag;
			can_insert = true;
		}
	}

	if (can_insert)
		iova_magazine_push(cpu_rcache->loaded, iova_pfn);

	spin_unlock_irqrestore(&cpu_rcache->lock, flags);

	if (mag_to_free) {
		iova_magazine_free_pfns(mag_to_free, iovad);
		iova_magazine_free(mag_to_free);
	}

	return can_insert;
}

static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn,
			       unsigned long size)
{
	unsigned int log_size = order_base_2(size);

	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
		return false;

	return __iova_rcache_insert(iovad, &iovad->rcaches[log_size], pfn);
}

/*
 * Caller wants to allocate a new IOVA range from 'rcache'.  If we can
 * satisfy the request, return a matching non-NULL range and remove
 * it from the 'rcache'.
 */
static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
				       unsigned long limit_pfn)
{
	struct iova_cpu_rcache *cpu_rcache;
	unsigned long iova_pfn = 0;
	bool has_pfn = false;
	unsigned long flags;

	cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
	spin_lock_irqsave(&cpu_rcache->lock, flags);

	if (!iova_magazine_empty(cpu_rcache->loaded)) {
		has_pfn = true;
	} else if (!iova_magazine_empty(cpu_rcache->prev)) {
		swap(cpu_rcache->prev, cpu_rcache->loaded);
		has_pfn = true;
	} else {
		spin_lock(&rcache->lock);
		if (rcache->depot_size > 0) {
			iova_magazine_free(cpu_rcache->loaded);
			cpu_rcache->loaded = rcache->depot[--rcache->depot_size];
			has_pfn = true;
		}
		spin_unlock(&rcache->lock);
	}

	if (has_pfn)
		iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);

	spin_unlock_irqrestore(&cpu_rcache->lock, flags);

	return iova_pfn;
}

/*
 * Try to satisfy IOVA allocation range from rcache.  Fail if requested
 * size is too big or the DMA limit we are given isn't satisfied by the
 * top element in the magazine.
 */
static unsigned long iova_rcache_get(struct iova_domain *iovad,
				     unsigned long size,
				     unsigned long limit_pfn)
{
	unsigned int log_size = order_base_2(size);

	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
		return 0;

	return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size);
}

/*
 * free rcache data structures.
 */
static void free_iova_rcaches(struct iova_domain *iovad)
{
	struct iova_rcache *rcache;
	struct iova_cpu_rcache *cpu_rcache;
	unsigned int cpu;
	int i, j;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		for_each_possible_cpu(cpu) {
			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
			iova_magazine_free(cpu_rcache->loaded);
			iova_magazine_free(cpu_rcache->prev);
		}
		free_percpu(rcache->cpu_rcaches);
		for (j = 0; j < rcache->depot_size; ++j)
			iova_magazine_free(rcache->depot[j]);
	}
}

/*
 * free all the IOVA ranges cached by a cpu (used when cpu is unplugged)
 */
static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
{
	struct iova_cpu_rcache *cpu_rcache;
	struct iova_rcache *rcache;
	unsigned long flags;
	int i;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
		spin_lock_irqsave(&cpu_rcache->lock, flags);
		iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
		iova_magazine_free_pfns(cpu_rcache->prev, iovad);
		spin_unlock_irqrestore(&cpu_rcache->lock, flags);
	}
}

/*
 * free all the IOVA ranges of global cache
 */
static void free_global_cached_iovas(struct iova_domain *iovad)
{
	struct iova_rcache *rcache;
	unsigned long flags;
	int i, j;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		spin_lock_irqsave(&rcache->lock, flags);
		for (j = 0; j < rcache->depot_size; ++j) {
			iova_magazine_free_pfns(rcache->depot[j], iovad);
			iova_magazine_free(rcache->depot[j]);
		}
		rcache->depot_size = 0;
		spin_unlock_irqrestore(&rcache->lock, flags);
	}
}

MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
MODULE_LICENSE("GPL");