// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2006-2009, Intel Corporation.
 *
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include <linux/iova.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/cpu.h>

/* The anchor node sits above the top of the usable address space */
#define IOVA_ANCHOR	~0UL

static bool iova_rcache_insert(struct iova_domain *iovad,
			       unsigned long pfn,
			       unsigned long size);
static unsigned long iova_rcache_get(struct iova_domain *iovad,
				     unsigned long size,
				     unsigned long limit_pfn);
static void init_iova_rcaches(struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);
static void fq_destroy_all_entries(struct iova_domain *iovad);
static void fq_flush_timeout(struct timer_list *t);
static void free_global_cached_iovas(struct iova_domain *iovad);

void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	unsigned long start_pfn)
{
	/*
	 * IOVA granularity will normally be equal to the smallest
	 * supported IOMMU page size; both *must* be capable of
	 * representing individual CPU pages exactly.
	 */
	BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));

	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached_node = &iovad->anchor.node;
	iovad->cached32_node = &iovad->anchor.node;
	iovad->granule = granule;
	iovad->start_pfn = start_pfn;
	iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
	iovad->max32_alloc_size = iovad->dma_32bit_pfn;
	iovad->flush_cb = NULL;
	iovad->fq = NULL;
	iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
	rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
	rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
	init_iova_rcaches(iovad);
}
EXPORT_SYMBOL_GPL(init_iova_domain);

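/*
 * Usage sketch: a minimal iova_domain lifecycle, assuming PAGE_SIZE
 * granules and allocatable space starting at PFN 1. The function name
 * and values are hypothetical, for illustration only.
 */
#if 0
static int example_iova_domain_lifecycle(void)
{
	static struct iova_domain iovad;

	/* Take a reference on the global 'struct iova' slab cache first */
	if (iova_cache_get())
		return -ENOMEM;

	init_iova_domain(&iovad, PAGE_SIZE, 1);

	/* ... allocate and free IOVAs against &iovad here ... */

	put_iova_domain(&iovad);
	iova_cache_put();
	return 0;
}
#endif
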
static bool has_iova_flush_queue(struct iova_domain *iovad)
{
	return !!iovad->fq;
}

static void free_iova_flush_queue(struct iova_domain *iovad)
{
	if (!has_iova_flush_queue(iovad))
		return;

	if (timer_pending(&iovad->fq_timer))
		del_timer(&iovad->fq_timer);

	fq_destroy_all_entries(iovad);

	free_percpu(iovad->fq);

	iovad->fq = NULL;
	iovad->flush_cb = NULL;
	iovad->entry_dtor = NULL;
}

int init_iova_flush_queue(struct iova_domain *iovad,
			  iova_flush_cb flush_cb, iova_entry_dtor entry_dtor)
{
	struct iova_fq __percpu *queue;
	int cpu;

	atomic64_set(&iovad->fq_flush_start_cnt, 0);
	atomic64_set(&iovad->fq_flush_finish_cnt, 0);

	queue = alloc_percpu(struct iova_fq);
	if (!queue)
		return -ENOMEM;

	iovad->flush_cb = flush_cb;
	iovad->entry_dtor = entry_dtor;

	for_each_possible_cpu(cpu) {
		struct iova_fq *fq;

		fq = per_cpu_ptr(queue, cpu);
		fq->head = 0;
		fq->tail = 0;

		spin_lock_init(&fq->lock);
	}

	smp_wmb();

	iovad->fq = queue;

	timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
	atomic_set(&iovad->fq_timer_on, 0);

	return 0;
}

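/*
 * Usage sketch: enabling deferred (flush-queued) frees. Both callbacks
 * are hypothetical driver hooks; a real IOMMU driver would invalidate
 * its hardware IOTLB in the first one.
 */
#if 0
static void example_flush_all(struct iova_domain *iovad)
{
	/* Invalidate the IOTLB for the whole domain here */
}

static void example_entry_dtor(unsigned long data)
{
	/* Release whatever the driver stashed in queue_iova()'s @data */
}

static int example_enable_deferred_flush(struct iova_domain *iovad)
{
	return init_iova_flush_queue(iovad, example_flush_all,
				     example_entry_dtor);
}
#endif
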
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long limit_pfn)
{
	if (limit_pfn <= iovad->dma_32bit_pfn)
		return iovad->cached32_node;

	return iovad->cached_node;
}

static void
__cached_rbnode_insert_update(struct iova_domain *iovad, struct iova *new)
{
	if (new->pfn_hi < iovad->dma_32bit_pfn)
		iovad->cached32_node = &new->node;
	else
		iovad->cached_node = &new->node;
}

static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;

	cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
	if (free == cached_iova ||
	    (free->pfn_hi < iovad->dma_32bit_pfn &&
	     free->pfn_lo >= cached_iova->pfn_lo)) {
		iovad->cached32_node = rb_next(&free->node);
		iovad->max32_alloc_size = iovad->dma_32bit_pfn;
	}

	cached_iova = rb_entry(iovad->cached_node, struct iova, node);
	if (free->pfn_lo >= cached_iova->pfn_lo)
		iovad->cached_node = rb_next(&free->node);
}

/* Insert the iova into the domain rbtree; the caller must hold the writer lock */
static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova,
		   struct rb_node *start)
{
	struct rb_node **new, *parent = NULL;

	new = (start) ? &start : &(root->rb_node);
	/* Figure out where to put new node */
	while (*new) {
		struct iova *this = rb_entry(*new, struct iova, node);

		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else {
			WARN_ON(1); /* this should not happen */
			return;
		}
	}
	/* Add new node and rebalance tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}

static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
		unsigned long size, unsigned long limit_pfn,
			struct iova *new, bool size_aligned)
{
	struct rb_node *curr, *prev;
	struct iova *curr_iova;
	unsigned long flags;
	unsigned long new_pfn, retry_pfn;
	unsigned long align_mask = ~0UL;
	unsigned long high_pfn = limit_pfn, low_pfn = iovad->start_pfn;

	if (size_aligned)
		align_mask <<= fls_long(size - 1);

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	if (limit_pfn <= iovad->dma_32bit_pfn &&
			size >= iovad->max32_alloc_size)
		goto iova32_full;

	curr = __get_cached_rbnode(iovad, limit_pfn);
	curr_iova = rb_entry(curr, struct iova, node);
	retry_pfn = curr_iova->pfn_hi + 1;

retry:
	do {
		high_pfn = min(high_pfn, curr_iova->pfn_lo);
		new_pfn = (high_pfn - size) & align_mask;
		prev = curr;
		curr = rb_prev(curr);
		curr_iova = rb_entry(curr, struct iova, node);
	} while (curr && new_pfn <= curr_iova->pfn_hi && new_pfn >= low_pfn);

	if (high_pfn < size || new_pfn < low_pfn) {
		if (low_pfn == iovad->start_pfn && retry_pfn < limit_pfn) {
			high_pfn = limit_pfn;
			low_pfn = retry_pfn;
			curr = &iovad->anchor.node;
			curr_iova = rb_entry(curr, struct iova, node);
			goto retry;
		}
		iovad->max32_alloc_size = size;
		goto iova32_full;
	}

	/* pfn_lo will point to size aligned address if size_aligned is set */
	new->pfn_lo = new_pfn;
	new->pfn_hi = new->pfn_lo + size - 1;

	/* If we have 'prev', it's a valid place to start the insertion. */
	iova_insert_rbtree(&iovad->rbroot, new, prev);
	__cached_rbnode_insert_update(iovad, new);

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return 0;

iova32_full:
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return -ENOMEM;
}

static struct kmem_cache *iova_cache;
static unsigned int iova_cache_users;
static DEFINE_MUTEX(iova_cache_mutex);

static struct iova *alloc_iova_mem(void)
{
	return kmem_cache_zalloc(iova_cache, GFP_ATOMIC | __GFP_NOWARN);
}

static void free_iova_mem(struct iova *iova)
{
	if (iova->pfn_lo != IOVA_ANCHOR)
		kmem_cache_free(iova_cache, iova);
}

int iova_cache_get(void)
{
	mutex_lock(&iova_cache_mutex);
	if (!iova_cache_users) {
		iova_cache = kmem_cache_create(
			"iommu_iova", sizeof(struct iova), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!iova_cache) {
			mutex_unlock(&iova_cache_mutex);
			pr_err("Couldn't create iova cache\n");
			return -ENOMEM;
		}
	}

	iova_cache_users++;
	mutex_unlock(&iova_cache_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(iova_cache_get);

void iova_cache_put(void)
{
	mutex_lock(&iova_cache_mutex);
	if (WARN_ON(!iova_cache_users)) {
		mutex_unlock(&iova_cache_mutex);
		return;
	}
	iova_cache_users--;
	if (!iova_cache_users)
		kmem_cache_destroy(iova_cache);
	mutex_unlock(&iova_cache_mutex);
}
EXPORT_SYMBOL_GPL(iova_cache_put);

/**
 * alloc_iova - allocates an iova
 * @iovad: - iova domain in question
 * @size: - number of page frames to allocate
 * @limit_pfn: - max limit address
 * @size_aligned: - set if size_aligned address range is required
 * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
 * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
 * flag is set then the allocated address iova->pfn_lo will be naturally
 * aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned)
{
	struct iova *new_iova;
	int ret;

	new_iova = alloc_iova_mem();
	if (!new_iova)
		return NULL;

	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1,
			new_iova, size_aligned);

	if (ret) {
		free_iova_mem(new_iova);
		return NULL;
	}

	return new_iova;
}
EXPORT_SYMBOL_GPL(alloc_iova);

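/*
 * Usage sketch: a strict (uncached) allocation of eight IOVA pages below
 * 4GiB, naturally aligned because size_aligned is true. Assumes
 * DMA_BIT_MASK() from <linux/dma-mapping.h>; the function name is
 * hypothetical.
 */
#if 0
static dma_addr_t example_strict_alloc(struct iova_domain *iovad)
{
	unsigned long limit_pfn = DMA_BIT_MASK(32) >> iova_shift(iovad);
	struct iova *iova;

	iova = alloc_iova(iovad, 8, limit_pfn, true);
	if (!iova)
		return 0;

	/*
	 * Map iova_dma_addr(iovad, iova) in the IOMMU page tables here;
	 * release the range later with __free_iova(iovad, iova) or
	 * free_iova(iovad, iova->pfn_lo).
	 */
	return iova_dma_addr(iovad, iova);
}
#endif
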
static struct iova *
private_find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct rb_node *node = iovad->rbroot.rb_node;

	assert_spin_locked(&iovad->iova_rbtree_lock);

	while (node) {
		struct iova *iova = rb_entry(node, struct iova, node);

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_hi)
			node = node->rb_right;
		else
			return iova;	/* pfn falls within iova's range */
	}

	return NULL;
}

static void private_free_iova(struct iova_domain *iovad, struct iova *iova)
{
	assert_spin_locked(&iovad->iova_rbtree_lock);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
	free_iova_mem(iova);
}

/**
 * find_iova - finds an iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct iova *iova;

	/* Take the lock so that no other thread is manipulating the rbtree */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	iova = private_find_iova(iovad, pfn);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
EXPORT_SYMBOL_GPL(find_iova);

/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	private_free_iova(iovad, iova);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(__free_iova);

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - pfn that was allocated previously
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct iova *iova;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	iova = private_find_iova(iovad, pfn);
	if (iova)
		private_free_iova(iovad, iova);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(free_iova);

/**
 * alloc_iova_fast - allocates an iova from rcache
 * @iovad: - iova domain in question
 * @size: - number of page frames to allocate
 * @limit_pfn: - max limit address
 * @flush_rcache: - set to flush rcache on regular allocation failure
 * This function tries to satisfy an iova allocation from the rcache,
 * and falls back to regular allocation on failure. If regular allocation
 * fails too and the flush_rcache flag is set then the rcache will be flushed.
 */
unsigned long
alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
		unsigned long limit_pfn, bool flush_rcache)
{
	unsigned long iova_pfn;
	struct iova *new_iova;

	iova_pfn = iova_rcache_get(iovad, size, limit_pfn + 1);
	if (iova_pfn)
		return iova_pfn;

retry:
	new_iova = alloc_iova(iovad, size, limit_pfn, true);
	if (!new_iova) {
		unsigned int cpu;

		if (!flush_rcache)
			return 0;

		/* Try replenishing IOVAs by flushing rcache. */
		flush_rcache = false;
		for_each_online_cpu(cpu)
			free_cpu_cached_iovas(cpu, iovad);
		free_global_cached_iovas(iovad);
		goto retry;
	}

	return new_iova->pfn_lo;
}

/**
 * free_iova_fast - free iova pfn range into rcache
 * @iovad: - iova domain in question.
 * @pfn: - pfn that was allocated previously
 * @size: - # of pages in range
 * This function frees an iova range by trying to put it into the rcache,
 * falling back to regular iova deallocation via free_iova() if this fails.
 */
void
free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
{
	if (iova_rcache_insert(iovad, pfn, size))
		return;

	free_iova(iovad, pfn);
}
EXPORT_SYMBOL_GPL(free_iova_fast);

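/*
 * Usage sketch: the cached fast path a DMA map/unmap implementation might
 * use. Allocation and free sizes must match so ranges return to the same
 * size class. Names are hypothetical; DMA_BIT_MASK() is assumed from
 * <linux/dma-mapping.h>.
 */
#if 0
static unsigned long example_fast_map(struct iova_domain *iovad,
				      unsigned long npages)
{
	unsigned long limit_pfn = DMA_BIT_MASK(32) >> iova_shift(iovad);

	/* Last argument: flush the rcaches and retry once before failing */
	return alloc_iova_fast(iovad, npages, limit_pfn, true);
}

static void example_fast_unmap(struct iova_domain *iovad,
			       unsigned long pfn, unsigned long npages)
{
	free_iova_fast(iovad, pfn, npages);
}
#endif
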
#define fq_ring_for_each(i, fq) \
	for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)

static inline bool fq_full(struct iova_fq *fq)
{
	assert_spin_locked(&fq->lock);
	return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
}

static inline unsigned fq_ring_add(struct iova_fq *fq)
{
	unsigned idx = fq->tail;

	assert_spin_locked(&fq->lock);

	fq->tail = (idx + 1) % IOVA_FQ_SIZE;

	return idx;
}

static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
{
	u64 counter = atomic64_read(&iovad->fq_flush_finish_cnt);
	unsigned idx;

	assert_spin_locked(&fq->lock);

	fq_ring_for_each(idx, fq) {
		if (fq->entries[idx].counter >= counter)
			break;

		if (iovad->entry_dtor)
			iovad->entry_dtor(fq->entries[idx].data);

		free_iova_fast(iovad,
			       fq->entries[idx].iova_pfn,
			       fq->entries[idx].pages);

		fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
	}
}

static void iova_domain_flush(struct iova_domain *iovad)
{
	atomic64_inc(&iovad->fq_flush_start_cnt);
	iovad->flush_cb(iovad);
	atomic64_inc(&iovad->fq_flush_finish_cnt);
}

static void fq_destroy_all_entries(struct iova_domain *iovad)
{
	int cpu;

	/*
	 * This code runs when the iova_domain is being destroyed, so don't
	 * bother to free iovas, just call the entry_dtor on all remaining
	 * entries.
	 */
	if (!iovad->entry_dtor)
		return;

	for_each_possible_cpu(cpu) {
		struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu);
		int idx;

		fq_ring_for_each(idx, fq)
			iovad->entry_dtor(fq->entries[idx].data);
	}
}

static void fq_flush_timeout(struct timer_list *t)
{
	struct iova_domain *iovad = from_timer(iovad, t, fq_timer);
	int cpu;

	atomic_set(&iovad->fq_timer_on, 0);
	iova_domain_flush(iovad);

	for_each_possible_cpu(cpu) {
		unsigned long flags;
		struct iova_fq *fq;

		fq = per_cpu_ptr(iovad->fq, cpu);
		spin_lock_irqsave(&fq->lock, flags);
		fq_ring_free(iovad, fq);
		spin_unlock_irqrestore(&fq->lock, flags);
	}
}

void queue_iova(struct iova_domain *iovad,
		unsigned long pfn, unsigned long pages,
		unsigned long data)
{
	struct iova_fq *fq = raw_cpu_ptr(iovad->fq);
	unsigned long flags;
	unsigned idx;

	spin_lock_irqsave(&fq->lock, flags);

	/*
	 * First remove all entries from the flush queue that have already been
	 * flushed out on another CPU. This makes the fq_full() check below less
	 * likely to be true.
	 */
	fq_ring_free(iovad, fq);

	if (fq_full(fq)) {
		iova_domain_flush(iovad);
		fq_ring_free(iovad, fq);
	}

	idx = fq_ring_add(fq);

	fq->entries[idx].iova_pfn = pfn;
	fq->entries[idx].pages = pages;
	fq->entries[idx].data = data;
	fq->entries[idx].counter = atomic64_read(&iovad->fq_flush_start_cnt);

	spin_unlock_irqrestore(&fq->lock, flags);

	/* Avoid false sharing as much as possible. */
	if (!atomic_read(&iovad->fq_timer_on) &&
	    !atomic_xchg(&iovad->fq_timer_on, 1))
		mod_timer(&iovad->fq_timer,
			  jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
}

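/*
 * Usage sketch: deferring an IOVA free until after the next IOTLB flush,
 * as an unmap path might. The @data cookie (0 here) is opaque and only
 * seen by the entry_dtor, if any. The function name is hypothetical.
 */
#if 0
static void example_deferred_unmap(struct iova_domain *iovad,
				   unsigned long pfn, unsigned long npages)
{
	/* Unmap the range from the IOMMU page tables first, then: */
	queue_iova(iovad, pfn, npages, 0);
	/* The range becomes reusable once fq_ring_free() reaps the entry */
}
#endif
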
/**
 * put_iova_domain - destroys the iova domain
 * @iovad: - iova domain in question.
 * All the iovas in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
	struct iova *iova, *tmp;

	free_iova_flush_queue(iovad);
	free_iova_rcaches(iovad);
	rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
		free_iova_mem(iova);
}
EXPORT_SYMBOL_GPL(put_iova_domain);

static int
__is_range_overlap(struct rb_node *node,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova = rb_entry(node, struct iova, node);

	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
		return 1;
	return 0;
}

static inline struct iova *
alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_iova_mem();
	if (iova) {
		iova->pfn_lo = pfn_lo;
		iova->pfn_hi = pfn_hi;
	}

	return iova;
}

static struct iova *
__insert_new_range(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_and_init_iova(pfn_lo, pfn_hi);
	if (iova)
		iova_insert_rbtree(&iovad->rbroot, iova, NULL);

	return iova;
}

static void
__adjust_overlap_range(struct iova *iova,
	unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < iova->pfn_lo)
		iova->pfn_lo = *pfn_lo;
	if (*pfn_hi > iova->pfn_hi)
		*pfn_lo = iova->pfn_hi + 1;
}

/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: - iova domain pointer
 * @pfn_lo: - lower page frame address
 * @pfn_hi: - higher pfn address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that this address is not dished out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;
	unsigned int overlap = 0;

	/* Don't allow nonsensical pfns */
	if (WARN_ON((pfn_hi | pfn_lo) > (ULLONG_MAX >> iova_shift(iovad))))
		return NULL;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
			iova = rb_entry(node, struct iova, node);
			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
			if ((pfn_lo >= iova->pfn_lo) &&
				(pfn_hi <= iova->pfn_hi))
				goto finish;
			overlap = 1;

		} else if (overlap)
			break;
	}

	/* We are here either because this is the first reserved node
	 * or we need to insert the remaining non-overlapping address range
	 */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
EXPORT_SYMBOL_GPL(reserve_iova);

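/*
 * Usage sketch: carving out an address range so the allocator never hands
 * it to a device. The x86 MSI window at 0xfee00000 is an illustrative
 * value only, and the function name is hypothetical.
 */
#if 0
static void example_reserve_msi_window(struct iova_domain *iovad)
{
	unsigned long lo = iova_pfn(iovad, 0xfee00000);
	unsigned long hi = iova_pfn(iovad, 0xfeefffff);

	if (!reserve_iova(iovad, lo, hi))
		pr_err("Failed to reserve MSI window\n");
}
#endif
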
/*
 * Magazine caches for IOVA ranges. For an introduction to magazines,
 * see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab
 * Allocator to Many CPUs and Arbitrary Resources" by Bonwick and Adams.
 * For simplicity, we use a static magazine size and don't implement the
 * dynamic size tuning described in the paper.
 */

#define IOVA_MAG_SIZE 128

struct iova_magazine {
	unsigned long size;
	unsigned long pfns[IOVA_MAG_SIZE];
};

struct iova_cpu_rcache {
	spinlock_t lock;
	struct iova_magazine *loaded;
	struct iova_magazine *prev;
};

static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
{
	return kzalloc(sizeof(struct iova_magazine), flags);
}

static void iova_magazine_free(struct iova_magazine *mag)
{
	kfree(mag);
}

static void
iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
{
	unsigned long flags;
	int i;

	if (!mag)
		return;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);

	for (i = 0; i < mag->size; ++i) {
		struct iova *iova = private_find_iova(iovad, mag->pfns[i]);

		if (WARN_ON(!iova))
			continue;

		private_free_iova(iovad, iova);
	}

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	mag->size = 0;
}

static bool iova_magazine_full(struct iova_magazine *mag)
{
	return (mag && mag->size == IOVA_MAG_SIZE);
}

static bool iova_magazine_empty(struct iova_magazine *mag)
{
	return (!mag || mag->size == 0);
}

static unsigned long iova_magazine_pop(struct iova_magazine *mag,
				       unsigned long limit_pfn)
{
	int i;
	unsigned long pfn;

	BUG_ON(iova_magazine_empty(mag));

	/* Only fall back to the rbtree if we have no suitable pfns at all */
	for (i = mag->size - 1; mag->pfns[i] > limit_pfn; i--)
		if (i == 0)
			return 0;

	/* Swap it to pop it */
	pfn = mag->pfns[i];
	mag->pfns[i] = mag->pfns[--mag->size];

	return pfn;
}

static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
{
	BUG_ON(iova_magazine_full(mag));

	mag->pfns[mag->size++] = pfn;
}

static void init_iova_rcaches(struct iova_domain *iovad)
{
	struct iova_cpu_rcache *cpu_rcache;
	struct iova_rcache *rcache;
	unsigned int cpu;
	int i;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		spin_lock_init(&rcache->lock);
		rcache->depot_size = 0;
		rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache), cache_line_size());
		if (WARN_ON(!rcache->cpu_rcaches))
			continue;
		for_each_possible_cpu(cpu) {
			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
			spin_lock_init(&cpu_rcache->lock);
			cpu_rcache->loaded = iova_magazine_alloc(GFP_KERNEL);
			cpu_rcache->prev = iova_magazine_alloc(GFP_KERNEL);
		}
	}
}

/*
 * Try inserting IOVA range starting with 'iova_pfn' into 'rcache', and
 * return true on success.  Can fail if rcache is full and we can't free
 * space, and free_iova_fast() (our only caller) will then return the
 * IOVA range to the rbtree via free_iova() instead.
 */
static bool __iova_rcache_insert(struct iova_domain *iovad,
				 struct iova_rcache *rcache,
				 unsigned long iova_pfn)
{
	struct iova_magazine *mag_to_free = NULL;
	struct iova_cpu_rcache *cpu_rcache;
	bool can_insert = false;
	unsigned long flags;

	cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
	spin_lock_irqsave(&cpu_rcache->lock, flags);

	if (!iova_magazine_full(cpu_rcache->loaded)) {
		can_insert = true;
	} else if (!iova_magazine_full(cpu_rcache->prev)) {
		swap(cpu_rcache->prev, cpu_rcache->loaded);
		can_insert = true;
	} else {
		struct iova_magazine *new_mag = iova_magazine_alloc(GFP_ATOMIC);

		if (new_mag) {
			spin_lock(&rcache->lock);
			if (rcache->depot_size < MAX_GLOBAL_MAGS) {
				rcache->depot[rcache->depot_size++] =
						cpu_rcache->loaded;
			} else {
				mag_to_free = cpu_rcache->loaded;
			}
			spin_unlock(&rcache->lock);

			cpu_rcache->loaded = new_mag;
			can_insert = true;
		}
	}

	if (can_insert)
		iova_magazine_push(cpu_rcache->loaded, iova_pfn);

	spin_unlock_irqrestore(&cpu_rcache->lock, flags);

	if (mag_to_free) {
		iova_magazine_free_pfns(mag_to_free, iovad);
		iova_magazine_free(mag_to_free);
	}

	return can_insert;
}

static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn,
			       unsigned long size)
{
	unsigned int log_size = order_base_2(size);

	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
		return false;

	return __iova_rcache_insert(iovad, &iovad->rcaches[log_size], pfn);
}

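/*
 * Illustrative note on the size classes: order_base_2() rounds the size up
 * to a power of two, so e.g. a 6-page range lands in the 8-page class
 * (log_size 3). With the usual IOVA_RANGE_CACHE_MAX_SIZE of 6, only ranges
 * up to 32 pages are cached; larger ones always take the rbtree path.
 */
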
/*
 * Caller wants to allocate a new IOVA range from 'rcache'.  If we can
 * satisfy the request, return a matching non-NULL range and remove
 * it from the 'rcache'.
 */
static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
				       unsigned long limit_pfn)
{
	struct iova_cpu_rcache *cpu_rcache;
	unsigned long iova_pfn = 0;
	bool has_pfn = false;
	unsigned long flags;

	cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
	spin_lock_irqsave(&cpu_rcache->lock, flags);

	if (!iova_magazine_empty(cpu_rcache->loaded)) {
		has_pfn = true;
	} else if (!iova_magazine_empty(cpu_rcache->prev)) {
		swap(cpu_rcache->prev, cpu_rcache->loaded);
		has_pfn = true;
	} else {
		spin_lock(&rcache->lock);
		if (rcache->depot_size > 0) {
			iova_magazine_free(cpu_rcache->loaded);
			cpu_rcache->loaded = rcache->depot[--rcache->depot_size];
			has_pfn = true;
		}
		spin_unlock(&rcache->lock);
	}

	if (has_pfn)
		iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);

	spin_unlock_irqrestore(&cpu_rcache->lock, flags);

	return iova_pfn;
}

/*
 * Try to satisfy IOVA allocation range from rcache.  Fail if requested
 * size is too big or the DMA limit we are given isn't satisfied by any
 * element in the magazine.
 */
static unsigned long iova_rcache_get(struct iova_domain *iovad,
				     unsigned long size,
				     unsigned long limit_pfn)
{
	unsigned int log_size = order_base_2(size);

	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
		return 0;

	return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size);
}

/*
 * Free rcache data structures.
 */
static void free_iova_rcaches(struct iova_domain *iovad)
{
	struct iova_rcache *rcache;
	struct iova_cpu_rcache *cpu_rcache;
	unsigned int cpu;
	int i, j;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		for_each_possible_cpu(cpu) {
			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
			iova_magazine_free(cpu_rcache->loaded);
			iova_magazine_free(cpu_rcache->prev);
		}
		free_percpu(rcache->cpu_rcaches);
		for (j = 0; j < rcache->depot_size; ++j)
			iova_magazine_free(rcache->depot[j]);
	}
}

/*
 * Free all the IOVA ranges cached by a cpu (used when cpu is unplugged).
 */
void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
{
	struct iova_cpu_rcache *cpu_rcache;
	struct iova_rcache *rcache;
	unsigned long flags;
	int i;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
		spin_lock_irqsave(&cpu_rcache->lock, flags);
		iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
		iova_magazine_free_pfns(cpu_rcache->prev, iovad);
		spin_unlock_irqrestore(&cpu_rcache->lock, flags);
	}
}

/*
 * Free all the IOVA ranges held in a domain's global (depot) caches.
 */
static void free_global_cached_iovas(struct iova_domain *iovad)
{
	struct iova_rcache *rcache;
	unsigned long flags;
	int i, j;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		spin_lock_irqsave(&rcache->lock, flags);
		for (j = 0; j < rcache->depot_size; ++j) {
			iova_magazine_free_pfns(rcache->depot[j], iovad);
			iova_magazine_free(rcache->depot[j]);
		}
		rcache->depot_size = 0;
		spin_unlock_irqrestore(&rcache->lock, flags);
	}
}

MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
MODULE_LICENSE("GPL");