// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2006-2009, Intel Corporation.
 *
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include <linux/iova.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/cpu.h>

/* The anchor node sits above the top of the usable address space */
#define IOVA_ANCHOR	~0UL

static bool iova_rcache_insert(struct iova_domain *iovad,
			       unsigned long pfn,
			       unsigned long size);
static unsigned long iova_rcache_get(struct iova_domain *iovad,
				     unsigned long size,
				     unsigned long limit_pfn);
static void init_iova_rcaches(struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);
static void fq_destroy_all_entries(struct iova_domain *iovad);
static void fq_flush_timeout(struct timer_list *t);

void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	unsigned long start_pfn)
{
	/*
	 * IOVA granularity will normally be equal to the smallest
	 * supported IOMMU page size; both *must* be capable of
	 * representing individual CPU pages exactly.
	 */
	BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));

	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached_node = &iovad->anchor.node;
	iovad->cached32_node = &iovad->anchor.node;
	iovad->granule = granule;
	iovad->start_pfn = start_pfn;
	iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
	iovad->max32_alloc_size = iovad->dma_32bit_pfn;
	iovad->flush_cb = NULL;
	iovad->fq = NULL;
	iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
	rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
	rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
	init_iova_rcaches(iovad);
}
EXPORT_SYMBOL_GPL(init_iova_domain);

bool has_iova_flush_queue(struct iova_domain *iovad)
{
	return !!iovad->fq;
}

static void free_iova_flush_queue(struct iova_domain *iovad)
{
	if (!has_iova_flush_queue(iovad))
		return;

	del_timer_sync(&iovad->fq_timer);

	fq_destroy_all_entries(iovad);

	free_percpu(iovad->fq);

	iovad->fq = NULL;
	iovad->flush_cb = NULL;
	iovad->entry_dtor = NULL;
}

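/*
 * Set up the deferred-flush machinery for a domain: per-CPU flush queues,
 * the flush/teardown callbacks, and the timer that periodically drains the
 * queues (see fq_flush_timeout() below).
 */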
int init_iova_flush_queue(struct iova_domain *iovad,
			  iova_flush_cb flush_cb, iova_entry_dtor entry_dtor)
{
	struct iova_fq __percpu *queue;
	int cpu;

	atomic64_set(&iovad->fq_flush_start_cnt, 0);
	atomic64_set(&iovad->fq_flush_finish_cnt, 0);

	queue = alloc_percpu(struct iova_fq);
	if (!queue)
		return -ENOMEM;

	iovad->flush_cb = flush_cb;
	iovad->entry_dtor = entry_dtor;

	for_each_possible_cpu(cpu) {
		struct iova_fq *fq;

		fq = per_cpu_ptr(queue, cpu);
		fq->head = 0;
		fq->tail = 0;

		spin_lock_init(&fq->lock);
	}

	smp_wmb();

	iovad->fq = queue;

	timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
	atomic_set(&iovad->fq_timer_on, 0);

	return 0;
}
EXPORT_SYMBOL_GPL(init_iova_flush_queue);

static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long limit_pfn)
{
	if (limit_pfn <= iovad->dma_32bit_pfn)
		return iovad->cached32_node;

	return iovad->cached_node;
}

static void
__cached_rbnode_insert_update(struct iova_domain *iovad, struct iova *new)
{
	if (new->pfn_hi < iovad->dma_32bit_pfn)
		iovad->cached32_node = &new->node;
	else
		iovad->cached_node = &new->node;
}

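/*
 * Keep the cached nodes valid when an iova is freed: advance them past the
 * freed range where necessary, and let 32-bit allocations retry once space
 * has been returned below the 32-bit boundary.
 */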
static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;

	cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
	if (free == cached_iova ||
	    (free->pfn_hi < iovad->dma_32bit_pfn &&
	     free->pfn_lo >= cached_iova->pfn_lo))
		iovad->cached32_node = rb_next(&free->node);

	if (free->pfn_lo < iovad->dma_32bit_pfn)
		iovad->max32_alloc_size = iovad->dma_32bit_pfn;

	cached_iova = rb_entry(iovad->cached_node, struct iova, node);
	if (free->pfn_lo >= cached_iova->pfn_lo)
		iovad->cached_node = rb_next(&free->node);
}

/* Insert the iova into domain rbtree by holding writer lock */
static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova,
		   struct rb_node *start)
{
	struct rb_node **new, *parent = NULL;

	new = (start) ? &start : &(root->rb_node);
	/* Figure out where to put new node */
	while (*new) {
		struct iova *this = rb_entry(*new, struct iova, node);

		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else {
			WARN_ON(1); /* this should not happen */
			return;
		}
	}
	/* Add new node and rebalance tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}

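/*
 * Find a free range of @size pfns below @limit_pfn by walking the rbtree
 * backwards from the cached node, then insert the new iova. Runs under the
 * rbtree lock; returns 0 on success or -ENOMEM if no space is left.
 */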
static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
		unsigned long size, unsigned long limit_pfn,
		struct iova *new, bool size_aligned)
{
	struct rb_node *curr, *prev;
	struct iova *curr_iova;
	unsigned long flags;
	unsigned long new_pfn;
	unsigned long align_mask = ~0UL;

	if (size_aligned)
		align_mask <<= fls_long(size - 1);

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	if (limit_pfn <= iovad->dma_32bit_pfn &&
			size >= iovad->max32_alloc_size)
		goto iova32_full;

	curr = __get_cached_rbnode(iovad, limit_pfn);
	curr_iova = rb_entry(curr, struct iova, node);
	do {
		limit_pfn = min(limit_pfn, curr_iova->pfn_lo);
		new_pfn = (limit_pfn - size) & align_mask;
		prev = curr;
		curr = rb_prev(curr);
		curr_iova = rb_entry(curr, struct iova, node);
	} while (curr && new_pfn <= curr_iova->pfn_hi);

	if (limit_pfn < size || new_pfn < iovad->start_pfn) {
		iovad->max32_alloc_size = size;
		goto iova32_full;
	}

	/* pfn_lo will point to size aligned address if size_aligned is set */
	new->pfn_lo = new_pfn;
	new->pfn_hi = new->pfn_lo + size - 1;

	/* If we have 'prev', it's a valid place to start the insertion. */
	iova_insert_rbtree(&iovad->rbroot, new, prev);
	__cached_rbnode_insert_update(iovad, new);

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return 0;

iova32_full:
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return -ENOMEM;
}

static struct kmem_cache *iova_cache;
static unsigned int iova_cache_users;
static DEFINE_MUTEX(iova_cache_mutex);

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_zalloc(iova_cache, GFP_ATOMIC | __GFP_NOWARN);
}
EXPORT_SYMBOL(alloc_iova_mem);

void free_iova_mem(struct iova *iova)
{
	if (iova->pfn_lo != IOVA_ANCHOR)
		kmem_cache_free(iova_cache, iova);
}
EXPORT_SYMBOL(free_iova_mem);

int iova_cache_get(void)
{
	mutex_lock(&iova_cache_mutex);
	if (!iova_cache_users) {
		iova_cache = kmem_cache_create(
			"iommu_iova", sizeof(struct iova), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!iova_cache) {
			mutex_unlock(&iova_cache_mutex);
			pr_err("Couldn't create iova cache\n");
			return -ENOMEM;
		}
	}

	iova_cache_users++;
	mutex_unlock(&iova_cache_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(iova_cache_get);

void iova_cache_put(void)
{
	mutex_lock(&iova_cache_mutex);
	if (WARN_ON(!iova_cache_users)) {
		mutex_unlock(&iova_cache_mutex);
		return;
	}
	iova_cache_users--;
	if (!iova_cache_users)
		kmem_cache_destroy(iova_cache);
	mutex_unlock(&iova_cache_mutex);
}
EXPORT_SYMBOL_GPL(iova_cache_put);

/**
 * alloc_iova - allocates an iova
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * @size_aligned: - set if size_aligned address range is required
 * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
 * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
 * flag is set then the allocated address iova->pfn_lo will be naturally
 * aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned)
{
	struct iova *new_iova;
	int ret;

	new_iova = alloc_iova_mem();
	if (!new_iova)
		return NULL;

	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1,
			new_iova, size_aligned);

	if (ret) {
		free_iova_mem(new_iova);
		return NULL;
	}

	return new_iova;
}
EXPORT_SYMBOL_GPL(alloc_iova);

static struct iova *
private_find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct rb_node *node = iovad->rbroot.rb_node;

	assert_spin_locked(&iovad->iova_rbtree_lock);

	while (node) {
		struct iova *iova = rb_entry(node, struct iova, node);

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_hi)
			node = node->rb_right;
		else
			return iova;	/* pfn falls within iova's range */
	}

	return NULL;
}

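/*
 * Remove an iova from the tree and release its memory. The caller must
 * already hold the rbtree lock (unlike __free_iova(), which takes it).
 */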
static void private_free_iova(struct iova_domain *iovad, struct iova *iova)
{
	assert_spin_locked(&iovad->iova_rbtree_lock);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
	free_iova_mem(iova);
}

/**
 * find_iova - finds an iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct iova *iova;

	/* Take the lock so that no other thread is manipulating the rbtree */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	iova = private_find_iova(iovad, pfn);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
EXPORT_SYMBOL_GPL(find_iova);

/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	private_free_iova(iovad, iova);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(__free_iova);

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct iova *iova = find_iova(iovad, pfn);

	if (iova)
		__free_iova(iovad, iova);
}
EXPORT_SYMBOL_GPL(free_iova);

/**
 * alloc_iova_fast - allocates an iova from rcache
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * @flush_rcache: - set to flush rcache on regular allocation failure
 * This function tries to satisfy an iova allocation from the rcache,
 * and falls back to regular allocation on failure. If regular allocation
 * fails too and the flush_rcache flag is set then the rcache will be flushed.
 */
unsigned long
alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
		unsigned long limit_pfn, bool flush_rcache)
{
	unsigned long iova_pfn;
	struct iova *new_iova;

	iova_pfn = iova_rcache_get(iovad, size, limit_pfn + 1);
	if (iova_pfn)
		return iova_pfn;

retry:
	new_iova = alloc_iova(iovad, size, limit_pfn, true);
	if (!new_iova) {
		unsigned int cpu;

		if (!flush_rcache)
			return 0;

		/* Try replenishing IOVAs by flushing rcache. */
		flush_rcache = false;
		for_each_online_cpu(cpu)
			free_cpu_cached_iovas(cpu, iovad);
		goto retry;
	}

	return new_iova->pfn_lo;
}
EXPORT_SYMBOL_GPL(alloc_iova_fast);

/**
 * free_iova_fast - free iova pfn range into rcache
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * @size: - # of pages in range
 * This function frees an iova range by trying to put it into the rcache,
 * falling back to regular iova deallocation via free_iova() if this fails.
 */
void
free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
{
	if (iova_rcache_insert(iovad, pfn, size))
		return;

	free_iova(iovad, pfn);
}
EXPORT_SYMBOL_GPL(free_iova_fast);

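/*
 * Each CPU owns a small ring buffer (struct iova_fq) of IOVA ranges that
 * have been unmapped but whose IOTLB entries may not have been flushed yet.
 * Entries are only returned to the allocator once a domain flush has
 * completed after they were queued.
 */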
#define fq_ring_for_each(i, fq) \
	for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)

static inline bool fq_full(struct iova_fq *fq)
{
	assert_spin_locked(&fq->lock);
	return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
}

static inline unsigned fq_ring_add(struct iova_fq *fq)
{
	unsigned idx = fq->tail;

	assert_spin_locked(&fq->lock);

	fq->tail = (idx + 1) % IOVA_FQ_SIZE;

	return idx;
}

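/*
 * Release all queue entries whose flush has completed, i.e. those queued
 * before the last recorded fq_flush_finish_cnt, calling the entry
 * destructor and returning their IOVA ranges to the rcache.
 */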
static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
{
	u64 counter = atomic64_read(&iovad->fq_flush_finish_cnt);
	unsigned idx;

	assert_spin_locked(&fq->lock);

	fq_ring_for_each(idx, fq) {

		if (fq->entries[idx].counter >= counter)
			break;

		if (iovad->entry_dtor)
			iovad->entry_dtor(fq->entries[idx].data);

		free_iova_fast(iovad,
			       fq->entries[idx].iova_pfn,
			       fq->entries[idx].pages);

		fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
	}
}

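/*
 * Trigger a domain-wide IOTLB flush through the driver callback, bumping
 * the start/finish counters so queued entries can tell whether their
 * flush has already happened.
 */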
static void iova_domain_flush(struct iova_domain *iovad)
{
	atomic64_inc(&iovad->fq_flush_start_cnt);
	iovad->flush_cb(iovad);
	atomic64_inc(&iovad->fq_flush_finish_cnt);
}

static void fq_destroy_all_entries(struct iova_domain *iovad)
{
	int cpu;

	/*
	 * This code runs when the iova_domain is being destroyed, so don't
	 * bother to free iovas, just call the entry_dtor on all remaining
	 * entries.
	 */
	if (!iovad->entry_dtor)
		return;

	for_each_possible_cpu(cpu) {
		struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu);
		int idx;

		fq_ring_for_each(idx, fq)
			iovad->entry_dtor(fq->entries[idx].data);
	}
}

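/*
 * Timer callback: when the flush timer fires, do one domain flush and then
 * drain every CPU's flush queue so deferred IOVA ranges are not held
 * indefinitely on idle CPUs.
 */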
static void fq_flush_timeout(struct timer_list *t)
{
	struct iova_domain *iovad = from_timer(iovad, t, fq_timer);
	int cpu;

	atomic_set(&iovad->fq_timer_on, 0);
	iova_domain_flush(iovad);

	for_each_possible_cpu(cpu) {
		unsigned long flags;
		struct iova_fq *fq;

		fq = per_cpu_ptr(iovad->fq, cpu);
		spin_lock_irqsave(&fq->lock, flags);
		fq_ring_free(iovad, fq);
		spin_unlock_irqrestore(&fq->lock, flags);
	}
}

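/*
 * Defer freeing of an IOVA range: stash it in this CPU's flush queue and
 * (re)arm the flush timer. If the queue is full, force a flush and drain
 * it first so the new entry always fits.
 */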
void queue_iova(struct iova_domain *iovad,
		unsigned long pfn, unsigned long pages,
		unsigned long data)
{
	struct iova_fq *fq = raw_cpu_ptr(iovad->fq);
	unsigned long flags;
	unsigned idx;

	spin_lock_irqsave(&fq->lock, flags);

	/*
	 * First remove all entries from the flush queue that have already been
	 * flushed out on another CPU. This makes the fq_full() check below less
	 * likely to be true.
	 */
	fq_ring_free(iovad, fq);

	if (fq_full(fq)) {
		iova_domain_flush(iovad);
		fq_ring_free(iovad, fq);
	}

	idx = fq_ring_add(fq);

	fq->entries[idx].iova_pfn = pfn;
	fq->entries[idx].pages = pages;
	fq->entries[idx].data = data;
	fq->entries[idx].counter = atomic64_read(&iovad->fq_flush_start_cnt);

	spin_unlock_irqrestore(&fq->lock, flags);

	/* Avoid false sharing as much as possible. */
	if (!atomic_read(&iovad->fq_timer_on) &&
	    !atomic_xchg(&iovad->fq_timer_on, 1))
		mod_timer(&iovad->fq_timer,
			  jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
}
EXPORT_SYMBOL_GPL(queue_iova);

/**
 * put_iova_domain - destroys the iova domain
 * @iovad: - iova domain in question.
 * All the iova's in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
	struct iova *iova, *tmp;

	free_iova_flush_queue(iovad);
	free_iova_rcaches(iovad);
	rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
		free_iova_mem(iova);
}
EXPORT_SYMBOL_GPL(put_iova_domain);

static int
__is_range_overlap(struct rb_node *node,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova = rb_entry(node, struct iova, node);

	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
		return 1;
	return 0;
}

static inline struct iova *
alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_iova_mem();
	if (iova) {
		iova->pfn_lo = pfn_lo;
		iova->pfn_hi = pfn_hi;
	}

	return iova;
}

static struct iova *
__insert_new_range(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_and_init_iova(pfn_lo, pfn_hi);
	if (iova)
		iova_insert_rbtree(&iovad->rbroot, iova, NULL);

	return iova;
}

static void
__adjust_overlap_range(struct iova *iova,
	unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < iova->pfn_lo)
		iova->pfn_lo = *pfn_lo;
	if (*pfn_hi > iova->pfn_hi)
		*pfn_lo = iova->pfn_hi + 1;
}

/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: - iova domain pointer
 * @pfn_lo: - lower page frame address
 * @pfn_hi: - higher pfn address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that this address is not dished out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;
	unsigned int overlap = 0;

	/* Don't allow nonsensical pfns */
	if (WARN_ON((pfn_hi | pfn_lo) > (ULLONG_MAX >> iova_shift(iovad))))
		return NULL;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
			iova = rb_entry(node, struct iova, node);
			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
			if ((pfn_lo >= iova->pfn_lo) &&
			    (pfn_hi <= iova->pfn_hi))
				goto finish;
			overlap = 1;

		} else if (overlap)
			break;
	}

	/* We are here either because this is the first reserved node
	 * or need to insert remaining non overlap addr range
	 */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
EXPORT_SYMBOL_GPL(reserve_iova);

/**
 * copy_reserved_iova - copies the reserved between domains
 * @from: - source domain from where to copy
 * @to: - destination domain where to copy
 * This function copies reserved iova's from one domain to
 * the other.
 */
void
copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
{
	unsigned long flags;
	struct rb_node *node;

	spin_lock_irqsave(&from->iova_rbtree_lock, flags);
	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
		struct iova *iova = rb_entry(node, struct iova, node);
		struct iova *new_iova;

		if (iova->pfn_lo == IOVA_ANCHOR)
			continue;

		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
		if (!new_iova)
			pr_err("Reserve iova range %lx@%lx failed\n",
			       iova->pfn_lo, iova->pfn_lo);
	}
	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(copy_reserved_iova);

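/*
 * Carve [pfn_lo, pfn_hi] out of an existing iova: the iova is removed from
 * the tree, any leftover range on either side is re-inserted as new nodes,
 * and the trimmed iova covering just [pfn_lo, pfn_hi] is returned to the
 * caller. Returns NULL if allocating the leftover nodes fails.
 */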
struct iova *
split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
		unsigned long pfn_lo, unsigned long pfn_hi)
{
	unsigned long flags;
	struct iova *prev = NULL, *next = NULL;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	if (iova->pfn_lo < pfn_lo) {
		prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
		if (prev == NULL)
			goto error;
	}
	if (iova->pfn_hi > pfn_hi) {
		next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
		if (next == NULL)
			goto error;
	}

	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);

	if (prev) {
		iova_insert_rbtree(&iovad->rbroot, prev, NULL);
		iova->pfn_lo = pfn_lo;
	}
	if (next) {
		iova_insert_rbtree(&iovad->rbroot, next, NULL);
		iova->pfn_hi = pfn_hi;
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return iova;

error:
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	if (prev)
		free_iova_mem(prev);
	return NULL;
}

/*
 * Magazine caches for IOVA ranges. For an introduction to magazines,
 * see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab
 * Allocator to Many CPUs and Arbitrary Resources" by Bonwick and Adams.
 * For simplicity, we use a static magazine size and don't implement the
 * dynamic size tuning described in the paper.
 */

#define IOVA_MAG_SIZE 128

struct iova_magazine {
	unsigned long size;
	unsigned long pfns[IOVA_MAG_SIZE];
};

struct iova_cpu_rcache {
	spinlock_t lock;
	struct iova_magazine *loaded;
	struct iova_magazine *prev;
};

static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
{
	return kzalloc(sizeof(struct iova_magazine), flags);
}

static void iova_magazine_free(struct iova_magazine *mag)
{
	kfree(mag);
}

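/*
 * Return every pfn held in a magazine to the rbtree allocator, taking the
 * rbtree lock once for the whole batch, then mark the magazine empty.
 */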
static void
iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
{
	unsigned long flags;
	int i;

	if (!mag)
		return;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);

	for (i = 0 ; i < mag->size; ++i) {
		struct iova *iova = private_find_iova(iovad, mag->pfns[i]);

		if (WARN_ON(!iova))
			continue;

		private_free_iova(iovad, iova);
	}

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	mag->size = 0;
}

static bool iova_magazine_full(struct iova_magazine *mag)
{
	return (mag && mag->size == IOVA_MAG_SIZE);
}

static bool iova_magazine_empty(struct iova_magazine *mag)
{
	return (!mag || mag->size == 0);
}

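/*
 * Pop a pfn that satisfies @limit_pfn from the magazine. The magazine is
 * unordered, so scan from the top for any suitable entry and swap it with
 * the last one; returns 0 if nothing in the magazine fits the limit.
 */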
static unsigned long iova_magazine_pop(struct iova_magazine *mag,
				       unsigned long limit_pfn)
{
	int i;
	unsigned long pfn;

	BUG_ON(iova_magazine_empty(mag));

	/* Only fall back to the rbtree if we have no suitable pfns at all */
	for (i = mag->size - 1; mag->pfns[i] > limit_pfn; i--)
		if (i == 0)
			return 0;

	/* Swap it to pop it */
	pfn = mag->pfns[i];
	mag->pfns[i] = mag->pfns[--mag->size];

	return pfn;
}

static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
{
	BUG_ON(iova_magazine_full(mag));

	mag->pfns[mag->size++] = pfn;
}

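/*
 * Set up one rcache per supported allocation order: a global depot of full
 * magazines plus a pair of per-CPU magazines ('loaded' and 'prev') that
 * serve allocations and frees without touching the rbtree lock.
 */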
static void init_iova_rcaches(struct iova_domain *iovad)
{
	struct iova_cpu_rcache *cpu_rcache;
	struct iova_rcache *rcache;
	unsigned int cpu;
	int i;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		spin_lock_init(&rcache->lock);
		rcache->depot_size = 0;
		rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache), cache_line_size());
		if (WARN_ON(!rcache->cpu_rcaches))
			continue;
		for_each_possible_cpu(cpu) {
			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
			spin_lock_init(&cpu_rcache->lock);
			cpu_rcache->loaded = iova_magazine_alloc(GFP_KERNEL);
			cpu_rcache->prev = iova_magazine_alloc(GFP_KERNEL);
		}
	}
}

/*
 * Try inserting IOVA range starting with 'iova_pfn' into 'rcache', and
 * return true on success. Can fail if rcache is full and we can't free
 * space, and free_iova() (our only caller) will then return the IOVA
 * range to the rbtree instead.
 */
static bool __iova_rcache_insert(struct iova_domain *iovad,
				 struct iova_rcache *rcache,
				 unsigned long iova_pfn)
{
	struct iova_magazine *mag_to_free = NULL;
	struct iova_cpu_rcache *cpu_rcache;
	bool can_insert = false;
	unsigned long flags;

	cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
	spin_lock_irqsave(&cpu_rcache->lock, flags);

	if (!iova_magazine_full(cpu_rcache->loaded)) {
		can_insert = true;
	} else if (!iova_magazine_full(cpu_rcache->prev)) {
		swap(cpu_rcache->prev, cpu_rcache->loaded);
		can_insert = true;
	} else {
		struct iova_magazine *new_mag = iova_magazine_alloc(GFP_ATOMIC);

		if (new_mag) {
			spin_lock(&rcache->lock);
			if (rcache->depot_size < MAX_GLOBAL_MAGS) {
				rcache->depot[rcache->depot_size++] =
						cpu_rcache->loaded;
			} else {
				mag_to_free = cpu_rcache->loaded;
			}
			spin_unlock(&rcache->lock);

			cpu_rcache->loaded = new_mag;
			can_insert = true;
		}
	}

	if (can_insert)
		iova_magazine_push(cpu_rcache->loaded, iova_pfn);

	spin_unlock_irqrestore(&cpu_rcache->lock, flags);

	if (mag_to_free) {
		iova_magazine_free_pfns(mag_to_free, iovad);
		iova_magazine_free(mag_to_free);
	}

	return can_insert;
}

static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn,
			       unsigned long size)
{
	unsigned int log_size = order_base_2(size);

	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
		return false;

	return __iova_rcache_insert(iovad, &iovad->rcaches[log_size], pfn);
}

/*
 * Caller wants to allocate a new IOVA range from 'rcache'. If we can
 * satisfy the request, return a matching non-NULL range and remove
 * it from the 'rcache'.
 */
static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
				       unsigned long limit_pfn)
{
	struct iova_cpu_rcache *cpu_rcache;
	unsigned long iova_pfn = 0;
	bool has_pfn = false;
	unsigned long flags;

	cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
	spin_lock_irqsave(&cpu_rcache->lock, flags);

	if (!iova_magazine_empty(cpu_rcache->loaded)) {
		has_pfn = true;
	} else if (!iova_magazine_empty(cpu_rcache->prev)) {
		swap(cpu_rcache->prev, cpu_rcache->loaded);
		has_pfn = true;
	} else {
		spin_lock(&rcache->lock);
		if (rcache->depot_size > 0) {
			iova_magazine_free(cpu_rcache->loaded);
			cpu_rcache->loaded = rcache->depot[--rcache->depot_size];
			has_pfn = true;
		}
		spin_unlock(&rcache->lock);
	}

	if (has_pfn)
		iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);

	spin_unlock_irqrestore(&cpu_rcache->lock, flags);

	return iova_pfn;
}

/*
 * Try to satisfy IOVA allocation range from rcache. Fail if requested
 * size is too big or the DMA limit we are given isn't satisfied by the
 * top element in the magazine.
 */
static unsigned long iova_rcache_get(struct iova_domain *iovad,
				     unsigned long size,
				     unsigned long limit_pfn)
{
	unsigned int log_size = order_base_2(size);

	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
		return 0;

	return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size);
}

/*
 * free rcache data structures.
 */
static void free_iova_rcaches(struct iova_domain *iovad)
{
	struct iova_rcache *rcache;
	struct iova_cpu_rcache *cpu_rcache;
	unsigned int cpu;
	int i, j;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		for_each_possible_cpu(cpu) {
			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
			iova_magazine_free(cpu_rcache->loaded);
			iova_magazine_free(cpu_rcache->prev);
		}
		free_percpu(rcache->cpu_rcaches);
		for (j = 0; j < rcache->depot_size; ++j)
			iova_magazine_free(rcache->depot[j]);
	}
}

/*
 * free all the IOVA ranges cached by a cpu (used when cpu is unplugged)
 */
void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
{
	struct iova_cpu_rcache *cpu_rcache;
	struct iova_rcache *rcache;
	unsigned long flags;
	int i;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
		spin_lock_irqsave(&cpu_rcache->lock, flags);
		iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
		iova_magazine_free_pfns(cpu_rcache->prev, iovad);
		spin_unlock_irqrestore(&cpu_rcache->lock, flags);
	}
}

MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
MODULE_LICENSE("GPL");