/*
 * Copyright (c) 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2013 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/iommu.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <rdma/ib_verbs.h>

#include "usnic_log.h"
#include "usnic_uiom.h"
#include "usnic_uiom_interval_tree.h"

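/*
 * Maximum number of scatterlist entries in the page_list[] tail of a
 * struct usnic_uiom_chunk, chosen so that a fully populated chunk still
 * fits within a single page.
 */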
#define USNIC_UIOM_PAGE_CHUNK						\
	((PAGE_SIZE - offsetof(struct usnic_uiom_chunk, page_list))	/\
	((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] -	\
	(void *) &((struct usnic_uiom_chunk *) 0)->page_list[0]))

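/*
 * IOMMU fault handler installed on the uiom domain in usnic_uiom_alloc_pd();
 * it only logs the faulting device, domain, and IOVA.
 */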
static int usnic_uiom_dma_fault(struct iommu_domain *domain,
				struct device *dev,
				unsigned long iova, int flags,
				void *token)
{
	usnic_err("Device %s iommu fault domain 0x%pK va 0x%lx flags 0x%x\n",
		dev_name(dev),
		domain, iova, flags);
	return -ENOSYS;
}

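/*
 * Undo usnic_uiom_get_pages(): optionally mark each pinned page dirty,
 * drop the page references, and free the chunk bookkeeping.
 */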
static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
{
	struct usnic_uiom_chunk *chunk, *tmp;
	struct page *page;
	struct scatterlist *sg;
	int i;
	dma_addr_t pa;

	list_for_each_entry_safe(chunk, tmp, chunk_list, list) {
		for_each_sg(chunk->page_list, sg, chunk->nents, i) {
			page = sg_page(sg);
			pa = sg_phys(sg);
			if (!PageDirty(page) && dirty)
				set_page_dirty_lock(page);
			put_page(page);
			usnic_dbg("pa: %pa\n", &pa);
		}
		kfree(chunk);
	}
}

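/*
 * Pin the user pages backing [addr, addr + size), charge them against the
 * RLIMIT_MEMLOCK pinned-page accounting of the calling process, and record
 * them as scatterlist chunks on uiomr->chunk_list. Returns 0 on success or
 * a negative errno.
 */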
static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
				int dmasync, struct usnic_uiom_reg *uiomr)
{
	struct list_head *chunk_list = &uiomr->chunk_list;
	struct page **page_list;
	struct scatterlist *sg;
	struct usnic_uiom_chunk *chunk;
	unsigned long locked;
	unsigned long lock_limit;
	unsigned long cur_base;
	unsigned long npages;
	int ret;
	int off;
	int i;
	int flags;
	dma_addr_t pa;
	unsigned int gup_flags;
	struct mm_struct *mm;

	/*
	 * If the combination of the addr and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((addr + size) < addr) || PAGE_ALIGN(addr + size) < (addr + size))
		return -EINVAL;

	if (!size)
		return -EINVAL;

	if (!can_do_mlock())
		return -EPERM;

	INIT_LIST_HEAD(chunk_list);

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;

	uiomr->owning_mm = mm = current->mm;
	down_read(&mm->mmap_sem);

	locked = atomic64_add_return(npages, &current->mm->pinned_vm);
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
		ret = -ENOMEM;
		goto out;
	}

	flags = IOMMU_READ | IOMMU_CACHE;
	flags |= (writable) ? IOMMU_WRITE : 0;
	gup_flags = FOLL_WRITE;
	gup_flags |= (writable) ? 0 : FOLL_FORCE;
	cur_base = addr & PAGE_MASK;
	ret = 0;

	while (npages) {
		ret = get_user_pages_longterm(cur_base,
					min_t(unsigned long, npages,
					PAGE_SIZE / sizeof(struct page *)),
					gup_flags, page_list, NULL);

		if (ret < 0)
			goto out;

		npages -= ret;
		off = 0;

		while (ret) {
			chunk = kmalloc(struct_size(chunk, page_list,
					min_t(int, ret, USNIC_UIOM_PAGE_CHUNK)),
					GFP_KERNEL);
			if (!chunk) {
				ret = -ENOMEM;
				goto out;
			}

			chunk->nents = min_t(int, ret, USNIC_UIOM_PAGE_CHUNK);
			sg_init_table(chunk->page_list, chunk->nents);
			for_each_sg(chunk->page_list, sg, chunk->nents, i) {
				sg_set_page(sg, page_list[i + off],
						PAGE_SIZE, 0);
				pa = sg_phys(sg);
				usnic_dbg("va: 0x%lx pa: %pa\n",
						cur_base + i*PAGE_SIZE, &pa);
			}
			cur_base += chunk->nents * PAGE_SIZE;
			ret -= chunk->nents;
			off += chunk->nents;
			list_add_tail(&chunk->list, chunk_list);
		}

		ret = 0;
	}

out:
	if (ret < 0) {
		usnic_uiom_put_pages(chunk_list, 0);
		atomic64_sub(npages, &current->mm->pinned_vm);
	} else
		mmgrab(uiomr->owning_mm);

	up_read(&mm->mmap_sem);
	free_page((unsigned long) page_list);
	return ret;
}

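/*
 * Unmap every interval in the list from the IOMMU domain, one page at a
 * time (see the workaround note in the loop).
 */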
static void usnic_uiom_unmap_sorted_intervals(struct list_head *intervals,
						struct usnic_uiom_pd *pd)
{
	struct usnic_uiom_interval_node *interval, *tmp;
	long unsigned va, size;

	list_for_each_entry_safe(interval, tmp, intervals, link) {
		va = interval->start << PAGE_SHIFT;
		size = ((interval->last - interval->start) + 1) << PAGE_SHIFT;
		while (size > 0) {
			/* Workaround for RH 970401 */
			usnic_dbg("va 0x%lx size 0x%lx", va, PAGE_SIZE);
			iommu_unmap(pd->domain, va, PAGE_SIZE);
			va += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
}

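/*
 * Tear down a registration: remove its VPN range from the PD's interval
 * tree, unmap it from the IOMMU domain, and unpin the pages. Pages are
 * dirtied only if the caller asked for it and the mapping was writable.
 */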
static void __usnic_uiom_reg_release(struct usnic_uiom_pd *pd,
					struct usnic_uiom_reg *uiomr,
					int dirty)
{
	int npages;
	unsigned long vpn_start, vpn_last;
	struct usnic_uiom_interval_node *interval, *tmp;
	int writable = 0;
	LIST_HEAD(rm_intervals);

	npages = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
	vpn_start = (uiomr->va & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	spin_lock(&pd->lock);
	usnic_uiom_remove_interval(&pd->root, vpn_start,
					vpn_last, &rm_intervals);
	usnic_uiom_unmap_sorted_intervals(&rm_intervals, pd);

	list_for_each_entry_safe(interval, tmp, &rm_intervals, link) {
		if (interval->flags & IOMMU_WRITE)
			writable = 1;
		list_del(&interval->link);
		kfree(interval);
	}

	usnic_uiom_put_pages(&uiomr->chunk_list, dirty & writable);
	spin_unlock(&pd->lock);
}

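/*
 * Walk the pinned chunks and the sorted interval list in step, coalescing
 * physically contiguous pages into single iommu_map() calls. On failure,
 * everything mapped so far is unmapped again.
 */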
static int usnic_uiom_map_sorted_intervals(struct list_head *intervals,
						struct usnic_uiom_reg *uiomr)
{
	int i, err;
	size_t size;
	struct usnic_uiom_chunk *chunk;
	struct usnic_uiom_interval_node *interval_node;
	dma_addr_t pa;
	dma_addr_t pa_start = 0;
	dma_addr_t pa_end = 0;
	long int va_start = -EINVAL;
	struct usnic_uiom_pd *pd = uiomr->pd;
	long int va = uiomr->va & PAGE_MASK;
	int flags = IOMMU_READ | IOMMU_CACHE;

	flags |= (uiomr->writable) ? IOMMU_WRITE : 0;
	chunk = list_first_entry(&uiomr->chunk_list, struct usnic_uiom_chunk,
					list);
	list_for_each_entry(interval_node, intervals, link) {
iter_chunk:
		for (i = 0; i < chunk->nents; i++, va += PAGE_SIZE) {
			pa = sg_phys(&chunk->page_list[i]);
			if ((va >> PAGE_SHIFT) < interval_node->start)
				continue;

			if ((va >> PAGE_SHIFT) == interval_node->start) {
				/* First page of the interval */
				va_start = va;
				pa_start = pa;
				pa_end = pa;
			}

			WARN_ON(va_start == -EINVAL);

			if ((pa_end + PAGE_SIZE != pa) &&
					(pa != pa_start)) {
				/* PAs are not contiguous */
				size = pa_end - pa_start + PAGE_SIZE;
				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
					va_start, &pa_start, size, flags);
				err = iommu_map(pd->domain, va_start, pa_start,
						size, flags);
				if (err) {
					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
						va_start, &pa_start, size, err);
					goto err_out;
				}
				va_start = va;
				pa_start = pa;
				pa_end = pa;
			}

			if ((va >> PAGE_SHIFT) == interval_node->last) {
				/* Last page of the interval */
				size = pa - pa_start + PAGE_SIZE;
				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
					va_start, &pa_start, size, flags);
				err = iommu_map(pd->domain, va_start, pa_start,
						size, flags);
				if (err) {
					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
						va_start, &pa_start, size, err);
					goto err_out;
				}
				break;
			}

			if (pa != pa_start)
				pa_end += PAGE_SIZE;
		}

		if (i == chunk->nents) {
			/*
			 * Hit last entry of the chunk,
			 * hence advance to next chunk
			 */
			chunk = list_first_entry(&chunk->list,
						struct usnic_uiom_chunk,
						list);
			goto iter_chunk;
		}
	}

	return 0;

err_out:
	usnic_uiom_unmap_sorted_intervals(intervals, pd);
	return err;
}

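/*
 * Register a user memory region with the protection domain: pin the pages,
 * compute the VPN intervals that are not already mapped, map those into the
 * IOMMU domain, and insert the new interval into the PD's interval tree.
 */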
struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
						unsigned long addr, size_t size,
						int writable, int dmasync)
{
	struct usnic_uiom_reg *uiomr;
	unsigned long va_base, vpn_start, vpn_last;
	unsigned long npages;
	int offset, err;
	LIST_HEAD(sorted_diff_intervals);

	/*
	 * The Intel IOMMU map throws an error if a translation entry is
	 * changed from read to write. This module cannot unmap and then
	 * remap the entry after fixing the permission, because that opens
	 * a small window where hw DMA may page fault.
	 * Hence, make all entries writable.
	 */
	writable = 1;

	va_base = addr & PAGE_MASK;
	offset = addr & ~PAGE_MASK;
	npages = PAGE_ALIGN(size + offset) >> PAGE_SHIFT;
	vpn_start = (addr & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	uiomr = kmalloc(sizeof(*uiomr), GFP_KERNEL);
	if (!uiomr)
		return ERR_PTR(-ENOMEM);

	uiomr->va = va_base;
	uiomr->offset = offset;
	uiomr->length = size;
	uiomr->writable = writable;
	uiomr->pd = pd;

	err = usnic_uiom_get_pages(addr, size, writable, dmasync,
					uiomr);
	if (err) {
		usnic_err("Failed get_pages vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_free_uiomr;
	}

	spin_lock(&pd->lock);
	err = usnic_uiom_get_intervals_diff(vpn_start, vpn_last,
						(writable) ? IOMMU_WRITE : 0,
						IOMMU_WRITE,
						&pd->root,
						&sorted_diff_intervals);
	if (err) {
		usnic_err("Failed disjoint interval vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_put_pages;
	}

	err = usnic_uiom_map_sorted_intervals(&sorted_diff_intervals, uiomr);
	if (err) {
		usnic_err("Failed map interval vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_put_intervals;
	}

	err = usnic_uiom_insert_interval(&pd->root, vpn_start, vpn_last,
					(writable) ? IOMMU_WRITE : 0);
	if (err) {
		usnic_err("Failed insert interval vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_unmap_intervals;
	}

	usnic_uiom_put_interval_set(&sorted_diff_intervals);
	spin_unlock(&pd->lock);

	return uiomr;

out_unmap_intervals:
	usnic_uiom_unmap_sorted_intervals(&sorted_diff_intervals, pd);
out_put_intervals:
	usnic_uiom_put_interval_set(&sorted_diff_intervals);
out_put_pages:
	usnic_uiom_put_pages(&uiomr->chunk_list, 0);
	spin_unlock(&pd->lock);
	mmdrop(uiomr->owning_mm);
out_free_uiomr:
	kfree(uiomr);
	return ERR_PTR(err);
}

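/*
 * Drop the mm reference taken in usnic_uiom_get_pages() and free the
 * registration.
 */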
static void __usnic_uiom_release_tail(struct usnic_uiom_reg *uiomr)
{
	mmdrop(uiomr->owning_mm);
	kfree(uiomr);
}

static inline size_t usnic_uiom_num_pages(struct usnic_uiom_reg *uiomr)
{
	return PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
}

void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr)
{
	__usnic_uiom_reg_release(uiomr->pd, uiomr, 1);

	atomic64_sub(usnic_uiom_num_pages(uiomr), &uiomr->owning_mm->pinned_vm);
	__usnic_uiom_release_tail(uiomr);
}

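/*
 * Allocate a protection domain backed by a new IOMMU domain on the PCI bus
 * and install the uiom fault handler on it.
 */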
struct usnic_uiom_pd *usnic_uiom_alloc_pd(void)
{
	struct usnic_uiom_pd *pd;
	void *domain;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->domain = domain = iommu_domain_alloc(&pci_bus_type);
	if (!domain) {
		usnic_err("Failed to allocate IOMMU domain");
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL);

	spin_lock_init(&pd->lock);
	INIT_LIST_HEAD(&pd->devs);

	return pd;
}

void usnic_uiom_dealloc_pd(struct usnic_uiom_pd *pd)
{
	iommu_domain_free(pd->domain);
	kfree(pd);
}

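/*
 * Attach a device to the PD's IOMMU domain and track it on pd->devs. The
 * attach is rejected if the IOMMU cannot provide cache-coherent mappings,
 * since all mappings in this file are created with IOMMU_CACHE.
 */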
int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
	struct usnic_uiom_dev *uiom_dev;
	int err;

	uiom_dev = kzalloc(sizeof(*uiom_dev), GFP_ATOMIC);
	if (!uiom_dev)
		return -ENOMEM;
	uiom_dev->dev = dev;

	err = iommu_attach_device(pd->domain, dev);
	if (err)
		goto out_free_dev;

	if (!iommu_capable(dev->bus, IOMMU_CAP_CACHE_COHERENCY)) {
		usnic_err("IOMMU of %s does not support cache coherency\n",
				dev_name(dev));
		err = -EINVAL;
		goto out_detach_device;
	}

	spin_lock(&pd->lock);
	list_add_tail(&uiom_dev->link, &pd->devs);
	pd->dev_cnt++;
	spin_unlock(&pd->lock);

	return 0;

out_detach_device:
	iommu_detach_device(pd->domain, dev);
out_free_dev:
	kfree(uiom_dev);
	return err;
}

void usnic_uiom_detach_dev_from_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
	struct usnic_uiom_dev *uiom_dev;
	int found = 0;

	spin_lock(&pd->lock);
	list_for_each_entry(uiom_dev, &pd->devs, link) {
		if (uiom_dev->dev == dev) {
			found = 1;
			break;
		}
	}

	if (!found) {
		usnic_err("Unable to free dev %s - not found\n",
				dev_name(dev));
		spin_unlock(&pd->lock);
		return;
	}

	list_del(&uiom_dev->link);
	pd->dev_cnt--;
	spin_unlock(&pd->lock);

	return iommu_detach_device(pd->domain, dev);
}

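/*
 * Return a NULL-terminated array of the devices currently attached to the
 * PD; callers release it with usnic_uiom_free_dev_list().
 */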
struct device **usnic_uiom_get_dev_list(struct usnic_uiom_pd *pd)
{
	struct usnic_uiom_dev *uiom_dev;
	struct device **devs;
	int i = 0;

	spin_lock(&pd->lock);
	devs = kcalloc(pd->dev_cnt + 1, sizeof(*devs), GFP_ATOMIC);
	if (!devs) {
		devs = ERR_PTR(-ENOMEM);
		goto out;
	}

	list_for_each_entry(uiom_dev, &pd->devs, link) {
		devs[i++] = uiom_dev->dev;
	}
out:
	spin_unlock(&pd->lock);
	return devs;
}

void usnic_uiom_free_dev_list(struct device **devs)
{
	kfree(devs);
}

int usnic_uiom_init(char *drv_name)
{
	if (!iommu_present(&pci_bus_type)) {
		usnic_err("IOMMU required but not present or enabled. USNIC QPs will not function w/o enabling IOMMU\n");
		return -EPERM;
	}

	return 0;
}