// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)	"iommu: " fmt

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/bitops.h>
#include <linux/property.h>
#include <linux/fsl/mc.h>
#include <linux/module.h>
#include <trace/events/iommu.h>

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);

static unsigned int iommu_def_domain_type __read_mostly;
static bool iommu_dma_strict __read_mostly = true;
static u32 iommu_cmd_line __read_mostly;

struct iommu_group {
	struct kobject kobj;
	struct kobject *devices_kobj;
	struct list_head devices;
	struct mutex mutex;
	struct blocking_notifier_head notifier;
	void *iommu_data;
	void (*iommu_data_release)(void *iommu_data);
	char *name;
	int id;
	struct iommu_domain *default_domain;
	struct iommu_domain *domain;
};

struct group_device {
	struct list_head list;
	struct device *dev;
	char *name;
};

struct iommu_group_attribute {
	struct attribute attr;
	ssize_t (*show)(struct iommu_group *group, char *buf);
	ssize_t (*store)(struct iommu_group *group,
			 const char *buf, size_t count);
};

static const char * const iommu_group_resv_type_string[] = {
	[IOMMU_RESV_DIRECT]		= "direct",
	[IOMMU_RESV_DIRECT_RELAXABLE]	= "direct-relaxable",
	[IOMMU_RESV_RESERVED]		= "reserved",
	[IOMMU_RESV_MSI]		= "msi",
	[IOMMU_RESV_SW_MSI]		= "msi",
};

#define IOMMU_CMD_LINE_DMA_API	BIT(0)

static void iommu_set_cmd_line_dma_api(void)
{
	iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
}

static bool iommu_cmd_line_dma_api(void)
{
	return !!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API);
}

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
struct iommu_group_attribute iommu_group_attr_##_name =	\
	__ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)	\
	container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)		\
	container_of(_kobj, struct iommu_group, kobj)

static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);

/*
 * Use a function instead of an array here because the domain-type is a
 * bit-field, so an array would waste memory.
 */
static const char *iommu_domain_type_str(unsigned int t)
{
	switch (t) {
	case IOMMU_DOMAIN_BLOCKED:
		return "Blocked";
	case IOMMU_DOMAIN_IDENTITY:
		return "Passthrough";
	case IOMMU_DOMAIN_UNMANAGED:
		return "Unmanaged";
	case IOMMU_DOMAIN_DMA:
		return "Translated";
	default:
		return "Unknown";
	}
}

static int __init iommu_subsys_init(void)
{
	bool cmd_line = iommu_cmd_line_dma_api();

	if (!cmd_line) {
		if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
			iommu_set_default_passthrough(false);
		else
			iommu_set_default_translated(false);

		if (iommu_default_passthrough() && mem_encrypt_active()) {
			pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
			iommu_set_default_translated(false);
		}
	}

	pr_info("Default domain type: %s %s\n",
		iommu_domain_type_str(iommu_def_domain_type),
		cmd_line ? "(set via kernel command line)" : "");

	return 0;
}
subsys_initcall(iommu_subsys_init);

int iommu_device_register(struct iommu_device *iommu)
{
	spin_lock(&iommu_device_lock);
	list_add_tail(&iommu->list, &iommu_device_list);
	spin_unlock(&iommu_device_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_device_register);

void iommu_device_unregister(struct iommu_device *iommu)
{
	spin_lock(&iommu_device_lock);
	list_del(&iommu->list);
	spin_unlock(&iommu_device_lock);
}
EXPORT_SYMBOL_GPL(iommu_device_unregister);

static struct dev_iommu *dev_iommu_get(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;

	if (param)
		return param;

	param = kzalloc(sizeof(*param), GFP_KERNEL);
	if (!param)
		return NULL;

	mutex_init(&param->lock);
	dev->iommu = param;
	return param;
}

static void dev_iommu_free(struct device *dev)
{
	iommu_fwspec_free(dev);
	kfree(dev->iommu);
	dev->iommu = NULL;
}

int iommu_probe_device(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	int ret;

	WARN_ON(dev->iommu_group);
	if (!ops)
		return -EINVAL;

	if (!dev_iommu_get(dev))
		return -ENOMEM;

	if (!try_module_get(ops->owner)) {
		ret = -EINVAL;
		goto err_free_dev_param;
	}

	ret = ops->add_device(dev);
	if (ret)
		goto err_module_put;

	return 0;

err_module_put:
	module_put(ops->owner);
err_free_dev_param:
	dev_iommu_free(dev);
	return ret;
}

void iommu_release_device(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (dev->iommu_group)
		ops->remove_device(dev);

	if (dev->iommu) {
		module_put(ops->owner);
		dev_iommu_free(dev);
	}
}

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group);
static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group);

static int __init iommu_set_def_domain_type(char *str)
{
	bool pt;
	int ret;

	ret = kstrtobool(str, &pt);
	if (ret)
		return ret;

	if (pt)
		iommu_set_default_passthrough(true);
	else
		iommu_set_default_translated(true);

	return 0;
}
early_param("iommu.passthrough", iommu_set_def_domain_type);

static int __init iommu_dma_setup(char *str)
{
	return kstrtobool(str, &iommu_dma_strict);
}
early_param("iommu.strict", iommu_dma_setup);
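
/*
 * Example (illustrative): the two parameters above are given on the kernel
 * command line, e.g.
 *
 *	iommu.passthrough=1	- use identity (passthrough) default domains
 *	iommu.strict=0		- invalidate IOTLBs lazily instead of strictly
 */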

static ssize_t iommu_group_attr_show(struct kobject *kobj,
				     struct attribute *__attr, char *buf)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->show)
		ret = attr->show(group, buf);
	return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
				      struct attribute *__attr,
				      const char *buf, size_t count)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->store)
		ret = attr->store(group, buf, count);
	return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
	.show = iommu_group_attr_show,
	.store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{
	return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
				    struct iommu_group_attribute *attr)
{
	sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
	return sprintf(buf, "%s\n", group->name);
}

/**
 * iommu_insert_resv_region - Insert a new region in the
 * list of reserved regions.
 * @new: new region to insert
 * @regions: list of regions
 *
 * Elements are sorted by start address and overlapping segments
 * of the same type are merged.
 */
int iommu_insert_resv_region(struct iommu_resv_region *new,
			     struct list_head *regions)
{
	struct iommu_resv_region *iter, *tmp, *nr, *top;
	LIST_HEAD(stack);

	nr = iommu_alloc_resv_region(new->start, new->length,
				     new->prot, new->type);
	if (!nr)
		return -ENOMEM;

	/* First add the new element based on start address sorting */
	list_for_each_entry(iter, regions, list) {
		if (nr->start < iter->start ||
		    (nr->start == iter->start && nr->type <= iter->type))
			break;
	}
	list_add_tail(&nr->list, &iter->list);

	/* Merge overlapping segments of type nr->type in @regions, if any */
	list_for_each_entry_safe(iter, tmp, regions, list) {
		phys_addr_t top_end, iter_end = iter->start + iter->length - 1;

		/* no merge needed on elements of different types than @new */
		if (iter->type != new->type) {
			list_move_tail(&iter->list, &stack);
			continue;
		}

		/* look for the last stack element of same type as @iter */
		list_for_each_entry_reverse(top, &stack, list)
			if (top->type == iter->type)
				goto check_overlap;

		list_move_tail(&iter->list, &stack);
		continue;

check_overlap:
		top_end = top->start + top->length - 1;

		if (iter->start > top_end + 1) {
			list_move_tail(&iter->list, &stack);
		} else {
			top->length = max(top_end, iter_end) - top->start + 1;
			list_del(&iter->list);
			kfree(iter);
		}
	}
	list_splice(&stack, regions);
	return 0;
}

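/*
 * Worked example (hypothetical addresses): with a "direct" region
 * [0x3000 - 0x3fff] already in the list, inserting another "direct"
 * region [0x1000 - 0x4fff] leaves a single merged entry
 * [0x1000 - 0x4fff], since the two overlap and share a type. A
 * "reserved" region covering the same range would stay a separate
 * entry, because only regions of the new element's type are merged.
 */
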
static int
iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
				 struct list_head *group_resv_regions)
{
	struct iommu_resv_region *entry;
	int ret = 0;

	list_for_each_entry(entry, dev_resv_regions, list) {
		ret = iommu_insert_resv_region(entry, group_resv_regions);
		if (ret)
			break;
	}
	return ret;
}

int iommu_get_group_resv_regions(struct iommu_group *group,
				 struct list_head *head)
{
	struct group_device *device;
	int ret = 0;

	mutex_lock(&group->mutex);
	list_for_each_entry(device, &group->devices, list) {
		struct list_head dev_resv_regions;

		INIT_LIST_HEAD(&dev_resv_regions);
		iommu_get_resv_regions(device->dev, &dev_resv_regions);
		ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
		iommu_put_resv_regions(device->dev, &dev_resv_regions);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);

static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
					     char *buf)
{
	struct iommu_resv_region *region, *next;
	struct list_head group_resv_regions;
	char *str = buf;

	INIT_LIST_HEAD(&group_resv_regions);
	iommu_get_group_resv_regions(group, &group_resv_regions);

	list_for_each_entry_safe(region, next, &group_resv_regions, list) {
		str += sprintf(str, "0x%016llx 0x%016llx %s\n",
			       (long long int)region->start,
			       (long long int)(region->start +
						region->length - 1),
			       iommu_group_resv_type_string[region->type]);
		kfree(region);
	}

	return (str - buf);
}

static ssize_t iommu_group_show_type(struct iommu_group *group,
				     char *buf)
{
	char *type = "unknown\n";

	if (group->default_domain) {
		switch (group->default_domain->type) {
		case IOMMU_DOMAIN_BLOCKED:
			type = "blocked\n";
			break;
		case IOMMU_DOMAIN_IDENTITY:
			type = "identity\n";
			break;
		case IOMMU_DOMAIN_UNMANAGED:
			type = "unmanaged\n";
			break;
		case IOMMU_DOMAIN_DMA:
			type = "DMA\n";
			break;
		}
	}
	strcpy(buf, type);

	return strlen(type);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static IOMMU_GROUP_ATTR(reserved_regions, 0444,
			iommu_group_show_resv_regions, NULL);

static IOMMU_GROUP_ATTR(type, 0444, iommu_group_show_type, NULL);

static void iommu_group_release(struct kobject *kobj)
{
	struct iommu_group *group = to_iommu_group(kobj);

	pr_debug("Releasing group %d\n", group->id);

	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

	ida_simple_remove(&iommu_group_ida, group->id);

	if (group->default_domain)
		iommu_domain_free(group->default_domain);

	kfree(group->name);
	kfree(group);
}

static struct kobj_type iommu_group_ktype = {
	.sysfs_ops = &iommu_group_sysfs_ops,
	.release = iommu_group_release,
};

/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group. The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added. Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
	struct iommu_group *group;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
	if (ret < 0) {
		kfree(group);
		return ERR_PTR(ret);
	}
	group->id = ret;

	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
		ida_simple_remove(&iommu_group_ida, group->id);
		kobject_put(&group->kobj);
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj); /* triggers .release & free */
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group. We can therefore
	 * use the devices_kobj for reference counting.
	 */
	kobject_put(&group->kobj);

	ret = iommu_group_create_file(group,
				      &iommu_group_attr_reserved_regions);
	if (ret)
		return ERR_PTR(ret);

	ret = iommu_group_create_file(group, &iommu_group_attr_type);
	if (ret)
		return ERR_PTR(ret);

	pr_debug("Allocated group %d\n", group->id);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);
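
/*
 * Usage sketch (hypothetical driver code, error handling trimmed):
 *
 *	struct iommu_group *group = iommu_group_alloc();
 *
 *	if (IS_ERR(group))
 *		return PTR_ERR(group);
 *	iommu_group_set_name(group, "my-group");
 *	iommu_group_add_device(group, dev);
 *
 * and finally drop the initial reference with iommu_group_put(group);
 * the group then lives for as long as it has devices or external
 * references.
 */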

struct iommu_group *iommu_group_get_by_id(int id)
{
	struct kobject *group_kobj;
	struct iommu_group *group;
	const char *name;

	if (!iommu_group_kset)
		return NULL;

	name = kasprintf(GFP_KERNEL, "%d", id);
	if (!name)
		return NULL;

	group_kobj = kset_find_obj(iommu_group_kset, name);
	kfree(name);

	if (!group_kobj)
		return NULL;

	group = container_of(group_kobj, struct iommu_group, kobj);
	BUG_ON(group->id != id);

	kobject_get(group->devices_kobj);
	kobject_put(&group->kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_by_id);

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations. This function provides a way to retrieve it. Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations. This function provides a way to set the data after
 * the group has been allocated. Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
{
	group->iommu_data = iommu_data;
	group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
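
/*
 * Usage sketch (hypothetical driver code): attach driver-private data to
 * a group and let the group release it, e.g.
 *
 *	struct my_group_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
 *
 *	iommu_group_set_iommudata(group, data, my_group_data_release);
 *
 * where my_group_data_release() is a driver function that kfree()s the
 * data when the group is released.
 */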

/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group. When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
	int ret;

	if (group->name) {
		iommu_group_remove_file(group, &iommu_group_attr_name);
		kfree(group->name);
		group->name = NULL;
		if (!name)
			return 0;
	}

	group->name = kstrdup(name, GFP_KERNEL);
	if (!group->name)
		return -ENOMEM;

	ret = iommu_group_create_file(group, &iommu_group_attr_name);
	if (ret) {
		kfree(group->name);
		group->name = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);

static int iommu_group_create_direct_mappings(struct iommu_group *group,
					      struct device *dev)
{
	struct iommu_domain *domain = group->default_domain;
	struct iommu_resv_region *entry;
	struct list_head mappings;
	unsigned long pg_size;
	int ret = 0;

	if (!domain || domain->type != IOMMU_DOMAIN_DMA)
		return 0;

	BUG_ON(!domain->pgsize_bitmap);

	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
	INIT_LIST_HEAD(&mappings);

	iommu_get_resv_regions(dev, &mappings);

	/* We need to consider overlapping regions for different devices */
	list_for_each_entry(entry, &mappings, list) {
		dma_addr_t start, end, addr;

		if (domain->ops->apply_resv_region)
			domain->ops->apply_resv_region(dev, domain, entry);

		start = ALIGN(entry->start, pg_size);
		end = ALIGN(entry->start + entry->length, pg_size);

		if (entry->type != IOMMU_RESV_DIRECT &&
		    entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
			continue;

		for (addr = start; addr < end; addr += pg_size) {
			phys_addr_t phys_addr;

			phys_addr = iommu_iova_to_phys(domain, addr);
			if (phys_addr)
				continue;

			ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
			if (ret)
				goto out;
		}

	}

	iommu_flush_tlb_all(domain);

out:
	iommu_put_resv_regions(dev, &mappings);

	return ret;
}

static bool iommu_is_attach_deferred(struct iommu_domain *domain,
				     struct device *dev)
{
	if (domain->ops->is_attach_deferred)
		return domain->ops->is_attach_deferred(domain, dev);

	return false;
}

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group. Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
	int ret, i = 0;
	struct group_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device->dev = dev;

	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
	if (ret)
		goto err_free_device;

	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
	if (!device->name) {
		ret = -ENOMEM;
		goto err_remove_link;
	}

	ret = sysfs_create_link_nowarn(group->devices_kobj,
				       &dev->kobj, device->name);
	if (ret) {
		if (ret == -EEXIST && i >= 0) {
			/*
			 * Account for the slim chance of collision
			 * and append an instance to the name.
			 */
			kfree(device->name);
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}
		goto err_free_name;
	}

	kobject_get(group->devices_kobj);

	dev->iommu_group = group;

	iommu_group_create_direct_mappings(group, dev);

	mutex_lock(&group->mutex);
	list_add_tail(&device->list, &group->devices);
	if (group->domain && !iommu_is_attach_deferred(group->domain, dev))
		ret = __iommu_attach_device(group->domain, dev);
	mutex_unlock(&group->mutex);
	if (ret)
		goto err_put_group;

	/* Notify any listeners about change to group. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);

	trace_add_device_to_group(group->id, dev);

	dev_info(dev, "Adding to iommu group %d\n", group->id);

	return 0;

err_put_group:
	mutex_lock(&group->mutex);
	list_del(&device->list);
	mutex_unlock(&group->mutex);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
	sysfs_remove_link(group->devices_kobj, device->name);
err_free_name:
	kfree(device->name);
err_remove_link:
	sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
	kfree(device);
	dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group. This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct group_device *tmp_device, *device = NULL;

	dev_info(dev, "Removing from iommu group %d\n", group->id);

	/* Pre-notify listeners that a device is being removed. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);

	mutex_lock(&group->mutex);
	list_for_each_entry(tmp_device, &group->devices, list) {
		if (tmp_device->dev == dev) {
			device = tmp_device;
			list_del(&device->list);
			break;
		}
	}
	mutex_unlock(&group->mutex);

	if (!device)
		return;

	sysfs_remove_link(group->devices_kobj, device->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

	trace_remove_device_from_group(group->id, dev);

	kfree(device->name);
	kfree(device);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

static int iommu_group_device_count(struct iommu_group *group)
{
	struct group_device *entry;
	int ret = 0;

	list_for_each_entry(entry, &group->devices, list)
		ret++;

	return ret;
}

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
				      int (*fn)(struct device *, void *))
{
	struct group_device *device;
	int ret = 0;

	list_for_each_entry(device, &group->devices, list) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
	return ret;
}

int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_group_for_each_dev(group, data, fn);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
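
/*
 * Usage sketch (hypothetical caller code): count the devices in a group
 * with a callback, e.g.
 *
 *	static int count_dev(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *	iommu_group_for_each_dev(group, &count, count_dev);
 *
 * A non-zero return from the callback stops the iteration early.
 */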

/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device. If found, the group is returned and the group
 * reference is incremented, else NULL.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references on an
 * existing group. Returns the given group for convenience.
 */
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
	kobject_get(group->devices_kobj);
	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_ref_get);

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group. Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

/**
 * iommu_group_register_notifier - Register a notifier for group changes
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * This function allows iommu group users to track changes in a group.
 * See include/linux/iommu.h for actions sent via this notifier. Caller
 * should hold a reference to the group throughout notifier registration.
 */
int iommu_group_register_notifier(struct iommu_group *group,
				  struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_register_notifier);

/**
 * iommu_group_unregister_notifier - Unregister a notifier
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * Unregister a previously registered group notifier block.
 */
int iommu_group_unregister_notifier(struct iommu_group *group,
				    struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
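
/*
 * Usage sketch (hypothetical listener code): track devices entering and
 * leaving a group, e.g.
 *
 *	static int my_notify(struct notifier_block *nb, unsigned long action,
 *			     void *data)
 *	{
 *		struct device *dev = data;
 *
 *		if (action == IOMMU_GROUP_NOTIFY_ADD_DEVICE)
 *			dev_info(dev, "joined the group\n");
 *		else if (action == IOMMU_GROUP_NOTIFY_DEL_DEVICE)
 *			dev_info(dev, "leaving the group\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_notify };
 *
 *	iommu_group_register_notifier(group, &my_nb);
 */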

/**
 * iommu_register_device_fault_handler() - Register a device fault handler
 * @dev: the device
 * @handler: the fault handler
 * @data: private data passed as argument to the handler
 *
 * When an IOMMU fault event is received, this handler gets called with the
 * fault event and data as arguments. The handler should return 0 on success. If
 * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
 * complete the fault by calling iommu_page_response() with one of the following
 * response codes:
 * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
 * - IOMMU_PAGE_RESP_INVALID: terminate the fault
 * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
 *   page faults if possible.
 *
 * Return 0 if the fault handler was installed successfully, or an error.
 */
int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data)
{
	struct dev_iommu *param = dev->iommu;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);
	/* Only allow one fault handler registered for each device */
	if (param->fault_param) {
		ret = -EBUSY;
		goto done_unlock;
	}

	get_device(dev);
	param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
	if (!param->fault_param) {
		put_device(dev);
		ret = -ENOMEM;
		goto done_unlock;
	}
	param->fault_param->handler = handler;
	param->fault_param->data = data;
	mutex_init(&param->fault_param->lock);
	INIT_LIST_HEAD(&param->fault_param->faults);

done_unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);
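
/*
 * Usage sketch (hypothetical consumer code): the handler matches
 * iommu_dev_fault_handler_t and receives the private pointer registered
 * here, e.g.
 *
 *	static int my_fault_handler(struct iommu_fault *fault, void *data)
 *	{
 *		struct my_driver_data *mydata = data;
 *
 *		return my_handle_fault(mydata, fault);
 *	}
 *
 *	iommu_register_device_fault_handler(dev, my_fault_handler, mydata);
 *
 * For recoverable IOMMU_FAULT_PAGE_REQ faults the handler (or a worker
 * it schedules) must later complete the fault via iommu_page_response().
 */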

/**
 * iommu_unregister_device_fault_handler() - Unregister the device fault handler
 * @dev: the device
 *
 * Remove the device fault handler installed with
 * iommu_register_device_fault_handler().
 *
 * Return 0 on success, or an error.
 */
int iommu_unregister_device_fault_handler(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);

	if (!param->fault_param)
		goto unlock;

	/* we cannot unregister handler if there are pending faults */
	if (!list_empty(&param->fault_param->faults)) {
		ret = -EBUSY;
		goto unlock;
	}

	kfree(param->fault_param);
	param->fault_param = NULL;
	put_device(dev);
unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);

/**
 * iommu_report_device_fault() - Report fault event to device driver
 * @dev: the device
 * @evt: fault event data
 *
 * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
 * handler. When this function fails and the fault is recoverable, it is the
 * caller's responsibility to complete the fault.
 *
 * Return 0 on success, or an error.
 */
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
	struct dev_iommu *param = dev->iommu;
	struct iommu_fault_event *evt_pending = NULL;
	struct iommu_fault_param *fparam;
	int ret = 0;

	if (!param || !evt)
		return -EINVAL;

	/* we only report device fault if there is a handler registered */
	mutex_lock(&param->lock);
	fparam = param->fault_param;
	if (!fparam || !fparam->handler) {
		ret = -EINVAL;
		goto done_unlock;
	}

	if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
	    (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
		evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
				      GFP_KERNEL);
		if (!evt_pending) {
			ret = -ENOMEM;
			goto done_unlock;
		}
		mutex_lock(&fparam->lock);
		list_add_tail(&evt_pending->list, &fparam->faults);
		mutex_unlock(&fparam->lock);
	}

	ret = fparam->handler(&evt->fault, fparam->data);
	if (ret && evt_pending) {
		mutex_lock(&fparam->lock);
		list_del(&evt_pending->list);
		mutex_unlock(&fparam->lock);
		kfree(evt_pending);
	}
done_unlock:
	mutex_unlock(&param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_report_device_fault);

int iommu_page_response(struct device *dev,
			struct iommu_page_response *msg)
{
	bool pasid_valid;
	int ret = -EINVAL;
	struct iommu_fault_event *evt;
	struct iommu_fault_page_request *prm;
	struct dev_iommu *param = dev->iommu;
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain || !domain->ops->page_response)
		return -ENODEV;

	if (!param || !param->fault_param)
		return -EINVAL;

	if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
	    msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
		return -EINVAL;

	/* Only send response if there is a fault report pending */
	mutex_lock(&param->fault_param->lock);
	if (list_empty(&param->fault_param->faults)) {
		dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
		goto done_unlock;
	}
	/*
	 * Check if we have a matching page request pending to respond,
	 * otherwise return -EINVAL
	 */
	list_for_each_entry(evt, &param->fault_param->faults, list) {
		prm = &evt->fault.prm;
		pasid_valid = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;

		if ((pasid_valid && prm->pasid != msg->pasid) ||
		    prm->grpid != msg->grpid)
			continue;

		/* Sanitize the reply */
		msg->flags = pasid_valid ? IOMMU_PAGE_RESP_PASID_VALID : 0;

		ret = domain->ops->page_response(dev, evt, msg);
		list_del(&evt->list);
		kfree(evt);
		break;
	}

done_unlock:
	mutex_unlock(&param->fault_param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_page_response);
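
/*
 * Usage sketch (hypothetical consumer code): completing a recoverable
 * fault that was reported through the registered handler, e.g.
 *
 *	struct iommu_page_response resp = {
 *		.version	= IOMMU_PAGE_RESP_VERSION_1,
 *		.grpid		= fault->prm.grpid,
 *		.pasid		= fault->prm.pasid,
 *		.flags		= IOMMU_PAGE_RESP_PASID_VALID,
 *		.code		= IOMMU_PAGE_RESP_SUCCESS,
 *	};
 *
 *	iommu_page_response(dev, &resp);
 */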

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding. This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as it passes through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups. For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
							unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
		return NULL;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus ||
		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
			continue;

		group = get_pci_alias_group(tmp, devfns);
		if (group) {
			pci_dev_put(tmp);
			return group;
		}
	}

	return NULL;
}

/*
 * Look for aliases to or from the given device for existing groups. DMA
 * aliases are only supported on the same bus, therefore the search
 * space is quite small (especially since we're really only looking at pcie
 * device, and therefore only expect multiple slots on the root complex or
 * downstream switch ports). It's conceivable though that a pair of
 * multifunction devices could have aliases between them that would cause a
 * loop. To prevent this, we use a bitmap to track where we've been.
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
		return NULL;

	group = iommu_group_get(&pdev->dev);
	if (group)
		return group;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus)
			continue;

		/* We alias them or they alias us */
		if (pci_devs_are_dma_aliases(pdev, tmp)) {
			group = get_pci_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}

			group = get_pci_function_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}
		}
	}

	return NULL;
}

struct group_for_pci_data {
	struct pci_dev *pdev;
	struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device. Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct group_for_pci_data *data = opaque;

	data->pdev = pdev;
	data->group = iommu_group_get(&pdev->dev);

	return data->group != NULL;
}

/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{
	return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(generic_device_group);

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct group_for_pci_data data;
	struct pci_bus *bus;
	struct iommu_group *group = NULL;
	u64 devfns[4] = { 0 };

	if (WARN_ON(!dev_is_pci(dev)))
		return ERR_PTR(-EINVAL);

	/*
	 * Find the upstream DMA alias for the device. A device must not
	 * be aliased due to topology in order to have its own IOMMU group.
	 * If we find an alias along the way that already belongs to a
	 * group, use it.
	 */
	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
		return data.group;

	pdev = data.pdev;

	/*
	 * Continue upstream from the point of minimum IOMMU granularity
	 * due to aliases to the point where devices are protected from
	 * peer-to-peer DMA by PCI ACS. Again, if we find an existing
	 * group, use it.
	 */
	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		pdev = bus->self;

		group = iommu_group_get(&pdev->dev);
		if (group)
			return group;
	}

	/*
	 * Look for existing groups on device aliases. If we alias another
	 * device or another device aliases us, use the same group.
	 */
	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/*
	 * Look for existing groups on non-isolated functions on the same
	 * slot and aliases of those functions, if any. No need to clear
	 * the search bitmap, the tested devfns are still valid.
	 */
	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/* No shared group found, allocate new */
	return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(pci_device_group);

/* Get the IOMMU group for device on fsl-mc bus */
struct iommu_group *fsl_mc_device_group(struct device *dev)
{
	struct device *cont_dev = fsl_mc_cont_dev(dev);
	struct iommu_group *group;

	group = iommu_group_get(cont_dev);
	if (!group)
		group = iommu_group_alloc();
	return group;
}
EXPORT_SYMBOL_GPL(fsl_mc_device_group);

/**
 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
 * @dev: target device
 *
 * This function is intended to be called by IOMMU drivers and extended to
 * support common, bus-defined algorithms when determining or creating the
 * IOMMU group for a device. On success, the caller will hold a reference
 * to the returned IOMMU group, which will already include the provided
 * device. The reference should be released with iommu_group_put().
 */
struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (group)
		return group;

	if (!ops)
		return ERR_PTR(-EINVAL);

	group = ops->device_group(dev);
	if (WARN_ON_ONCE(group == NULL))
		return ERR_PTR(-EINVAL);

	if (IS_ERR(group))
		return group;

	/*
	 * Try to allocate a default domain - needs support from the
	 * IOMMU driver.
	 */
	if (!group->default_domain) {
		struct iommu_domain *dom;

		dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
		if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
			dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
			if (dom) {
				dev_warn(dev,
					 "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
					 iommu_def_domain_type);
			}
		}

		group->default_domain = dom;
		if (!group->domain)
			group->domain = dom;

		if (dom && !iommu_dma_strict) {
			int attr = 1;
			iommu_domain_set_attr(dom,
					      DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
					      &attr);
		}
	}

	ret = iommu_group_add_device(group, dev);
	if (ret) {
		iommu_group_put(group);
		return ERR_PTR(ret);
	}

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_for_dev);
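
/*
 * Usage sketch (hypothetical driver code): an IOMMU driver typically
 * calls this from its ->add_device() callback, e.g.
 *
 *	static int my_iommu_add_device(struct device *dev)
 *	{
 *		struct iommu_group *group = iommu_group_get_for_dev(dev);
 *
 *		if (IS_ERR(group))
 *			return PTR_ERR(group);
 *
 *		iommu_group_put(group);
 *		return 0;
 *	}
 *
 * The put drops only the lookup reference; the device itself keeps the
 * group alive until iommu_group_remove_device() is called.
 */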
Alex Williamson104a1c12014-07-03 09:51:18 -06001442
Joerg Roedel6827ca82015-05-28 18:41:35 +02001443struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
1444{
1445 return group->default_domain;
1446}
1447
Alex Williamson14604322011-10-21 15:56:05 -04001448static int add_iommu_group(struct device *dev, void *data)
1449{
Joerg Roedelcc5aed42018-11-30 10:31:59 +01001450 int ret = iommu_probe_device(dev);
Joerg Roedel38667f12015-06-29 10:16:08 +02001451
1452 /*
1453 * We ignore -ENODEV errors for now, as they just mean that the
1454 * device is not translated by an IOMMU. We still care about
1455 * other errors and fail to initialize when they happen.
1456 */
1457 if (ret == -ENODEV)
1458 ret = 0;
1459
1460 return ret;
Alex Williamson14604322011-10-21 15:56:05 -04001461}
1462
Joerg Roedel8da30142015-05-28 18:41:27 +02001463static int remove_iommu_group(struct device *dev, void *data)
1464{
Joerg Roedelcc5aed42018-11-30 10:31:59 +01001465 iommu_release_device(dev);
Alex Williamson14604322011-10-21 15:56:05 -04001466
1467 return 0;
1468}
1469
Alex Williamsond72e31c2012-05-30 14:18:53 -06001470static int iommu_bus_notifier(struct notifier_block *nb,
1471 unsigned long action, void *data)
Alex Williamson14604322011-10-21 15:56:05 -04001472{
Alex Williamsond72e31c2012-05-30 14:18:53 -06001473 unsigned long group_action = 0;
Joerg Roedelcc5aed42018-11-30 10:31:59 +01001474 struct device *dev = data;
1475 struct iommu_group *group;
Alex Williamson14604322011-10-21 15:56:05 -04001476
Alex Williamsond72e31c2012-05-30 14:18:53 -06001477 /*
1478 * ADD/DEL call into iommu driver ops if provided, which may
1479 * result in ADD/DEL notifiers to group->notifier
1480 */
1481 if (action == BUS_NOTIFY_ADD_DEVICE) {
Joerg Roedelcc5aed42018-11-30 10:31:59 +01001482 int ret;
zhichang.yuan3ba87752017-04-18 20:51:48 +08001483
Joerg Roedelcc5aed42018-11-30 10:31:59 +01001484 ret = iommu_probe_device(dev);
1485 return (ret) ? NOTIFY_DONE : NOTIFY_OK;
Joerg Roedel843cb6d2015-05-28 18:41:28 +02001486 } else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
Joerg Roedelcc5aed42018-11-30 10:31:59 +01001487 iommu_release_device(dev);
1488 return NOTIFY_OK;
Alex Williamsond72e31c2012-05-30 14:18:53 -06001489 }
Alex Williamson14604322011-10-21 15:56:05 -04001490
Alex Williamsond72e31c2012-05-30 14:18:53 -06001491 /*
1492 * Remaining BUS_NOTIFYs get filtered and republished to the
1493 * group, if anyone is listening
1494 */
1495 group = iommu_group_get(dev);
1496 if (!group)
1497 return 0;
1498
1499 switch (action) {
1500 case BUS_NOTIFY_BIND_DRIVER:
1501 group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
1502 break;
1503 case BUS_NOTIFY_BOUND_DRIVER:
1504 group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
1505 break;
1506 case BUS_NOTIFY_UNBIND_DRIVER:
1507 group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
1508 break;
1509 case BUS_NOTIFY_UNBOUND_DRIVER:
1510 group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
1511 break;
1512 }
1513
1514 if (group_action)
1515 blocking_notifier_call_chain(&group->notifier,
1516 group_action, dev);
1517
1518 iommu_group_put(group);
Alex Williamson14604322011-10-21 15:56:05 -04001519 return 0;
1520}
1521
Mark Salterfb3e3062014-09-21 13:58:24 -04001522static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
Joerg Roedelfc2100e2008-11-26 17:21:24 +01001523{
Mark Salterfb3e3062014-09-21 13:58:24 -04001524 int err;
1525 struct notifier_block *nb;
Thierry Redingb22f6432014-06-27 09:03:12 +02001526
Mark Salterfb3e3062014-09-21 13:58:24 -04001527 nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
1528 if (!nb)
1529 return -ENOMEM;
1530
1531 nb->notifier_call = iommu_bus_notifier;
1532
1533 err = bus_register_notifier(bus, nb);
Joerg Roedel8da30142015-05-28 18:41:27 +02001534 if (err)
1535 goto out_free;
Heiko Stübnerd7da6bd2014-10-29 01:22:56 +01001536
Lu Baolu8cec63e2019-03-20 09:40:24 +08001537 err = bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
Joerg Roedel8da30142015-05-28 18:41:27 +02001538 if (err)
1539 goto out_err;
1540
Heiko Stübnerd7da6bd2014-10-29 01:22:56 +01001541
1542 return 0;
Joerg Roedel8da30142015-05-28 18:41:27 +02001543
1544out_err:
1545 /* Clean up */
Lu Baolu8cec63e2019-03-20 09:40:24 +08001546 bus_for_each_dev(bus, NULL, NULL, remove_iommu_group);
Joerg Roedel8da30142015-05-28 18:41:27 +02001547 bus_unregister_notifier(bus, nb);
1548
1549out_free:
1550 kfree(nb);
1551
1552 return err;
Joerg Roedelfc2100e2008-11-26 17:21:24 +01001553}

/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: bus.
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus. Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up. With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */
int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
{
	int err;

	if (ops == NULL) {
		bus->iommu_ops = NULL;
		return 0;
	}

	if (bus->iommu_ops != NULL)
		return -EBUSY;

	bus->iommu_ops = ops;

	/* Do IOMMU specific setup for this bus-type */
	err = iommu_bus_init(bus, ops);
	if (err)
		bus->iommu_ops = NULL;

	return err;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);
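
/*
 * Example (illustrative sketch only, not built as part of this file): a
 * hypothetical IOMMU driver registering its ops for the platform bus from
 * its own init code. "my_iommu_ops" and "my_iommu_driver_init" are
 * assumptions made for this sketch.
 */
#if 0
static const struct iommu_ops my_iommu_ops = {
	/* .domain_alloc, .domain_free, .attach_dev, .map, .unmap, ... */
};

static int __init my_iommu_driver_init(void)
{
	/* Returns -EBUSY if another driver already claimed the bus. */
	return bus_set_iommu(&platform_bus_type, &my_iommu_ops);
}
#endif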

bool iommu_present(struct bus_type *bus)
{
	return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
	if (!bus->iommu_ops || !bus->iommu_ops->capable)
		return false;

	return bus->iommu_ops->capable(cap);
}
EXPORT_SYMBOL_GPL(iommu_capable);

/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
			     iommu_fault_handler_t handler,
			     void *token)
{
	BUG_ON(!domain);

	domain->handler = handler;
	domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
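
/*
 * Example (illustrative sketch only, not built as part of this file): a
 * domain owner installing a fault handler that just logs faults. The name
 * "my_fault_handler" is an assumption; the signature is the one
 * iommu_fault_handler_t defines.
 */
#if 0
static int my_fault_handler(struct iommu_domain *domain, struct device *dev,
			    unsigned long iova, int flags, void *token)
{
	dev_err(dev, "iommu fault at iova 0x%lx, flags 0x%x\n", iova, flags);
	return -ENOSYS;	/* keep the IOMMU driver's default behavior */
}

/* Installed once per domain:
 *	iommu_set_fault_handler(domain, my_fault_handler, NULL);
 */
#endif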

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type)
{
	struct iommu_domain *domain;

	if (bus == NULL || bus->iommu_ops == NULL)
		return NULL;

	domain = bus->iommu_ops->domain_alloc(type);
	if (!domain)
		return NULL;

	domain->ops = bus->iommu_ops;
	domain->type = type;
	/* Assume all sizes by default; the driver may override this later */
	domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;

	return domain;
}

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
	domain->ops->domain_free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev)
{
	int ret;

	if (unlikely(domain->ops->attach_dev == NULL))
		return -ENODEV;

	ret = domain->ops->attach_dev(domain, dev);
	if (!ret)
		trace_attach_device_to_domain(dev);
	return ret;
}

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (!group)
		return -ENODEV;

	/*
	 * Lock the group to make sure the device-count doesn't
	 * change while we are attaching
	 */
	mutex_lock(&group->mutex);
	ret = -EINVAL;
	if (iommu_group_device_count(group) != 1)
		goto out_unlock;

	ret = __iommu_attach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device);
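
/*
 * Example (illustrative sketch only, not built as part of this file): the
 * typical flow for a driver that manages its own translations - allocate
 * an unmanaged domain on the device's bus and attach the device, which
 * only succeeds when the device sits alone in its group. The function
 * name is an assumption for the sketch.
 */
#if 0
static int my_setup_translation(struct device *dev)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(dev->bus);
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);
	if (ret)	/* -EINVAL if the group has more than one device */
		iommu_domain_free(domain);

	return ret;
}
#endif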

int iommu_cache_invalidate(struct iommu_domain *domain, struct device *dev,
			   struct iommu_cache_invalidate_info *inv_info)
{
	if (unlikely(!domain->ops->cache_invalidate))
		return -ENODEV;

	return domain->ops->cache_invalidate(domain, dev, inv_info);
}
EXPORT_SYMBOL_GPL(iommu_cache_invalidate);

int iommu_sva_bind_gpasid(struct iommu_domain *domain,
			  struct device *dev, struct iommu_gpasid_bind_data *data)
{
	if (unlikely(!domain->ops->sva_bind_gpasid))
		return -ENODEV;

	return domain->ops->sva_bind_gpasid(domain, dev, data);
}
EXPORT_SYMBOL_GPL(iommu_sva_bind_gpasid);

int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
			    ioasid_t pasid)
{
	if (unlikely(!domain->ops->sva_unbind_gpasid))
		return -ENODEV;

	return domain->ops->sva_unbind_gpasid(dev, pasid);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid);

static void __iommu_detach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	if (iommu_is_attach_deferred(domain, dev))
		return;

	if (unlikely(domain->ops->detach_dev == NULL))
		return;

	domain->ops->detach_dev(domain, dev);
	trace_detach_device_from_domain(dev);
}

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		return;

	mutex_lock(&group->mutex);
	if (iommu_group_device_count(group) != 1) {
		WARN_ON(1);
		goto out_unlock;
	}

	__iommu_detach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);

struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	struct iommu_domain *domain;
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		return NULL;

	domain = group->domain;

	iommu_group_put(group);

	return domain;
}
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);

/*
 * Fast path for IOMMU_DOMAIN_DMA implementations: callers must guarantee
 * that the group and its default domain are valid and correct.
 */
struct iommu_domain *iommu_get_dma_domain(struct device *dev)
{
	return dev->iommu_group->default_domain;
}

/*
 * IOMMU groups are really the natural working unit of the IOMMU, but
 * the IOMMU API works on domains and devices. Bridge that gap by
 * iterating over the devices in a group. Ideally we'd have a single
 * device which represents the requestor ID of the group, but we also
 * allow IOMMU drivers to create policy defined minimum sets, where
 * the physical hardware may be able to distinguish members, but we
 * wish to group them at a higher level (ex. untrusted multi-function
 * PCI devices). Thus we attach each device.
 */
static int iommu_group_do_attach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	return __iommu_attach_device(domain, dev);
}

static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group)
{
	int ret;

	if (group->default_domain && group->domain != group->default_domain)
		return -EBUSY;

	ret = __iommu_group_for_each_dev(group, domain,
					 iommu_group_do_attach_device);
	if (ret == 0)
		group->domain = domain;

	return ret;
}

int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_attach_group(domain, group);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_group);

static int iommu_group_do_detach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	__iommu_detach_device(domain, dev);

	return 0;
}

static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group)
{
	int ret;

	if (!group->default_domain) {
		__iommu_group_for_each_dev(group, domain,
					   iommu_group_do_detach_device);
		group->domain = NULL;
		return;
	}

	if (group->domain == group->default_domain)
		return;

	/* Detach by re-attaching to the default domain */
	ret = __iommu_group_for_each_dev(group, group->default_domain,
					 iommu_group_do_attach_device);
	if (ret != 0)
		WARN_ON(1);
	else
		group->domain = group->default_domain;
}

void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	mutex_lock(&group->mutex);
	__iommu_detach_group(domain, group);
	mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_detach_group);

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	if (unlikely(domain->ops->iova_to_phys == NULL))
		return 0;

	return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

static size_t iommu_pgsize(struct iommu_domain *domain,
			   unsigned long addr_merge, size_t size)
{
	unsigned int pgsize_idx;
	size_t pgsize;

	/* Max page size that still fits into 'size' */
	pgsize_idx = __fls(size);

	/* need to consider alignment requirements ? */
	if (likely(addr_merge)) {
		/* Max page size allowed by address */
		unsigned int align_pgsize_idx = __ffs(addr_merge);
		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
	}

	/* build a mask of acceptable page sizes */
	pgsize = (1UL << (pgsize_idx + 1)) - 1;

	/* throw away page sizes not supported by the hardware */
	pgsize &= domain->pgsize_bitmap;

	/* make sure we're still sane */
	BUG_ON(!pgsize);

	/* pick the biggest page */
	pgsize_idx = __fls(pgsize);
	pgsize = 1UL << pgsize_idx;

	return pgsize;
}
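
/*
 * Worked example (assuming a pgsize_bitmap advertising 4K, 2M and 1G):
 * for iova | paddr == 0x20200000 and size == 0x300000, __fls(size) and
 * __ffs(addr_merge) both limit the index to bit 21, the mask above keeps
 * only the 4K and 2M bits, and the final __fls() picks 2M. The loop in
 * __iommu_map() below therefore maps one 2M page followed by 4K pages
 * for the remaining 1M, instead of 768 individual 4K mappings.
 */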

int __iommu_map(struct iommu_domain *domain, unsigned long iova,
		phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	const struct iommu_ops *ops = domain->ops;
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	phys_addr_t orig_paddr = paddr;
	int ret = 0;

	if (unlikely(ops->map == NULL ||
		     domain->pgsize_bitmap == 0UL))
		return -ENODEV;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return -EINVAL;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
		       iova, &paddr, size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);

	while (size) {
		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);

		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
			 iova, &paddr, pgsize);
		ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);

		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	if (ops->iotlb_sync_map)
		ops->iotlb_sync_map(domain);

	/* unroll mapping in case something went wrong */
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);
	else
		trace_map(orig_iova, orig_paddr, orig_size);

	return ret;
}

int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot)
{
	might_sleep();
	return __iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(iommu_map);
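
/*
 * Example (illustrative sketch only, not built as part of this file):
 * mapping one page for device DMA in an unmanaged domain. The iova value
 * is an assumption; it and the physical address satisfy the min_pagesz
 * alignment rule enforced above.
 */
#if 0
static int my_map_one_page(struct iommu_domain *domain, struct page *page)
{
	dma_addr_t iova = 0x10000000;	/* assumed-free, page-aligned IOVA */

	return iommu_map(domain, iova, page_to_phys(page), PAGE_SIZE,
			 IOMMU_READ | IOMMU_WRITE);
	/* Undone later with: iommu_unmap(domain, iova, PAGE_SIZE); */
}
#endif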

int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot)
{
	return __iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(iommu_map_atomic);

static size_t __iommu_unmap(struct iommu_domain *domain,
			    unsigned long iova, size_t size,
			    struct iommu_iotlb_gather *iotlb_gather)
{
	const struct iommu_ops *ops = domain->ops;
	size_t unmapped_page, unmapped = 0;
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;

	if (unlikely(ops->unmap == NULL ||
		     domain->pgsize_bitmap == 0UL))
		return 0;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return 0;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
		       iova, size, min_pagesz);
		return 0;
	}

	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);

		unmapped_page = ops->unmap(domain, iova, pgsize, iotlb_gather);
		if (!unmapped_page)
			break;

		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
			 iova, unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	trace_unmap(orig_iova, size, unmapped);
	return unmapped;
}

size_t iommu_unmap(struct iommu_domain *domain,
		   unsigned long iova, size_t size)
{
	struct iommu_iotlb_gather iotlb_gather;
	size_t ret;

	iommu_iotlb_gather_init(&iotlb_gather);
	ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
	iommu_tlb_sync(domain, &iotlb_gather);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_unmap);

size_t iommu_unmap_fast(struct iommu_domain *domain,
			unsigned long iova, size_t size,
			struct iommu_iotlb_gather *iotlb_gather)
{
	return __iommu_unmap(domain, iova, size, iotlb_gather);
}
EXPORT_SYMBOL_GPL(iommu_unmap_fast);
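
/*
 * Example (illustrative sketch only, not built as part of this file):
 * batching several unmaps and issuing a single TLB sync, which is what
 * iommu_unmap_fast() plus iommu_tlb_sync() are for. The function name and
 * the 'iovas' array are assumptions for the sketch.
 */
#if 0
static void my_unmap_batch(struct iommu_domain *domain,
			   dma_addr_t *iovas, size_t n)
{
	struct iommu_iotlb_gather gather;
	size_t i;

	iommu_iotlb_gather_init(&gather);
	for (i = 0; i < n; i++)
		iommu_unmap_fast(domain, iovas[i], PAGE_SIZE, &gather);
	iommu_tlb_sync(domain, &gather);	/* one flush for the batch */
}
#endif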

size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
		      struct scatterlist *sg, unsigned int nents, int prot,
		      gfp_t gfp)
{
	size_t len = 0, mapped = 0;
	phys_addr_t start;
	unsigned int i = 0;
	int ret;

	while (i <= nents) {
		phys_addr_t s_phys = sg_phys(sg);

		if (len && s_phys != start + len) {
			ret = __iommu_map(domain, iova + mapped, start,
					  len, prot, gfp);

			if (ret)
				goto out_err;

			mapped += len;
			len = 0;
		}

		if (len) {
			len += sg->length;
		} else {
			len = sg->length;
			start = s_phys;
		}

		if (++i < nents)
			sg = sg_next(sg);
	}

	return mapped;

out_err:
	/* undo mappings already done */
	iommu_unmap(domain, iova, mapped);

	return 0;
}

size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
		    struct scatterlist *sg, unsigned int nents, int prot)
{
	might_sleep();
	return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(iommu_map_sg);
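
/*
 * Example (illustrative sketch only, not built as part of this file):
 * mapping a scatterlist contiguously into IOVA space. __iommu_map_sg()
 * above merges physically contiguous entries before calling __iommu_map().
 * The function name and the assumed, already-populated sg_table are
 * inventions for the sketch.
 */
#if 0
static int my_map_table(struct iommu_domain *domain, unsigned long iova,
			struct sg_table *sgt)
{
	size_t mapped;

	mapped = iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents,
			      IOMMU_READ | IOMMU_WRITE);

	/* 0 means failure; partial mappings were already rolled back */
	return mapped ? 0 : -ENOMEM;
}
#endif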

size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
			   struct scatterlist *sg, unsigned int nents, int prot)
{
	return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(iommu_map_sg_atomic);

int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
			       phys_addr_t paddr, u64 size, int prot)
{
	if (unlikely(domain->ops->domain_window_enable == NULL))
		return -ENODEV;

	return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
						 prot);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_enable);

void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
	if (unlikely(domain->ops->domain_window_disable == NULL))
		return;

	return domain->ops->domain_window_disable(domain, wnd_nr);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_disable);

/**
 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
 * @domain: the iommu domain where the fault has happened
 * @dev: the device where the fault has happened
 * @iova: the faulting address
 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
 *
 * This function should be called by the low-level IOMMU implementations
 * whenever IOMMU faults happen, to allow high-level users, that are
 * interested in such events, to know about them.
 *
 * This event may be useful for several possible use cases:
 * - mere logging of the event
 * - dynamic TLB/PTE loading
 * - restarting the faulting device, if that is required
 *
 * Returns 0 on success and an appropriate error code otherwise (if dynamic
 * PTE/TLB loading will one day be supported, implementations will be able
 * to tell whether it succeeded or not according to this return value).
 *
 * Specifically, -ENOSYS is returned if a fault handler isn't installed
 * (though fault handlers can also return -ENOSYS, in case they want to
 * elicit the default behavior of the IOMMU drivers).
 */
int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
		       unsigned long iova, int flags)
{
	int ret = -ENOSYS;

	/*
	 * if upper layers showed interest and installed a fault handler,
	 * invoke it.
	 */
	if (domain->handler)
		ret = domain->handler(domain, dev, iova, flags,
				      domain->handler_token);

	trace_io_page_fault(dev, iova, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(report_iommu_fault);
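
/*
 * Example (illustrative sketch only, not built as part of this file): how
 * a low-level IOMMU driver's IRQ handler might feed a translation fault
 * into the framework. "struct my_iommu", the FAULT_ADDR register and the
 * flag decoding are all assumptions for the sketch.
 */
#if 0
static irqreturn_t my_iommu_irq(int irq, void *data)
{
	struct my_iommu *iommu = data;
	unsigned long iova = readl(iommu->base + FAULT_ADDR);
	int flags = IOMMU_FAULT_READ;	/* decoded from a status register */

	if (report_iommu_fault(iommu->domain, iommu->dev, iova, flags))
		dev_err_ratelimited(iommu->dev,
				    "unhandled fault at iova 0x%lx\n", iova);

	return IRQ_HANDLED;
}
#endif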

static int __init iommu_init(void)
{
	iommu_group_kset = kset_create_and_add("iommu_groups",
					       NULL, kernel_kobj);
	BUG_ON(!iommu_group_kset);

	iommu_debugfs_setup();

	return 0;
}
core_initcall(iommu_init);

int iommu_domain_get_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	struct iommu_domain_geometry *geometry;
	bool *paging;
	int ret = 0;

	switch (attr) {
	case DOMAIN_ATTR_GEOMETRY:
		geometry = data;
		*geometry = domain->geometry;

		break;
	case DOMAIN_ATTR_PAGING:
		paging = data;
		*paging = (domain->pgsize_bitmap != 0UL);
		break;
	default:
		if (!domain->ops->domain_get_attr)
			return -EINVAL;

		ret = domain->ops->domain_get_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_get_attr);

int iommu_domain_set_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	int ret = 0;

	switch (attr) {
	default:
		if (domain->ops->domain_set_attr == NULL)
			return -EINVAL;

		ret = domain->ops->domain_set_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_set_attr);

void iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->get_resv_regions)
		ops->get_resv_regions(dev, list);
}

void iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->put_resv_regions)
		ops->put_resv_regions(dev, list);
}

/**
 * generic_iommu_put_resv_regions - Reserved region driver helper
 * @dev: device for which to free reserved regions
 * @list: reserved region list for device
 *
 * IOMMU drivers can use this to implement their .put_resv_regions() callback
 * for simple reservations. Memory allocated for each reserved region will be
 * freed. If an IOMMU driver allocates additional resources per region, it is
 * going to have to implement a custom callback.
 */
void generic_iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
	struct iommu_resv_region *entry, *next;

	list_for_each_entry_safe(entry, next, list, list)
		kfree(entry);
}
EXPORT_SYMBOL(generic_iommu_put_resv_regions);
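
/*
 * Example (illustrative sketch only, not built as part of this file): a
 * driver whose reserved regions carry no extra per-region state can point
 * its .put_resv_regions callback straight at the helper above. The ops
 * and get_resv_regions names are assumptions for the sketch.
 */
#if 0
static const struct iommu_ops my_iommu_ops = {
	.get_resv_regions	= my_iommu_get_resv_regions,
	.put_resv_regions	= generic_iommu_put_resv_regions,
	/* ... */
};
#endif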

struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
						  size_t length, int prot,
						  enum iommu_resv_type type)
{
	struct iommu_resv_region *region;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return NULL;

	INIT_LIST_HEAD(&region->list);
	region->start = start;
	region->length = length;
	region->prot = prot;
	region->type = type;
	return region;
}
EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);
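
/*
 * Example (illustrative sketch only, not built as part of this file): a
 * driver-side .get_resv_regions() implementation reporting a software MSI
 * window. The function name and the base/size values are assumptions for
 * the sketch.
 */
#if 0
static void my_iommu_get_resv_regions(struct device *dev,
				      struct list_head *head)
{
	struct iommu_resv_region *region;

	region = iommu_alloc_resv_region(0x08000000, SZ_1M,
					 IOMMU_READ | IOMMU_WRITE,
					 IOMMU_RESV_SW_MSI);
	if (!region)
		return;

	list_add_tail(&region->list, head);
}
#endif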

static int
request_default_domain_for_dev(struct device *dev, unsigned long type)
{
	struct iommu_domain *domain;
	struct iommu_group *group;
	int ret;

	/* Device must already be in a group before calling this function */
	group = iommu_group_get(dev);
	if (!group)
		return -EINVAL;

	mutex_lock(&group->mutex);

	ret = 0;
	if (group->default_domain && group->default_domain->type == type)
		goto out;

	/* Don't change mappings of existing devices */
	ret = -EBUSY;
	if (iommu_group_device_count(group) != 1)
		goto out;

	ret = -ENOMEM;
	domain = __iommu_domain_alloc(dev->bus, type);
	if (!domain)
		goto out;

	/* Attach the device to the domain */
	ret = __iommu_attach_group(domain, group);
	if (ret) {
		iommu_domain_free(domain);
		goto out;
	}

	/* Make the domain the default for this group */
	if (group->default_domain)
		iommu_domain_free(group->default_domain);
	group->default_domain = domain;

	iommu_group_create_direct_mappings(group, dev);

	dev_info(dev, "Using iommu %s mapping\n",
		 type == IOMMU_DOMAIN_DMA ? "dma" : "direct");

	ret = 0;
out:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}

/* Request that a device is direct mapped by the IOMMU */
int iommu_request_dm_for_dev(struct device *dev)
{
	return request_default_domain_for_dev(dev, IOMMU_DOMAIN_IDENTITY);
}

/* Request that a device can't be direct mapped by the IOMMU */
int iommu_request_dma_domain_for_dev(struct device *dev)
{
	return request_default_domain_for_dev(dev, IOMMU_DOMAIN_DMA);
}

void iommu_set_default_passthrough(bool cmd_line)
{
	if (cmd_line)
		iommu_set_cmd_line_dma_api();

	iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
}

void iommu_set_default_translated(bool cmd_line)
{
	if (cmd_line)
		iommu_set_cmd_line_dma_api();

	iommu_def_domain_type = IOMMU_DOMAIN_DMA;
}

bool iommu_default_passthrough(void)
{
	return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY;
}
EXPORT_SYMBOL_GPL(iommu_default_passthrough);

const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
	const struct iommu_ops *ops = NULL;
	struct iommu_device *iommu;

	spin_lock(&iommu_device_lock);
	list_for_each_entry(iommu, &iommu_device_list, list)
		if (iommu->fwnode == fwnode) {
			ops = iommu->ops;
			break;
		}
	spin_unlock(&iommu_device_lock);
	return ops;
}

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec)
		return ops == fwspec->ops ? 0 : -EINVAL;

	if (!dev_iommu_get(dev))
		return -ENOMEM;

	/* Preallocate for the overwhelmingly common case of 1 ID */
	fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL);
	if (!fwspec)
		return -ENOMEM;

	of_node_get(to_of_node(iommu_fwnode));
	fwspec->iommu_fwnode = iommu_fwnode;
	fwspec->ops = ops;
	dev_iommu_fwspec_set(dev, fwspec);
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_init);

void iommu_fwspec_free(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec) {
		fwnode_handle_put(fwspec->iommu_fwnode);
		kfree(fwspec);
		dev_iommu_fwspec_set(dev, NULL);
	}
}
EXPORT_SYMBOL_GPL(iommu_fwspec_free);

int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	int i, new_num;

	if (!fwspec)
		return -EINVAL;

	new_num = fwspec->num_ids + num_ids;
	if (new_num > 1) {
		fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num),
				  GFP_KERNEL);
		if (!fwspec)
			return -ENOMEM;

		dev_iommu_fwspec_set(dev, fwspec);
	}

	for (i = 0; i < num_ids; i++)
		fwspec->ids[fwspec->num_ids + i] = ids[i];

	fwspec->num_ids = new_num;
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
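
/*
 * Example (illustrative sketch only, not built as part of this file):
 * firmware glue, such as a driver's of_xlate() path, attaching an fwspec
 * to a master device and recording one stream ID from the DT phandle
 * arguments. "my_iommu_ops" is an assumption for the sketch.
 */
#if 0
static int my_iommu_of_xlate(struct device *dev,
			     struct of_phandle_args *args)
{
	int ret;

	ret = iommu_fwspec_init(dev, &args->np->fwnode, &my_iommu_ops);
	if (ret)
		return ret;

	return iommu_fwspec_add_ids(dev, args->args, 1);
}
#endif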

/*
 * Per device IOMMU features.
 */
bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->dev_has_feat)
		return ops->dev_has_feat(dev, feat);

	return false;
}
EXPORT_SYMBOL_GPL(iommu_dev_has_feature);

int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->dev_enable_feat)
		return ops->dev_enable_feat(dev, feat);

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);

/*
 * The device drivers should do the necessary cleanups before calling this.
 * For example, before disabling the aux-domain feature, the device driver
 * should detach all aux-domains. Otherwise, this will return -EBUSY.
 */
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->dev_disable_feat)
		return ops->dev_disable_feat(dev, feat);

	return -EBUSY;
}
EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);

bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->dev_feat_enabled)
		return ops->dev_feat_enabled(dev, feat);

	return false;
}
EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);

/*
 * Aux-domain specific attach/detach.
 *
 * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns
 * true. Also, as long as domains are attached to a device through this
 * interface, any attempt to call iommu_attach_device() should fail
 * (iommu_detach_device() can't fail, so we fail when trying to re-attach).
 * This should make us safe against a device being attached to a guest as a
 * whole while there are still pasid users on it (aux and sva).
 */
int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
{
	int ret = -ENODEV;

	if (domain->ops->aux_attach_dev)
		ret = domain->ops->aux_attach_dev(domain, dev);

	if (!ret)
		trace_attach_device_to_domain(dev);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_aux_attach_device);

void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
{
	if (domain->ops->aux_detach_dev) {
		domain->ops->aux_detach_dev(domain, dev);
		trace_detach_device_from_domain(dev);
	}
}
EXPORT_SYMBOL_GPL(iommu_aux_detach_device);

int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
{
	int ret = -ENODEV;

	if (domain->ops->aux_get_pasid)
		ret = domain->ops->aux_get_pasid(domain, dev);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);
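
/*
 * Example (illustrative sketch only, not built as part of this file):
 * attaching an aux-domain and retrieving the PASID to program into the
 * device, after checking that the feature was enabled as described above.
 * The function name is an assumption for the sketch.
 */
#if 0
static int my_attach_aux_domain(struct iommu_domain *domain,
				struct device *dev)
{
	int ret;

	if (!iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
		return -ENODEV;

	ret = iommu_aux_attach_device(domain, dev);
	if (ret)
		return ret;

	return iommu_aux_get_pasid(domain, dev);	/* < 0 on failure */
}
#endif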

/**
 * iommu_sva_bind_device() - Bind a process address space to a device
 * @dev: the device
 * @mm: the mm to bind, caller must hold a reference to it
 * @drvdata: opaque data pointer for the IOMMU driver, forwarded to its
 *           sva_bind() callback
 *
 * Create a bond between device and address space, allowing the device to access
 * the mm using the returned PASID. If a bond already exists between @device and
 * @mm, it is returned and an additional reference is taken. Caller must call
 * iommu_sva_unbind_device() to release each reference.
 *
 * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
 * initialize the required SVA features.
 *
 * On error, returns an ERR_PTR value.
 */
struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
{
	struct iommu_group *group;
	struct iommu_sva *handle = ERR_PTR(-EINVAL);
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (!ops || !ops->sva_bind)
		return ERR_PTR(-ENODEV);

	group = iommu_group_get(dev);
	if (!group)
		return ERR_PTR(-ENODEV);

	/* Ensure device count and domain don't change while we're binding */
	mutex_lock(&group->mutex);

	/*
	 * To keep things simple, SVA currently doesn't support IOMMU groups
	 * with more than one device. Existing SVA-capable systems are not
	 * affected by the problems that required IOMMU groups (lack of ACS
	 * isolation, device ID aliasing and other hardware issues).
	 */
	if (iommu_group_device_count(group) != 1)
		goto out_unlock;

	handle = ops->sva_bind(dev, mm, drvdata);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return handle;
}
EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
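
/*
 * Example (illustrative sketch only, not built as part of this file): the
 * bind side of the SVA flow for a device driver - bind the current
 * process, program the returned PASID into the hardware, and drop the
 * bond when done. Assumes IOMMU_DEV_FEAT_SVA was already enabled via
 * iommu_dev_enable_feature(); the function name is an assumption.
 */
#if 0
static int my_bind_current_mm(struct device *dev)
{
	struct iommu_sva *handle;
	int pasid;

	handle = iommu_sva_bind_device(dev, current->mm, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	pasid = iommu_sva_get_pasid(handle);
	/* ... submit work tagged with 'pasid', wait for completion ... */
	iommu_sva_unbind_device(handle);

	return 0;
}
#endif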

/**
 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
 * @handle: the handle returned by iommu_sva_bind_device()
 *
 * Put reference to a bond between device and address space. The device should
 * not be issuing any more transactions for this PASID. All outstanding page
 * requests for this PASID must have been flushed to the IOMMU.
 */
void iommu_sva_unbind_device(struct iommu_sva *handle)
{
	struct iommu_group *group;
	struct device *dev = handle->dev;
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (!ops || !ops->sva_unbind)
		return;

	group = iommu_group_get(dev);
	if (!group)
		return;

	mutex_lock(&group->mutex);
	ops->sva_unbind(handle);
	mutex_unlock(&group->mutex);

	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);

int iommu_sva_set_ops(struct iommu_sva *handle,
		      const struct iommu_sva_ops *sva_ops)
{
	if (handle->ops && handle->ops != sva_ops)
		return -EEXIST;

	handle->ops = sva_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_sva_set_ops);

int iommu_sva_get_pasid(struct iommu_sva *handle)
{
	const struct iommu_ops *ops = handle->dev->bus->iommu_ops;

	if (!ops || !ops->sva_get_pasid)
		return IOMMU_PASID_INVALID;

	return ops->sva_get_pasid(handle);
}
EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);