// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt) "iommu: " fmt

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/bitops.h>
#include <linux/property.h>
#include <linux/fsl/mc.h>
#include <linux/module.h>
#include <trace/events/iommu.h>

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);

static unsigned int iommu_def_domain_type __read_mostly;
static bool iommu_dma_strict __read_mostly = true;
static u32 iommu_cmd_line __read_mostly;

struct iommu_group {
	struct kobject kobj;
	struct kobject *devices_kobj;
	struct list_head devices;
	struct mutex mutex;
	struct blocking_notifier_head notifier;
	void *iommu_data;
	void (*iommu_data_release)(void *iommu_data);
	char *name;
	int id;
	struct iommu_domain *default_domain;
	struct iommu_domain *domain;
	struct list_head entry;
};

struct group_device {
	struct list_head list;
	struct device *dev;
	char *name;
};

struct iommu_group_attribute {
	struct attribute attr;
	ssize_t (*show)(struct iommu_group *group, char *buf);
	ssize_t (*store)(struct iommu_group *group,
			 const char *buf, size_t count);
};

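/*
 * Strings shown in the sysfs "reserved_regions" file; note that both
 * IOMMU_RESV_MSI and IOMMU_RESV_SW_MSI are reported as "msi".
 */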
static const char * const iommu_group_resv_type_string[] = {
	[IOMMU_RESV_DIRECT] = "direct",
	[IOMMU_RESV_DIRECT_RELAXABLE] = "direct-relaxable",
	[IOMMU_RESV_RESERVED] = "reserved",
	[IOMMU_RESV_MSI] = "msi",
	[IOMMU_RESV_SW_MSI] = "msi",
};

#define IOMMU_CMD_LINE_DMA_API BIT(0)

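/*
 * iommu_cmd_line records whether the default domain type was chosen on the
 * kernel command line, in which case iommu_subsys_init() will not override
 * it with the CONFIG_IOMMU_DEFAULT_PASSTHROUGH default.
 */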
static void iommu_set_cmd_line_dma_api(void)
{
	iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
}

static bool iommu_cmd_line_dma_api(void)
{
	return !!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API);
}

static int iommu_alloc_default_domain(struct device *dev);
static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group);
static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group);

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
struct iommu_group_attribute iommu_group_attr_##_name =	\
	__ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)	\
	container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)		\
	container_of(_kobj, struct iommu_group, kobj)

static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);

/*
 * Use a function instead of an array here because the domain-type is a
 * bit-field, so an array would waste memory.
 */
static const char *iommu_domain_type_str(unsigned int t)
{
	switch (t) {
	case IOMMU_DOMAIN_BLOCKED:
		return "Blocked";
	case IOMMU_DOMAIN_IDENTITY:
		return "Passthrough";
	case IOMMU_DOMAIN_UNMANAGED:
		return "Unmanaged";
	case IOMMU_DOMAIN_DMA:
		return "Translated";
	default:
		return "Unknown";
	}
}

static int __init iommu_subsys_init(void)
{
	bool cmd_line = iommu_cmd_line_dma_api();

	if (!cmd_line) {
		if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
			iommu_set_default_passthrough(false);
		else
			iommu_set_default_translated(false);

		if (iommu_default_passthrough() && mem_encrypt_active()) {
			pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
			iommu_set_default_translated(false);
		}
	}

	pr_info("Default domain type: %s %s\n",
		iommu_domain_type_str(iommu_def_domain_type),
		cmd_line ? "(set via kernel command line)" : "");

	return 0;
}
subsys_initcall(iommu_subsys_init);

int iommu_device_register(struct iommu_device *iommu)
{
	spin_lock(&iommu_device_lock);
	list_add_tail(&iommu->list, &iommu_device_list);
	spin_unlock(&iommu_device_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_device_register);

void iommu_device_unregister(struct iommu_device *iommu)
{
	spin_lock(&iommu_device_lock);
	list_del(&iommu->list);
	spin_unlock(&iommu_device_lock);
}
EXPORT_SYMBOL_GPL(iommu_device_unregister);

static struct dev_iommu *dev_iommu_get(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;

	if (param)
		return param;

	param = kzalloc(sizeof(*param), GFP_KERNEL);
	if (!param)
		return NULL;

	mutex_init(&param->lock);
	dev->iommu = param;
	return param;
}

static void dev_iommu_free(struct device *dev)
{
	kfree(dev->iommu);
	dev->iommu = NULL;
}

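/*
 * Probe a device with the driver's probe_device() callback and add it to its
 * IOMMU group. If @group_list is given and the group has no default domain
 * yet, the group is queued on the list so default domains can be allocated
 * for whole groups later.
 */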
static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_device *iommu_dev;
	struct iommu_group *group;
	int ret;

	iommu_dev = ops->probe_device(dev);
	if (IS_ERR(iommu_dev))
		return PTR_ERR(iommu_dev);

	dev->iommu->iommu_dev = iommu_dev;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_release;
	}
	iommu_group_put(group);

	if (group_list && !group->default_domain && list_empty(&group->entry))
		list_add_tail(&group->entry, group_list);

	iommu_device_link(iommu_dev, dev);

	return 0;

out_release:
	ops->release_device(dev);

	return ret;
}

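/*
 * Probe a single device on the probe_device() path: probe it, try to allocate
 * a default domain for its group, attach the device to that domain and let
 * the driver finalize the probe.
 */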
static int __iommu_probe_device_helper(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	int ret;

	ret = __iommu_probe_device(dev, NULL);
	if (ret)
		goto err_out;

	/*
	 * Try to allocate a default domain - needs support from the
	 * IOMMU driver. There are still some drivers which don't
	 * support default domains, so the return value is not yet
	 * checked.
	 */
	iommu_alloc_default_domain(dev);

	group = iommu_group_get(dev);
	if (!group)
		goto err_release;

	if (group->default_domain)
		ret = __iommu_attach_device(group->default_domain, dev);

	iommu_group_put(group);

	if (ret)
		goto err_release;

	if (ops->probe_finalize)
		ops->probe_finalize(dev);

	return 0;

err_release:
	iommu_release_device(dev);
err_out:
	return ret;
}

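/*
 * Probe @dev on its bus' IOMMU driver: use the driver's probe_device() path
 * when it is implemented, otherwise fall back to the legacy add_device()
 * callback.
 */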
int iommu_probe_device(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	int ret;

	WARN_ON(dev->iommu_group);

	if (!ops)
		return -EINVAL;

	if (!dev_iommu_get(dev))
		return -ENOMEM;

	if (!try_module_get(ops->owner)) {
		ret = -EINVAL;
		goto err_free_dev_param;
	}

	if (ops->probe_device)
		return __iommu_probe_device_helper(dev);

	ret = ops->add_device(dev);
	if (ret)
		goto err_module_put;

	if (ops->probe_finalize)
		ops->probe_finalize(dev);

	return 0;

err_module_put:
	module_put(ops->owner);
err_free_dev_param:
	dev_iommu_free(dev);
	return ret;
}

static void __iommu_release_device(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	iommu_device_unlink(dev->iommu->iommu_dev, dev);

	iommu_group_remove_device(dev);

	ops->release_device(dev);
}

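/*
 * Counterpart to iommu_probe_device(): release the per-device IOMMU data and
 * detach the device from its driver, using release_device() when implemented
 * and the legacy remove_device() callback otherwise.
 */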
void iommu_release_device(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (!dev->iommu)
		return;

	if (ops->release_device)
		__iommu_release_device(dev);
	else if (dev->iommu_group)
		ops->remove_device(dev);

	module_put(ops->owner);
	dev_iommu_free(dev);
}

static int __init iommu_set_def_domain_type(char *str)
{
	bool pt;
	int ret;

	ret = kstrtobool(str, &pt);
	if (ret)
		return ret;

	if (pt)
		iommu_set_default_passthrough(true);
	else
		iommu_set_default_translated(true);

	return 0;
}
early_param("iommu.passthrough", iommu_set_def_domain_type);

static int __init iommu_dma_setup(char *str)
{
	return kstrtobool(str, &iommu_dma_strict);
}
early_param("iommu.strict", iommu_dma_setup);

static ssize_t iommu_group_attr_show(struct kobject *kobj,
				     struct attribute *__attr, char *buf)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->show)
		ret = attr->show(group, buf);
	return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
				      struct attribute *__attr,
				      const char *buf, size_t count)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->store)
		ret = attr->store(group, buf, count);
	return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
	.show = iommu_group_attr_show,
	.store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{
	return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
				    struct iommu_group_attribute *attr)
{
	sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
	return sprintf(buf, "%s\n", group->name);
}

/**
 * iommu_insert_resv_region - Insert a new region in the
 * list of reserved regions.
 * @new: new region to insert
 * @regions: list of regions
 *
 * Elements are sorted by start address and overlapping segments
 * of the same type are merged.
 */
int iommu_insert_resv_region(struct iommu_resv_region *new,
			     struct list_head *regions)
{
	struct iommu_resv_region *iter, *tmp, *nr, *top;
	LIST_HEAD(stack);

	nr = iommu_alloc_resv_region(new->start, new->length,
				     new->prot, new->type);
	if (!nr)
		return -ENOMEM;

	/* First add the new element based on start address sorting */
	list_for_each_entry(iter, regions, list) {
		if (nr->start < iter->start ||
		    (nr->start == iter->start && nr->type <= iter->type))
			break;
	}
	list_add_tail(&nr->list, &iter->list);

	/* Merge overlapping segments of type nr->type in @regions, if any */
	list_for_each_entry_safe(iter, tmp, regions, list) {
		phys_addr_t top_end, iter_end = iter->start + iter->length - 1;

		/* no merge needed on elements of different types than @new */
		if (iter->type != new->type) {
			list_move_tail(&iter->list, &stack);
			continue;
		}

		/* look for the last stack element of same type as @iter */
		list_for_each_entry_reverse(top, &stack, list)
			if (top->type == iter->type)
				goto check_overlap;

		list_move_tail(&iter->list, &stack);
		continue;

check_overlap:
		top_end = top->start + top->length - 1;

		if (iter->start > top_end + 1) {
			list_move_tail(&iter->list, &stack);
		} else {
			top->length = max(top_end, iter_end) - top->start + 1;
			list_del(&iter->list);
			kfree(iter);
		}
	}
	list_splice(&stack, regions);
	return 0;
}

static int
iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
				 struct list_head *group_resv_regions)
{
	struct iommu_resv_region *entry;
	int ret = 0;

	list_for_each_entry(entry, dev_resv_regions, list) {
		ret = iommu_insert_resv_region(entry, group_resv_regions);
		if (ret)
			break;
	}
	return ret;
}

int iommu_get_group_resv_regions(struct iommu_group *group,
				 struct list_head *head)
{
	struct group_device *device;
	int ret = 0;

	mutex_lock(&group->mutex);
	list_for_each_entry(device, &group->devices, list) {
		struct list_head dev_resv_regions;

		INIT_LIST_HEAD(&dev_resv_regions);
		iommu_get_resv_regions(device->dev, &dev_resv_regions);
		ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
		iommu_put_resv_regions(device->dev, &dev_resv_regions);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);

static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
					     char *buf)
{
	struct iommu_resv_region *region, *next;
	struct list_head group_resv_regions;
	char *str = buf;

	INIT_LIST_HEAD(&group_resv_regions);
	iommu_get_group_resv_regions(group, &group_resv_regions);

	list_for_each_entry_safe(region, next, &group_resv_regions, list) {
		str += sprintf(str, "0x%016llx 0x%016llx %s\n",
			       (long long int)region->start,
			       (long long int)(region->start +
						region->length - 1),
			       iommu_group_resv_type_string[region->type]);
		kfree(region);
	}

	return (str - buf);
}

static ssize_t iommu_group_show_type(struct iommu_group *group,
				     char *buf)
{
	char *type = "unknown\n";

	if (group->default_domain) {
		switch (group->default_domain->type) {
		case IOMMU_DOMAIN_BLOCKED:
			type = "blocked\n";
			break;
		case IOMMU_DOMAIN_IDENTITY:
			type = "identity\n";
			break;
		case IOMMU_DOMAIN_UNMANAGED:
			type = "unmanaged\n";
			break;
		case IOMMU_DOMAIN_DMA:
			type = "DMA\n";
			break;
		}
	}
	strcpy(buf, type);

	return strlen(type);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static IOMMU_GROUP_ATTR(reserved_regions, 0444,
			iommu_group_show_resv_regions, NULL);

static IOMMU_GROUP_ATTR(type, 0444, iommu_group_show_type, NULL);

static void iommu_group_release(struct kobject *kobj)
{
	struct iommu_group *group = to_iommu_group(kobj);

	pr_debug("Releasing group %d\n", group->id);

	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

	ida_simple_remove(&iommu_group_ida, group->id);

	if (group->default_domain)
		iommu_domain_free(group->default_domain);

	kfree(group->name);
	kfree(group);
}

static struct kobj_type iommu_group_ktype = {
	.sysfs_ops = &iommu_group_sysfs_ops,
	.release = iommu_group_release,
};

/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group. The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added. Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
	struct iommu_group *group;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
	INIT_LIST_HEAD(&group->entry);
	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
	if (ret < 0) {
		kfree(group);
		return ERR_PTR(ret);
	}
	group->id = ret;

	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
		ida_simple_remove(&iommu_group_ida, group->id);
		kfree(group);
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj); /* triggers .release & free */
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group. We can therefore
	 * use the devices_kobj for reference counting.
	 */
	kobject_put(&group->kobj);

	ret = iommu_group_create_file(group,
				      &iommu_group_attr_reserved_regions);
	if (ret)
		return ERR_PTR(ret);

	ret = iommu_group_create_file(group, &iommu_group_attr_type);
	if (ret)
		return ERR_PTR(ret);

	pr_debug("Allocated group %d\n", group->id);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);

struct iommu_group *iommu_group_get_by_id(int id)
{
	struct kobject *group_kobj;
	struct iommu_group *group;
	const char *name;

	if (!iommu_group_kset)
		return NULL;

	name = kasprintf(GFP_KERNEL, "%d", id);
	if (!name)
		return NULL;

	group_kobj = kset_find_obj(iommu_group_kset, name);
	kfree(name);

	if (!group_kobj)
		return NULL;

	group = container_of(group_kobj, struct iommu_group, kobj);
	BUG_ON(group->id != id);

	kobject_get(group->devices_kobj);
	kobject_put(&group->kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_by_id);

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations. This function provides a way to retrieve it. Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations. This function provides a way to set the data after
 * the group has been allocated. Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
{
	group->iommu_data = iommu_data;
	group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);

/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group. When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
	int ret;

	if (group->name) {
		iommu_group_remove_file(group, &iommu_group_attr_name);
		kfree(group->name);
		group->name = NULL;
		if (!name)
			return 0;
	}

	group->name = kstrdup(name, GFP_KERNEL);
	if (!group->name)
		return -ENOMEM;

	ret = iommu_group_create_file(group, &iommu_group_attr_name);
	if (ret) {
		kfree(group->name);
		group->name = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);

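/*
 * Map the IOMMU_RESV_DIRECT and IOMMU_RESV_DIRECT_RELAXABLE regions of @dev
 * one-to-one in the group's default DMA domain, skipping ranges that are
 * already mapped, so direct-mapped reserved regions remain usable under the
 * default domain.
 */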
static int iommu_group_create_direct_mappings(struct iommu_group *group,
					      struct device *dev)
{
	struct iommu_domain *domain = group->default_domain;
	struct iommu_resv_region *entry;
	struct list_head mappings;
	unsigned long pg_size;
	int ret = 0;

	if (!domain || domain->type != IOMMU_DOMAIN_DMA)
		return 0;

	BUG_ON(!domain->pgsize_bitmap);

	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
	INIT_LIST_HEAD(&mappings);

	iommu_get_resv_regions(dev, &mappings);

	/* We need to consider overlapping regions for different devices */
	list_for_each_entry(entry, &mappings, list) {
		dma_addr_t start, end, addr;

		if (domain->ops->apply_resv_region)
			domain->ops->apply_resv_region(dev, domain, entry);

		start = ALIGN(entry->start, pg_size);
		end = ALIGN(entry->start + entry->length, pg_size);

		if (entry->type != IOMMU_RESV_DIRECT &&
		    entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
			continue;

		for (addr = start; addr < end; addr += pg_size) {
			phys_addr_t phys_addr;

			phys_addr = iommu_iova_to_phys(domain, addr);
			if (phys_addr)
				continue;

			ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
			if (ret)
				goto out;
		}

	}

	iommu_flush_tlb_all(domain);

out:
	iommu_put_resv_regions(dev, &mappings);

	return ret;
}

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group. Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
	int ret, i = 0;
	struct group_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device->dev = dev;

	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
	if (ret)
		goto err_free_device;

	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
	if (!device->name) {
		ret = -ENOMEM;
		goto err_remove_link;
	}

	ret = sysfs_create_link_nowarn(group->devices_kobj,
				       &dev->kobj, device->name);
	if (ret) {
		if (ret == -EEXIST && i >= 0) {
			/*
			 * Account for the slim chance of collision
			 * and append an instance to the name.
			 */
			kfree(device->name);
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}
		goto err_free_name;
	}

	kobject_get(group->devices_kobj);

	dev->iommu_group = group;

	iommu_group_create_direct_mappings(group, dev);

	mutex_lock(&group->mutex);
	list_add_tail(&device->list, &group->devices);
	if (group->domain)
		ret = __iommu_attach_device(group->domain, dev);
	mutex_unlock(&group->mutex);
	if (ret)
		goto err_put_group;

	/* Notify any listeners about change to group. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);

	trace_add_device_to_group(group->id, dev);

	dev_info(dev, "Adding to iommu group %d\n", group->id);

	return 0;

err_put_group:
	mutex_lock(&group->mutex);
	list_del(&device->list);
	mutex_unlock(&group->mutex);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
	sysfs_remove_link(group->devices_kobj, device->name);
err_free_name:
	kfree(device->name);
err_remove_link:
	sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
	kfree(device);
	dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group. This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct group_device *tmp_device, *device = NULL;

	dev_info(dev, "Removing from iommu group %d\n", group->id);

	/* Pre-notify listeners that a device is being removed. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);

	mutex_lock(&group->mutex);
	list_for_each_entry(tmp_device, &group->devices, list) {
		if (tmp_device->dev == dev) {
			device = tmp_device;
			list_del(&device->list);
			break;
		}
	}
	mutex_unlock(&group->mutex);

	if (!device)
		return;

	sysfs_remove_link(group->devices_kobj, device->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

	trace_remove_device_from_group(group->id, dev);

	kfree(device->name);
	kfree(device);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

static int iommu_group_device_count(struct iommu_group *group)
{
	struct group_device *entry;
	int ret = 0;

	list_for_each_entry(entry, &group->devices, list)
		ret++;

	return ret;
}

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
				      int (*fn)(struct device *, void *))
{
	struct group_device *device;
	int ret = 0;

	list_for_each_entry(device, &group->devices, list) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
	return ret;
}

int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_group_for_each_dev(group, data, fn);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);

/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device. If found, the group is returned and the group
 * reference is incremented, else NULL.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references on an
 * existing group. Returns the given group for convenience.
 */
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
	kobject_get(group->devices_kobj);
	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_ref_get);

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group. Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

/**
 * iommu_group_register_notifier - Register a notifier for group changes
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * This function allows iommu group users to track changes in a group.
 * See include/linux/iommu.h for actions sent via this notifier. Caller
 * should hold a reference to the group throughout notifier registration.
 */
int iommu_group_register_notifier(struct iommu_group *group,
				  struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_register_notifier);

/**
 * iommu_group_unregister_notifier - Unregister a notifier
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * Unregister a previously registered group notifier block.
 */
int iommu_group_unregister_notifier(struct iommu_group *group,
				    struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);

/**
 * iommu_register_device_fault_handler() - Register a device fault handler
 * @dev: the device
 * @handler: the fault handler
 * @data: private data passed as argument to the handler
 *
 * When an IOMMU fault event is received, this handler gets called with the
 * fault event and data as argument. The handler should return 0 on success. If
 * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
 * complete the fault by calling iommu_page_response() with one of the following
 * response codes:
 * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
 * - IOMMU_PAGE_RESP_INVALID: terminate the fault
 * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
 *   page faults if possible.
 *
 * Return 0 if the fault handler was installed successfully, or an error.
 */
int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data)
{
	struct dev_iommu *param = dev->iommu;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);
	/* Only allow one fault handler registered for each device */
	if (param->fault_param) {
		ret = -EBUSY;
		goto done_unlock;
	}

	get_device(dev);
	param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
	if (!param->fault_param) {
		put_device(dev);
		ret = -ENOMEM;
		goto done_unlock;
	}
	param->fault_param->handler = handler;
	param->fault_param->data = data;
	mutex_init(&param->fault_param->lock);
	INIT_LIST_HEAD(&param->fault_param->faults);

done_unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);

/**
 * iommu_unregister_device_fault_handler() - Unregister the device fault handler
 * @dev: the device
 *
 * Remove the device fault handler installed with
 * iommu_register_device_fault_handler().
 *
 * Return 0 on success, or an error.
 */
int iommu_unregister_device_fault_handler(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);

	if (!param->fault_param)
		goto unlock;

	/* we cannot unregister handler if there are pending faults */
	if (!list_empty(&param->fault_param->faults)) {
		ret = -EBUSY;
		goto unlock;
	}

	kfree(param->fault_param);
	param->fault_param = NULL;
	put_device(dev);
unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);

/**
 * iommu_report_device_fault() - Report fault event to device driver
 * @dev: the device
 * @evt: fault event data
 *
 * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
 * handler. When this function fails and the fault is recoverable, it is the
 * caller's responsibility to complete the fault.
 *
 * Return 0 on success, or an error.
 */
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
	struct dev_iommu *param = dev->iommu;
	struct iommu_fault_event *evt_pending = NULL;
	struct iommu_fault_param *fparam;
	int ret = 0;

	if (!param || !evt)
		return -EINVAL;

	/* we only report device fault if there is a handler registered */
	mutex_lock(&param->lock);
	fparam = param->fault_param;
	if (!fparam || !fparam->handler) {
		ret = -EINVAL;
		goto done_unlock;
	}

	if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
	    (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
		evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
				      GFP_KERNEL);
		if (!evt_pending) {
			ret = -ENOMEM;
			goto done_unlock;
		}
		mutex_lock(&fparam->lock);
		list_add_tail(&evt_pending->list, &fparam->faults);
		mutex_unlock(&fparam->lock);
	}

	ret = fparam->handler(&evt->fault, fparam->data);
	if (ret && evt_pending) {
		mutex_lock(&fparam->lock);
		list_del(&evt_pending->list);
		mutex_unlock(&fparam->lock);
		kfree(evt_pending);
	}
done_unlock:
	mutex_unlock(&param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_report_device_fault);

int iommu_page_response(struct device *dev,
			struct iommu_page_response *msg)
{
	bool pasid_valid;
	int ret = -EINVAL;
	struct iommu_fault_event *evt;
	struct iommu_fault_page_request *prm;
	struct dev_iommu *param = dev->iommu;
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain || !domain->ops->page_response)
		return -ENODEV;

	if (!param || !param->fault_param)
		return -EINVAL;

	if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
	    msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
		return -EINVAL;

	/* Only send response if there is a fault report pending */
	mutex_lock(&param->fault_param->lock);
	if (list_empty(&param->fault_param->faults)) {
		dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
		goto done_unlock;
	}
	/*
	 * Check if we have a matching page request pending to respond,
	 * otherwise return -EINVAL
	 */
	list_for_each_entry(evt, &param->fault_param->faults, list) {
		prm = &evt->fault.prm;
		pasid_valid = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;

		if ((pasid_valid && prm->pasid != msg->pasid) ||
		    prm->grpid != msg->grpid)
			continue;

		/* Sanitize the reply */
		msg->flags = pasid_valid ? IOMMU_PAGE_RESP_PASID_VALID : 0;

		ret = domain->ops->page_response(dev, evt, msg);
		list_del(&evt->list);
		kfree(evt);
		break;
	}

done_unlock:
	mutex_unlock(&param->fault_param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_page_response);

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding. This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as it passes through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups. For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
							unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
		return NULL;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus ||
		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
			continue;

		group = get_pci_alias_group(tmp, devfns);
		if (group) {
			pci_dev_put(tmp);
			return group;
		}
	}

	return NULL;
}

/*
 * Look for aliases to or from the given device for existing groups. DMA
 * aliases are only supported on the same bus, therefore the search
 * space is quite small (especially since we're really only looking at PCIe
 * devices, and therefore only expect multiple slots on the root complex or
 * downstream switch ports). It's conceivable though that a pair of
 * multifunction devices could have aliases between them that would cause a
 * loop. To prevent this, we use a bitmap to track where we've been.
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
		return NULL;

	group = iommu_group_get(&pdev->dev);
	if (group)
		return group;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus)
			continue;

		/* We alias them or they alias us */
		if (pci_devs_are_dma_aliases(pdev, tmp)) {
			group = get_pci_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}

			group = get_pci_function_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}
		}
	}

	return NULL;
}

struct group_for_pci_data {
	struct pci_dev *pdev;
	struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device. Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct group_for_pci_data *data = opaque;

	data->pdev = pdev;
	data->group = iommu_group_get(&pdev->dev);

	return data->group != NULL;
}

/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{
	return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(generic_device_group);

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct group_for_pci_data data;
	struct pci_bus *bus;
	struct iommu_group *group = NULL;
	u64 devfns[4] = { 0 };

	if (WARN_ON(!dev_is_pci(dev)))
		return ERR_PTR(-EINVAL);

	/*
	 * Find the upstream DMA alias for the device. A device must not
	 * be aliased due to topology in order to have its own IOMMU group.
	 * If we find an alias along the way that already belongs to a
	 * group, use it.
	 */
	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
		return data.group;

	pdev = data.pdev;

	/*
	 * Continue upstream from the point of minimum IOMMU granularity
	 * due to aliases to the point where devices are protected from
	 * peer-to-peer DMA by PCI ACS. Again, if we find an existing
	 * group, use it.
	 */
	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		pdev = bus->self;

		group = iommu_group_get(&pdev->dev);
		if (group)
			return group;
	}

	/*
	 * Look for existing groups on device aliases. If we alias another
	 * device or another device aliases us, use the same group.
	 */
	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/*
	 * Look for existing groups on non-isolated functions on the same
	 * slot and aliases of those functions, if any. No need to clear
1439 * the search bitmap, the tested devfns are still valid.
Alex Williamson104a1c12014-07-03 09:51:18 -06001440 */
Alex Williamsonf096c062014-09-19 10:03:06 -06001441 group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
1442 if (group)
1443 return group;
Alex Williamson104a1c12014-07-03 09:51:18 -06001444
1445 /* No shared group found, allocate new */
Joerg Roedel7f7a2302017-06-28 12:45:31 +02001446 return iommu_group_alloc();
Alex Williamson104a1c12014-07-03 09:51:18 -06001447}
Will Deacona7ba5c32019-12-19 12:03:37 +00001448EXPORT_SYMBOL_GPL(pci_device_group);
Alex Williamson104a1c12014-07-03 09:51:18 -06001449
Nipun Guptaeab03e22018-09-10 19:19:18 +05301450/* Get the IOMMU group for device on fsl-mc bus */
1451struct iommu_group *fsl_mc_device_group(struct device *dev)
1452{
1453 struct device *cont_dev = fsl_mc_cont_dev(dev);
1454 struct iommu_group *group;
1455
1456 group = iommu_group_get(cont_dev);
1457 if (!group)
1458 group = iommu_group_alloc();
1459 return group;
1460}
Will Deacona7ba5c32019-12-19 12:03:37 +00001461EXPORT_SYMBOL_GPL(fsl_mc_device_group);
Nipun Guptaeab03e22018-09-10 19:19:18 +05301462
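/*
 * Return the default domain type for @dev: the driver's def_domain_type()
 * hint if it provides one, otherwise the global iommu_def_domain_type.
 */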
static int iommu_get_def_domain_type(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	unsigned int type = 0;

	if (ops->def_domain_type)
		type = ops->def_domain_type(dev);

	return (type == 0) ? iommu_def_domain_type : type;
}

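/*
 * Allocate the default domain for @group, falling back to IOMMU_DOMAIN_DMA
 * when a domain of the requested type cannot be allocated. With
 * iommu.strict=0 the domain is put into lazily flushed unmap mode via
 * DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE.
 */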
Joerg Roedel6e1aa202020-04-29 15:36:46 +02001474static int iommu_group_alloc_default_domain(struct bus_type *bus,
1475 struct iommu_group *group,
1476 unsigned int type)
Joerg Roedelff2a08b2020-04-29 15:36:39 +02001477{
1478 struct iommu_domain *dom;
1479
Joerg Roedel6e1aa202020-04-29 15:36:46 +02001480 dom = __iommu_domain_alloc(bus, type);
Sai Praneeth Prakhya4cbf3852020-04-29 15:36:40 +02001481 if (!dom && type != IOMMU_DOMAIN_DMA) {
Joerg Roedel6e1aa202020-04-29 15:36:46 +02001482 dom = __iommu_domain_alloc(bus, IOMMU_DOMAIN_DMA);
1483 if (dom)
1484 pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA",
1485 type, group->name);
Joerg Roedelff2a08b2020-04-29 15:36:39 +02001486 }
1487
1488 if (!dom)
1489 return -ENOMEM;
1490
1491 group->default_domain = dom;
1492 if (!group->domain)
1493 group->domain = dom;
1494
1495 if (!iommu_dma_strict) {
1496 int attr = 1;
1497 iommu_domain_set_attr(dom,
1498 DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
1499 &attr);
1500 }
1501
1502 return 0;
1503}
1504
Joerg Roedel6e1aa202020-04-29 15:36:46 +02001505static int iommu_alloc_default_domain(struct device *dev)
1506{
1507 struct iommu_group *group;
1508 unsigned int type;
1509
1510 group = iommu_group_get(dev);
1511 if (!group)
1512 return -ENODEV;
1513
1514 if (group->default_domain)
1515 return 0;
1516
1517 type = iommu_get_def_domain_type(dev);
1518
1519 return iommu_group_alloc_default_domain(dev->bus, group, type);
1520}
1521
Alex Williamson104a1c12014-07-03 09:51:18 -06001522/**
1523 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
1524 * @dev: target device
1525 *
1526 * This function is intended to be called by IOMMU drivers and extended to
1527 * support common, bus-defined algorithms when determining or creating the
1528 * IOMMU group for a device. On success, the caller will hold a reference
1529 * to the returned IOMMU group, which will already include the provided
1530 * device. The reference should be released with iommu_group_put().
1531 */
1532struct iommu_group *iommu_group_get_for_dev(struct device *dev)
1533{
Joerg Roedel46c6b2b2015-10-21 23:51:36 +02001534 const struct iommu_ops *ops = dev->bus->iommu_ops;
Joerg Roedelc4a783b2014-08-21 22:32:08 +02001535 struct iommu_group *group;
Alex Williamson104a1c12014-07-03 09:51:18 -06001536 int ret;
1537
1538 group = iommu_group_get(dev);
1539 if (group)
1540 return group;
1541
Robin Murphy05f803002017-07-21 13:12:38 +01001542 if (!ops)
1543 return ERR_PTR(-EINVAL);
Joerg Roedelc4a783b2014-08-21 22:32:08 +02001544
Robin Murphy05f803002017-07-21 13:12:38 +01001545 group = ops->device_group(dev);
Joerg Roedel72dcac62017-06-28 12:52:48 +02001546 if (WARN_ON_ONCE(group == NULL))
1547 return ERR_PTR(-EINVAL);
1548
Alex Williamson104a1c12014-07-03 09:51:18 -06001549 if (IS_ERR(group))
1550 return group;
1551
1552 ret = iommu_group_add_device(group, dev);
Joerg Roedelff2a08b2020-04-29 15:36:39 +02001553 if (ret)
1554 goto out_put_group;
Alex Williamson104a1c12014-07-03 09:51:18 -06001555
Joerg Roedel6e1aa202020-04-29 15:36:46 +02001556 /*
1557 * Try to allocate a default domain - needs support from the
1558 * IOMMU driver. There are still some drivers which don't support
1559 * default domains, so the return value is not yet checked. Only
1560 * allocate the domain here when the driver still has the
1561 * add_device/remove_device call-backs implemented.
1562 */
1563 if (!ops->probe_device) {
1564 iommu_alloc_default_domain(dev);
1565
1566 if (group->default_domain)
1567 ret = __iommu_attach_device(group->default_domain, dev);
1568
1569 if (ret)
1570 goto out_put_group;
1571 }
1572
Alex Williamson104a1c12014-07-03 09:51:18 -06001573 return group;
Joerg Roedelff2a08b2020-04-29 15:36:39 +02001574
1575out_put_group:
1576 iommu_group_put(group);
1577
1578 return ERR_PTR(ret);
Alex Williamson104a1c12014-07-03 09:51:18 -06001579}
Will Deacona7ba5c32019-12-19 12:03:37 +00001580EXPORT_SYMBOL(iommu_group_get_for_dev);
Alex Williamson104a1c12014-07-03 09:51:18 -06001581
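/*
 * Illustrative sketch, assuming a legacy driver that still implements the
 * add_device() callback: such a driver obtains the group with
 * iommu_group_get_for_dev() and drops the reference once the device has been
 * set up. "my_iommu_add_device" is a hypothetical callback name.
 *
 *	static int my_iommu_add_device(struct device *dev)
 *	{
 *		struct iommu_group *group;
 *
 *		group = iommu_group_get_for_dev(dev);
 *		if (IS_ERR(group))
 *			return PTR_ERR(group);
 *
 *		iommu_group_put(group);
 *		return 0;
 *	}
 */
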
Joerg Roedel6827ca82015-05-28 18:41:35 +02001582struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
1583{
1584 return group->default_domain;
1585}
1586
Alex Williamson14604322011-10-21 15:56:05 -04001587static int add_iommu_group(struct device *dev, void *data)
1588{
Joerg Roedelcc5aed42018-11-30 10:31:59 +01001589 int ret = iommu_probe_device(dev);
Joerg Roedel38667f12015-06-29 10:16:08 +02001590
1591 /*
1592 * We ignore -ENODEV errors for now, as they just mean that the
1593 * device is not translated by an IOMMU. We still care about
1594 * other errors and fail to initialize when they happen.
1595 */
1596 if (ret == -ENODEV)
1597 ret = 0;
1598
1599 return ret;
Alex Williamson14604322011-10-21 15:56:05 -04001600}
1601
Joerg Roedeldeac0b3b2020-04-29 15:36:49 +02001602static int probe_iommu_group(struct device *dev, void *data)
1603{
1604 const struct iommu_ops *ops = dev->bus->iommu_ops;
1605 struct list_head *group_list = data;
1606 int ret;
1607
1608 if (!dev_iommu_get(dev))
1609 return -ENOMEM;
1610
1611 if (!try_module_get(ops->owner)) {
1612 ret = -EINVAL;
1613 goto err_free_dev_iommu;
1614 }
1615
1616 ret = __iommu_probe_device(dev, group_list);
1617 if (ret)
1618 goto err_module_put;
1619
1620 return 0;
1621
1622err_module_put:
1623 module_put(ops->owner);
1624err_free_dev_iommu:
1625 dev_iommu_free(dev);
1626
1627 if (ret == -ENODEV)
1628 ret = 0;
1629
1630 return ret;
1631}
1632
Joerg Roedel8da30142015-05-28 18:41:27 +02001633static int remove_iommu_group(struct device *dev, void *data)
1634{
Joerg Roedelcc5aed42018-11-30 10:31:59 +01001635 iommu_release_device(dev);
Alex Williamson14604322011-10-21 15:56:05 -04001636
1637 return 0;
1638}
1639
Alex Williamsond72e31c2012-05-30 14:18:53 -06001640static int iommu_bus_notifier(struct notifier_block *nb,
1641 unsigned long action, void *data)
Alex Williamson14604322011-10-21 15:56:05 -04001642{
Alex Williamsond72e31c2012-05-30 14:18:53 -06001643 unsigned long group_action = 0;
Joerg Roedelcc5aed42018-11-30 10:31:59 +01001644 struct device *dev = data;
1645 struct iommu_group *group;
Alex Williamson14604322011-10-21 15:56:05 -04001646
Alex Williamsond72e31c2012-05-30 14:18:53 -06001647 /*
1648 * ADD/DEL call into iommu driver ops if provided, which may
1649 * result in ADD/DEL notifiers to group->notifier
1650 */
1651 if (action == BUS_NOTIFY_ADD_DEVICE) {
Joerg Roedelcc5aed42018-11-30 10:31:59 +01001652 int ret;
zhichang.yuan3ba87752017-04-18 20:51:48 +08001653
Joerg Roedelcc5aed42018-11-30 10:31:59 +01001654 ret = iommu_probe_device(dev);
1655 return (ret) ? NOTIFY_DONE : NOTIFY_OK;
Joerg Roedel843cb6d2015-05-28 18:41:28 +02001656 } else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
Joerg Roedelcc5aed42018-11-30 10:31:59 +01001657 iommu_release_device(dev);
1658 return NOTIFY_OK;
Alex Williamsond72e31c2012-05-30 14:18:53 -06001659 }
Alex Williamson14604322011-10-21 15:56:05 -04001660
Alex Williamsond72e31c2012-05-30 14:18:53 -06001661 /*
1662 * Remaining BUS_NOTIFYs get filtered and republished to the
1663 * group, if anyone is listening
1664 */
1665 group = iommu_group_get(dev);
1666 if (!group)
1667 return 0;
1668
1669 switch (action) {
1670 case BUS_NOTIFY_BIND_DRIVER:
1671 group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
1672 break;
1673 case BUS_NOTIFY_BOUND_DRIVER:
1674 group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
1675 break;
1676 case BUS_NOTIFY_UNBIND_DRIVER:
1677 group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
1678 break;
1679 case BUS_NOTIFY_UNBOUND_DRIVER:
1680 group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
1681 break;
1682 }
1683
1684 if (group_action)
1685 blocking_notifier_call_chain(&group->notifier,
1686 group_action, dev);
1687
1688 iommu_group_put(group);
Alex Williamson14604322011-10-21 15:56:05 -04001689 return 0;
1690}
1691
Joerg Roedeldeac0b3b2020-04-29 15:36:49 +02001692struct __group_domain_type {
1693 struct device *dev;
1694 unsigned int type;
1695};
1696
1697static int probe_get_default_domain_type(struct device *dev, void *data)
1698{
1699 const struct iommu_ops *ops = dev->bus->iommu_ops;
1700 struct __group_domain_type *gtype = data;
1701 unsigned int type = 0;
1702
1703 if (ops->def_domain_type)
1704 type = ops->def_domain_type(dev);
1705
1706 if (type) {
1707 if (gtype->type && gtype->type != type) {
1708 dev_warn(dev, "Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n",
1709 iommu_domain_type_str(type),
1710 dev_name(gtype->dev),
1711 iommu_domain_type_str(gtype->type));
1712 gtype->type = 0;
1713 }
1714
1715 if (!gtype->dev) {
1716 gtype->dev = dev;
1717 gtype->type = type;
1718 }
1719 }
1720
1721 return 0;
1722}
1723
1724static void probe_alloc_default_domain(struct bus_type *bus,
1725 struct iommu_group *group)
1726{
1727 struct __group_domain_type gtype;
1728
1729 memset(&gtype, 0, sizeof(gtype));
1730
1731 /* Ask for default domain requirements of all devices in the group */
1732 __iommu_group_for_each_dev(group, &gtype,
1733 probe_get_default_domain_type);
1734
1735 if (!gtype.type)
1736 gtype.type = iommu_def_domain_type;
1737
1738 iommu_group_alloc_default_domain(bus, group, gtype.type);
1739}
1740
1741static int iommu_group_do_dma_attach(struct device *dev, void *data)
1742{
1743 struct iommu_domain *domain = data;
1744 const struct iommu_ops *ops;
1745 int ret;
1746
1747 ret = __iommu_attach_device(domain, dev);
1748
1749 ops = domain->ops;
1750
1751 if (ret == 0 && ops->probe_finalize)
1752 ops->probe_finalize(dev);
1753
1754 return ret;
1755}
1756
1757static int __iommu_group_dma_attach(struct iommu_group *group)
1758{
1759 return __iommu_group_for_each_dev(group, group->default_domain,
1760 iommu_group_do_dma_attach);
1761}
1762
1763static int bus_iommu_probe(struct bus_type *bus)
1764{
1765 const struct iommu_ops *ops = bus->iommu_ops;
1766 int ret;
1767
1768 if (ops->probe_device) {
1769 struct iommu_group *group, *next;
1770 LIST_HEAD(group_list);
1771
1772 /*
1773 * This code-path does not allocate the default domain when
1774 * creating the iommu group, so do it after the groups are
1775 * created.
1776 */
1777 ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group);
1778 if (ret)
1779 return ret;
1780
1781 list_for_each_entry_safe(group, next, &group_list, entry) {
1782 /* Remove item from the list */
1783 list_del_init(&group->entry);
1784
1785 mutex_lock(&group->mutex);
1786
1787 /* Try to allocate default domain */
1788 probe_alloc_default_domain(bus, group);
1789
1790 if (!group->default_domain) {
1791 mutex_unlock(&group->mutex);
1792 continue;
1793 }
1794
1795 ret = __iommu_group_dma_attach(group);
1796
1797 mutex_unlock(&group->mutex);
1798
1799 if (ret)
1800 break;
1801 }
1802 } else {
1803 ret = bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
1804 }
1805
1806 return ret;
1807}
1808
Mark Salterfb3e3062014-09-21 13:58:24 -04001809static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
Joerg Roedelfc2100e2008-11-26 17:21:24 +01001810{
Mark Salterfb3e3062014-09-21 13:58:24 -04001811 struct notifier_block *nb;
Joerg Roedeldeac0b3b2020-04-29 15:36:49 +02001812 int err;
Thierry Redingb22f6432014-06-27 09:03:12 +02001813
Mark Salterfb3e3062014-09-21 13:58:24 -04001814 nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
1815 if (!nb)
1816 return -ENOMEM;
1817
1818 nb->notifier_call = iommu_bus_notifier;
1819
1820 err = bus_register_notifier(bus, nb);
Joerg Roedel8da30142015-05-28 18:41:27 +02001821 if (err)
1822 goto out_free;
Heiko Stübnerd7da6bd2014-10-29 01:22:56 +01001823
Joerg Roedeldeac0b3b2020-04-29 15:36:49 +02001824 err = bus_iommu_probe(bus);
Joerg Roedel8da30142015-05-28 18:41:27 +02001825 if (err)
1826 goto out_err;
1827
1829 return 0;
Joerg Roedel8da30142015-05-28 18:41:27 +02001830
1831out_err:
1832 /* Clean up */
Lu Baolu8cec63e2019-03-20 09:40:24 +08001833 bus_for_each_dev(bus, NULL, NULL, remove_iommu_group);
Joerg Roedel8da30142015-05-28 18:41:27 +02001834 bus_unregister_notifier(bus, nb);
1835
1836out_free:
1837 kfree(nb);
1838
1839 return err;
Joerg Roedelfc2100e2008-11-26 17:21:24 +01001840}
1841
Joerg Roedelff217762011-08-26 16:48:26 +02001842/**
1843 * bus_set_iommu - set iommu-callbacks for the bus
1844 * @bus: bus.
1845 * @ops: the callbacks provided by the iommu-driver
1846 *
1847 * This function is called by an iommu driver to set the iommu methods
1848 * used for a particular bus. Drivers for devices on that bus can use
1849 * the iommu-api after these ops are registered.
1850 * This special function is needed because IOMMUs are usually devices on
1851 * the bus itself, so the iommu drivers are not initialized when the bus
1852 * is set up. With this function the iommu-driver can set the iommu-ops
1853 * afterwards.
1854 */
Thierry Redingb22f6432014-06-27 09:03:12 +02001855int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
Joerg Roedelfc2100e2008-11-26 17:21:24 +01001856{
Heiko Stübnerd7da6bd2014-10-29 01:22:56 +01001857 int err;
1858
Will Deacon4312cf72019-12-19 12:03:43 +00001859 if (ops == NULL) {
1860 bus->iommu_ops = NULL;
1861 return 0;
1862 }
1863
Joerg Roedelff217762011-08-26 16:48:26 +02001864 if (bus->iommu_ops != NULL)
1865 return -EBUSY;
Joerg Roedelfc2100e2008-11-26 17:21:24 +01001866
Joerg Roedelff217762011-08-26 16:48:26 +02001867 bus->iommu_ops = ops;
1868
1869 /* Do IOMMU specific setup for this bus-type */
Heiko Stübnerd7da6bd2014-10-29 01:22:56 +01001870 err = iommu_bus_init(bus, ops);
1871 if (err)
1872 bus->iommu_ops = NULL;
1873
1874 return err;
Joerg Roedelfc2100e2008-11-26 17:21:24 +01001875}
Joerg Roedelff217762011-08-26 16:48:26 +02001876EXPORT_SYMBOL_GPL(bus_set_iommu);
1877
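/*
 * Illustrative sketch of the registration step described above; the ops
 * structure and its callbacks are hypothetical, not an in-tree driver. A
 * driver for devices on the platform bus would register its ops once during
 * its own initialization:
 *
 *	static const struct iommu_ops my_iommu_ops = {
 *		.domain_alloc	= my_domain_alloc,
 *		.domain_free	= my_domain_free,
 *		.attach_dev	= my_attach_dev,
 *		.map		= my_map,
 *		.unmap		= my_unmap,
 *		.device_group	= generic_device_group,
 *		.pgsize_bitmap	= SZ_4K | SZ_2M,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	ret = bus_set_iommu(&platform_bus_type, &my_iommu_ops);
 */
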
Joerg Roedela1b60c12011-09-06 18:46:34 +02001878bool iommu_present(struct bus_type *bus)
Joerg Roedelfc2100e2008-11-26 17:21:24 +01001879{
Joerg Roedel94441c32011-09-06 18:58:54 +02001880 return bus->iommu_ops != NULL;
Joerg Roedelfc2100e2008-11-26 17:21:24 +01001881}
Joerg Roedela1b60c12011-09-06 18:46:34 +02001882EXPORT_SYMBOL_GPL(iommu_present);
Joerg Roedelfc2100e2008-11-26 17:21:24 +01001883
Joerg Roedel3c0e0ca2014-09-03 18:47:25 +02001884bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
1885{
1886 if (!bus->iommu_ops || !bus->iommu_ops->capable)
1887 return false;
1888
1889 return bus->iommu_ops->capable(cap);
1890}
1891EXPORT_SYMBOL_GPL(iommu_capable);
1892
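/*
 * Illustrative sketch: a caller such as a device-assignment framework can
 * gate behavior on a bus-wide capability, e.g. whether DMA through the
 * IOMMU is cache-coherent:
 *
 *	bool coherent = iommu_capable(&pci_bus_type,
 *				      IOMMU_CAP_CACHE_COHERENCY);
 */
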
Ohad Ben-Cohen4f3f8d92011-09-13 15:25:23 -04001893/**
1894 * iommu_set_fault_handler() - set a fault handler for an iommu domain
1895 * @domain: iommu domain
1896 * @handler: fault handler
Ohad Ben-Cohen77ca2332012-05-21 20:20:05 +03001897 * @token: user data, will be passed back to the fault handler
Ohad Ben-Cohen0ed6d2d2011-09-27 07:36:40 -04001898 *
1899 * This function should be used by IOMMU users which want to be notified
1900 * whenever an IOMMU fault happens.
1901 *
1902 * The fault handler itself should return 0 on success, and an appropriate
1903 * error code otherwise.
Ohad Ben-Cohen4f3f8d92011-09-13 15:25:23 -04001904 */
1905void iommu_set_fault_handler(struct iommu_domain *domain,
Ohad Ben-Cohen77ca2332012-05-21 20:20:05 +03001906 iommu_fault_handler_t handler,
1907 void *token)
Ohad Ben-Cohen4f3f8d92011-09-13 15:25:23 -04001908{
1909 BUG_ON(!domain);
1910
1911 domain->handler = handler;
Ohad Ben-Cohen77ca2332012-05-21 20:20:05 +03001912 domain->handler_token = token;
Ohad Ben-Cohen4f3f8d92011-09-13 15:25:23 -04001913}
Ohad Ben-Cohen30bd9182011-09-26 09:11:46 -04001914EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
Ohad Ben-Cohen4f3f8d92011-09-13 15:25:23 -04001915
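/*
 * Illustrative sketch of installing a fault handler on an unmanaged domain;
 * "my_fault_handler" and "my_private_data" are placeholders. Returning
 * -ENOSYS lets the IOMMU driver apply its default fault behavior.
 *
 *	static int my_fault_handler(struct iommu_domain *domain,
 *				    struct device *dev, unsigned long iova,
 *				    int flags, void *token)
 *	{
 *		dev_err(dev, "iommu fault at 0x%lx, flags 0x%x\n",
 *			iova, flags);
 *		return -ENOSYS;
 *	}
 *
 *	iommu_set_fault_handler(domain, my_fault_handler, my_private_data);
 */
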
Joerg Roedel53723dc2015-05-28 18:41:29 +02001916static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
1917 unsigned type)
Joerg Roedelfc2100e2008-11-26 17:21:24 +01001918{
1919 struct iommu_domain *domain;
Joerg Roedelfc2100e2008-11-26 17:21:24 +01001920
Joerg Roedel94441c32011-09-06 18:58:54 +02001921 if (bus == NULL || bus->iommu_ops == NULL)
Joerg Roedel905d66c2011-09-06 16:03:26 +02001922 return NULL;
1923
Joerg Roedel53723dc2015-05-28 18:41:29 +02001924 domain = bus->iommu_ops->domain_alloc(type);
Joerg Roedelfc2100e2008-11-26 17:21:24 +01001925 if (!domain)
1926 return NULL;
1927
Joerg Roedel8539c7c2015-03-26 13:43:05 +01001928 domain->ops = bus->iommu_ops;
Joerg Roedel53723dc2015-05-28 18:41:29 +02001929 domain->type = type;
Robin Murphyd16e0fa2016-04-07 18:42:06 +01001930 /* Assume all sizes by default; the driver may override this later */
1931 domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
Joerg Roedel905d66c2011-09-06 16:03:26 +02001932
Joerg Roedelfc2100e2008-11-26 17:21:24 +01001933 return domain;
Joerg Roedelfc2100e2008-11-26 17:21:24 +01001934}
Joerg Roedelfc2100e2008-11-26 17:21:24 +01001935
Joerg Roedel53723dc2015-05-28 18:41:29 +02001936struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
1937{
1938 return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
Joerg Roedelfc2100e2008-11-26 17:21:24 +01001939}
1940EXPORT_SYMBOL_GPL(iommu_domain_alloc);
1941
1942void iommu_domain_free(struct iommu_domain *domain)
1943{
Joerg Roedel89be34a2015-03-26 13:43:19 +01001944 domain->ops->domain_free(domain);
Joerg Roedelfc2100e2008-11-26 17:21:24 +01001945}
1946EXPORT_SYMBOL_GPL(iommu_domain_free);
1947
Joerg Roedel426a2732015-05-28 18:41:30 +02001948static int __iommu_attach_device(struct iommu_domain *domain,
1949 struct device *dev)
Joerg Roedelfc2100e2008-11-26 17:21:24 +01001950{
Shuah Khanb54db772013-08-15 11:59:26 -06001951 int ret;
Baoquan Hee01d1912017-08-09 16:33:40 +08001952 if ((domain->ops->is_attach_deferred != NULL) &&
1953 domain->ops->is_attach_deferred(domain, dev))
1954 return 0;
1955
Joerg Roedele5aa7f02011-09-06 16:44:29 +02001956 if (unlikely(domain->ops->attach_dev == NULL))
1957 return -ENODEV;
1958
Shuah Khanb54db772013-08-15 11:59:26 -06001959 ret = domain->ops->attach_dev(domain, dev);
1960 if (!ret)
1961 trace_attach_device_to_domain(dev);
1962 return ret;
Joerg Roedelfc2100e2008-11-26 17:21:24 +01001963}
Joerg Roedel426a2732015-05-28 18:41:30 +02001964
1965int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
1966{
1967 struct iommu_group *group;
1968 int ret;
1969
1970 group = iommu_group_get(dev);
Jordan Crouse9ae9df02017-12-20 09:48:36 -07001971 if (!group)
1972 return -ENODEV;
1973
Joerg Roedel426a2732015-05-28 18:41:30 +02001974 /*
Robin Murphy05f803002017-07-21 13:12:38 +01001975 * Lock the group to make sure the device-count doesn't
Joerg Roedel426a2732015-05-28 18:41:30 +02001976 * change while we are attaching
1977 */
1978 mutex_lock(&group->mutex);
1979 ret = -EINVAL;
1980 if (iommu_group_device_count(group) != 1)
1981 goto out_unlock;
1982
Joerg Roedele39cb8a2015-05-28 18:41:31 +02001983 ret = __iommu_attach_group(domain, group);
Joerg Roedel426a2732015-05-28 18:41:30 +02001984
1985out_unlock:
1986 mutex_unlock(&group->mutex);
1987 iommu_group_put(group);
1988
1989 return ret;
1990}
Joerg Roedelfc2100e2008-11-26 17:21:24 +01001991EXPORT_SYMBOL_GPL(iommu_attach_device);
1992
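/*
 * Illustrative sketch of the usual unmanaged-domain lifecycle for a single
 * device ("dev" stands for the caller's device): allocate, attach, use the
 * domain for iommu_map()/iommu_unmap(), then tear it down in reverse order.
 *
 *	struct iommu_domain *domain;
 *	int ret;
 *
 *	domain = iommu_domain_alloc(dev->bus);
 *	if (!domain)
 *		return -ENOMEM;
 *
 *	ret = iommu_attach_device(domain, dev);
 *	if (ret) {
 *		iommu_domain_free(domain);
 *		return ret;
 *	}
 *
 *	... map and unmap on the domain ...
 *
 *	iommu_detach_device(domain, dev);
 *	iommu_domain_free(domain);
 */
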
Yi L Liu4c7c1712019-10-02 12:42:40 -07001993int iommu_cache_invalidate(struct iommu_domain *domain, struct device *dev,
1994 struct iommu_cache_invalidate_info *inv_info)
1995{
1996 if (unlikely(!domain->ops->cache_invalidate))
1997 return -ENODEV;
1998
1999 return domain->ops->cache_invalidate(domain, dev, inv_info);
2000}
2001EXPORT_SYMBOL_GPL(iommu_cache_invalidate);
2002
Jacob Pan808be0a2019-10-02 12:42:43 -07002003int iommu_sva_bind_gpasid(struct iommu_domain *domain,
2004 struct device *dev, struct iommu_gpasid_bind_data *data)
2005{
2006 if (unlikely(!domain->ops->sva_bind_gpasid))
2007 return -ENODEV;
2008
2009 return domain->ops->sva_bind_gpasid(domain, dev, data);
2010}
2011EXPORT_SYMBOL_GPL(iommu_sva_bind_gpasid);
2012
2013int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
2014 ioasid_t pasid)
2015{
2016 if (unlikely(!domain->ops->sva_unbind_gpasid))
2017 return -ENODEV;
2018
2019 return domain->ops->sva_unbind_gpasid(dev, pasid);
2020}
2021EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid);
2022
Joerg Roedel426a2732015-05-28 18:41:30 +02002023static void __iommu_detach_device(struct iommu_domain *domain,
2024 struct device *dev)
Joerg Roedelfc2100e2008-11-26 17:21:24 +01002025{
Baoquan Hee01d1912017-08-09 16:33:40 +08002026 if ((domain->ops->is_attach_deferred != NULL) &&
2027 domain->ops->is_attach_deferred(domain, dev))
2028 return;
2029
Joerg Roedele5aa7f02011-09-06 16:44:29 +02002030 if (unlikely(domain->ops->detach_dev == NULL))
2031 return;
2032
2033 domain->ops->detach_dev(domain, dev);
Shuah Khan69980632013-08-15 11:59:27 -06002034 trace_detach_device_from_domain(dev);
Joerg Roedelfc2100e2008-11-26 17:21:24 +01002035}
Joerg Roedel426a2732015-05-28 18:41:30 +02002036
2037void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
2038{
2039 struct iommu_group *group;
2040
2041 group = iommu_group_get(dev);
Jordan Crouse9ae9df02017-12-20 09:48:36 -07002042 if (!group)
2043 return;
Joerg Roedel426a2732015-05-28 18:41:30 +02002044
2045 mutex_lock(&group->mutex);
2046 if (iommu_group_device_count(group) != 1) {
2047 WARN_ON(1);
2048 goto out_unlock;
2049 }
2050
Joerg Roedele39cb8a2015-05-28 18:41:31 +02002051 __iommu_detach_group(domain, group);
Joerg Roedel426a2732015-05-28 18:41:30 +02002052
2053out_unlock:
2054 mutex_unlock(&group->mutex);
2055 iommu_group_put(group);
2056}
Joerg Roedelfc2100e2008-11-26 17:21:24 +01002057EXPORT_SYMBOL_GPL(iommu_detach_device);
2058
Joerg Roedel2c1296d2015-05-28 18:41:32 +02002059struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
2060{
2061 struct iommu_domain *domain;
2062 struct iommu_group *group;
2063
2064 group = iommu_group_get(dev);
Robin Murphy1464d0b2017-08-17 11:40:08 +01002065 if (!group)
Joerg Roedel2c1296d2015-05-28 18:41:32 +02002066 return NULL;
2067
2068 domain = group->domain;
2069
2070 iommu_group_put(group);
2071
2072 return domain;
2073}
2074EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);
2075
Alex Williamsond72e31c2012-05-30 14:18:53 -06002076/*
Robin Murphy6af588f2018-09-12 16:24:12 +01002077 * Returns the group's default domain for IOMMU_DOMAIN_DMA implementations,
2078 * which already guarantee that the group and its default domain are valid and correct.
2079 */
2080struct iommu_domain *iommu_get_dma_domain(struct device *dev)
2081{
2082 return dev->iommu_group->default_domain;
2083}
2084
2085/*
Rami Rosen35449ad2018-09-18 17:38:49 +03002086 * IOMMU groups are really the natural working unit of the IOMMU, but
Alex Williamsond72e31c2012-05-30 14:18:53 -06002087 * the IOMMU API works on domains and devices. Bridge that gap by
2088 * iterating over the devices in a group. Ideally we'd have a single
2089 * device which represents the requestor ID of the group, but we also
2090 * allow IOMMU drivers to create policy defined minimum sets, where
2091 * the physical hardware may be able to distinguish members, but we
2092 * wish to group them at a higher level (ex. untrusted multi-function
2093 * PCI devices). Thus we attach each device.
2094 */
2095static int iommu_group_do_attach_device(struct device *dev, void *data)
2096{
2097 struct iommu_domain *domain = data;
2098
Joerg Roedel426a2732015-05-28 18:41:30 +02002099 return __iommu_attach_device(domain, dev);
Alex Williamsond72e31c2012-05-30 14:18:53 -06002100}
2101
Joerg Roedele39cb8a2015-05-28 18:41:31 +02002102static int __iommu_attach_group(struct iommu_domain *domain,
2103 struct iommu_group *group)
2104{
2105 int ret;
2106
2107 if (group->default_domain && group->domain != group->default_domain)
2108 return -EBUSY;
2109
2110 ret = __iommu_group_for_each_dev(group, domain,
2111 iommu_group_do_attach_device);
2112 if (ret == 0)
2113 group->domain = domain;
2114
2115 return ret;
Alex Williamsond72e31c2012-05-30 14:18:53 -06002116}
2117
2118int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
2119{
Joerg Roedele39cb8a2015-05-28 18:41:31 +02002120 int ret;
2121
2122 mutex_lock(&group->mutex);
2123 ret = __iommu_attach_group(domain, group);
2124 mutex_unlock(&group->mutex);
2125
2126 return ret;
Alex Williamsond72e31c2012-05-30 14:18:53 -06002127}
2128EXPORT_SYMBOL_GPL(iommu_attach_group);
2129
2130static int iommu_group_do_detach_device(struct device *dev, void *data)
2131{
2132 struct iommu_domain *domain = data;
2133
Joerg Roedel426a2732015-05-28 18:41:30 +02002134 __iommu_detach_device(domain, dev);
Alex Williamsond72e31c2012-05-30 14:18:53 -06002135
2136 return 0;
2137}
2138
Joerg Roedele39cb8a2015-05-28 18:41:31 +02002139static void __iommu_detach_group(struct iommu_domain *domain,
2140 struct iommu_group *group)
2141{
2142 int ret;
2143
2144 if (!group->default_domain) {
2145 __iommu_group_for_each_dev(group, domain,
2146 iommu_group_do_detach_device);
2147 group->domain = NULL;
2148 return;
2149 }
2150
2151 if (group->domain == group->default_domain)
2152 return;
2153
2154 /* Detach by re-attaching to the default domain */
2155 ret = __iommu_group_for_each_dev(group, group->default_domain,
2156 iommu_group_do_attach_device);
2157 if (ret != 0)
2158 WARN_ON(1);
2159 else
2160 group->domain = group->default_domain;
2161}
2162
Alex Williamsond72e31c2012-05-30 14:18:53 -06002163void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
2164{
Joerg Roedele39cb8a2015-05-28 18:41:31 +02002165 mutex_lock(&group->mutex);
2166 __iommu_detach_group(domain, group);
2167 mutex_unlock(&group->mutex);
Alex Williamsond72e31c2012-05-30 14:18:53 -06002168}
2169EXPORT_SYMBOL_GPL(iommu_detach_group);
2170
Varun Sethibb5547a2013-03-29 01:23:58 +05302171phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
Joerg Roedelfc2100e2008-11-26 17:21:24 +01002172{
Joerg Roedele5aa7f02011-09-06 16:44:29 +02002173 if (unlikely(domain->ops->iova_to_phys == NULL))
2174 return 0;
2175
2176 return domain->ops->iova_to_phys(domain, iova);
Joerg Roedelfc2100e2008-11-26 17:21:24 +01002177}
2178EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
Sheng Yangdbb9fd82009-03-18 15:33:06 +08002179
Alex Williamsonbd139692013-06-17 19:57:34 -06002180static size_t iommu_pgsize(struct iommu_domain *domain,
2181 unsigned long addr_merge, size_t size)
2182{
2183 unsigned int pgsize_idx;
2184 size_t pgsize;
2185
2186 /* Max page size that still fits into 'size' */
2187 pgsize_idx = __fls(size);
2188
2189 /* need to consider alignment requirements ? */
2190 if (likely(addr_merge)) {
2191 /* Max page size allowed by address */
2192 unsigned int align_pgsize_idx = __ffs(addr_merge);
2193 pgsize_idx = min(pgsize_idx, align_pgsize_idx);
2194 }
2195
2196 /* build a mask of acceptable page sizes */
2197 pgsize = (1UL << (pgsize_idx + 1)) - 1;
2198
2199 /* throw away page sizes not supported by the hardware */
Robin Murphyd16e0fa2016-04-07 18:42:06 +01002200 pgsize &= domain->pgsize_bitmap;
Alex Williamsonbd139692013-06-17 19:57:34 -06002201
2202 /* make sure we're still sane */
2203 BUG_ON(!pgsize);
2204
2205 /* pick the biggest page */
2206 pgsize_idx = __fls(pgsize);
2207 pgsize = 1UL << pgsize_idx;
2208
2209 return pgsize;
2210}
2211
Tom Murphy781ca2d2019-09-08 09:56:38 -07002212static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
2213 phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
Joerg Roedelcefc53c2010-01-08 13:35:09 +01002214{
Dmitry Osipenko1d7ae532018-12-12 23:38:47 +03002215 const struct iommu_ops *ops = domain->ops;
Ohad Ben-Cohen7d3002c2011-11-10 11:32:26 +02002216 unsigned long orig_iova = iova;
2217 unsigned int min_pagesz;
2218 size_t orig_size = size;
Yoshihiro Shimoda06bfcaa2016-02-10 10:18:04 +09002219 phys_addr_t orig_paddr = paddr;
Ohad Ben-Cohen7d3002c2011-11-10 11:32:26 +02002220 int ret = 0;
Joerg Roedelcefc53c2010-01-08 13:35:09 +01002221
Dmitry Osipenko1d7ae532018-12-12 23:38:47 +03002222 if (unlikely(ops->map == NULL ||
Robin Murphyd16e0fa2016-04-07 18:42:06 +01002223 domain->pgsize_bitmap == 0UL))
Joerg Roedele5aa7f02011-09-06 16:44:29 +02002224 return -ENODEV;
Joerg Roedelcefc53c2010-01-08 13:35:09 +01002225
Joerg Roedela10315e2015-03-26 13:43:06 +01002226 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
2227 return -EINVAL;
2228
Ohad Ben-Cohen7d3002c2011-11-10 11:32:26 +02002229 /* find out the minimum page size supported */
Robin Murphyd16e0fa2016-04-07 18:42:06 +01002230 min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
Joerg Roedelcefc53c2010-01-08 13:35:09 +01002231
Ohad Ben-Cohen7d3002c2011-11-10 11:32:26 +02002232 /*
2233 * both the virtual address and the physical one, as well as
2234 * the size of the mapping, must be aligned (at least) to the
2235 * size of the smallest page supported by the hardware
2236 */
2237 if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
Fabio Estevamabedb042013-08-22 10:25:42 -03002238 pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
Joe Perches6197ca82013-06-23 12:29:04 -07002239 iova, &paddr, size, min_pagesz);
Ohad Ben-Cohen7d3002c2011-11-10 11:32:26 +02002240 return -EINVAL;
2241 }
Joerg Roedelcefc53c2010-01-08 13:35:09 +01002242
Fabio Estevamabedb042013-08-22 10:25:42 -03002243 pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
Ohad Ben-Cohen7d3002c2011-11-10 11:32:26 +02002244
2245 while (size) {
Alex Williamsonbd139692013-06-17 19:57:34 -06002246 size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
Ohad Ben-Cohen7d3002c2011-11-10 11:32:26 +02002247
Fabio Estevamabedb042013-08-22 10:25:42 -03002248 pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
Joe Perches6197ca82013-06-23 12:29:04 -07002249 iova, &paddr, pgsize);
Tom Murphy781ca2d2019-09-08 09:56:38 -07002250 ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
Ohad Ben-Cohen7d3002c2011-11-10 11:32:26 +02002251
Ohad Ben-Cohen7d3002c2011-11-10 11:32:26 +02002252 if (ret)
2253 break;
2254
2255 iova += pgsize;
2256 paddr += pgsize;
2257 size -= pgsize;
2258 }
2259
Dmitry Osipenko1d7ae532018-12-12 23:38:47 +03002260 if (ops->iotlb_sync_map)
2261 ops->iotlb_sync_map(domain);
2262
Ohad Ben-Cohen7d3002c2011-11-10 11:32:26 +02002263 /* unroll mapping in case something went wrong */
2264 if (ret)
2265 iommu_unmap(domain, orig_iova, orig_size - size);
Shuah Khane0be7c82013-08-15 11:59:28 -06002266 else
Yoshihiro Shimoda06bfcaa2016-02-10 10:18:04 +09002267 trace_map(orig_iova, orig_paddr, orig_size);
Ohad Ben-Cohen7d3002c2011-11-10 11:32:26 +02002268
2269 return ret;
Joerg Roedelcefc53c2010-01-08 13:35:09 +01002270}
Tom Murphy781ca2d2019-09-08 09:56:38 -07002271
2272int iommu_map(struct iommu_domain *domain, unsigned long iova,
2273 phys_addr_t paddr, size_t size, int prot)
2274{
2275 might_sleep();
2276 return __iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
2277}
Joerg Roedelcefc53c2010-01-08 13:35:09 +01002278EXPORT_SYMBOL_GPL(iommu_map);
2279
Tom Murphy781ca2d2019-09-08 09:56:38 -07002280int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
2281 phys_addr_t paddr, size_t size, int prot)
2282{
2283 return __iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
2284}
2285EXPORT_SYMBOL_GPL(iommu_map_atomic);
2286
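/*
 * Illustrative sketch: map 2 MiB of physically contiguous memory with
 * read/write permission. "buf_phys" is a placeholder for a suitably aligned
 * physical address; both the IOVA and the physical address must be aligned
 * to a page size supported by the domain.
 *
 *	int ret;
 *
 *	ret = iommu_map(domain, 0x200000, buf_phys, SZ_2M,
 *			IOMMU_READ | IOMMU_WRITE);
 *	if (ret)
 *		dev_err(dev, "iommu_map failed: %d\n", ret);
 */
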
Joerg Roedeladd02cfd2017-08-23 15:50:04 +02002287static size_t __iommu_unmap(struct iommu_domain *domain,
2288 unsigned long iova, size_t size,
Will Deacona7d20dc2019-07-02 16:43:48 +01002289 struct iommu_iotlb_gather *iotlb_gather)
Joerg Roedelcefc53c2010-01-08 13:35:09 +01002290{
Joerg Roedeladd02cfd2017-08-23 15:50:04 +02002291 const struct iommu_ops *ops = domain->ops;
Ohad Ben-Cohen7d3002c2011-11-10 11:32:26 +02002292 size_t unmapped_page, unmapped = 0;
Shuah Khan6fd492f2015-01-16 16:47:19 -07002293 unsigned long orig_iova = iova;
Joerg Roedeladd02cfd2017-08-23 15:50:04 +02002294 unsigned int min_pagesz;
Joerg Roedelcefc53c2010-01-08 13:35:09 +01002295
Joerg Roedeladd02cfd2017-08-23 15:50:04 +02002296 if (unlikely(ops->unmap == NULL ||
Robin Murphyd16e0fa2016-04-07 18:42:06 +01002297 domain->pgsize_bitmap == 0UL))
Suravee Suthikulpanitc5611a82018-02-05 05:45:53 -05002298 return 0;
Joerg Roedelcefc53c2010-01-08 13:35:09 +01002299
Joerg Roedela10315e2015-03-26 13:43:06 +01002300 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
Suravee Suthikulpanitc5611a82018-02-05 05:45:53 -05002301 return 0;
Joerg Roedela10315e2015-03-26 13:43:06 +01002302
Ohad Ben-Cohen7d3002c2011-11-10 11:32:26 +02002303 /* find out the minimum page size supported */
Robin Murphyd16e0fa2016-04-07 18:42:06 +01002304 min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
Joerg Roedelcefc53c2010-01-08 13:35:09 +01002305
Ohad Ben-Cohen7d3002c2011-11-10 11:32:26 +02002306 /*
2307 * The virtual address, as well as the size of the mapping, must be
2308 * aligned (at least) to the size of the smallest page supported
2309 * by the hardware
2310 */
2311 if (!IS_ALIGNED(iova | size, min_pagesz)) {
Joe Perches6197ca82013-06-23 12:29:04 -07002312 pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
2313 iova, size, min_pagesz);
Suravee Suthikulpanitc5611a82018-02-05 05:45:53 -05002314 return 0;
Ohad Ben-Cohen7d3002c2011-11-10 11:32:26 +02002315 }
Joerg Roedelcefc53c2010-01-08 13:35:09 +01002316
Joe Perches6197ca82013-06-23 12:29:04 -07002317 pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02002318
Ohad Ben-Cohen7d3002c2011-11-10 11:32:26 +02002319 /*
2320 * Keep iterating until we either unmap 'size' bytes (or more)
2321 * or we hit an area that isn't mapped.
2322 */
2323 while (unmapped < size) {
Alex Williamsonbd139692013-06-17 19:57:34 -06002324 size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
Ohad Ben-Cohen7d3002c2011-11-10 11:32:26 +02002325
Will Deacon56f8af52019-07-02 16:44:06 +01002326 unmapped_page = ops->unmap(domain, iova, pgsize, iotlb_gather);
Ohad Ben-Cohen7d3002c2011-11-10 11:32:26 +02002327 if (!unmapped_page)
2328 break;
2329
Joe Perches6197ca82013-06-23 12:29:04 -07002330 pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
2331 iova, unmapped_page);
Ohad Ben-Cohen7d3002c2011-11-10 11:32:26 +02002332
2333 iova += unmapped_page;
2334 unmapped += unmapped_page;
2335 }
2336
Shuah Khandb8614d2015-01-16 20:53:17 -07002337 trace_unmap(orig_iova, size, unmapped);
Ohad Ben-Cohen7d3002c2011-11-10 11:32:26 +02002338 return unmapped;
Joerg Roedelcefc53c2010-01-08 13:35:09 +01002339}
Joerg Roedeladd02cfd2017-08-23 15:50:04 +02002340
2341size_t iommu_unmap(struct iommu_domain *domain,
2342 unsigned long iova, size_t size)
2343{
Will Deacona7d20dc2019-07-02 16:43:48 +01002344 struct iommu_iotlb_gather iotlb_gather;
2345 size_t ret;
2346
2347 iommu_iotlb_gather_init(&iotlb_gather);
2348 ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
2349 iommu_tlb_sync(domain, &iotlb_gather);
2350
2351 return ret;
Joerg Roedeladd02cfd2017-08-23 15:50:04 +02002352}
Joerg Roedelcefc53c2010-01-08 13:35:09 +01002353EXPORT_SYMBOL_GPL(iommu_unmap);
Alex Williamson14604322011-10-21 15:56:05 -04002354
Joerg Roedeladd02cfd2017-08-23 15:50:04 +02002355size_t iommu_unmap_fast(struct iommu_domain *domain,
Will Deacona7d20dc2019-07-02 16:43:48 +01002356 unsigned long iova, size_t size,
2357 struct iommu_iotlb_gather *iotlb_gather)
Joerg Roedeladd02cfd2017-08-23 15:50:04 +02002358{
Will Deacona7d20dc2019-07-02 16:43:48 +01002359 return __iommu_unmap(domain, iova, size, iotlb_gather);
Joerg Roedeladd02cfd2017-08-23 15:50:04 +02002360}
2361EXPORT_SYMBOL_GPL(iommu_unmap_fast);
2362
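/*
 * Illustrative sketch: callers unmapping many ranges can batch the TLB
 * maintenance with iommu_unmap_fast() and a single sync at the end, which is
 * what iommu_unmap() does internally for one range. "range" and "nr_ranges"
 * are placeholders for the caller's own bookkeeping.
 *
 *	struct iommu_iotlb_gather gather;
 *	unsigned int i;
 *
 *	iommu_iotlb_gather_init(&gather);
 *	for (i = 0; i < nr_ranges; i++)
 *		iommu_unmap_fast(domain, range[i].iova, range[i].size,
 *				 &gather);
 *	iommu_tlb_sync(domain, &gather);
 */
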
Tom Murphy781ca2d2019-09-08 09:56:38 -07002363static size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
2364 struct scatterlist *sg, unsigned int nents, int prot,
2365 gfp_t gfp)
Olav Haugan315786e2014-10-25 09:55:16 -07002366{
Robin Murphy5d95f402018-10-11 16:56:42 +01002367 size_t len = 0, mapped = 0;
2368 phys_addr_t start;
2369 unsigned int i = 0;
Joerg Roedel38ec0102014-11-04 14:53:51 +01002370 int ret;
Olav Haugan315786e2014-10-25 09:55:16 -07002371
Robin Murphy5d95f402018-10-11 16:56:42 +01002372 while (i <= nents) {
2373 phys_addr_t s_phys = sg_phys(sg);
Olav Haugan315786e2014-10-25 09:55:16 -07002374
Robin Murphy5d95f402018-10-11 16:56:42 +01002375 if (len && s_phys != start + len) {
Tom Murphy781ca2d2019-09-08 09:56:38 -07002376 ret = __iommu_map(domain, iova + mapped, start,
2377 len, prot, gfp);
2378
Robin Murphy5d95f402018-10-11 16:56:42 +01002379 if (ret)
2380 goto out_err;
Robin Murphy18f23402014-11-25 17:50:55 +00002381
Robin Murphy5d95f402018-10-11 16:56:42 +01002382 mapped += len;
2383 len = 0;
2384 }
Robin Murphy18f23402014-11-25 17:50:55 +00002385
Robin Murphy5d95f402018-10-11 16:56:42 +01002386 if (len) {
2387 len += sg->length;
2388 } else {
2389 len = sg->length;
2390 start = s_phys;
2391 }
Joerg Roedel38ec0102014-11-04 14:53:51 +01002392
Robin Murphy5d95f402018-10-11 16:56:42 +01002393 if (++i < nents)
2394 sg = sg_next(sg);
Olav Haugan315786e2014-10-25 09:55:16 -07002395 }
2396
2397 return mapped;
Joerg Roedel38ec0102014-11-04 14:53:51 +01002398
2399out_err:
2400 /* undo mappings already done */
2401 iommu_unmap(domain, iova, mapped);
2402
2403 return 0;
Olav Haugan315786e2014-10-25 09:55:16 -07002405}
Tom Murphy781ca2d2019-09-08 09:56:38 -07002406
2407size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
2408 struct scatterlist *sg, unsigned int nents, int prot)
2409{
2410 might_sleep();
2411 return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL);
2412}
Christoph Hellwigd88e61f2018-07-30 09:36:26 +02002413EXPORT_SYMBOL_GPL(iommu_map_sg);
Joerg Roedeld7787d52013-01-29 14:26:20 +01002414
Tom Murphy781ca2d2019-09-08 09:56:38 -07002415size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
2416 struct scatterlist *sg, unsigned int nents, int prot)
2417{
2418 return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
2419}
2420EXPORT_SYMBOL_GPL(iommu_map_sg_atomic);
2421
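/*
 * Illustrative sketch, assuming "sgt" is an already populated sg_table: map
 * the scatterlist contiguously starting at "iova". The return value is the
 * number of bytes mapped, or 0 on failure.
 *
 *	size_t mapped;
 *
 *	mapped = iommu_map_sg(domain, iova, sgt->sgl, sgt->nents,
 *			      IOMMU_READ | IOMMU_WRITE);
 *	if (!mapped)
 *		return -EINVAL;
 */
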
Joerg Roedeld7787d52013-01-29 14:26:20 +01002422int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
Varun Sethi80f97f02013-03-29 01:24:00 +05302423 phys_addr_t paddr, u64 size, int prot)
Joerg Roedeld7787d52013-01-29 14:26:20 +01002424{
2425 if (unlikely(domain->ops->domain_window_enable == NULL))
2426 return -ENODEV;
2427
Varun Sethi80f97f02013-03-29 01:24:00 +05302428 return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
2429 prot);
Joerg Roedeld7787d52013-01-29 14:26:20 +01002430}
2431EXPORT_SYMBOL_GPL(iommu_domain_window_enable);
2432
2433void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
2434{
2435 if (unlikely(domain->ops->domain_window_disable == NULL))
2436 return;
2437
2438 return domain->ops->domain_window_disable(domain, wnd_nr);
2439}
2440EXPORT_SYMBOL_GPL(iommu_domain_window_disable);
2441
Joerg Roedel207c6e32017-04-26 15:39:28 +02002442/**
2443 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
2444 * @domain: the iommu domain where the fault has happened
2445 * @dev: the device where the fault has happened
2446 * @iova: the faulting address
2447 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
2448 *
2449 * This function should be called by the low-level IOMMU implementations
2450 * whenever IOMMU faults happen, to allow high-level users, that are
2451 * interested in such events, to know about them.
2452 *
2453 * This event may be useful for several possible use cases:
2454 * - mere logging of the event
2455 * - dynamic TLB/PTE loading
2456 * - restarting the faulting device, if required
2457 *
2458 * Returns 0 on success and an appropriate error code otherwise (if dynamic
2459 * PTE/TLB loading will one day be supported, implementations will be able
2460 * to tell whether it succeeded or not according to this return value).
2461 *
2462 * Specifically, -ENOSYS is returned if a fault handler isn't installed
2463 * (though fault handlers can also return -ENOSYS, in case they want to
2464 * elicit the default behavior of the IOMMU drivers).
2465 */
2466int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
2467 unsigned long iova, int flags)
2468{
2469 int ret = -ENOSYS;
2470
2471 /*
2472 * if upper layers showed interest and installed a fault handler,
2473 * invoke it.
2474 */
2475 if (domain->handler)
2476 ret = domain->handler(domain, dev, iova, flags,
2477 domain->handler_token);
2478
2479 trace_io_page_fault(dev, iova, flags);
2480 return ret;
2481}
2482EXPORT_SYMBOL_GPL(report_iommu_fault);
2483
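/*
 * Illustrative sketch of how a low-level driver feeds this path; the "my_*"
 * structure, register helpers and field names are hypothetical. The driver
 * reports the fault from its interrupt handler and only applies its default
 * action when no installed handler consumed the event:
 *
 *	static irqreturn_t my_iommu_fault_irq(int irq, void *cookie)
 *	{
 *		struct my_iommu_domain *md = cookie;
 *		unsigned long iova = my_read_fault_addr(md);
 *		int flags = my_fault_is_write(md) ? IOMMU_FAULT_WRITE
 *						  : IOMMU_FAULT_READ;
 *
 *		if (report_iommu_fault(&md->domain, md->dev, iova, flags))
 *			dev_err_ratelimited(md->dev,
 *					    "unhandled fault at 0x%lx\n",
 *					    iova);
 *
 *		return IRQ_HANDLED;
 *	}
 */
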
Alex Williamsond72e31c2012-05-30 14:18:53 -06002484static int __init iommu_init(void)
Alex Williamson14604322011-10-21 15:56:05 -04002485{
Alex Williamsond72e31c2012-05-30 14:18:53 -06002486 iommu_group_kset = kset_create_and_add("iommu_groups",
2487 NULL, kernel_kobj);
Alex Williamsond72e31c2012-05-30 14:18:53 -06002488 BUG_ON(!iommu_group_kset);
2489
Gary R Hookbad614b2018-06-12 16:41:21 -05002490 iommu_debugfs_setup();
2491
Alex Williamsond72e31c2012-05-30 14:18:53 -06002492 return 0;
Alex Williamson14604322011-10-21 15:56:05 -04002493}
Marek Szyprowskid7ef9992015-05-19 15:20:23 +02002494core_initcall(iommu_init);
Joerg Roedel0cd76dd2012-01-26 19:40:52 +01002495
2496int iommu_domain_get_attr(struct iommu_domain *domain,
2497 enum iommu_attr attr, void *data)
2498{
Joerg Roedel0ff64f82012-01-26 19:40:53 +01002499 struct iommu_domain_geometry *geometry;
Joerg Roedeld2e12162013-01-29 13:49:04 +01002500 bool *paging;
Joerg Roedel0ff64f82012-01-26 19:40:53 +01002501 int ret = 0;
Joerg Roedel0cd76dd2012-01-26 19:40:52 +01002502
Joerg Roedel0ff64f82012-01-26 19:40:53 +01002503 switch (attr) {
2504 case DOMAIN_ATTR_GEOMETRY:
2505 geometry = data;
2506 *geometry = domain->geometry;
2507
2508 break;
Joerg Roedeld2e12162013-01-29 13:49:04 +01002509 case DOMAIN_ATTR_PAGING:
2510 paging = data;
Robin Murphyd16e0fa2016-04-07 18:42:06 +01002511 *paging = (domain->pgsize_bitmap != 0UL);
Joerg Roedeld2e12162013-01-29 13:49:04 +01002512 break;
Joerg Roedel0ff64f82012-01-26 19:40:53 +01002513 default:
2514 if (!domain->ops->domain_get_attr)
2515 return -EINVAL;
2516
2517 ret = domain->ops->domain_get_attr(domain, attr, data);
2518 }
2519
2520 return ret;
Joerg Roedel0cd76dd2012-01-26 19:40:52 +01002521}
2522EXPORT_SYMBOL_GPL(iommu_domain_get_attr);
2523
2524int iommu_domain_set_attr(struct iommu_domain *domain,
2525 enum iommu_attr attr, void *data)
2526{
Joerg Roedel69356712013-02-04 14:00:01 +01002527 int ret = 0;
Joerg Roedel0cd76dd2012-01-26 19:40:52 +01002528
Joerg Roedel69356712013-02-04 14:00:01 +01002529 switch (attr) {
Joerg Roedel69356712013-02-04 14:00:01 +01002530 default:
2531 if (domain->ops->domain_set_attr == NULL)
2532 return -EINVAL;
2533
2534 ret = domain->ops->domain_set_attr(domain, attr, data);
2535 }
2536
2537 return ret;
Joerg Roedel0cd76dd2012-01-26 19:40:52 +01002538}
2539EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
Joerg Roedela1015c22015-05-28 18:41:33 +02002540
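/*
 * Illustrative sketch: query the domain aperture through the generic
 * DOMAIN_ATTR_GEOMETRY attribute handled above before picking IOVAs.
 *
 *	struct iommu_domain_geometry geo;
 *
 *	if (!iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo) &&
 *	    geo.force_aperture)
 *		pr_info("usable IOVA range: %pad..%pad\n",
 *			&geo.aperture_start, &geo.aperture_end);
 */
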
Eric Augere5b52342017-01-19 20:57:47 +00002541void iommu_get_resv_regions(struct device *dev, struct list_head *list)
Joerg Roedela1015c22015-05-28 18:41:33 +02002542{
2543 const struct iommu_ops *ops = dev->bus->iommu_ops;
2544
Eric Augere5b52342017-01-19 20:57:47 +00002545 if (ops && ops->get_resv_regions)
2546 ops->get_resv_regions(dev, list);
Joerg Roedela1015c22015-05-28 18:41:33 +02002547}
2548
Eric Augere5b52342017-01-19 20:57:47 +00002549void iommu_put_resv_regions(struct device *dev, struct list_head *list)
Joerg Roedela1015c22015-05-28 18:41:33 +02002550{
2551 const struct iommu_ops *ops = dev->bus->iommu_ops;
2552
Eric Augere5b52342017-01-19 20:57:47 +00002553 if (ops && ops->put_resv_regions)
2554 ops->put_resv_regions(dev, list);
Joerg Roedela1015c22015-05-28 18:41:33 +02002555}
Joerg Roedeld290f1e2015-05-28 18:41:36 +02002556
Thierry Redingf9f69712019-12-18 14:42:01 +01002557/**
2558 * generic_iommu_put_resv_regions - Reserved region driver helper
2559 * @dev: device for which to free reserved regions
2560 * @list: reserved region list for device
2561 *
2562 * IOMMU drivers can use this to implement their .put_resv_regions() callback
2563 * for simple reservations. Memory allocated for each reserved region will be
2564 * freed. If an IOMMU driver allocates additional resources per region, it
2565 * must implement a custom callback.
2566 */
2567void generic_iommu_put_resv_regions(struct device *dev, struct list_head *list)
2568{
2569 struct iommu_resv_region *entry, *next;
2570
2571 list_for_each_entry_safe(entry, next, list, list)
2572 kfree(entry);
2573}
2574EXPORT_SYMBOL(generic_iommu_put_resv_regions);
2575
Eric Auger2b20cbb2017-01-19 20:57:49 +00002576struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
Robin Murphy9d3a4de2017-03-16 17:00:16 +00002577 size_t length, int prot,
2578 enum iommu_resv_type type)
Eric Auger2b20cbb2017-01-19 20:57:49 +00002579{
2580 struct iommu_resv_region *region;
2581
2582 region = kzalloc(sizeof(*region), GFP_KERNEL);
2583 if (!region)
2584 return NULL;
2585
2586 INIT_LIST_HEAD(&region->list);
2587 region->start = start;
2588 region->length = length;
2589 region->prot = prot;
2590 region->type = type;
2591 return region;
Joerg Roedelfc2100e2008-11-26 17:21:24 +01002592}
Will Deacona7ba5c32019-12-19 12:03:37 +00002593EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);
Joerg Roedeld290f1e2015-05-28 18:41:36 +02002594
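/*
 * Illustrative sketch of a driver's ->get_resv_regions() callback; the MSI
 * window address is a made-up example. The matching ->put_resv_regions() can
 * simply be generic_iommu_put_resv_regions() when nothing beyond the region
 * structs was allocated.
 *
 *	static void my_iommu_get_resv_regions(struct device *dev,
 *					      struct list_head *head)
 *	{
 *		struct iommu_resv_region *region;
 *
 *		region = iommu_alloc_resv_region(0x08000000, SZ_1M,
 *						 IOMMU_WRITE | IOMMU_NOEXEC |
 *						 IOMMU_MMIO,
 *						 IOMMU_RESV_SW_MSI);
 *		if (region)
 *			list_add_tail(&region->list, head);
 *	}
 */
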
Lu Baolu7423e012019-05-25 13:41:22 +08002595static int
2596request_default_domain_for_dev(struct device *dev, unsigned long type)
Joerg Roedeld290f1e2015-05-28 18:41:36 +02002597{
Lu Baolu7423e012019-05-25 13:41:22 +08002598 struct iommu_domain *domain;
Joerg Roedeld290f1e2015-05-28 18:41:36 +02002599 struct iommu_group *group;
2600 int ret;
2601
2602 /* Device must already be in a group before calling this function */
Lu Baolu57274ea2019-05-21 15:27:35 +08002603 group = iommu_group_get(dev);
2604 if (!group)
2605 return -EINVAL;
Joerg Roedeld290f1e2015-05-28 18:41:36 +02002606
2607 mutex_lock(&group->mutex);
2608
Joerg Roedeld290f1e2015-05-28 18:41:36 +02002609 ret = 0;
Lu Baolu7423e012019-05-25 13:41:22 +08002610 if (group->default_domain && group->default_domain->type == type)
Joerg Roedeld290f1e2015-05-28 18:41:36 +02002611 goto out;
2612
2613 /* Don't change mappings of existing devices */
2614 ret = -EBUSY;
2615 if (iommu_group_device_count(group) != 1)
2616 goto out;
2617
Joerg Roedeld290f1e2015-05-28 18:41:36 +02002618 ret = -ENOMEM;
Lu Baolu7423e012019-05-25 13:41:22 +08002619 domain = __iommu_domain_alloc(dev->bus, type);
2620 if (!domain)
Joerg Roedeld290f1e2015-05-28 18:41:36 +02002621 goto out;
2622
2623 /* Attach the device to the domain */
Lu Baolu7423e012019-05-25 13:41:22 +08002624 ret = __iommu_attach_group(domain, group);
Joerg Roedeld290f1e2015-05-28 18:41:36 +02002625 if (ret) {
Lu Baolu7423e012019-05-25 13:41:22 +08002626 iommu_domain_free(domain);
Joerg Roedeld290f1e2015-05-28 18:41:36 +02002627 goto out;
2628 }
2629
Tom Murphyd127bc92019-08-26 05:48:21 +01002630 /* Make the domain the default for this group */
Joerg Roedeld290f1e2015-05-28 18:41:36 +02002631 if (group->default_domain)
2632 iommu_domain_free(group->default_domain);
Lu Baolu7423e012019-05-25 13:41:22 +08002633 group->default_domain = domain;
Joerg Roedeld290f1e2015-05-28 18:41:36 +02002634
Jerry Snitselaard3602112019-12-10 11:56:06 -07002635 iommu_group_create_direct_mappings(group, dev);
2636
Lu Baolu7423e012019-05-25 13:41:22 +08002637 dev_info(dev, "Using iommu %s mapping\n",
2638 type == IOMMU_DOMAIN_DMA ? "dma" : "direct");
Joerg Roedeld290f1e2015-05-28 18:41:36 +02002639
2640 ret = 0;
2641out:
2642 mutex_unlock(&group->mutex);
2643 iommu_group_put(group);
2644
2645 return ret;
2646}
Robin Murphy57f98d22016-09-13 10:54:14 +01002647
Lu Baolu7423e012019-05-25 13:41:22 +08002648/* Request that a device is direct mapped by the IOMMU */
2649int iommu_request_dm_for_dev(struct device *dev)
2650{
2651 return request_default_domain_for_dev(dev, IOMMU_DOMAIN_IDENTITY);
2652}
2653
2654/* Request that a device can't be direct mapped by the IOMMU */
2655int iommu_request_dma_domain_for_dev(struct device *dev)
2656{
2657 return request_default_domain_for_dev(dev, IOMMU_DOMAIN_DMA);
2658}
2659
Joerg Roedel8a699612019-08-19 15:22:47 +02002660void iommu_set_default_passthrough(bool cmd_line)
2661{
2662 if (cmd_line)
2663 iommu_set_cmd_line_dma_api();
2664
2665 iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
2666}
2667
2668void iommu_set_default_translated(bool cmd_line)
2669{
2670 if (cmd_line)
2671 iommu_set_cmd_line_dma_api();
2672
2673 iommu_def_domain_type = IOMMU_DOMAIN_DMA;
2674}
2675
2676bool iommu_default_passthrough(void)
2677{
2678 return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY;
2679}
2680EXPORT_SYMBOL_GPL(iommu_default_passthrough);
2681
Joerg Roedel534766d2017-01-31 16:58:42 +01002682const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
Lorenzo Pieralisie4f10ff2016-11-21 10:01:36 +00002683{
Lorenzo Pieralisie4f10ff2016-11-21 10:01:36 +00002684 const struct iommu_ops *ops = NULL;
Joerg Roedeld0f6f582017-02-02 12:19:12 +01002685 struct iommu_device *iommu;
Lorenzo Pieralisie4f10ff2016-11-21 10:01:36 +00002686
Joerg Roedeld0f6f582017-02-02 12:19:12 +01002687 spin_lock(&iommu_device_lock);
2688 list_for_each_entry(iommu, &iommu_device_list, list)
2689 if (iommu->fwnode == fwnode) {
2690 ops = iommu->ops;
Lorenzo Pieralisie4f10ff2016-11-21 10:01:36 +00002691 break;
2692 }
Joerg Roedeld0f6f582017-02-02 12:19:12 +01002693 spin_unlock(&iommu_device_lock);
Lorenzo Pieralisie4f10ff2016-11-21 10:01:36 +00002694 return ops;
2695}
2696
Robin Murphy57f98d22016-09-13 10:54:14 +01002697int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
2698 const struct iommu_ops *ops)
2699{
Joerg Roedelb4ef7252018-11-28 13:35:24 +01002700 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphy57f98d22016-09-13 10:54:14 +01002701
2702 if (fwspec)
2703 return ops == fwspec->ops ? 0 : -EINVAL;
2704
Joerg Roedel72acd9d2020-03-26 16:08:31 +01002705 if (!dev_iommu_get(dev))
2706 return -ENOMEM;
2707
Robin Murphy098accf2020-02-13 14:00:21 +00002708 /* Preallocate for the overwhelmingly common case of 1 ID */
2709 fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL);
Robin Murphy57f98d22016-09-13 10:54:14 +01002710 if (!fwspec)
2711 return -ENOMEM;
2712
2713 of_node_get(to_of_node(iommu_fwnode));
2714 fwspec->iommu_fwnode = iommu_fwnode;
2715 fwspec->ops = ops;
Joerg Roedelb4ef7252018-11-28 13:35:24 +01002716 dev_iommu_fwspec_set(dev, fwspec);
Robin Murphy57f98d22016-09-13 10:54:14 +01002717 return 0;
2718}
2719EXPORT_SYMBOL_GPL(iommu_fwspec_init);
2720
2721void iommu_fwspec_free(struct device *dev)
2722{
Joerg Roedelb4ef7252018-11-28 13:35:24 +01002723 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphy57f98d22016-09-13 10:54:14 +01002724
2725 if (fwspec) {
2726 fwnode_handle_put(fwspec->iommu_fwnode);
2727 kfree(fwspec);
Joerg Roedelb4ef7252018-11-28 13:35:24 +01002728 dev_iommu_fwspec_set(dev, NULL);
Robin Murphy57f98d22016-09-13 10:54:14 +01002729 }
2730}
2731EXPORT_SYMBOL_GPL(iommu_fwspec_free);
2732
2733int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
2734{
Joerg Roedelb4ef7252018-11-28 13:35:24 +01002735 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Robin Murphy098accf2020-02-13 14:00:21 +00002736 int i, new_num;
Robin Murphy57f98d22016-09-13 10:54:14 +01002737
2738 if (!fwspec)
2739 return -EINVAL;
2740
Robin Murphy098accf2020-02-13 14:00:21 +00002741 new_num = fwspec->num_ids + num_ids;
2742 if (new_num > 1) {
2743 fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num),
2744 GFP_KERNEL);
Robin Murphy57f98d22016-09-13 10:54:14 +01002745 if (!fwspec)
2746 return -ENOMEM;
Zhen Lei909111b2017-02-03 17:35:02 +08002747
Joerg Roedelb4ef7252018-11-28 13:35:24 +01002748 dev_iommu_fwspec_set(dev, fwspec);
Robin Murphy57f98d22016-09-13 10:54:14 +01002749 }
2750
2751 for (i = 0; i < num_ids; i++)
2752 fwspec->ids[fwspec->num_ids + i] = ids[i];
2753
Robin Murphy098accf2020-02-13 14:00:21 +00002754 fwspec->num_ids = new_num;
Robin Murphy57f98d22016-09-13 10:54:14 +01002755 return 0;
2756}
2757EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
Lu Baolua3a19592019-03-25 09:30:28 +08002758
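/*
 * Illustrative sketch of the firmware glue that builds a fwspec; "iommu_dev"
 * and "sid" are placeholders for the parsed IOMMU instance and stream ID.
 * The OF/ACPI code performs the equivalent steps when translating a device's
 * IOMMU specifier:
 *
 *	ret = iommu_fwspec_init(dev, iommu_dev->fwnode, ops);
 *	if (!ret)
 *		ret = iommu_fwspec_add_ids(dev, &sid, 1);
 */
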
2759/*
2760 * Per device IOMMU features.
2761 */
2762bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat)
2763{
2764 const struct iommu_ops *ops = dev->bus->iommu_ops;
2765
2766 if (ops && ops->dev_has_feat)
2767 return ops->dev_has_feat(dev, feat);
2768
2769 return false;
2770}
2771EXPORT_SYMBOL_GPL(iommu_dev_has_feature);
2772
2773int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
2774{
2775 const struct iommu_ops *ops = dev->bus->iommu_ops;
2776
2777 if (ops && ops->dev_enable_feat)
2778 return ops->dev_enable_feat(dev, feat);
2779
2780 return -ENODEV;
2781}
2782EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
2783
2784/*
2785 * The device drivers should do the necessary cleanups before calling this.
2786 * For example, before disabling the aux-domain feature, the device driver
2787 * should detach all aux-domains. Otherwise, this will return -EBUSY.
2788 */
2789int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
2790{
2791 const struct iommu_ops *ops = dev->bus->iommu_ops;
2792
2793 if (ops && ops->dev_disable_feat)
2794 return ops->dev_disable_feat(dev, feat);
2795
2796 return -EBUSY;
2797}
2798EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
2799
2800bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
2801{
2802 const struct iommu_ops *ops = dev->bus->iommu_ops;
2803
2804 if (ops && ops->dev_feat_enabled)
2805 return ops->dev_feat_enabled(dev, feat);
2806
2807 return false;
2808}
2809EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);
2810
2811/*
2812 * Aux-domain specific attach/detach.
2813 *
2814 * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns
2815 * true. Also, as long as domains are attached to a device through this
2816 * interface, any tries to call iommu_attach_device() should fail
2817 * (iommu_detach_device() can't fail, so we fail when trying to re-attach).
2818 * This should make us safe against a device being attached to a guest as a
2819 * whole while there are still pasid users on it (aux and sva).
2820 */
2821int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
2822{
2823 int ret = -ENODEV;
2824
2825 if (domain->ops->aux_attach_dev)
2826 ret = domain->ops->aux_attach_dev(domain, dev);
2827
2828 if (!ret)
2829 trace_attach_device_to_domain(dev);
2830
2831 return ret;
2832}
2833EXPORT_SYMBOL_GPL(iommu_aux_attach_device);
2834
2835void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
2836{
2837 if (domain->ops->aux_detach_dev) {
2838 domain->ops->aux_detach_dev(domain, dev);
2839 trace_detach_device_from_domain(dev);
2840 }
2841}
2842EXPORT_SYMBOL_GPL(iommu_aux_detach_device);
2843
2844int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
2845{
2846 int ret = -ENODEV;
2847
2848 if (domain->ops->aux_get_pasid)
2849 ret = domain->ops->aux_get_pasid(domain, dev);
2850
2851 return ret;
2852}
2853EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);
Jean-Philippe Brucker26b25a22019-04-10 16:15:16 +01002854
2855/**
2856 * iommu_sva_bind_device() - Bind a process address space to a device
2857 * @dev: the device
2858 * @mm: the mm to bind, caller must hold a reference to it
 * @drvdata: opaque driver data, passed back to the bond's iommu_sva_ops callbacks
2859 *
2860 * Create a bond between device and address space, allowing the device to access
2861 * the mm using the returned PASID. If a bond already exists between @dev and
2862 * @mm, it is returned and an additional reference is taken. Caller must call
2863 * iommu_sva_unbind_device() to release each reference.
2864 *
2865 * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
2866 * initialize the required SVA features.
2867 *
2868 * On error, returns an ERR_PTR value.
2869 */
2870struct iommu_sva *
2871iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
2872{
2873 struct iommu_group *group;
2874 struct iommu_sva *handle = ERR_PTR(-EINVAL);
2875 const struct iommu_ops *ops = dev->bus->iommu_ops;
2876
2877 if (!ops || !ops->sva_bind)
2878 return ERR_PTR(-ENODEV);
2879
2880 group = iommu_group_get(dev);
2881 if (!group)
2882 return ERR_PTR(-ENODEV);
2883
2884 /* Ensure device count and domain don't change while we're binding */
2885 mutex_lock(&group->mutex);
2886
2887 /*
2888 * To keep things simple, SVA currently doesn't support IOMMU groups
2889 * with more than one device. Existing SVA-capable systems are not
2890 * affected by the problems that required IOMMU groups (lack of ACS
2891 * isolation, device ID aliasing and other hardware issues).
2892 */
2893 if (iommu_group_device_count(group) != 1)
2894 goto out_unlock;
2895
2896 handle = ops->sva_bind(dev, mm, drvdata);
2897
2898out_unlock:
2899 mutex_unlock(&group->mutex);
2900 iommu_group_put(group);
2901
2902 return handle;
2903}
2904EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
2905
2906/**
2907 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
2908 * @handle: the handle returned by iommu_sva_bind_device()
2909 *
2910 * Put reference to a bond between device and address space. The device should
2911 * not be issuing any more transactions for this PASID. All outstanding page
2912 * requests for this PASID must have been flushed to the IOMMU.
2915 */
2916void iommu_sva_unbind_device(struct iommu_sva *handle)
2917{
2918 struct iommu_group *group;
2919 struct device *dev = handle->dev;
2920 const struct iommu_ops *ops = dev->bus->iommu_ops;
2921
2922 if (!ops || !ops->sva_unbind)
2923 return;
2924
2925 group = iommu_group_get(dev);
2926 if (!group)
2927 return;
2928
2929 mutex_lock(&group->mutex);
2930 ops->sva_unbind(handle);
2931 mutex_unlock(&group->mutex);
2932
2933 iommu_group_put(group);
2934}
2935EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
2936
2937int iommu_sva_set_ops(struct iommu_sva *handle,
2938 const struct iommu_sva_ops *sva_ops)
2939{
2940 if (handle->ops && handle->ops != sva_ops)
2941 return -EEXIST;
2942
2943 handle->ops = sva_ops;
2944 return 0;
2945}
2946EXPORT_SYMBOL_GPL(iommu_sva_set_ops);
2947
2948int iommu_sva_get_pasid(struct iommu_sva *handle)
2949{
2950 const struct iommu_ops *ops = handle->dev->bus->iommu_ops;
2951
2952 if (!ops || !ops->sva_get_pasid)
2953 return IOMMU_PASID_INVALID;
2954
2955 return ops->sva_get_pasid(handle);
2956}
2957EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);