/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 */

#ifndef __LINUX_IOMMU_H
#define __LINUX_IOMMU_H

#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/ioasid.h>
#include <uapi/linux/iommu.h>

#define IOMMU_READ	(1 << 0)
#define IOMMU_WRITE	(1 << 1)
#define IOMMU_CACHE	(1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC	(1 << 3)
#define IOMMU_MMIO	(1 << 4) /* e.g. things like MSI doorbells */
/*
 * Where the bus hardware includes a privilege level as part of its access type
 * markings, and certain devices are capable of issuing transactions marked as
 * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
 * given permission flags only apply to accesses at the higher privilege level,
 * and that unprivileged transactions should have as little access as possible.
 * This would usually imply the same permissions as kernel mappings on the CPU,
 * if the IOMMU page table format is equivalent.
 */
#define IOMMU_PRIV	(1 << 5)
/*
 * Non-coherent masters can use this page protection flag to set cacheable
 * memory attributes for only a transparent outer level of cache, also known as
 * the last-level or system cache.
 */
#define IOMMU_SYS_CACHE_ONLY	(1 << 6)
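
/*
 * Illustrative sketch (not part of the API defined in this header): the prot
 * flags above are OR'ed together and handed to iommu_map(), which is declared
 * later in this file. The domain, iova and paddr below are placeholders.
 *
 *	ret = iommu_map(domain, iova, paddr, SZ_4K,
 *			IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
 *	if (ret)
 *		return ret;
 */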

struct iommu_ops;
struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
struct notifier_block;
struct iommu_sva;
struct iommu_fault_event;

/* iommu fault flags */
#define IOMMU_FAULT_READ	0x0
#define IOMMU_FAULT_WRITE	0x1

typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
			struct device *, unsigned long, int, void *);
typedef int (*iommu_mm_exit_handler_t)(struct device *dev, struct iommu_sva *,
				       void *);
typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *);

struct iommu_domain_geometry {
	dma_addr_t aperture_start; /* First address that can be mapped    */
	dma_addr_t aperture_end;   /* Last address that can be mapped     */
	bool force_aperture;       /* DMA only allowed in mappable range? */
};

/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING	(1U << 0)  /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API	(1U << 1)  /* Domain for use in DMA-API
					      implementation              */
#define __IOMMU_DOMAIN_PT	(1U << 2)  /* Domain is identity mapped   */

/*
 * These are the possible domain types
 *
 * IOMMU_DOMAIN_BLOCKED	- All DMA is blocked, can be used to isolate
 *				  devices
 * IOMMU_DOMAIN_IDENTITY	- DMA addresses are system physical addresses
 * IOMMU_DOMAIN_UNMANAGED	- DMA mappings managed by IOMMU-API user, used
 *				  for VMs
 * IOMMU_DOMAIN_DMA		- Internally used for DMA-API implementations.
 *				  This flag allows IOMMU drivers to implement
 *				  certain optimizations for these domains
 */
#define IOMMU_DOMAIN_BLOCKED	(0U)
#define IOMMU_DOMAIN_IDENTITY	(__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED	(__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API)
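
/*
 * Illustrative sketch (assumption: a VFIO-style IOMMU-API user, not code from
 * this header): in current kernels iommu_domain_alloc(), declared below,
 * returns an IOMMU_DOMAIN_UNMANAGED domain whose mappings are then managed
 * explicitly by the caller. "dev" and the error handling are placeholders.
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(dev->bus);
 *
 *	if (!domain)
 *		return -ENODEV;
 *	ret = iommu_attach_device(domain, dev);
 *	if (ret) {
 *		iommu_domain_free(domain);
 *		return ret;
 *	}
 *	... iommu_map()/iommu_unmap() on the domain ...
 */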

struct iommu_domain {
	unsigned type;
	const struct iommu_ops *ops;
	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
	iommu_fault_handler_t handler;
	void *handler_token;
	struct iommu_domain_geometry geometry;
	void *iova_cookie;
};

enum iommu_cap {
	IOMMU_CAP_CACHE_COHERENCY,	/* IOMMU can enforce cache coherent DMA
					   transactions */
	IOMMU_CAP_INTR_REMAP,		/* IOMMU supports interrupt isolation */
	IOMMU_CAP_NOEXEC,		/* IOMMU_NOEXEC flag */
};

/*
 * The following constraints are specific to FSL_PAMUV1:
 *  - the aperture must be a power of 2, and naturally aligned
 *  - the number of windows must be a power of 2, and the address space size
 *    of each window is determined by the aperture size / # of windows
 *  - the actual size of the mapped region of a window must be a power
 *    of 2 starting with 4KB, and the physical address must be naturally
 *    aligned.
 * DOMAIN_ATTR_FSL_PAMUV1 corresponds to the above-mentioned constraints.
 * The caller can invoke iommu_domain_get_attr to check if the underlying
 * iommu implementation supports these constraints.
 */

enum iommu_attr {
	DOMAIN_ATTR_GEOMETRY,
	DOMAIN_ATTR_PAGING,
	DOMAIN_ATTR_WINDOWS,
	DOMAIN_ATTR_FSL_PAMU_STASH,
	DOMAIN_ATTR_FSL_PAMU_ENABLE,
	DOMAIN_ATTR_FSL_PAMUV1,
	DOMAIN_ATTR_NESTING,	/* two stages of translation */
	DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
	DOMAIN_ATTR_MAX,
};

/* These are the possible reserved region types */
enum iommu_resv_type {
	/* Memory regions which must be mapped 1:1 at all times */
	IOMMU_RESV_DIRECT,
	/*
	 * Memory regions which are advertised to be 1:1 but are
	 * commonly considered relaxable in some conditions,
	 * for instance in device assignment use cases (USB, graphics)
	 */
	IOMMU_RESV_DIRECT_RELAXABLE,
	/* Arbitrary "never map this or give it to a device" address ranges */
	IOMMU_RESV_RESERVED,
	/* Hardware MSI region (untranslated) */
	IOMMU_RESV_MSI,
	/* Software-managed MSI translation window */
	IOMMU_RESV_SW_MSI,
};

/**
 * struct iommu_resv_region - descriptor for a reserved memory region
 * @list: Linked list pointers
 * @start: System physical start address of the region
 * @length: Length of the region in bytes
 * @prot: IOMMU Protection flags (READ/WRITE/...)
 * @type: Type of the reserved region
 */
struct iommu_resv_region {
	struct list_head list;
	phys_addr_t start;
	size_t length;
	int prot;
	enum iommu_resv_type type;
};

/* Per device IOMMU features */
enum iommu_dev_features {
	IOMMU_DEV_FEAT_AUX,	/* Aux-domain feature */
	IOMMU_DEV_FEAT_SVA,	/* Shared Virtual Addresses */
};

#define IOMMU_PASID_INVALID	(-1U)

/**
 * struct iommu_sva_ops - device driver callbacks for an SVA context
 *
 * @mm_exit: called when the mm is about to be torn down by exit_mmap. After
 *           @mm_exit returns, the device must not issue any more transactions
 *           with the PASID given as argument.
 *
 *           The @mm_exit handler is allowed to sleep. Be careful about the
 *           locks taken in @mm_exit, because they might lead to deadlocks if
 *           they are also held when dropping references to the mm. Consider the
 *           following call chain:
 *           mutex_lock(A); mmput(mm) -> exit_mm() -> @mm_exit() -> mutex_lock(A)
 *           Using mmput_async() prevents this scenario.
 *
 */
struct iommu_sva_ops {
	iommu_mm_exit_handler_t mm_exit;
};
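
/*
 * Illustrative sketch (assumption: a hypothetical device driver, not code
 * from this header): binding the current process address space to a device
 * and installing an mm_exit callback via iommu_sva_set_ops(), both declared
 * later in this file. "my_drv_*" names are placeholders.
 *
 *	static int my_drv_mm_exit(struct device *dev, struct iommu_sva *handle,
 *				  void *drvdata)
 *	{
 *		// Quiesce the device so it issues no further transactions
 *		// with the PASID returned by iommu_sva_get_pasid(handle).
 *		return 0;
 *	}
 *
 *	static const struct iommu_sva_ops my_drv_sva_ops = {
 *		.mm_exit = my_drv_mm_exit,
 *	};
 *
 *	handle = iommu_sva_bind_device(dev, current->mm, drvdata);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	ret = iommu_sva_set_ops(handle, &my_drv_sva_ops);
 */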

#ifdef CONFIG_IOMMU_API

/**
 * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
 *
 * @start: IOVA representing the start of the range to be flushed
 * @end: IOVA representing the end of the range to be flushed (exclusive)
 * @pgsize: The interval at which to perform the flush
 *
 * This structure is intended to be updated by multiple calls to the
 * ->unmap() function in struct iommu_ops before eventually being passed
 * into ->iotlb_sync().
 */
struct iommu_iotlb_gather {
	unsigned long		start;
	unsigned long		end;
	size_t			pgsize;
};
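
/*
 * Illustrative sketch (assumption: an IOMMU-API user batching invalidations,
 * not code from this header): a gather structure accumulates the ranges
 * unmapped by iommu_unmap_fast() and is flushed once with iommu_tlb_sync(),
 * both declared later in this file.
 *
 *	struct iommu_iotlb_gather gather;
 *
 *	iommu_iotlb_gather_init(&gather);
 *	unmapped  = iommu_unmap_fast(domain, iova, size, &gather);
 *	unmapped += iommu_unmap_fast(domain, iova2, size2, &gather);
 *	iommu_tlb_sync(domain, &gather);
 */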

/**
 * struct iommu_ops - iommu ops and capabilities
 * @capable: check capability
 * @domain_alloc: allocate iommu domain
 * @domain_free: free iommu domain
 * @attach_dev: attach device to an iommu domain
 * @detach_dev: detach device from an iommu domain
 * @map: map a physically contiguous memory region to an iommu domain
 * @unmap: unmap a physically contiguous memory region from an iommu domain
 * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
 * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
 * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
 *            queue
 * @iova_to_phys: translate iova to physical address
 * @add_device: add device to iommu grouping
 * @remove_device: remove device from iommu grouping
 * @device_group: find iommu group for a particular device
 * @domain_get_attr: Query domain attributes
 * @domain_set_attr: Change domain attributes
 * @get_resv_regions: Request list of reserved regions for a device
 * @put_resv_regions: Free list of reserved regions for a device
 * @apply_resv_region: Temporary helper call-back for iova reserved ranges
 * @domain_window_enable: Configure and enable a particular window for a domain
 * @domain_window_disable: Disable a particular window for a domain
 * @of_xlate: add OF master IDs to iommu grouping
 * @is_attach_deferred: Check if domain attach should be deferred from iommu
 *                      driver init to device driver init (default no)
 * @dev_has/enable/disable_feat: per device entries to check/enable/disable
 *                               iommu specific features.
 * @dev_feat_enabled: check enabled feature
 * @aux_attach/detach_dev: aux-domain specific attach/detach entries.
 * @aux_get_pasid: get the pasid given an aux-domain
 * @sva_bind: Bind process address space to device
 * @sva_unbind: Unbind process address space from device
 * @sva_get_pasid: Get PASID associated to a SVA handle
 * @page_response: handle page request response
 * @cache_invalidate: invalidate translation caches
 * @sva_bind_gpasid: bind guest pasid and mm
 * @sva_unbind_gpasid: unbind guest pasid and mm
 * @pgsize_bitmap: bitmap of all possible supported page sizes
 * @owner: Driver module providing these ops
 */
struct iommu_ops {
	bool (*capable)(enum iommu_cap);

	/* Domain allocation and freeing by the iommu driver */
	struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
	void (*domain_free)(struct iommu_domain *);

	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
	void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
	int (*map)(struct iommu_domain *domain, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
	size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
		     size_t size, struct iommu_iotlb_gather *iotlb_gather);
	void (*flush_iotlb_all)(struct iommu_domain *domain);
	void (*iotlb_sync_map)(struct iommu_domain *domain);
	void (*iotlb_sync)(struct iommu_domain *domain,
			   struct iommu_iotlb_gather *iotlb_gather);
	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
	int (*add_device)(struct device *dev);
	void (*remove_device)(struct device *dev);
	struct iommu_group *(*device_group)(struct device *dev);
	int (*domain_get_attr)(struct iommu_domain *domain,
			       enum iommu_attr attr, void *data);
	int (*domain_set_attr)(struct iommu_domain *domain,
			       enum iommu_attr attr, void *data);

	/* Request/Free a list of reserved regions for a device */
	void (*get_resv_regions)(struct device *dev, struct list_head *list);
	void (*put_resv_regions)(struct device *dev, struct list_head *list);
	void (*apply_resv_region)(struct device *dev,
				  struct iommu_domain *domain,
				  struct iommu_resv_region *region);

	/* Window handling functions */
	int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
				    phys_addr_t paddr, u64 size, int prot);
	void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr);

	int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
	bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev);

	/* Per device IOMMU features */
	bool (*dev_has_feat)(struct device *dev, enum iommu_dev_features f);
	bool (*dev_feat_enabled)(struct device *dev, enum iommu_dev_features f);
	int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
	int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);

	/* Aux-domain specific attach/detach entries */
	int (*aux_attach_dev)(struct iommu_domain *domain, struct device *dev);
	void (*aux_detach_dev)(struct iommu_domain *domain, struct device *dev);
	int (*aux_get_pasid)(struct iommu_domain *domain, struct device *dev);

	struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm,
				      void *drvdata);
	void (*sva_unbind)(struct iommu_sva *handle);
	int (*sva_get_pasid)(struct iommu_sva *handle);

	int (*page_response)(struct device *dev,
			     struct iommu_fault_event *evt,
			     struct iommu_page_response *msg);
	int (*cache_invalidate)(struct iommu_domain *domain, struct device *dev,
				struct iommu_cache_invalidate_info *inv_info);
	int (*sva_bind_gpasid)(struct iommu_domain *domain,
			struct device *dev, struct iommu_gpasid_bind_data *data);

	int (*sva_unbind_gpasid)(struct device *dev, int pasid);

	unsigned long pgsize_bitmap;
	struct module *owner;
};

/**
 * struct iommu_device - IOMMU core representation of one IOMMU hardware
 *			 instance
 * @list: Used by the iommu-core to keep a list of registered iommus
 * @ops: iommu-ops for talking to this iommu
 * @fwnode: firmware handle for this iommu, used by iommu_ops_from_fwnode()
 * @dev: struct device for sysfs handling
 */
struct iommu_device {
	struct list_head list;
	const struct iommu_ops *ops;
	struct fwnode_handle *fwnode;
	struct device *dev;
};

/**
 * struct iommu_fault_event - Generic fault event
 *
 * Can represent recoverable faults such as page requests or
 * unrecoverable faults such as DMA or IRQ remapping faults.
 *
 * @fault: fault descriptor
 * @list: pending fault event list, used for tracking responses
 */
struct iommu_fault_event {
	struct iommu_fault fault;
	struct list_head list;
};

/**
 * struct iommu_fault_param - per-device IOMMU fault data
 * @handler: Callback function to handle IOMMU faults at device level
 * @data: handler private data
 * @faults: holds the pending faults which need a response
 * @lock: protect pending faults list
 */
struct iommu_fault_param {
	iommu_dev_fault_handler_t handler;
	void *data;
	struct list_head faults;
	struct mutex lock;
};

/**
 * struct dev_iommu - Collection of per-device IOMMU data
 *
 * @fault_param: IOMMU detected device fault reporting data
 * @fwspec:	 IOMMU fwspec data
 * @priv:	 IOMMU Driver private data
 *
 * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
 *	struct iommu_group	*iommu_group;
 */
struct dev_iommu {
	struct mutex lock;
	struct iommu_fault_param	*fault_param;
	struct iommu_fwspec		*fwspec;
	void				*priv;
};

int  iommu_device_register(struct iommu_device *iommu);
void iommu_device_unregister(struct iommu_device *iommu);
int  iommu_device_sysfs_add(struct iommu_device *iommu,
			    struct device *parent,
			    const struct attribute_group **groups,
			    const char *fmt, ...) __printf(4, 5);
void iommu_device_sysfs_remove(struct iommu_device *iommu);
int  iommu_device_link(struct iommu_device *iommu, struct device *link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link);

static inline void __iommu_device_set_ops(struct iommu_device *iommu,
					  const struct iommu_ops *ops)
{
	iommu->ops = ops;
}

#define iommu_device_set_ops(iommu, ops)				\
do {									\
	struct iommu_ops *__ops = (struct iommu_ops *)(ops);		\
	__ops->owner = THIS_MODULE;					\
	__iommu_device_set_ops(iommu, __ops);				\
} while (0)
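
/*
 * Illustrative sketch (assumption: a hypothetical IOMMU driver's probe path,
 * not code from this header): how the helpers above are typically combined to
 * register one IOMMU instance with the core. "smmu", "my_iommu_ops" and the
 * error label are placeholders.
 *
 *	ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, "my-iommu.%pa", &base);
 *	if (ret)
 *		return ret;
 *
 *	iommu_device_set_ops(&smmu->iommu, &my_iommu_ops);
 *	iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
 *
 *	ret = iommu_device_register(&smmu->iommu);
 *	if (ret)
 *		goto err_sysfs_remove;
 */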

static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
					   struct fwnode_handle *fwnode)
{
	iommu->fwnode = fwnode;
}

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return (struct iommu_device *)dev_get_drvdata(dev);
}

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
	*gather = (struct iommu_iotlb_gather) {
		.start	= ULONG_MAX,
	};
}

#define IOMMU_GROUP_NOTIFY_ADD_DEVICE		1 /* Device added */
#define IOMMU_GROUP_NOTIFY_DEL_DEVICE		2 /* Pre Device removed */
#define IOMMU_GROUP_NOTIFY_BIND_DRIVER		3 /* Pre Driver bind */
#define IOMMU_GROUP_NOTIFY_BOUND_DRIVER		4 /* Post Driver bind */
#define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER	5 /* Pre Driver unbind */
#define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER	6 /* Post Driver unbind */

extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
extern bool iommu_present(struct bus_type *bus);
extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap);
extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
extern struct iommu_group *iommu_group_get_by_id(int id);
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain,
			       struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
				struct device *dev);
extern int iommu_cache_invalidate(struct iommu_domain *domain,
				  struct device *dev,
				  struct iommu_cache_invalidate_info *inv_info);
extern int iommu_sva_bind_gpasid(struct iommu_domain *domain,
		struct device *dev, struct iommu_gpasid_bind_data *data);
extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
				   struct device *dev, ioasid_t pasid);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot);
extern int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			  size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
			       unsigned long iova, size_t size,
			       struct iommu_iotlb_gather *iotlb_gather);
extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			   struct scatterlist *sg, unsigned int nents, int prot);
extern size_t iommu_map_sg_atomic(struct iommu_domain *domain,
				  unsigned long iova, struct scatterlist *sg,
				  unsigned int nents, int prot);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
			iommu_fault_handler_t handler, void *token);

extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern void generic_iommu_put_resv_regions(struct device *dev,
					   struct list_head *list);
extern int iommu_request_dm_for_dev(struct device *dev);
extern int iommu_request_dma_domain_for_dev(struct device *dev);
extern void iommu_set_default_passthrough(bool cmd_line);
extern void iommu_set_default_translated(bool cmd_line);
extern bool iommu_default_passthrough(void);
extern struct iommu_resv_region *
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
			enum iommu_resv_type type);
extern int iommu_get_group_resv_regions(struct iommu_group *group,
					struct list_head *head);

extern int iommu_attach_group(struct iommu_domain *domain,
			      struct iommu_group *group);
extern void iommu_detach_group(struct iommu_domain *domain,
			       struct iommu_group *group);
extern struct iommu_group *iommu_group_alloc(void);
extern void *iommu_group_get_iommudata(struct iommu_group *group);
extern void iommu_group_set_iommudata(struct iommu_group *group,
				      void *iommu_data,
				      void (*release)(void *iommu_data));
extern int iommu_group_set_name(struct iommu_group *group, const char *name);
extern int iommu_group_add_device(struct iommu_group *group,
				  struct device *dev);
extern void iommu_group_remove_device(struct device *dev);
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
				    int (*fn)(struct device *, void *));
extern struct iommu_group *iommu_group_get(struct device *dev);
extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);
extern int iommu_group_register_notifier(struct iommu_group *group,
					 struct notifier_block *nb);
extern int iommu_group_unregister_notifier(struct iommu_group *group,
					   struct notifier_block *nb);
extern int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data);

extern int iommu_unregister_device_fault_handler(struct device *dev);

extern int iommu_report_device_fault(struct device *dev,
				     struct iommu_fault_event *evt);
extern int iommu_page_response(struct device *dev,
			       struct iommu_page_response *msg);
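
/*
 * Illustrative sketch (assumption: a hypothetical consumer such as a device
 * driver or device-assignment path, not code from this header): registering a
 * per-device fault handler matching iommu_dev_fault_handler_t and later
 * unregistering it. "my_dev_fault_handler" is a placeholder.
 *
 *	static int my_dev_fault_handler(struct iommu_fault *fault, void *data)
 *	{
 *		// Inspect fault->type and, for recoverable page request
 *		// faults, eventually reply via iommu_page_response().
 *		return 0;
 *	}
 *
 *	ret = iommu_register_device_fault_handler(dev, my_dev_fault_handler, dev);
 *	...
 *	iommu_unregister_device_fault_handler(dev);
 */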

extern int iommu_group_id(struct iommu_group *group);
extern struct iommu_group *iommu_group_get_for_dev(struct device *dev);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);

extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
				 void *data);
extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
				 void *data);

/* Window handling function prototypes */
extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
				      phys_addr_t offset, u64 size,
				      int prot);
extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr);

extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
			      unsigned long iova, int flags);

static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
{
	if (domain->ops->flush_iotlb_all)
		domain->ops->flush_iotlb_all(domain);
}

static inline void iommu_tlb_sync(struct iommu_domain *domain,
				  struct iommu_iotlb_gather *iotlb_gather)
{
	if (domain->ops->iotlb_sync)
		domain->ops->iotlb_sync(domain, iotlb_gather);

	iommu_iotlb_gather_init(iotlb_gather);
}

static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
	unsigned long start = iova, end = start + size;

	/*
	 * If the new page is disjoint from the current range or is mapped at
	 * a different granularity, then sync the TLB so that the gather
	 * structure can be rewritten.
	 */
	if (gather->pgsize != size ||
	    end < gather->start || start > gather->end) {
		if (gather->pgsize)
			iommu_tlb_sync(domain, gather);
		gather->pgsize = size;
	}

	if (gather->end < end)
		gather->end = end;

	if (gather->start > start)
		gather->start = start;
}
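
/*
 * Illustrative sketch (assumption: a hypothetical driver's ->unmap()
 * implementation, not code from this header): each unmapped range is added to
 * the gather passed in by the core, which later issues ->iotlb_sync().
 *
 *	static size_t my_iommu_unmap(struct iommu_domain *domain,
 *				     unsigned long iova, size_t size,
 *				     struct iommu_iotlb_gather *gather)
 *	{
 *		// ... clear the page table entries for [iova, iova + size) ...
 *		iommu_iotlb_gather_add_page(domain, gather, iova, size);
 *		return size;
 *	}
 */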

/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
extern struct iommu_group *generic_device_group(struct device *dev);
/* FSL-MC device grouping function */
struct iommu_group *fsl_mc_device_group(struct device *dev);

/**
 * struct iommu_fwspec - per-device IOMMU instance data
 * @ops: ops for this device's IOMMU
 * @iommu_fwnode: firmware handle for this device's IOMMU
 * @flags: IOMMU_FWSPEC_* flags
 * @num_pasid_bits: number of PASID bits supported by this device
 * @num_ids: number of associated device IDs
 * @ids: IDs which this device may present to the IOMMU
 */
struct iommu_fwspec {
	const struct iommu_ops	*ops;
	struct fwnode_handle	*iommu_fwnode;
	u32			flags;
	u32			num_pasid_bits;
	unsigned int		num_ids;
	u32			ids[];
};

/* ATS is supported */
#define IOMMU_FWSPEC_PCI_RC_ATS			(1 << 0)

/**
 * struct iommu_sva - handle to a device-mm bond
 * @dev: device to which the address space is bound
 * @ops: device driver callbacks, installed with iommu_sva_set_ops()
 */
struct iommu_sva {
	struct device			*dev;
	const struct iommu_sva_ops	*ops;
};

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops);
void iommu_fwspec_free(struct device *dev);
int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);
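
/*
 * Illustrative sketch (assumption: a hypothetical driver's ->of_xlate()
 * callback, not code from this header): translating a firmware "iommus"
 * specifier into per-device fwspec IDs with iommu_fwspec_add_ids().
 *
 *	static int my_iommu_of_xlate(struct device *dev,
 *				     struct of_phandle_args *args)
 *	{
 *		u32 id = args->args[0];
 *
 *		return iommu_fwspec_add_ids(dev, &id, 1);
 *	}
 */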

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->fwspec;
	else
		return NULL;
}

static inline void dev_iommu_fwspec_set(struct device *dev,
					struct iommu_fwspec *fwspec)
{
	dev->iommu->fwspec = fwspec;
}

static inline void *dev_iommu_priv_get(struct device *dev)
{
	return dev->iommu->priv;
}

static inline void dev_iommu_priv_set(struct device *dev, void *priv)
{
	dev->iommu->priv = priv;
}

int iommu_probe_device(struct device *dev);
void iommu_release_device(struct device *dev);

bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);
bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features f);
int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev);
void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev);
int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev);

struct iommu_sva *iommu_sva_bind_device(struct device *dev,
					struct mm_struct *mm,
					void *drvdata);
void iommu_sva_unbind_device(struct iommu_sva *handle);
int iommu_sva_set_ops(struct iommu_sva *handle,
		      const struct iommu_sva_ops *ops);
int iommu_sva_get_pasid(struct iommu_sva *handle);

#else /* CONFIG_IOMMU_API */

struct iommu_ops {};
struct iommu_group {};
struct iommu_fwspec {};
struct iommu_device {};
struct iommu_fault_param {};
struct iommu_iotlb_gather {};

static inline bool iommu_present(struct bus_type *bus)
{
	return false;
}

static inline bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
	return false;
}

static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	return NULL;
}

static inline struct iommu_group *iommu_group_get_by_id(int id)
{
	return NULL;
}

static inline void iommu_domain_free(struct iommu_domain *domain)
{
}

static inline int iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_detach_device(struct iommu_domain *domain,
				       struct device *dev)
{
}

static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	return NULL;
}

static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot)
{
	return -ENODEV;
}

static inline int iommu_map_atomic(struct iommu_domain *domain,
				   unsigned long iova, phys_addr_t paddr,
				   size_t size, int prot)
{
	return -ENODEV;
}

static inline size_t iommu_unmap(struct iommu_domain *domain,
				 unsigned long iova, size_t size)
{
	return 0;
}

static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
				      unsigned long iova, size_t size,
				      struct iommu_iotlb_gather *iotlb_gather)
{
	return 0;
}
| 734 | |
Olav Haugan | 315786e | 2014-10-25 09:55:16 -0700 | [diff] [blame] | 735 | static inline size_t iommu_map_sg(struct iommu_domain *domain, |
| 736 | unsigned long iova, struct scatterlist *sg, |
| 737 | unsigned int nents, int prot) |
| 738 | { |
Suravee Suthikulpanit | c5611a8 | 2018-02-05 05:45:53 -0500 | [diff] [blame] | 739 | return 0; |
Olav Haugan | 315786e | 2014-10-25 09:55:16 -0700 | [diff] [blame] | 740 | } |
| 741 | |
Tom Murphy | 781ca2d | 2019-09-08 09:56:38 -0700 | [diff] [blame] | 742 | static inline size_t iommu_map_sg_atomic(struct iommu_domain *domain, |
| 743 | unsigned long iova, struct scatterlist *sg, |
| 744 | unsigned int nents, int prot) |
| 745 | { |
| 746 | return 0; |
| 747 | } |
| 748 | |
Joerg Roedel | add02cfd | 2017-08-23 15:50:04 +0200 | [diff] [blame] | 749 | static inline void iommu_flush_tlb_all(struct iommu_domain *domain) |
| 750 | { |
| 751 | } |
| 752 | |
Will Deacon | a7d20dc | 2019-07-02 16:43:48 +0100 | [diff] [blame] | 753 | static inline void iommu_tlb_sync(struct iommu_domain *domain, |
| 754 | struct iommu_iotlb_gather *iotlb_gather) |
Joerg Roedel | add02cfd | 2017-08-23 15:50:04 +0200 | [diff] [blame] | 755 | { |
| 756 | } |
| 757 | |
Joerg Roedel | d7787d5 | 2013-01-29 14:26:20 +0100 | [diff] [blame] | 758 | static inline int iommu_domain_window_enable(struct iommu_domain *domain, |
| 759 | u32 wnd_nr, phys_addr_t paddr, |
Varun Sethi | 80f97f0 | 2013-03-29 01:24:00 +0530 | [diff] [blame] | 760 | u64 size, int prot) |
Joerg Roedel | d7787d5 | 2013-01-29 14:26:20 +0100 | [diff] [blame] | 761 | { |
| 762 | return -ENODEV; |
| 763 | } |
| 764 | |
| 765 | static inline void iommu_domain_window_disable(struct iommu_domain *domain, |
| 766 | u32 wnd_nr) |
| 767 | { |
| 768 | } |
| 769 | |
Varun Sethi | bb5547a | 2013-03-29 01:23:58 +0530 | [diff] [blame] | 770 | static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) |
Joerg Roedel | 4a77a6c | 2008-11-26 17:02:33 +0100 | [diff] [blame] | 771 | { |
| 772 | return 0; |
| 773 | } |
| 774 | |
Ohad Ben-Cohen | 4f3f8d9 | 2011-09-13 15:25:23 -0400 | [diff] [blame] | 775 | static inline void iommu_set_fault_handler(struct iommu_domain *domain, |
Ohad Ben-Cohen | 77ca233 | 2012-05-21 20:20:05 +0300 | [diff] [blame] | 776 | iommu_fault_handler_t handler, void *token) |
Ohad Ben-Cohen | 4f3f8d9 | 2011-09-13 15:25:23 -0400 | [diff] [blame] | 777 | { |
| 778 | } |
| 779 | |
Eric Auger | e5b5234 | 2017-01-19 20:57:47 +0000 | [diff] [blame] | 780 | static inline void iommu_get_resv_regions(struct device *dev, |
Joerg Roedel | a1015c2 | 2015-05-28 18:41:33 +0200 | [diff] [blame] | 781 | struct list_head *list) |
| 782 | { |
| 783 | } |
| 784 | |
Eric Auger | e5b5234 | 2017-01-19 20:57:47 +0000 | [diff] [blame] | 785 | static inline void iommu_put_resv_regions(struct device *dev, |
Joerg Roedel | a1015c2 | 2015-05-28 18:41:33 +0200 | [diff] [blame] | 786 | struct list_head *list) |
| 787 | { |
| 788 | } |
| 789 | |
Eric Auger | 6c65fb3 | 2017-01-19 20:57:51 +0000 | [diff] [blame] | 790 | static inline int iommu_get_group_resv_regions(struct iommu_group *group, |
| 791 | struct list_head *head) |
| 792 | { |
| 793 | return -ENODEV; |
| 794 | } |
| 795 | |
Joerg Roedel | d290f1e | 2015-05-28 18:41:36 +0200 | [diff] [blame] | 796 | static inline int iommu_request_dm_for_dev(struct device *dev) |
| 797 | { |
| 798 | return -ENODEV; |
| 799 | } |
| 800 | |
Lu Baolu | 7423e01 | 2019-05-25 13:41:22 +0800 | [diff] [blame] | 801 | static inline int iommu_request_dma_domain_for_dev(struct device *dev) |
| 802 | { |
| 803 | return -ENODEV; |
| 804 | } |
| 805 | |
Joerg Roedel | 8a69961 | 2019-08-19 15:22:47 +0200 | [diff] [blame] | 806 | static inline void iommu_set_default_passthrough(bool cmd_line) |
| 807 | { |
| 808 | } |
| 809 | |
| 810 | static inline void iommu_set_default_translated(bool cmd_line) |
| 811 | { |
| 812 | } |
| 813 | |
| 814 | static inline bool iommu_default_passthrough(void) |
| 815 | { |
| 816 | return true; |
| 817 | } |
| 818 | |
Alex Williamson | bef83de | 2012-09-24 21:23:25 -0600 | [diff] [blame] | 819 | static inline int iommu_attach_group(struct iommu_domain *domain, |
| 820 | struct iommu_group *group) |
Alex Williamson | 1460432 | 2011-10-21 15:56:05 -0400 | [diff] [blame] | 821 | { |
| 822 | return -ENODEV; |
| 823 | } |
| 824 | |
Alex Williamson | bef83de | 2012-09-24 21:23:25 -0600 | [diff] [blame] | 825 | static inline void iommu_detach_group(struct iommu_domain *domain, |
| 826 | struct iommu_group *group) |
Alex Williamson | d72e31c | 2012-05-30 14:18:53 -0600 | [diff] [blame] | 827 | { |
| 828 | } |
| 829 | |
Alex Williamson | bef83de | 2012-09-24 21:23:25 -0600 | [diff] [blame] | 830 | static inline struct iommu_group *iommu_group_alloc(void) |
Alex Williamson | d72e31c | 2012-05-30 14:18:53 -0600 | [diff] [blame] | 831 | { |
| 832 | return ERR_PTR(-ENODEV); |
| 833 | } |
| 834 | |
Alex Williamson | bef83de | 2012-09-24 21:23:25 -0600 | [diff] [blame] | 835 | static inline void *iommu_group_get_iommudata(struct iommu_group *group) |
Alex Williamson | d72e31c | 2012-05-30 14:18:53 -0600 | [diff] [blame] | 836 | { |
| 837 | return NULL; |
| 838 | } |
| 839 | |
Alex Williamson | bef83de | 2012-09-24 21:23:25 -0600 | [diff] [blame] | 840 | static inline void iommu_group_set_iommudata(struct iommu_group *group, |
| 841 | void *iommu_data, |
| 842 | void (*release)(void *iommu_data)) |
Alex Williamson | d72e31c | 2012-05-30 14:18:53 -0600 | [diff] [blame] | 843 | { |
| 844 | } |
| 845 | |
Alex Williamson | bef83de | 2012-09-24 21:23:25 -0600 | [diff] [blame] | 846 | static inline int iommu_group_set_name(struct iommu_group *group, |
| 847 | const char *name) |
Alex Williamson | d72e31c | 2012-05-30 14:18:53 -0600 | [diff] [blame] | 848 | { |
| 849 | return -ENODEV; |
| 850 | } |
| 851 | |
Alex Williamson | bef83de | 2012-09-24 21:23:25 -0600 | [diff] [blame] | 852 | static inline int iommu_group_add_device(struct iommu_group *group, |
| 853 | struct device *dev) |
Alex Williamson | d72e31c | 2012-05-30 14:18:53 -0600 | [diff] [blame] | 854 | { |
| 855 | return -ENODEV; |
| 856 | } |
| 857 | |
Alex Williamson | bef83de | 2012-09-24 21:23:25 -0600 | [diff] [blame] | 858 | static inline void iommu_group_remove_device(struct device *dev) |
Alex Williamson | d72e31c | 2012-05-30 14:18:53 -0600 | [diff] [blame] | 859 | { |
| 860 | } |
| 861 | |
Alex Williamson | bef83de | 2012-09-24 21:23:25 -0600 | [diff] [blame] | 862 | static inline int iommu_group_for_each_dev(struct iommu_group *group, |
| 863 | void *data, |
| 864 | int (*fn)(struct device *, void *)) |
Alex Williamson | d72e31c | 2012-05-30 14:18:53 -0600 | [diff] [blame] | 865 | { |
| 866 | return -ENODEV; |
| 867 | } |
| 868 | |
Alex Williamson | bef83de | 2012-09-24 21:23:25 -0600 | [diff] [blame] | 869 | static inline struct iommu_group *iommu_group_get(struct device *dev) |
Alex Williamson | d72e31c | 2012-05-30 14:18:53 -0600 | [diff] [blame] | 870 | { |
| 871 | return NULL; |
| 872 | } |
| 873 | |
Alex Williamson | bef83de | 2012-09-24 21:23:25 -0600 | [diff] [blame] | 874 | static inline void iommu_group_put(struct iommu_group *group) |
Alex Williamson | d72e31c | 2012-05-30 14:18:53 -0600 | [diff] [blame] | 875 | { |
| 876 | } |
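
/*
 * iommu_group_get() takes a reference that must be dropped again; a common
 * pattern is just reading the group number for logging or sysfs matching:
 *
 *	struct iommu_group *group = iommu_group_get(dev);
 *
 *	if (group) {
 *		dev_info(dev, "in iommu group %d\n", iommu_group_id(group));
 *		iommu_group_put(group);
 *	}
 */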
| 877 | |
Alex Williamson | bef83de | 2012-09-24 21:23:25 -0600 | [diff] [blame] | 878 | static inline int iommu_group_register_notifier(struct iommu_group *group, |
| 879 | struct notifier_block *nb) |
Alex Williamson | d72e31c | 2012-05-30 14:18:53 -0600 | [diff] [blame] | 880 | { |
| 881 | return -ENODEV; |
| 882 | } |
| 883 | |
Alex Williamson | bef83de | 2012-09-24 21:23:25 -0600 | [diff] [blame] | 884 | static inline int iommu_group_unregister_notifier(struct iommu_group *group, |
| 885 | struct notifier_block *nb) |
Alex Williamson | d72e31c | 2012-05-30 14:18:53 -0600 | [diff] [blame] | 886 | { |
| 887 | return 0; |
| 888 | } |
| 889 | |
Jacob Pan | 0c830e6 | 2019-06-03 15:57:48 +0100 | [diff] [blame] | 890 | static inline |
| 891 | int iommu_register_device_fault_handler(struct device *dev, |
| 892 | iommu_dev_fault_handler_t handler, |
| 893 | void *data) |
| 894 | { |
| 895 | return -ENODEV; |
| 896 | } |
| 897 | |
| 898 | static inline int iommu_unregister_device_fault_handler(struct device *dev) |
| 899 | { |
| 900 | return 0; |
| 901 | } |
| 902 | |
| 903 | static inline |
| 904 | int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt) |
| 905 | { |
| 906 | return -ENODEV; |
| 907 | } |
| 908 | |
Jean-Philippe Brucker | bf3255b | 2019-06-03 15:57:49 +0100 | [diff] [blame] | 909 | static inline int iommu_page_response(struct device *dev, |
| 910 | struct iommu_page_response *msg) |
| 911 | { |
| 912 | return -ENODEV; |
| 913 | } |
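
/*
 * The device fault path is the recoverable-fault counterpart of the domain
 * handler above: a driver registers a handler and, for page-request
 * faults, answers later with iommu_page_response().  Sketch, where
 * handle_fault and my_driver_queue_prq are illustrative names:
 *
 *	static int handle_fault(struct iommu_fault *fault, void *data)
 *	{
 *		return my_driver_queue_prq(fault, data);
 *	}
 *
 *	iommu_register_device_fault_handler(dev, handle_fault, drvdata);
 */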
| 914 | |
Alex Williamson | bef83de | 2012-09-24 21:23:25 -0600 | [diff] [blame] | 915 | static inline int iommu_group_id(struct iommu_group *group) |
Alex Williamson | d72e31c | 2012-05-30 14:18:53 -0600 | [diff] [blame] | 916 | { |
| 917 | return -ENODEV; |
| 918 | } |
Joerg Roedel | 4a77a6c | 2008-11-26 17:02:33 +0100 | [diff] [blame] | 919 | |
Joerg Roedel | 0cd76dd | 2012-01-26 19:40:52 +0100 | [diff] [blame] | 920 | static inline int iommu_domain_get_attr(struct iommu_domain *domain, |
| 921 | enum iommu_attr attr, void *data) |
| 922 | { |
| 923 | return -EINVAL; |
| 924 | } |
| 925 | |
| 926 | static inline int iommu_domain_set_attr(struct iommu_domain *domain, |
| 927 | enum iommu_attr attr, void *data) |
| 928 | { |
| 929 | return -EINVAL; |
| 930 | } |
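
/*
 * Domain attributes expose properties such as the addressable aperture.
 * Querying the geometry, for instance:
 *
 *	struct iommu_domain_geometry geo;
 *
 *	if (!iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo))
 *		pr_info("aperture %pad..%pad\n",
 *			&geo.aperture_start, &geo.aperture_end);
 */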
| 931 | |
Joerg Roedel | b0119e8 | 2017-02-01 13:23:08 +0100 | [diff] [blame] | 932 | static inline int iommu_device_register(struct iommu_device *iommu) |
Alex Williamson | c61959e | 2014-06-12 16:12:24 -0600 | [diff] [blame] | 933 | { |
Joerg Roedel | b0119e8 | 2017-02-01 13:23:08 +0100 | [diff] [blame] | 934 | return -ENODEV; |
Alex Williamson | c61959e | 2014-06-12 16:12:24 -0600 | [diff] [blame] | 935 | } |
| 936 | |
Joerg Roedel | b0119e8 | 2017-02-01 13:23:08 +0100 | [diff] [blame] | 937 | static inline void iommu_device_set_ops(struct iommu_device *iommu, |
| 938 | const struct iommu_ops *ops) |
| 939 | { |
| 940 | } |
| 941 | |
Joerg Roedel | c73e1ac | 2017-02-07 18:18:46 +0100 | [diff] [blame] | 942 | static inline void iommu_device_set_fwnode(struct iommu_device *iommu, |
| 943 | struct fwnode_handle *fwnode) |
| 944 | { |
| 945 | } |
| 946 | |
Joerg Roedel | 2926a2aa | 2017-08-14 17:19:26 +0200 | [diff] [blame] | 947 | static inline struct iommu_device *dev_to_iommu_device(struct device *dev) |
| 948 | { |
| 949 | return NULL; |
| 950 | } |
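
/*
 * An IOMMU driver makes itself known to the core by embedding a struct
 * iommu_device and registering it at probe time.  Typical sequence, with
 * my_smmu and my_ops as illustrative driver names:
 *
 *	iommu_device_sysfs_add(&my_smmu->iommu, dev, NULL, "my-smmu.%d", id);
 *	iommu_device_set_ops(&my_smmu->iommu, &my_ops);
 *	iommu_device_set_fwnode(&my_smmu->iommu, dev->fwnode);
 *	err = iommu_device_register(&my_smmu->iommu);
 */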
| 951 | |
Will Deacon | a7d20dc | 2019-07-02 16:43:48 +0100 | [diff] [blame] | 952 | static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather) |
| 953 | { |
| 954 | } |
| 955 | |
Will Deacon | 4fcf854 | 2019-07-02 16:43:57 +0100 | [diff] [blame] | 956 | static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain, |
| 957 | struct iommu_iotlb_gather *gather, |
| 958 | unsigned long iova, size_t size) |
| 959 | { |
| 960 | } |
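
/*
 * The gather helpers batch TLB invalidation across several unmaps so that
 * only one sync is issued at the end.  Sketch of the pattern (addresses
 * are illustrative):
 *
 *	struct iommu_iotlb_gather gather;
 *
 *	iommu_iotlb_gather_init(&gather);
 *	iommu_unmap_fast(domain, iova, SZ_4K, &gather);
 *	iommu_unmap_fast(domain, iova + SZ_4K, SZ_4K, &gather);
 *	iommu_tlb_sync(domain, &gather);
 */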
| 961 | |
Joerg Roedel | b0119e8 | 2017-02-01 13:23:08 +0100 | [diff] [blame] | 962 | static inline void iommu_device_unregister(struct iommu_device *iommu) |
| 963 | { |
| 964 | } |
| 965 | |
Joerg Roedel | 39ab955 | 2017-02-01 16:56:46 +0100 | [diff] [blame] | 966 | static inline int iommu_device_sysfs_add(struct iommu_device *iommu, |
| 967 | struct device *parent, |
| 968 | const struct attribute_group **groups, |
| 969 | const char *fmt, ...) |
| 970 | { |
| 971 | return -ENODEV; |
| 972 | } |
| 973 | |
| 974 | static inline void iommu_device_sysfs_remove(struct iommu_device *iommu) |
Alex Williamson | c61959e | 2014-06-12 16:12:24 -0600 | [diff] [blame] | 975 | { |
| 976 | } |
| 977 | |
Alex Williamson | e09f8ea | 2014-07-07 14:31:36 -0600 | [diff] [blame] | 978 | static inline int iommu_device_link(struct device *dev, struct device *link) |
Alex Williamson | c61959e | 2014-06-12 16:12:24 -0600 | [diff] [blame] | 979 | { |
| 980 | return -EINVAL; |
| 981 | } |
| 982 | |
Alex Williamson | e09f8ea | 2014-07-07 14:31:36 -0600 | [diff] [blame] | 983 | static inline void iommu_device_unlink(struct device *dev, struct device *link) |
Alex Williamson | c61959e | 2014-06-12 16:12:24 -0600 | [diff] [blame] | 984 | { |
| 985 | } |
| 986 | |
Robin Murphy | 57f98d2 | 2016-09-13 10:54:14 +0100 | [diff] [blame] | 987 | static inline int iommu_fwspec_init(struct device *dev, |
| 988 | struct fwnode_handle *iommu_fwnode, |
| 989 | const struct iommu_ops *ops) |
| 990 | { |
| 991 | return -ENODEV; |
| 992 | } |
| 993 | |
| 994 | static inline void iommu_fwspec_free(struct device *dev) |
| 995 | { |
| 996 | } |
| 997 | |
| 998 | static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids, |
| 999 | int num_ids) |
| 1000 | { |
| 1001 | return -ENODEV; |
| 1002 | } |
| 1003 | |
Lorenzo Pieralisi | e4f10ff | 2016-11-21 10:01:36 +0000 | [diff] [blame] | 1004 | static inline |
Joerg Roedel | 534766d | 2017-01-31 16:58:42 +0100 | [diff] [blame] | 1005 | const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode) |
Lorenzo Pieralisi | e4f10ff | 2016-11-21 10:01:36 +0000 | [diff] [blame] | 1006 | { |
| 1007 | return NULL; |
| 1008 | } |
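
/*
 * The fwspec helpers record the firmware-described stream IDs of a master
 * while it is being connected to its IOMMU.  Binding sketch with a single
 * illustrative ID:
 *
 *	u32 sid = 0x42;
 *	int err;
 *
 *	err = iommu_fwspec_init(dev, iommu_fwnode, ops);
 *	if (!err)
 *		err = iommu_fwspec_add_ids(dev, &sid, 1);
 */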
| 1009 | |
Lu Baolu | a3a1959 | 2019-03-25 09:30:28 +0800 | [diff] [blame] | 1010 | static inline bool |
| 1011 | iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat) |
| 1012 | { |
| 1013 | return false; |
| 1014 | } |
| 1015 | |
| 1016 | static inline bool |
| 1017 | iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat) |
| 1018 | { |
| 1019 | return false; |
| 1020 | } |
| 1021 | |
| 1022 | static inline int |
| 1023 | iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat) |
| 1024 | { |
| 1025 | return -ENODEV; |
| 1026 | } |
| 1027 | |
| 1028 | static inline int |
| 1029 | iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat) |
| 1030 | { |
| 1031 | return -ENODEV; |
| 1032 | } |
| 1033 | |
| 1034 | static inline int |
| 1035 | iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev) |
| 1036 | { |
| 1037 | return -ENODEV; |
| 1038 | } |
| 1039 | |
| 1040 | static inline void |
| 1041 | iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev) |
| 1042 | { |
| 1043 | } |
| 1044 | |
| 1045 | static inline int |
| 1046 | iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev) |
| 1047 | { |
| 1048 | return -ENODEV; |
| 1049 | } |
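
/*
 * Aux domains let one physical device host several PASID-tagged address
 * spaces.  Sketch: enable the feature, attach, then read back the PASID to
 * program into the device's descriptors:
 *
 *	if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_AUX))
 *		return -ENODEV;
 *	if (iommu_aux_attach_device(domain, dev))
 *		return -ENODEV;
 *	pasid = iommu_aux_get_pasid(domain, dev);
 */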
| 1050 | |
Jean-Philippe Brucker | 26b25a2 | 2019-04-10 16:15:16 +0100 | [diff] [blame] | 1051 | static inline struct iommu_sva * |
| 1052 | iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata) |
| 1053 | { |
| 1054 | return NULL; |
| 1055 | } |
| 1056 | |
| 1057 | static inline void iommu_sva_unbind_device(struct iommu_sva *handle) |
| 1058 | { |
| 1059 | } |
| 1060 | |
| 1061 | static inline int iommu_sva_set_ops(struct iommu_sva *handle, |
| 1062 | const struct iommu_sva_ops *ops) |
| 1063 | { |
| 1064 | return -EINVAL; |
| 1065 | } |
| 1066 | |
| 1067 | static inline int iommu_sva_get_pasid(struct iommu_sva *handle) |
| 1068 | { |
| 1069 | return IOMMU_PASID_INVALID; |
| 1070 | } |
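
/*
 * SVA shares a process address space with the device.  Bind/unbind sketch,
 * with mm usually being current->mm:
 *
 *	handle = iommu_sva_bind_device(dev, current->mm, drvdata);
 *	if (IS_ERR_OR_NULL(handle))
 *		return -ENODEV;
 *	pasid = iommu_sva_get_pasid(handle);
 *	if (pasid == IOMMU_PASID_INVALID)
 *		goto out_unbind;
 *	...
 * out_unbind:
 *	iommu_sva_unbind_device(handle);
 */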
| 1071 | |
Yi L Liu | 4c7c171 | 2019-10-02 12:42:40 -0700 | [diff] [blame] | 1072 | static inline int |
| 1073 | iommu_cache_invalidate(struct iommu_domain *domain, |
| 1074 | struct device *dev, |
| 1075 | struct iommu_cache_invalidate_info *inv_info) |
| 1076 | { |
| 1077 | return -ENODEV; |
| 1078 | }

Jacob Pan | 808be0a | 2019-10-02 12:42:43 -0700 | [diff] [blame] | 1079 | static inline int iommu_sva_bind_gpasid(struct iommu_domain *domain, |
| 1080 | struct device *dev, struct iommu_gpasid_bind_data *data) |
| 1081 | { |
| 1082 | return -ENODEV; |
| 1083 | } |
| 1084 | |
| 1085 | static inline int iommu_sva_unbind_gpasid(struct iommu_domain *domain, |
| 1086 | struct device *dev, int pasid) |
| 1087 | { |
| 1088 | return -ENODEV; |
| 1089 | } |
Yi L Liu | 4c7c171 | 2019-10-02 12:42:40 -0700 | [diff] [blame] | 1090 | |
Joerg Roedel | 0008d0c | 2020-03-26 16:08:26 +0100 | [diff] [blame] | 1091 | static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev) |
| 1092 | { |
| 1093 | return NULL; |
| 1094 | } |
Joerg Roedel | 4a77a6c | 2008-11-26 17:02:33 +0100 | [diff] [blame] | 1095 | #endif /* CONFIG_IOMMU_API */ |
| 1096 | |
Gary R Hook | bad614b | 2018-06-12 16:41:21 -0500 | [diff] [blame] | 1097 | #ifdef CONFIG_IOMMU_DEBUGFS |
| 1098 | extern struct dentry *iommu_debugfs_dir; |
| 1099 | void iommu_debugfs_setup(void); |
| 1100 | #else |
| 1101 | static inline void iommu_debugfs_setup(void) {} |
| 1102 | #endif |
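
/*
 * With CONFIG_IOMMU_DEBUGFS enabled, a driver can hang its own entries off
 * the shared directory (the name and fops below are illustrative):
 *
 *	iommu_debugfs_setup();
 *	debugfs_create_file("my-iommu", 0400, iommu_debugfs_dir,
 *			    priv, &my_debugfs_fops);
 */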
| 1103 | |
Joerg Roedel | 4a77a6c | 2008-11-26 17:02:33 +0100 | [diff] [blame] | 1104 | #endif /* __LINUX_IOMMU_H */ |