/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 */

#ifndef __LINUX_IOMMU_H
#define __LINUX_IOMMU_H

#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/ioasid.h>
#include <uapi/linux/iommu.h>

#define IOMMU_READ	(1 << 0)
#define IOMMU_WRITE	(1 << 1)
#define IOMMU_CACHE	(1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC	(1 << 3)
#define IOMMU_MMIO	(1 << 4) /* e.g. things like MSI doorbells */
/*
 * Where the bus hardware includes a privilege level as part of its access type
 * markings, and certain devices are capable of issuing transactions marked as
 * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
 * given permission flags only apply to accesses at the higher privilege level,
 * and that unprivileged transactions should have as little access as possible.
 * This would usually imply the same permissions as kernel mappings on the CPU,
 * if the IOMMU page table format is equivalent.
 */
#define IOMMU_PRIV	(1 << 5)
/*
 * Non-coherent masters can use this page protection flag to set cacheable
 * memory attributes for only a transparent outer level of cache, also known as
 * the last-level or system cache.
 */
#define IOMMU_SYS_CACHE_ONLY	(1 << 6)

struct iommu_ops;
struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
struct notifier_block;
struct iommu_sva;
struct iommu_fault_event;

/* iommu fault flags */
#define IOMMU_FAULT_READ	0x0
#define IOMMU_FAULT_WRITE	0x1

typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
			struct device *, unsigned long, int, void *);
typedef int (*iommu_mm_exit_handler_t)(struct device *dev, struct iommu_sva *,
				       void *);
typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *);

struct iommu_domain_geometry {
	dma_addr_t aperture_start; /* First address that can be mapped    */
	dma_addr_t aperture_end;   /* Last address that can be mapped     */
	bool force_aperture;       /* DMA only allowed in mappable range? */
};

/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING	(1U << 0)  /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API	(1U << 1)  /* Domain for use in DMA-API
					      implementation              */
#define __IOMMU_DOMAIN_PT	(1U << 2)  /* Domain is identity mapped   */

/*
 * These are the possible domain types
 *
 * IOMMU_DOMAIN_BLOCKED	-	All DMA is blocked, can be used to isolate
 *				  devices
 * IOMMU_DOMAIN_IDENTITY	-	DMA addresses are system physical addresses
 * IOMMU_DOMAIN_UNMANAGED	-	DMA mappings managed by IOMMU-API user, used
 *				  for VMs
 * IOMMU_DOMAIN_DMA	-	Internally used for DMA-API implementations.
 *				  This flag allows IOMMU drivers to implement
 *				  certain optimizations for these domains
 */
#define IOMMU_DOMAIN_BLOCKED	(0U)
#define IOMMU_DOMAIN_IDENTITY	(__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED	(__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API)

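/*
 * Illustrative sketch (not part of this header's API): a typical IOMMU-API
 * user such as a device-assignment framework allocates an UNMANAGED domain
 * for the device's bus, attaches the device and maps an IOVA range. The
 * iova/paddr variables and the SZ_4K size are assumptions for the example
 * only; error handling is omitted.
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(dev->bus);
 *
 *	if (domain && !iommu_attach_device(domain, dev))
 *		iommu_map(domain, iova, paddr, SZ_4K,
 *			  IOMMU_READ | IOMMU_WRITE);
 */
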
struct iommu_domain {
	unsigned type;
	const struct iommu_ops *ops;
	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
	iommu_fault_handler_t handler;
	void *handler_token;
	struct iommu_domain_geometry geometry;
	void *iova_cookie;
};

enum iommu_cap {
	IOMMU_CAP_CACHE_COHERENCY,	/* IOMMU can enforce cache coherent DMA
					   transactions */
	IOMMU_CAP_INTR_REMAP,		/* IOMMU supports interrupt isolation */
	IOMMU_CAP_NOEXEC,		/* IOMMU_NOEXEC flag */
};

/*
 * The following constraints are specific to FSL_PAMUV1:
 *  -aperture must be power of 2, and naturally aligned
 *  -number of windows must be power of 2, and address space size
 *   of each window is determined by aperture size / # of windows
 *  -the actual size of the mapped region of a window must be power
 *   of 2 starting with 4KB and physical address must be naturally
 *   aligned.
 * DOMAIN_ATTR_FSL_PAMUV1 corresponds to the above mentioned constraints.
 * The caller can invoke iommu_domain_get_attr to check if the underlying
 * iommu implementation supports these constraints.
 */

enum iommu_attr {
	DOMAIN_ATTR_GEOMETRY,
	DOMAIN_ATTR_PAGING,
	DOMAIN_ATTR_WINDOWS,
	DOMAIN_ATTR_FSL_PAMU_STASH,
	DOMAIN_ATTR_FSL_PAMU_ENABLE,
	DOMAIN_ATTR_FSL_PAMUV1,
	DOMAIN_ATTR_NESTING,	/* two stages of translation */
	DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
	DOMAIN_ATTR_MAX,
};

/* These are the possible reserved region types */
enum iommu_resv_type {
	/* Memory regions which must be mapped 1:1 at all times */
	IOMMU_RESV_DIRECT,
	/*
	 * Memory regions which are advertised to be 1:1 but are
	 * commonly considered relaxable in some conditions,
	 * for instance in device assignment use case (USB, Graphics)
	 */
	IOMMU_RESV_DIRECT_RELAXABLE,
	/* Arbitrary "never map this or give it to a device" address ranges */
	IOMMU_RESV_RESERVED,
	/* Hardware MSI region (untranslated) */
	IOMMU_RESV_MSI,
	/* Software-managed MSI translation window */
	IOMMU_RESV_SW_MSI,
};

/**
 * struct iommu_resv_region - descriptor for a reserved memory region
 * @list: Linked list pointers
 * @start: System physical start address of the region
 * @length: Length of the region in bytes
 * @prot: IOMMU Protection flags (READ/WRITE/...)
 * @type: Type of the reserved region
 */
struct iommu_resv_region {
	struct list_head	list;
	phys_addr_t		start;
	size_t			length;
	int			prot;
	enum iommu_resv_type	type;
};

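/*
 * Illustrative sketch: a driver's ->get_resv_regions() callback would
 * typically allocate entries with iommu_alloc_resv_region() and add them to
 * the caller's list. The MSI window address and size below are assumptions
 * for the example only.
 *
 *	struct iommu_resv_region *region;
 *
 *	region = iommu_alloc_resv_region(0xfee00000, SZ_1M,
 *					 IOMMU_WRITE | IOMMU_MMIO,
 *					 IOMMU_RESV_MSI);
 *	if (region)
 *		list_add_tail(&region->list, list);
 */
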
/* Per device IOMMU features */
enum iommu_dev_features {
	IOMMU_DEV_FEAT_AUX,	/* Aux-domain feature */
	IOMMU_DEV_FEAT_SVA,	/* Shared Virtual Addresses */
};

#define IOMMU_PASID_INVALID	(-1U)

/**
 * struct iommu_sva_ops - device driver callbacks for an SVA context
 *
 * @mm_exit: called when the mm is about to be torn down by exit_mmap. After
 *           @mm_exit returns, the device must not issue any more transactions
 *           with the PASID given as argument.
 *
 *           The @mm_exit handler is allowed to sleep. Be careful about the
 *           locks taken in @mm_exit, because they might lead to deadlocks if
 *           they are also held when dropping references to the mm. Consider the
 *           following call chain:
 *           mutex_lock(A); mmput(mm) -> exit_mm() -> @mm_exit() -> mutex_lock(A)
 *           Using mmput_async() prevents this scenario.
 *
 */
struct iommu_sva_ops {
	iommu_mm_exit_handler_t mm_exit;
};

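/*
 * Illustrative sketch: a device driver using SVA binds the current mm,
 * installs its iommu_sva_ops and programs the returned PASID into the
 * hardware. my_mm_exit() and the surrounding driver context are assumptions
 * for the example only.
 *
 *	static const struct iommu_sva_ops my_sva_ops = {
 *		.mm_exit = my_mm_exit,
 *	};
 *
 *	handle = iommu_sva_bind_device(dev, current->mm, drvdata);
 *	if (!IS_ERR_OR_NULL(handle)) {
 *		iommu_sva_set_ops(handle, &my_sva_ops);
 *		pasid = iommu_sva_get_pasid(handle);
 *	}
 */
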
#ifdef CONFIG_IOMMU_API

/**
 * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
 *
 * @start: IOVA representing the start of the range to be flushed
 * @end: IOVA representing the end of the range to be flushed (exclusive)
 * @pgsize: The interval at which to perform the flush
 *
 * This structure is intended to be updated by multiple calls to the
 * ->unmap() function in struct iommu_ops before eventually being passed
 * into ->iotlb_sync().
 */
struct iommu_iotlb_gather {
	unsigned long		start;
	unsigned long		end;
	size_t			pgsize;
};

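/*
 * Illustrative sketch: a caller unmapping several ranges can batch the TLB
 * invalidations through a gather structure and issue a single sync at the
 * end. The iova/size variables are assumptions for the example only.
 *
 *	struct iommu_iotlb_gather gather;
 *
 *	iommu_iotlb_gather_init(&gather);
 *	iommu_unmap_fast(domain, iova, size, &gather);
 *	iommu_unmap_fast(domain, iova2, size2, &gather);
 *	iommu_tlb_sync(domain, &gather);
 */
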
/**
 * struct iommu_ops - iommu ops and capabilities
 * @capable: check capability
 * @domain_alloc: allocate iommu domain
 * @domain_free: free iommu domain
 * @attach_dev: attach device to an iommu domain
 * @detach_dev: detach device from an iommu domain
 * @map: map a physically contiguous memory region to an iommu domain
 * @unmap: unmap a physically contiguous memory region from an iommu domain
 * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
 * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
 * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
 *            queue
 * @iova_to_phys: translate iova to physical address
 * @add_device: add device to iommu grouping
 * @remove_device: remove device from iommu grouping
 * @device_group: find iommu group for a particular device
 * @domain_get_attr: Query domain attributes
 * @domain_set_attr: Change domain attributes
 * @get_resv_regions: Request list of reserved regions for a device
 * @put_resv_regions: Free list of reserved regions for a device
 * @apply_resv_region: Temporary helper call-back for iova reserved ranges
 * @domain_window_enable: Configure and enable a particular window for a domain
 * @domain_window_disable: Disable a particular window for a domain
 * @of_xlate: add OF master IDs to iommu grouping
 * @is_attach_deferred: Check if domain attach should be deferred from iommu
 *                      driver init to device driver init (default no)
 * @dev_has/enable/disable_feat: per device entries to check/enable/disable
 *                               iommu specific features.
 * @dev_feat_enabled: check enabled feature
 * @aux_attach/detach_dev: aux-domain specific attach/detach entries.
 * @aux_get_pasid: get the pasid given an aux-domain
 * @sva_bind: Bind process address space to device
 * @sva_unbind: Unbind process address space from device
 * @sva_get_pasid: Get PASID associated to a SVA handle
 * @page_response: handle page request response
 * @cache_invalidate: invalidate translation caches
 * @sva_bind_gpasid: bind guest pasid and mm
 * @sva_unbind_gpasid: unbind guest pasid and mm
 * @pgsize_bitmap: bitmap of all possible supported page sizes
 * @owner: Driver module providing these ops
 */
struct iommu_ops {
	bool (*capable)(enum iommu_cap);

	/* Domain allocation and freeing by the iommu driver */
	struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
	void (*domain_free)(struct iommu_domain *);

	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
	void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
	int (*map)(struct iommu_domain *domain, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
	size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
		     size_t size, struct iommu_iotlb_gather *iotlb_gather);
	void (*flush_iotlb_all)(struct iommu_domain *domain);
	void (*iotlb_sync_map)(struct iommu_domain *domain);
	void (*iotlb_sync)(struct iommu_domain *domain,
			   struct iommu_iotlb_gather *iotlb_gather);
	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
	int (*add_device)(struct device *dev);
	void (*remove_device)(struct device *dev);
	struct iommu_group *(*device_group)(struct device *dev);
	int (*domain_get_attr)(struct iommu_domain *domain,
			       enum iommu_attr attr, void *data);
	int (*domain_set_attr)(struct iommu_domain *domain,
			       enum iommu_attr attr, void *data);

	/* Request/Free a list of reserved regions for a device */
	void (*get_resv_regions)(struct device *dev, struct list_head *list);
	void (*put_resv_regions)(struct device *dev, struct list_head *list);
	void (*apply_resv_region)(struct device *dev,
				  struct iommu_domain *domain,
				  struct iommu_resv_region *region);

	/* Window handling functions */
	int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
				    phys_addr_t paddr, u64 size, int prot);
	void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr);

	int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
	bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev);

	/* Per device IOMMU features */
	bool (*dev_has_feat)(struct device *dev, enum iommu_dev_features f);
	bool (*dev_feat_enabled)(struct device *dev, enum iommu_dev_features f);
	int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
	int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);

	/* Aux-domain specific attach/detach entries */
	int (*aux_attach_dev)(struct iommu_domain *domain, struct device *dev);
	void (*aux_detach_dev)(struct iommu_domain *domain, struct device *dev);
	int (*aux_get_pasid)(struct iommu_domain *domain, struct device *dev);

	struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm,
				      void *drvdata);
	void (*sva_unbind)(struct iommu_sva *handle);
	int (*sva_get_pasid)(struct iommu_sva *handle);

	int (*page_response)(struct device *dev,
			     struct iommu_fault_event *evt,
			     struct iommu_page_response *msg);
	int (*cache_invalidate)(struct iommu_domain *domain, struct device *dev,
				struct iommu_cache_invalidate_info *inv_info);
	int (*sva_bind_gpasid)(struct iommu_domain *domain,
			struct device *dev, struct iommu_gpasid_bind_data *data);

	int (*sva_unbind_gpasid)(struct device *dev, int pasid);

	unsigned long pgsize_bitmap;
	struct module *owner;
};

/**
 * struct iommu_device - IOMMU core representation of one IOMMU hardware
 *			 instance
 * @list: Used by the iommu-core to keep a list of registered iommus
 * @ops: iommu-ops for talking to this iommu
 * @dev: struct device for sysfs handling
 */
struct iommu_device {
	struct list_head list;
	const struct iommu_ops *ops;
	struct fwnode_handle *fwnode;
	struct device *dev;
};

/**
 * struct iommu_fault_event - Generic fault event
 *
 * Can represent recoverable faults such as page requests or
 * unrecoverable faults such as DMA or IRQ remapping faults.
 *
 * @fault: fault descriptor
 * @list: pending fault event list, used for tracking responses
 */
struct iommu_fault_event {
	struct iommu_fault fault;
	struct list_head list;
};

/**
 * struct iommu_fault_param - per-device IOMMU fault data
 * @handler: Callback function to handle IOMMU faults at device level
 * @data: handler private data
Jean-Philippe Bruckerbf3255b2019-06-03 15:57:49 +0100357 * @faults: holds the pending faults which needs response
 * @lock: protect pending faults list
 */
struct iommu_fault_param {
	iommu_dev_fault_handler_t handler;
	void *data;
	struct list_head faults;
	struct mutex lock;
};

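/*
 * Illustrative sketch: a device driver interested in recoverable faults
 * (e.g. page requests) registers a per-device handler and later answers
 * them with iommu_page_response(). my_fault_handler() is an assumption for
 * the example only.
 *
 *	static int my_fault_handler(struct iommu_fault *fault, void *data)
 *	{
 *		...
 *		return 0;
 *	}
 *
 *	iommu_register_device_fault_handler(dev, my_fault_handler, dev);
 */
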
/**
 * struct dev_iommu - Collection of per-device IOMMU data
 *
 * @fault_param: IOMMU detected device fault reporting data
 * @fwspec:	 IOMMU fwspec data
 * @priv:	 IOMMU Driver private data
 *
 * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
 *	struct iommu_group	*iommu_group;
 */
struct dev_iommu {
	struct mutex lock;
	struct iommu_fault_param	*fault_param;
	struct iommu_fwspec		*fwspec;
	void				*priv;
};

int  iommu_device_register(struct iommu_device *iommu);
void iommu_device_unregister(struct iommu_device *iommu);
int  iommu_device_sysfs_add(struct iommu_device *iommu,
			    struct device *parent,
			    const struct attribute_group **groups,
			    const char *fmt, ...) __printf(4, 5);
void iommu_device_sysfs_remove(struct iommu_device *iommu);
int  iommu_device_link(struct iommu_device *iommu, struct device *link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link);

static inline void __iommu_device_set_ops(struct iommu_device *iommu,
					  const struct iommu_ops *ops)
{
	iommu->ops = ops;
}

#define iommu_device_set_ops(iommu, ops)				\
do {									\
	struct iommu_ops *__ops = (struct iommu_ops *)(ops);		\
	__ops->owner = THIS_MODULE;					\
	__iommu_device_set_ops(iommu, __ops);				\
} while (0)

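/*
 * Illustrative sketch: an IOMMU driver's probe path typically wires its ops
 * into its struct iommu_device and registers it with the core. The
 * my_iommu_ops table and the smmu pointer are assumptions for the example
 * only; error handling is omitted.
 *
 *	iommu_device_sysfs_add(&smmu->iommu, dev, NULL, "%s", dev_name(dev));
 *	iommu_device_set_ops(&smmu->iommu, &my_iommu_ops);
 *	iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
 *	iommu_device_register(&smmu->iommu);
 */
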
static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
					   struct fwnode_handle *fwnode)
{
	iommu->fwnode = fwnode;
}

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return (struct iommu_device *)dev_get_drvdata(dev);
}

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
	*gather = (struct iommu_iotlb_gather) {
		.start	= ULONG_MAX,
	};
}

#define IOMMU_GROUP_NOTIFY_ADD_DEVICE		1 /* Device added */
#define IOMMU_GROUP_NOTIFY_DEL_DEVICE		2 /* Pre Device removed */
#define IOMMU_GROUP_NOTIFY_BIND_DRIVER		3 /* Pre Driver bind */
#define IOMMU_GROUP_NOTIFY_BOUND_DRIVER		4 /* Post Driver bind */
#define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER	5 /* Pre Driver unbind */
#define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER	6 /* Post Driver unbind */

extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
extern bool iommu_present(struct bus_type *bus);
extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap);
extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
extern struct iommu_group *iommu_group_get_by_id(int id);
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain,
			       struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
				struct device *dev);
extern int iommu_cache_invalidate(struct iommu_domain *domain,
				  struct device *dev,
				  struct iommu_cache_invalidate_info *inv_info);
extern int iommu_sva_bind_gpasid(struct iommu_domain *domain,
		struct device *dev, struct iommu_gpasid_bind_data *data);
extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
				struct device *dev, ioasid_t pasid);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot);
extern int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			  size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
			       unsigned long iova, size_t size,
			       struct iommu_iotlb_gather *iotlb_gather);
extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			   struct scatterlist *sg, unsigned int nents, int prot);
extern size_t iommu_map_sg_atomic(struct iommu_domain *domain,
				  unsigned long iova, struct scatterlist *sg,
				  unsigned int nents, int prot);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
			iommu_fault_handler_t handler, void *token);

extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern void generic_iommu_put_resv_regions(struct device *dev,
					   struct list_head *list);
extern int iommu_request_dm_for_dev(struct device *dev);
extern int iommu_request_dma_domain_for_dev(struct device *dev);
extern void iommu_set_default_passthrough(bool cmd_line);
extern void iommu_set_default_translated(bool cmd_line);
extern bool iommu_default_passthrough(void);
extern struct iommu_resv_region *
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
			enum iommu_resv_type type);
extern int iommu_get_group_resv_regions(struct iommu_group *group,
					struct list_head *head);

extern int iommu_attach_group(struct iommu_domain *domain,
			      struct iommu_group *group);
extern void iommu_detach_group(struct iommu_domain *domain,
			       struct iommu_group *group);
extern struct iommu_group *iommu_group_alloc(void);
extern void *iommu_group_get_iommudata(struct iommu_group *group);
extern void iommu_group_set_iommudata(struct iommu_group *group,
				      void *iommu_data,
				      void (*release)(void *iommu_data));
extern int iommu_group_set_name(struct iommu_group *group, const char *name);
extern int iommu_group_add_device(struct iommu_group *group,
				  struct device *dev);
extern void iommu_group_remove_device(struct device *dev);
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
				    int (*fn)(struct device *, void *));
extern struct iommu_group *iommu_group_get(struct device *dev);
extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);
extern int iommu_group_register_notifier(struct iommu_group *group,
					 struct notifier_block *nb);
extern int iommu_group_unregister_notifier(struct iommu_group *group,
					   struct notifier_block *nb);
extern int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data);

extern int iommu_unregister_device_fault_handler(struct device *dev);

extern int iommu_report_device_fault(struct device *dev,
				     struct iommu_fault_event *evt);
extern int iommu_page_response(struct device *dev,
			       struct iommu_page_response *msg);

extern int iommu_group_id(struct iommu_group *group);
extern struct iommu_group *iommu_group_get_for_dev(struct device *dev);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);

extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
				 void *data);
extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
				 void *data);

/* Window handling function prototypes */
extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
				      phys_addr_t offset, u64 size,
				      int prot);
extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr);

extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
			      unsigned long iova, int flags);

static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
{
	if (domain->ops->flush_iotlb_all)
		domain->ops->flush_iotlb_all(domain);
}

static inline void iommu_tlb_sync(struct iommu_domain *domain,
				  struct iommu_iotlb_gather *iotlb_gather)
{
	if (domain->ops->iotlb_sync)
		domain->ops->iotlb_sync(domain, iotlb_gather);

	iommu_iotlb_gather_init(iotlb_gather);
}

static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
	unsigned long start = iova, end = start + size;

	/*
	 * If the new page is disjoint from the current range or is mapped at
	 * a different granularity, then sync the TLB so that the gather
	 * structure can be rewritten.
	 */
	if (gather->pgsize != size ||
	    end < gather->start || start > gather->end) {
		if (gather->pgsize)
			iommu_tlb_sync(domain, gather);
		gather->pgsize = size;
	}

	if (gather->end < end)
		gather->end = end;

	if (gather->start > start)
		gather->start = start;
}

/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
extern struct iommu_group *generic_device_group(struct device *dev);
/* FSL-MC device grouping function */
struct iommu_group *fsl_mc_device_group(struct device *dev);

/**
 * struct iommu_fwspec - per-device IOMMU instance data
 * @ops: ops for this device's IOMMU
 * @iommu_fwnode: firmware handle for this device's IOMMU
 * @flags: IOMMU_FWSPEC_* flags
 * @num_pasid_bits: number of PASID bits supported by this device
 * @num_ids: number of associated device IDs
 * @ids: IDs which this device may present to the IOMMU
 */
struct iommu_fwspec {
	const struct iommu_ops	*ops;
	struct fwnode_handle	*iommu_fwnode;
	u32			flags;
	u32			num_pasid_bits;
	unsigned int		num_ids;
	u32			ids[];
};

/* ATS is supported */
#define IOMMU_FWSPEC_PCI_RC_ATS			(1 << 0)

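/*
 * Illustrative sketch: once the firmware glue has initialised a master's
 * fwspec, an IOMMU driver's ->of_xlate() callback records the stream IDs it
 * was handed; they can later be read back via dev_iommu_fwspec_get(). The
 * single-ID args->args[0] layout is an assumption for the example only.
 *
 *	static int my_of_xlate(struct device *dev,
 *			       struct of_phandle_args *args)
 *	{
 *		u32 id = args->args[0];
 *
 *		return iommu_fwspec_add_ids(dev, &id, 1);
 *	}
 */
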
/**
 * struct iommu_sva - handle to a device-mm bond
 */
struct iommu_sva {
	struct device			*dev;
	const struct iommu_sva_ops	*ops;
};

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops);
void iommu_fwspec_free(struct device *dev);
int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->fwspec;
	else
		return NULL;
}

static inline void dev_iommu_fwspec_set(struct device *dev,
					struct iommu_fwspec *fwspec)
{
	dev->iommu->fwspec = fwspec;
}

static inline void *dev_iommu_priv_get(struct device *dev)
{
	return dev->iommu->priv;
}

static inline void dev_iommu_priv_set(struct device *dev, void *priv)
{
	dev->iommu->priv = priv;
}

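/*
 * Illustrative sketch: an IOMMU driver can hang its per-device state off
 * dev->iommu from its ->add_device() callback and retrieve it later. The
 * my_dev_data structure is an assumption for the example only.
 *
 *	struct my_dev_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
 *
 *	dev_iommu_priv_set(dev, data);
 *	...
 *	data = dev_iommu_priv_get(dev);
 */
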
int iommu_probe_device(struct device *dev);
void iommu_release_device(struct device *dev);

bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);
bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features f);
int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev);
void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev);
int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev);

struct iommu_sva *iommu_sva_bind_device(struct device *dev,
					struct mm_struct *mm,
					void *drvdata);
void iommu_sva_unbind_device(struct iommu_sva *handle);
int iommu_sva_set_ops(struct iommu_sva *handle,
		      const struct iommu_sva_ops *ops);
int iommu_sva_get_pasid(struct iommu_sva *handle);

#else /* CONFIG_IOMMU_API */

struct iommu_ops {};
struct iommu_group {};
struct iommu_fwspec {};
struct iommu_device {};
struct iommu_fault_param {};
struct iommu_iotlb_gather {};

static inline bool iommu_present(struct bus_type *bus)
{
	return false;
}

static inline bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
	return false;
}

static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	return NULL;
}

static inline struct iommu_group *iommu_group_get_by_id(int id)
{
	return NULL;
}

static inline void iommu_domain_free(struct iommu_domain *domain)
{
}

static inline int iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_detach_device(struct iommu_domain *domain,
				       struct device *dev)
{
}

static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	return NULL;
}

static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot)
{
	return -ENODEV;
}

static inline int iommu_map_atomic(struct iommu_domain *domain,
				   unsigned long iova, phys_addr_t paddr,
				   size_t size, int prot)
{
	return -ENODEV;
}

static inline size_t iommu_unmap(struct iommu_domain *domain,
				 unsigned long iova, size_t size)
{
	return 0;
}

static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
				      unsigned long iova, size_t size,
				      struct iommu_iotlb_gather *iotlb_gather)
{
	return 0;
}

static inline size_t iommu_map_sg(struct iommu_domain *domain,
				  unsigned long iova, struct scatterlist *sg,
				  unsigned int nents, int prot)
{
	return 0;
}

static inline size_t iommu_map_sg_atomic(struct iommu_domain *domain,
				  unsigned long iova, struct scatterlist *sg,
				  unsigned int nents, int prot)
{
	return 0;
}

static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
{
}

static inline void iommu_tlb_sync(struct iommu_domain *domain,
				  struct iommu_iotlb_gather *iotlb_gather)
{
}

static inline int iommu_domain_window_enable(struct iommu_domain *domain,
					     u32 wnd_nr, phys_addr_t paddr,
					     u64 size, int prot)
{
	return -ENODEV;
}

static inline void iommu_domain_window_disable(struct iommu_domain *domain,
					       u32 wnd_nr)
{
}

static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	return 0;
}

static inline void iommu_set_fault_handler(struct iommu_domain *domain,
				iommu_fault_handler_t handler, void *token)
{
}

static inline void iommu_get_resv_regions(struct device *dev,
					struct list_head *list)
{
}

static inline void iommu_put_resv_regions(struct device *dev,
					struct list_head *list)
{
}

static inline int iommu_get_group_resv_regions(struct iommu_group *group,
					       struct list_head *head)
{
	return -ENODEV;
}

static inline int iommu_request_dm_for_dev(struct device *dev)
{
	return -ENODEV;
}

static inline int iommu_request_dma_domain_for_dev(struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_set_default_passthrough(bool cmd_line)
{
}

static inline void iommu_set_default_translated(bool cmd_line)
{
}

static inline bool iommu_default_passthrough(void)
{
	return true;
}

static inline int iommu_attach_group(struct iommu_domain *domain,
				     struct iommu_group *group)
{
	return -ENODEV;
}

static inline void iommu_detach_group(struct iommu_domain *domain,
				      struct iommu_group *group)
{
}

static inline struct iommu_group *iommu_group_alloc(void)
{
	return ERR_PTR(-ENODEV);
}

static inline void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return NULL;
}

static inline void iommu_group_set_iommudata(struct iommu_group *group,
					     void *iommu_data,
					     void (*release)(void *iommu_data))
{
}

static inline int iommu_group_set_name(struct iommu_group *group,
				       const char *name)
{
	return -ENODEV;
}

static inline int iommu_group_add_device(struct iommu_group *group,
					 struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_group_remove_device(struct device *dev)
{
}

static inline int iommu_group_for_each_dev(struct iommu_group *group,
					   void *data,
					   int (*fn)(struct device *, void *))
{
	return -ENODEV;
}

static inline struct iommu_group *iommu_group_get(struct device *dev)
{
	return NULL;
}

static inline void iommu_group_put(struct iommu_group *group)
{
}

static inline int iommu_group_register_notifier(struct iommu_group *group,
						struct notifier_block *nb)
{
	return -ENODEV;
}

static inline int iommu_group_unregister_notifier(struct iommu_group *group,
						  struct notifier_block *nb)
{
	return 0;
}

static inline
int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data)
{
	return -ENODEV;
}

static inline int iommu_unregister_device_fault_handler(struct device *dev)
{
	return 0;
}

static inline
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
	return -ENODEV;
}

static inline int iommu_page_response(struct device *dev,
				      struct iommu_page_response *msg)
{
	return -ENODEV;
}

static inline int iommu_group_id(struct iommu_group *group)
{
	return -ENODEV;
}

static inline int iommu_domain_get_attr(struct iommu_domain *domain,
					enum iommu_attr attr, void *data)
{
	return -EINVAL;
}

static inline int iommu_domain_set_attr(struct iommu_domain *domain,
					enum iommu_attr attr, void *data)
{
	return -EINVAL;
}

static inline int iommu_device_register(struct iommu_device *iommu)
{
	return -ENODEV;
}

static inline void iommu_device_set_ops(struct iommu_device *iommu,
					const struct iommu_ops *ops)
{
}

static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
					   struct fwnode_handle *fwnode)
{
}

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return NULL;
}

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
}

static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
}

static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}

static inline int iommu_device_sysfs_add(struct iommu_device *iommu,
					 struct device *parent,
					 const struct attribute_group **groups,
					 const char *fmt, ...)
{
	return -ENODEV;
}

static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
{
}

static inline int iommu_device_link(struct device *dev, struct device *link)
{
	return -EINVAL;
}

static inline void iommu_device_unlink(struct device *dev, struct device *link)
{
}

static inline int iommu_fwspec_init(struct device *dev,
				    struct fwnode_handle *iommu_fwnode,
				    const struct iommu_ops *ops)
{
	return -ENODEV;
}

static inline void iommu_fwspec_free(struct device *dev)
{
}

static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
				       int num_ids)
{
	return -ENODEV;
}

static inline
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
	return NULL;
}

static inline bool
iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat)
{
	return false;
}

static inline bool
iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
{
	return false;
}

static inline int
iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline int
iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline int
iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
{
	return -ENODEV;
}

static inline void
iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
{
}

static inline int
iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
{
	return -ENODEV;
}

static inline struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
{
	return NULL;
}

static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
{
}

static inline int iommu_sva_set_ops(struct iommu_sva *handle,
				    const struct iommu_sva_ops *ops)
{
	return -EINVAL;
}

static inline int iommu_sva_get_pasid(struct iommu_sva *handle)
{
	return IOMMU_PASID_INVALID;
}

static inline int
iommu_cache_invalidate(struct iommu_domain *domain,
		       struct device *dev,
		       struct iommu_cache_invalidate_info *inv_info)
{
	return -ENODEV;
}
static inline int iommu_sva_bind_gpasid(struct iommu_domain *domain,
				struct device *dev, struct iommu_gpasid_bind_data *data)
{
	return -ENODEV;
}

static inline int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
					  struct device *dev, int pasid)
{
	return -ENODEV;
}

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	return NULL;
}
#endif /* CONFIG_IOMMU_API */

#ifdef CONFIG_IOMMU_DEBUGFS
extern	struct dentry *iommu_debugfs_dir;
void iommu_debugfs_setup(void);
#else
static inline void iommu_debugfs_setup(void) {}
#endif

#endif /* __LINUX_IOMMU_H */