blob: a47f47307109ff94a4e65544488a559c3dad1654 [file] [log] [blame]
Thomas Gleixnerd94d71c2019-05-29 07:12:40 -07001// SPDX-License-Identifier: GPL-2.0-only
Varun Sethi695093e2013-07-15 10:20:57 +05302/*
Varun Sethi695093e2013-07-15 10:20:57 +05303 *
4 * Copyright (C) 2013 Freescale Semiconductor, Inc.
5 * Author: Varun Sethi <varun.sethi@freescale.com>
Varun Sethi695093e2013-07-15 10:20:57 +05306 */
7
8#define pr_fmt(fmt) "fsl-pamu-domain: %s: " fmt, __func__
9
Varun Sethi695093e2013-07-15 10:20:57 +053010#include "fsl_pamu_domain.h"
Varun Sethi695093e2013-07-15 10:20:57 +053011
Emil Medvecd70d462015-01-28 08:34:33 -060012#include <sysdev/fsl_pci.h>
13
/*
 * Global spinlock that needs to be held while
 * configuring PAMU.
 */
static DEFINE_SPINLOCK(iommu_lock);

/* Slab caches for fsl_dma_domain and device_domain_info allocations */
static struct kmem_cache *fsl_pamu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
/* Protects dev_iommu_priv_*() data and the devinfo lifetime */
static DEFINE_SPINLOCK(device_domain_lock);

struct iommu_device pamu_iommu;	/* IOMMU core code handle */
25
Joerg Roedel8d4bfe42015-03-26 13:43:18 +010026static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom)
27{
28 return container_of(dom, struct fsl_dma_domain, iommu_domain);
29}
30
Varun Sethi695093e2013-07-15 10:20:57 +053031static int __init iommu_init_mempool(void)
32{
Varun Sethi695093e2013-07-15 10:20:57 +053033 fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
Emil Medvecd70d462015-01-28 08:34:33 -060034 sizeof(struct fsl_dma_domain),
35 0,
36 SLAB_HWCACHE_ALIGN,
37 NULL);
Varun Sethi695093e2013-07-15 10:20:57 +053038 if (!fsl_pamu_domain_cache) {
39 pr_debug("Couldn't create fsl iommu_domain cache\n");
40 return -ENOMEM;
41 }
42
43 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
Emil Medvecd70d462015-01-28 08:34:33 -060044 sizeof(struct device_domain_info),
45 0,
46 SLAB_HWCACHE_ALIGN,
47 NULL);
Varun Sethi695093e2013-07-15 10:20:57 +053048 if (!iommu_devinfo_cache) {
49 pr_debug("Couldn't create devinfo cache\n");
50 kmem_cache_destroy(fsl_pamu_domain_cache);
51 return -ENOMEM;
52 }
53
54 return 0;
55}
56
Varun Sethi695093e2013-07-15 10:20:57 +053057static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
Emil Medvecd70d462015-01-28 08:34:33 -060058 u32 val)
Varun Sethi695093e2013-07-15 10:20:57 +053059{
Joerg Roedel84b62692021-04-15 16:44:42 +020060 int ret = 0;
Varun Sethi695093e2013-07-15 10:20:57 +053061 unsigned long flags;
62
63 spin_lock_irqsave(&iommu_lock, flags);
Christoph Hellwigba58d122021-04-01 17:52:41 +020064 ret = pamu_update_paace_stash(liodn, val);
65 if (ret) {
Joerg Roedel84b62692021-04-15 16:44:42 +020066 pr_debug("Failed to update SPAACE for liodn %d\n ", liodn);
Varun Sethi695093e2013-07-15 10:20:57 +053067 spin_unlock_irqrestore(&iommu_lock, flags);
Christoph Hellwigba58d122021-04-01 17:52:41 +020068 return ret;
Varun Sethi695093e2013-07-15 10:20:57 +053069 }
70
71 spin_unlock_irqrestore(&iommu_lock, flags);
72
73 return ret;
74}
75
76/* Set the geometry parameters for a LIODN */
Christoph Hellwigdae77472021-04-01 17:52:44 +020077static int pamu_set_liodn(struct fsl_dma_domain *dma_domain, struct device *dev,
78 int liodn)
Varun Sethi695093e2013-07-15 10:20:57 +053079{
Varun Sethi695093e2013-07-15 10:20:57 +053080 u32 omi_index = ~(u32)0;
81 unsigned long flags;
Christoph Hellwigba58d122021-04-01 17:52:41 +020082 int ret;
Varun Sethi695093e2013-07-15 10:20:57 +053083
84 /*
85 * Configure the omi_index at the geometry setup time.
86 * This is a static value which depends on the type of
87 * device and would not change thereafter.
88 */
89 get_ome_index(&omi_index, dev);
90
Varun Sethi695093e2013-07-15 10:20:57 +053091 spin_lock_irqsave(&iommu_lock, flags);
92 ret = pamu_disable_liodn(liodn);
Christoph Hellwigdae77472021-04-01 17:52:44 +020093 if (ret)
94 goto out_unlock;
Christoph Hellwig57fa44b2021-04-01 17:52:49 +020095 ret = pamu_config_ppaace(liodn, omi_index, dma_domain->stash_id, 0);
Christoph Hellwigdae77472021-04-01 17:52:44 +020096 if (ret)
97 goto out_unlock;
Christoph Hellwig57fa44b2021-04-01 17:52:49 +020098 ret = pamu_config_ppaace(liodn, ~(u32)0, dma_domain->stash_id,
Christoph Hellwigdae77472021-04-01 17:52:44 +020099 PAACE_AP_PERMS_QUERY | PAACE_AP_PERMS_UPDATE);
100out_unlock:
Varun Sethi695093e2013-07-15 10:20:57 +0530101 spin_unlock_irqrestore(&iommu_lock, flags);
102 if (ret) {
Christoph Hellwigba58d122021-04-01 17:52:41 +0200103 pr_debug("PAACE configuration failed for liodn %d\n",
104 liodn);
Varun Sethi695093e2013-07-15 10:20:57 +0530105 }
Varun Sethi695093e2013-07-15 10:20:57 +0530106 return ret;
107}
108
Christoph Hellwigba58d122021-04-01 17:52:41 +0200109static void remove_device_ref(struct device_domain_info *info)
Varun Sethi695093e2013-07-15 10:20:57 +0530110{
111 unsigned long flags;
112
113 list_del(&info->link);
114 spin_lock_irqsave(&iommu_lock, flags);
Varun Sethi695093e2013-07-15 10:20:57 +0530115 pamu_disable_liodn(info->liodn);
116 spin_unlock_irqrestore(&iommu_lock, flags);
117 spin_lock_irqsave(&device_domain_lock, flags);
Joerg Roedel2263d812020-06-25 15:08:30 +0200118 dev_iommu_priv_set(info->dev, NULL);
Varun Sethi695093e2013-07-15 10:20:57 +0530119 kmem_cache_free(iommu_devinfo_cache, info);
120 spin_unlock_irqrestore(&device_domain_lock, flags);
121}
122
123static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
124{
125 struct device_domain_info *info, *tmp;
126 unsigned long flags;
127
128 spin_lock_irqsave(&dma_domain->domain_lock, flags);
129 /* Remove the device from the domain device list */
130 list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
131 if (!dev || (info->dev == dev))
Christoph Hellwigba58d122021-04-01 17:52:41 +0200132 remove_device_ref(info);
Varun Sethi695093e2013-07-15 10:20:57 +0530133 }
134 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
135}
136
137static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev)
138{
139 struct device_domain_info *info, *old_domain_info;
140 unsigned long flags;
141
142 spin_lock_irqsave(&device_domain_lock, flags);
143 /*
144 * Check here if the device is already attached to domain or not.
145 * If the device is already attached to a domain detach it.
146 */
Joerg Roedel2263d812020-06-25 15:08:30 +0200147 old_domain_info = dev_iommu_priv_get(dev);
Varun Sethi695093e2013-07-15 10:20:57 +0530148 if (old_domain_info && old_domain_info->domain != dma_domain) {
149 spin_unlock_irqrestore(&device_domain_lock, flags);
150 detach_device(dev, old_domain_info->domain);
151 spin_lock_irqsave(&device_domain_lock, flags);
152 }
153
154 info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC);
155
156 info->dev = dev;
157 info->liodn = liodn;
158 info->domain = dma_domain;
159
160 list_add(&info->link, &dma_domain->devices);
161 /*
162 * In case of devices with multiple LIODNs just store
163 * the info for the first LIODN as all
164 * LIODNs share the same domain
165 */
Joerg Roedel2263d812020-06-25 15:08:30 +0200166 if (!dev_iommu_priv_get(dev))
167 dev_iommu_priv_set(dev, info);
Varun Sethi695093e2013-07-15 10:20:57 +0530168 spin_unlock_irqrestore(&device_domain_lock, flags);
Varun Sethi695093e2013-07-15 10:20:57 +0530169}
170
171static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
Emil Medvecd70d462015-01-28 08:34:33 -0600172 dma_addr_t iova)
Varun Sethi695093e2013-07-15 10:20:57 +0530173{
Emil Medvecd70d462015-01-28 08:34:33 -0600174 if (iova < domain->geometry.aperture_start ||
175 iova > domain->geometry.aperture_end)
Varun Sethi695093e2013-07-15 10:20:57 +0530176 return 0;
Christoph Hellwig376dfd22021-04-01 17:52:42 +0200177 return iova;
Varun Sethi695093e2013-07-15 10:20:57 +0530178}
179
Joerg Roedelb7eb6782014-09-05 10:50:27 +0200180static bool fsl_pamu_capable(enum iommu_cap cap)
Varun Sethi695093e2013-07-15 10:20:57 +0530181{
182 return cap == IOMMU_CAP_CACHE_COHERENCY;
183}
184
Joerg Roedel8d4bfe42015-03-26 13:43:18 +0100185static void fsl_pamu_domain_free(struct iommu_domain *domain)
Varun Sethi695093e2013-07-15 10:20:57 +0530186{
Joerg Roedel8d4bfe42015-03-26 13:43:18 +0100187 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
Varun Sethi695093e2013-07-15 10:20:57 +0530188
189 /* remove all the devices from the device list */
190 detach_device(NULL, dma_domain);
Varun Sethi695093e2013-07-15 10:20:57 +0530191 kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
192}
193
Joerg Roedel8d4bfe42015-03-26 13:43:18 +0100194static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type)
Varun Sethi695093e2013-07-15 10:20:57 +0530195{
196 struct fsl_dma_domain *dma_domain;
197
Joerg Roedel8d4bfe42015-03-26 13:43:18 +0100198 if (type != IOMMU_DOMAIN_UNMANAGED)
199 return NULL;
200
Christoph Hellwigc8224502021-04-01 17:52:40 +0200201 dma_domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
202 if (!dma_domain)
Joerg Roedel8d4bfe42015-03-26 13:43:18 +0100203 return NULL;
Christoph Hellwigc8224502021-04-01 17:52:40 +0200204
205 dma_domain->stash_id = ~(u32)0;
Christoph Hellwigc8224502021-04-01 17:52:40 +0200206 INIT_LIST_HEAD(&dma_domain->devices);
207 spin_lock_init(&dma_domain->domain_lock);
208
209 /* default geometry 64 GB i.e. maximum system address */
Joerg Roedel8d4bfe42015-03-26 13:43:18 +0100210 dma_domain->iommu_domain. geometry.aperture_start = 0;
211 dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1;
212 dma_domain->iommu_domain.geometry.force_aperture = true;
Varun Sethi695093e2013-07-15 10:20:57 +0530213
Joerg Roedel8d4bfe42015-03-26 13:43:18 +0100214 return &dma_domain->iommu_domain;
Varun Sethi695093e2013-07-15 10:20:57 +0530215}
216
Varun Sethi695093e2013-07-15 10:20:57 +0530217/* Update stash destination for all LIODNs associated with the domain */
218static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
219{
220 struct device_domain_info *info;
221 int ret = 0;
222
223 list_for_each_entry(info, &dma_domain->devices, link) {
224 ret = update_liodn_stash(info->liodn, dma_domain, val);
225 if (ret)
226 break;
227 }
228
229 return ret;
230}
231
Varun Sethi695093e2013-07-15 10:20:57 +0530232static int fsl_pamu_attach_device(struct iommu_domain *domain,
233 struct device *dev)
234{
Joerg Roedel8d4bfe42015-03-26 13:43:18 +0100235 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
Christoph Hellwig85e362c2021-04-01 17:52:45 +0200236 unsigned long flags;
237 int len, ret = 0, i;
Varun Sethi695093e2013-07-15 10:20:57 +0530238 const u32 *liodn;
Varun Sethi695093e2013-07-15 10:20:57 +0530239 struct pci_dev *pdev = NULL;
240 struct pci_controller *pci_ctl;
241
242 /*
243 * Use LIODN of the PCI controller while attaching a
244 * PCI device.
245 */
Yijing Wangb3eb76d2013-12-05 19:42:49 +0800246 if (dev_is_pci(dev)) {
Varun Sethi695093e2013-07-15 10:20:57 +0530247 pdev = to_pci_dev(dev);
248 pci_ctl = pci_bus_to_host(pdev->bus);
249 /*
250 * make dev point to pci controller device
251 * so we can get the LIODN programmed by
252 * u-boot.
253 */
254 dev = pci_ctl->parent;
255 }
256
257 liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
Christoph Hellwig85e362c2021-04-01 17:52:45 +0200258 if (!liodn) {
Rob Herring6bd4f1c2017-07-18 16:43:09 -0500259 pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
Christoph Hellwig85e362c2021-04-01 17:52:45 +0200260 return -EINVAL;
Varun Sethi695093e2013-07-15 10:20:57 +0530261 }
262
Christoph Hellwig85e362c2021-04-01 17:52:45 +0200263 spin_lock_irqsave(&dma_domain->domain_lock, flags);
264 for (i = 0; i < len / sizeof(u32); i++) {
265 /* Ensure that LIODN value is valid */
266 if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
267 pr_debug("Invalid liodn %d, attach device failed for %pOF\n",
268 liodn[i], dev->of_node);
269 ret = -EINVAL;
270 break;
271 }
272
273 attach_device(dma_domain, liodn[i], dev);
274 ret = pamu_set_liodn(dma_domain, dev, liodn[i]);
275 if (ret)
276 break;
Christoph Hellwig7d61cb62021-04-01 17:52:46 +0200277 ret = pamu_enable_liodn(liodn[i]);
278 if (ret)
279 break;
Christoph Hellwig85e362c2021-04-01 17:52:45 +0200280 }
281 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
Varun Sethi695093e2013-07-15 10:20:57 +0530282 return ret;
283}
284
285static void fsl_pamu_detach_device(struct iommu_domain *domain,
Emil Medvecd70d462015-01-28 08:34:33 -0600286 struct device *dev)
Varun Sethi695093e2013-07-15 10:20:57 +0530287{
Joerg Roedel8d4bfe42015-03-26 13:43:18 +0100288 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
Varun Sethi695093e2013-07-15 10:20:57 +0530289 const u32 *prop;
290 int len;
291 struct pci_dev *pdev = NULL;
292 struct pci_controller *pci_ctl;
293
294 /*
295 * Use LIODN of the PCI controller while detaching a
296 * PCI device.
297 */
Yijing Wangb3eb76d2013-12-05 19:42:49 +0800298 if (dev_is_pci(dev)) {
Varun Sethi695093e2013-07-15 10:20:57 +0530299 pdev = to_pci_dev(dev);
300 pci_ctl = pci_bus_to_host(pdev->bus);
301 /*
302 * make dev point to pci controller device
303 * so we can get the LIODN programmed by
304 * u-boot.
305 */
306 dev = pci_ctl->parent;
307 }
308
309 prop = of_get_property(dev->of_node, "fsl,liodn", &len);
310 if (prop)
311 detach_device(dev, dma_domain);
312 else
Rob Herring6bd4f1c2017-07-18 16:43:09 -0500313 pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
Varun Sethi695093e2013-07-15 10:20:57 +0530314}
315
Varun Sethi695093e2013-07-15 10:20:57 +0530316/* Set the domain stash attribute */
Christoph Hellwig4eeb96f2021-04-01 17:52:43 +0200317int fsl_pamu_configure_l1_stash(struct iommu_domain *domain, u32 cpu)
Varun Sethi695093e2013-07-15 10:20:57 +0530318{
Christoph Hellwig4eeb96f2021-04-01 17:52:43 +0200319 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
Varun Sethi695093e2013-07-15 10:20:57 +0530320 unsigned long flags;
321 int ret;
322
323 spin_lock_irqsave(&dma_domain->domain_lock, flags);
Christoph Hellwig4eeb96f2021-04-01 17:52:43 +0200324 dma_domain->stash_id = get_stash_id(PAMU_ATTR_CACHE_L1, cpu);
Varun Sethi695093e2013-07-15 10:20:57 +0530325 if (dma_domain->stash_id == ~(u32)0) {
326 pr_debug("Invalid stash attributes\n");
327 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
328 return -EINVAL;
329 }
Varun Sethi695093e2013-07-15 10:20:57 +0530330 ret = update_domain_stash(dma_domain, dma_domain->stash_id);
Varun Sethi695093e2013-07-15 10:20:57 +0530331 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
332
333 return ret;
334}
335
/* Return the device's existing iommu group, or allocate a fresh one. */
static struct iommu_group *get_device_iommu_group(struct device *dev)
{
	struct iommu_group *grp = iommu_group_get(dev);

	return grp ? grp : iommu_group_alloc();
}
346
347static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
348{
349 u32 version;
350
351 /* Check the PCI controller version number by readding BRR1 register */
352 version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
353 version &= PCI_FSL_BRR1_VER;
Emil Medvecd70d462015-01-28 08:34:33 -0600354 /* If PCI controller version is >= 0x204 we can partition endpoints */
355 return version >= 0x204;
Varun Sethi695093e2013-07-15 10:20:57 +0530356}
357
358/* Get iommu group information from peer devices or devices on the parent bus */
359static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
360{
361 struct pci_dev *tmp;
362 struct iommu_group *group;
363 struct pci_bus *bus = pdev->bus;
364
Joerg Roedel9ed43662013-08-14 11:42:29 +0200365 /*
Varun Sethi695093e2013-07-15 10:20:57 +0530366 * Traverese the pci bus device list to get
367 * the shared iommu group.
368 */
369 while (bus) {
370 list_for_each_entry(tmp, &bus->devices, bus_list) {
371 if (tmp == pdev)
372 continue;
373 group = iommu_group_get(&tmp->dev);
374 if (group)
375 return group;
376 }
377
378 bus = bus->parent;
379 }
380
381 return NULL;
382}
383
384static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
385{
386 struct pci_controller *pci_ctl;
Colin Ian Kingbc46c222018-09-11 13:28:32 +0100387 bool pci_endpt_partitioning;
Varun Sethi695093e2013-07-15 10:20:57 +0530388 struct iommu_group *group = NULL;
Varun Sethi695093e2013-07-15 10:20:57 +0530389
390 pci_ctl = pci_bus_to_host(pdev->bus);
Colin Ian Kingbc46c222018-09-11 13:28:32 +0100391 pci_endpt_partitioning = check_pci_ctl_endpt_part(pci_ctl);
Varun Sethi695093e2013-07-15 10:20:57 +0530392 /* We can partition PCIe devices so assign device group to the device */
Colin Ian Kingbc46c222018-09-11 13:28:32 +0100393 if (pci_endpt_partitioning) {
Joerg Roedeld5e58292015-10-21 23:51:40 +0200394 group = pci_device_group(&pdev->dev);
Varun Sethi695093e2013-07-15 10:20:57 +0530395
Varun Sethi695093e2013-07-15 10:20:57 +0530396 /*
397 * PCIe controller is not a paritionable entity
398 * free the controller device iommu_group.
399 */
400 if (pci_ctl->parent->iommu_group)
401 iommu_group_remove_device(pci_ctl->parent);
402 } else {
403 /*
404 * All devices connected to the controller will share the
405 * PCI controllers device group. If this is the first
406 * device to be probed for the pci controller, copy the
407 * device group information from the PCI controller device
408 * node and remove the PCI controller iommu group.
409 * For subsequent devices, the iommu group information can
410 * be obtained from sibling devices (i.e. from the bus_devices
411 * link list).
412 */
413 if (pci_ctl->parent->iommu_group) {
414 group = get_device_iommu_group(pci_ctl->parent);
415 iommu_group_remove_device(pci_ctl->parent);
Emil Medvecd70d462015-01-28 08:34:33 -0600416 } else {
Varun Sethi695093e2013-07-15 10:20:57 +0530417 group = get_shared_pci_device_group(pdev);
Emil Medvecd70d462015-01-28 08:34:33 -0600418 }
Varun Sethi695093e2013-07-15 10:20:57 +0530419 }
420
Varun Sethi31704472014-06-24 19:27:17 +0530421 if (!group)
422 group = ERR_PTR(-ENODEV);
423
Varun Sethi695093e2013-07-15 10:20:57 +0530424 return group;
425}
426
Joerg Roedeld5e58292015-10-21 23:51:40 +0200427static struct iommu_group *fsl_pamu_device_group(struct device *dev)
Varun Sethi695093e2013-07-15 10:20:57 +0530428{
Varun Sethi31704472014-06-24 19:27:17 +0530429 struct iommu_group *group = ERR_PTR(-ENODEV);
Joerg Roedeld5e58292015-10-21 23:51:40 +0200430 int len;
Varun Sethi695093e2013-07-15 10:20:57 +0530431
432 /*
433 * For platform devices we allocate a separate group for
434 * each of the devices.
435 */
Joerg Roedeld5e58292015-10-21 23:51:40 +0200436 if (dev_is_pci(dev))
437 group = get_pci_device_group(to_pci_dev(dev));
438 else if (of_get_property(dev->of_node, "fsl,liodn", &len))
439 group = get_device_iommu_group(dev);
Varun Sethi695093e2013-07-15 10:20:57 +0530440
Joerg Roedeld5e58292015-10-21 23:51:40 +0200441 return group;
442}
Varun Sethi695093e2013-07-15 10:20:57 +0530443
/* There is a single PAMU iommu_device instance; every device maps to it. */
static struct iommu_device *fsl_pamu_probe_device(struct device *dev)
{
	return &pamu_iommu;
}
448
/* No per-device cleanup is required on release. */
static void fsl_pamu_release_device(struct device *dev)
{
}
452
/* IOMMU callbacks registered with the IOMMU core for the PAMU driver */
static const struct iommu_ops fsl_pamu_ops = {
	.capable	= fsl_pamu_capable,
	.domain_alloc	= fsl_pamu_domain_alloc,
	.domain_free    = fsl_pamu_domain_free,
	.attach_dev	= fsl_pamu_attach_device,
	.detach_dev	= fsl_pamu_detach_device,
	.iova_to_phys	= fsl_pamu_iova_to_phys,
	.probe_device	= fsl_pamu_probe_device,
	.release_device	= fsl_pamu_release_device,
	.device_group   = fsl_pamu_device_group,
};
464
Emil Medvecd70d462015-01-28 08:34:33 -0600465int __init pamu_domain_init(void)
Varun Sethi695093e2013-07-15 10:20:57 +0530466{
467 int ret = 0;
468
469 ret = iommu_init_mempool();
470 if (ret)
471 return ret;
472
Joerg Roedel3ff2dcc2017-08-23 16:28:09 +0200473 ret = iommu_device_sysfs_add(&pamu_iommu, NULL, NULL, "iommu0");
474 if (ret)
475 return ret;
476
Robin Murphy2d471b22021-04-01 14:56:26 +0100477 ret = iommu_device_register(&pamu_iommu, &fsl_pamu_ops, NULL);
Joerg Roedel3ff2dcc2017-08-23 16:28:09 +0200478 if (ret) {
479 iommu_device_sysfs_remove(&pamu_iommu);
480 pr_err("Can't register iommu device\n");
481 return ret;
482 }
483
Varun Sethi695093e2013-07-15 10:20:57 +0530484 bus_set_iommu(&platform_bus_type, &fsl_pamu_ops);
485 bus_set_iommu(&pci_bus_type, &fsl_pamu_ops);
486
487 return ret;
488}