/*
 * IOMMU API for GART in Tegra20
 *
 * Copyright (c) 2010-2012, NVIDIA CORPORATION.  All rights reserved.
 *
 * Author: Hiroshi DOYU <hdoyu@nvidia.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
#define dev_fmt(fmt)    "gart: " fmt

#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include <soc/tegra/mc.h>

/* bitmap of the page sizes currently supported */
#define GART_IOMMU_PGSIZES      (SZ_4K)

#define GART_REG_BASE           0x24
#define GART_CONFIG             (0x24 - GART_REG_BASE)
#define GART_ENTRY_ADDR         (0x28 - GART_REG_BASE)
#define GART_ENTRY_DATA         (0x2c - GART_REG_BASE)
#define GART_ENTRY_PHYS_ADDR_VALID      (1 << 31)

#define GART_PAGE_SHIFT         12
#define GART_PAGE_SIZE          (1 << GART_PAGE_SHIFT)
#define GART_PAGE_MASK                                          \
        (~(GART_PAGE_SIZE - 1) & ~GART_ENTRY_PHYS_ADDR_VALID)

struct gart_device {
        void __iomem            *regs;
        u32                     *savedata;
        u32                     page_count;     /* total remappable size */
        dma_addr_t              iovmm_base;     /* offset to vmm_area */
        spinlock_t              pte_lock;       /* for pagetable */
        spinlock_t              dom_lock;       /* for active domain */
        unsigned int            active_devices; /* number of active devices */
        struct iommu_domain     *active_domain; /* current active domain */
        struct device           *dev;

        struct iommu_device     iommu;          /* IOMMU Core handle */
};

static struct gart_device *gart_handle; /* unique for a system */

static bool gart_debug;

#define GART_PTE(_pfn)                                          \
        (GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT))

/*
 * Any interaction between any block on PPSB and a block on APB or AHB
 * must have these read-back to ensure the APB/AHB bus transaction is
 * complete before initiating activity on the PPSB block.
 */
#define FLUSH_GART_REGS(gart)   ((void)readl((gart)->regs + GART_CONFIG))

#define for_each_gart_pte(gart, iova)                                   \
        for (iova = gart->iovmm_base;                                   \
             iova < gart->iovmm_base + GART_PAGE_SIZE * gart->page_count; \
             iova += GART_PAGE_SIZE)

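/*
 * The page table is not memory-mapped: a PTE is selected by writing its
 * IOVA to GART_ENTRY_ADDR and is then read or written through
 * GART_ENTRY_DATA. Callers serialize this two-register sequence,
 * normally under pte_lock.
 */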
static inline void gart_set_pte(struct gart_device *gart,
                                unsigned long offs, u32 pte)
{
        writel(offs, gart->regs + GART_ENTRY_ADDR);
        writel(pte, gart->regs + GART_ENTRY_DATA);

        dev_dbg(gart->dev, "%s %08lx:%08x\n",
                pte ? "map" : "unmap", offs, pte & GART_PAGE_MASK);
}

static inline unsigned long gart_read_pte(struct gart_device *gart,
                                          unsigned long offs)
{
        unsigned long pte;

        writel(offs, gart->regs + GART_ENTRY_ADDR);
        pte = readl(gart->regs + GART_ENTRY_DATA);

        return pte;
}

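/*
 * Program every PTE in the aperture, either restored from a snapshot or
 * cleared to zero, then write GART_CONFIG to enable translation. The
 * final read-back commits the posted register writes before any DMA can
 * observe them.
 */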
static void do_gart_setup(struct gart_device *gart, const u32 *data)
{
        unsigned long iova;

        for_each_gart_pte(gart, iova)
                gart_set_pte(gart, iova, data ? *(data++) : 0);

        writel(1, gart->regs + GART_CONFIG);
        FLUSH_GART_REGS(gart);
}

#ifdef DEBUG
static void gart_dump_table(struct gart_device *gart)
{
        unsigned long iova;
        unsigned long flags;

        spin_lock_irqsave(&gart->pte_lock, flags);
        for_each_gart_pte(gart, iova) {
                unsigned long pte;

                pte = gart_read_pte(gart, iova);

                dev_dbg(gart->dev, "%s %08lx:%08lx\n",
                        (GART_ENTRY_PHYS_ADDR_VALID & pte) ? "v" : " ",
                        iova, pte & GART_PAGE_MASK);
        }
        spin_unlock_irqrestore(&gart->pte_lock, flags);
}
#else
static inline void gart_dump_table(struct gart_device *gart)
{
}
#endif

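/* Check that an IOVA range falls entirely within the GART aperture. */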
static inline bool gart_iova_range_valid(struct gart_device *gart,
                                         unsigned long iova, size_t bytes)
{
        unsigned long iova_start, iova_end, gart_start, gart_end;

        iova_start = iova;
        iova_end = iova_start + bytes - 1;
        gart_start = gart->iovmm_base;
        gart_end = gart_start + gart->page_count * GART_PAGE_SIZE - 1;

        if (iova_start < gart_start)
                return false;
        if (iova_end > gart_end)
                return false;
        return true;
}

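/*
 * The GART provides a single translation aperture, so only one domain
 * can be active at a time; attaching a device while a different domain
 * is active fails with -EBUSY. Devices attached to the active domain
 * are counted so the domain is released only when the last one detaches.
 */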
static int gart_iommu_attach_dev(struct iommu_domain *domain,
                                 struct device *dev)
{
        struct gart_device *gart = gart_handle;
        int ret = 0;

        spin_lock(&gart->dom_lock);

        if (gart->active_domain && gart->active_domain != domain) {
                ret = -EBUSY;
        } else if (dev->archdata.iommu != domain) {
                dev->archdata.iommu = domain;
                gart->active_domain = domain;
                gart->active_devices++;
        }

        spin_unlock(&gart->dom_lock);

        return ret;
}

static void gart_iommu_detach_dev(struct iommu_domain *domain,
                                  struct device *dev)
{
        struct gart_device *gart = gart_handle;

        spin_lock(&gart->dom_lock);

        if (dev->archdata.iommu == domain) {
                dev->archdata.iommu = NULL;

                if (--gart->active_devices == 0)
                        gart->active_domain = NULL;
        }

        spin_unlock(&gart->dom_lock);
}

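/*
 * Only unmanaged domains are supported. The domain geometry is pinned
 * to the GART aperture, with force_aperture set so that callers cannot
 * map IOVAs the hardware is unable to translate.
 */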
static struct iommu_domain *gart_iommu_domain_alloc(unsigned type)
{
        struct gart_device *gart = gart_handle;
        struct iommu_domain *domain;

        if (type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;

        domain = kzalloc(sizeof(*domain), GFP_KERNEL);
        if (domain) {
                domain->geometry.aperture_start = gart->iovmm_base;
                domain->geometry.aperture_end = gart->iovmm_base +
                                        gart->page_count * GART_PAGE_SIZE - 1;
                domain->geometry.force_aperture = true;
        }

        return domain;
}

static void gart_iommu_domain_free(struct iommu_domain *domain)
{
        WARN_ON(gart_handle->active_domain == domain);
        kfree(domain);
}

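/*
 * Map a single 4KiB page. The physical address must belong to a valid
 * pfn, and when the gart_debug module parameter is set, remapping an
 * entry that is already valid fails with -EBUSY instead of silently
 * overwriting it.
 */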
static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
                          phys_addr_t pa, size_t bytes, int prot)
{
        struct gart_device *gart = gart_handle;
        unsigned long flags;
        unsigned long pfn;
        unsigned long pte;

        if (!gart_iova_range_valid(gart, iova, bytes))
                return -EINVAL;

        spin_lock_irqsave(&gart->pte_lock, flags);
        pfn = __phys_to_pfn(pa);
        if (!pfn_valid(pfn)) {
                dev_err(gart->dev, "Invalid page: %pa\n", &pa);
                spin_unlock_irqrestore(&gart->pte_lock, flags);
                return -EINVAL;
        }
        if (gart_debug) {
                pte = gart_read_pte(gart, iova);
                if (pte & GART_ENTRY_PHYS_ADDR_VALID) {
                        spin_unlock_irqrestore(&gart->pte_lock, flags);
                        dev_err(gart->dev, "Page entry is in-use\n");
                        return -EBUSY;
                }
        }
        gart_set_pte(gart, iova, GART_PTE(pfn));
        spin_unlock_irqrestore(&gart->pte_lock, flags);
        return 0;
}

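/*
 * Unmap by clearing the PTE. The write is committed to hardware by the
 * iotlb_sync callback, which the IOMMU core invokes after unmapping.
 */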
static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
                               size_t bytes)
{
        struct gart_device *gart = gart_handle;
        unsigned long flags;

        if (!gart_iova_range_valid(gart, iova, bytes))
                return 0;

        spin_lock_irqsave(&gart->pte_lock, flags);
        gart_set_pte(gart, iova, 0);
        spin_unlock_irqrestore(&gart->pte_lock, flags);
        return bytes;
}

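/* Translate an IOVA back to a physical address by reading its PTE. */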
static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
                                           dma_addr_t iova)
{
        struct gart_device *gart = gart_handle;
        unsigned long pte;
        phys_addr_t pa;
        unsigned long flags;

        if (!gart_iova_range_valid(gart, iova, 0))
                return -EINVAL;

        spin_lock_irqsave(&gart->pte_lock, flags);
        pte = gart_read_pte(gart, iova);
        spin_unlock_irqrestore(&gart->pte_lock, flags);

        pa = (pte & GART_PAGE_MASK);
        if (!pfn_valid(__phys_to_pfn(pa))) {
                dev_err(gart->dev, "No entry for %08llx:%pa\n",
                        (unsigned long long)iova, &pa);
                gart_dump_table(gart);
                return -EINVAL;
        }
        return pa;
}

static bool gart_iommu_capable(enum iommu_cap cap)
{
        return false;
}

static int gart_iommu_add_device(struct device *dev)
{
        struct iommu_group *group;

        if (!dev->iommu_fwspec)
                return -ENODEV;

        group = iommu_group_get_for_dev(dev);
        if (IS_ERR(group))
                return PTR_ERR(group);

        iommu_group_put(group);

        iommu_device_link(&gart_handle->iommu, dev);

        return 0;
}

static void gart_iommu_remove_device(struct device *dev)
{
        iommu_group_remove_device(dev);
        iommu_device_unlink(&gart_handle->iommu, dev);
}

static int gart_iommu_of_xlate(struct device *dev,
                               struct of_phandle_args *args)
{
        return 0;
}

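/*
 * The driver exposes no distinct IOTLB invalidation: both sync callbacks
 * reduce to the register read-back that flushes outstanding posted PTE
 * writes to the hardware.
 */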
static void gart_iommu_sync(struct iommu_domain *domain)
{
        struct gart_device *gart = gart_handle;

        FLUSH_GART_REGS(gart);
}

static const struct iommu_ops gart_iommu_ops = {
        .capable = gart_iommu_capable,
        .domain_alloc = gart_iommu_domain_alloc,
        .domain_free = gart_iommu_domain_free,
        .attach_dev = gart_iommu_attach_dev,
        .detach_dev = gart_iommu_detach_dev,
        .add_device = gart_iommu_add_device,
        .remove_device = gart_iommu_remove_device,
        .device_group = generic_device_group,
        .map = gart_iommu_map,
        .unmap = gart_iommu_unmap,
        .iova_to_phys = gart_iommu_iova_to_phys,
        .pgsize_bitmap = GART_IOMMU_PGSIZES,
        .of_xlate = gart_iommu_of_xlate,
        .iotlb_sync_map = gart_iommu_sync,
        .iotlb_sync = gart_iommu_sync,
};

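/*
 * So that translations survive a suspend/resume cycle, the entire page
 * table is snapshotted into the savedata buffer on suspend and replayed
 * through do_gart_setup() on resume.
 */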
int tegra_gart_suspend(struct gart_device *gart)
{
        unsigned long iova;
        u32 *data = gart->savedata;
        unsigned long flags;

        spin_lock_irqsave(&gart->pte_lock, flags);
        for_each_gart_pte(gart, iova)
                *(data++) = gart_read_pte(gart, iova);
        spin_unlock_irqrestore(&gart->pte_lock, flags);
        return 0;
}

int tegra_gart_resume(struct gart_device *gart)
{
        unsigned long flags;

        spin_lock_irqsave(&gart->pte_lock, flags);
        do_gart_setup(gart, gart->savedata);
        spin_unlock_irqrestore(&gart->pte_lock, flags);
        return 0;
}

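/*
 * Invoked from the Tegra memory-controller driver: the GART registers
 * sit inside the MC register space (at GART_REG_BASE), while the
 * remappable aperture is described by the MC device's second memory
 * resource.
 */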
struct gart_device *tegra_gart_probe(struct device *dev, struct tegra_mc *mc)
{
        struct gart_device *gart;
        struct resource *res_remap;
        void __iomem *gart_regs;
        int ret;

        BUILD_BUG_ON(PAGE_SHIFT != GART_PAGE_SHIFT);

        /* the GART memory aperture is required */
        res_remap = platform_get_resource(to_platform_device(dev),
                                          IORESOURCE_MEM, 1);
        if (!res_remap) {
                dev_err(dev, "GART memory aperture expected\n");
                return ERR_PTR(-ENXIO);
        }

        gart = kzalloc(sizeof(*gart), GFP_KERNEL);
        if (!gart) {
                dev_err(dev, "failed to allocate gart_device\n");
                return ERR_PTR(-ENOMEM);
        }

        ret = iommu_device_sysfs_add(&gart->iommu, dev, NULL, "gart");
        if (ret) {
                dev_err(dev, "Failed to register IOMMU in sysfs\n");
                goto free_gart;
        }

        iommu_device_set_ops(&gart->iommu, &gart_iommu_ops);
        iommu_device_set_fwnode(&gart->iommu, dev->fwnode);

        ret = iommu_device_register(&gart->iommu);
        if (ret) {
                dev_err(dev, "Failed to register IOMMU\n");
                goto remove_sysfs;
        }

        gart->dev = dev;
        gart_regs = mc->regs + GART_REG_BASE;
        spin_lock_init(&gart->pte_lock);
        spin_lock_init(&gart->dom_lock);
        gart->regs = gart_regs;
        gart->iovmm_base = (dma_addr_t)res_remap->start;
        gart->page_count = (resource_size(res_remap) >> GART_PAGE_SHIFT);

        gart->savedata = vmalloc(array_size(sizeof(u32), gart->page_count));
        if (!gart->savedata) {
                dev_err(dev, "failed to allocate context save area\n");
                ret = -ENOMEM;
                goto unregister_iommu;
        }

        do_gart_setup(gart, NULL);

        gart_handle = gart;

        return gart;

unregister_iommu:
        iommu_device_unregister(&gart->iommu);
remove_sysfs:
        iommu_device_sysfs_remove(&gart->iommu);
free_gart:
        kfree(gart);

        return ERR_PTR(ret);
}

module_param(gart_debug, bool, 0644);
MODULE_PARM_DESC(gart_debug, "Enable GART debugging");