// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for Graphics Address Relocation Table on Tegra20
 *
 * Copyright (c) 2010-2012, NVIDIA CORPORATION.  All rights reserved.
 *
 * Author: Hiroshi DOYU <hdoyu@nvidia.com>
 */

#define dev_fmt(fmt)	"gart: " fmt

#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include <soc/tegra/mc.h>

#define GART_REG_BASE		0x24
#define GART_CONFIG		(0x24 - GART_REG_BASE)
#define GART_ENTRY_ADDR		(0x28 - GART_REG_BASE)
#define GART_ENTRY_DATA		(0x2c - GART_REG_BASE)

#define GART_ENTRY_PHYS_ADDR_VALID	BIT(31)

#define GART_PAGE_SHIFT		12
#define GART_PAGE_SIZE		(1 << GART_PAGE_SHIFT)
#define GART_PAGE_MASK		GENMASK(30, GART_PAGE_SHIFT)

/* bitmap of the page sizes currently supported */
#define GART_IOMMU_PGSIZES	(GART_PAGE_SIZE)

struct gart_device {
	void __iomem *regs;
	u32 *savedata;
	unsigned long iovmm_base;		/* offset to vmm_area start */
	unsigned long iovmm_end;		/* offset to vmm_area end */
	spinlock_t pte_lock;			/* for pagetable */
	spinlock_t dom_lock;			/* for active domain */
	unsigned int active_devices;		/* number of active devices */
	struct iommu_domain *active_domain;	/* current active domain */
	struct iommu_device iommu;		/* IOMMU Core handle */
	struct device *dev;
};

static struct gart_device *gart_handle; /* unique for a system */

static bool gart_debug;

/*
 * Any interaction between any block on PPSB and a block on APB or AHB
 * must have these read-back to ensure the APB/AHB bus transaction is
 * complete before initiating activity on the PPSB block.
 */
#define FLUSH_GART_REGS(gart)	readl_relaxed((gart)->regs + GART_CONFIG)

#define for_each_gart_pte(gart, iova)					\
	for (iova = gart->iovmm_base;					\
	     iova < gart->iovmm_end;					\
	     iova += GART_PAGE_SIZE)

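/*
 * The GART page table is held in the hardware itself rather than in memory:
 * a PTE is selected by writing its IOVA to GART_ENTRY_ADDR and is then read
 * or written through GART_ENTRY_DATA.
 */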
static inline void gart_set_pte(struct gart_device *gart,
				unsigned long iova, unsigned long pte)
{
	writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR);
	writel_relaxed(pte, gart->regs + GART_ENTRY_DATA);
}

static inline unsigned long gart_read_pte(struct gart_device *gart,
					  unsigned long iova)
{
	unsigned long pte;

	writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR);
	pte = readl_relaxed(gart->regs + GART_ENTRY_DATA);

	return pte;
}

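/*
 * Program every PTE in the aperture, either from a previously saved table in
 * @data or cleared to zero, then enable address translation via GART_CONFIG.
 */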
static void do_gart_setup(struct gart_device *gart, const u32 *data)
{
	unsigned long iova;

	for_each_gart_pte(gart, iova)
		gart_set_pte(gart, iova, data ? *(data++) : 0);

	writel_relaxed(1, gart->regs + GART_CONFIG);
	FLUSH_GART_REGS(gart);
}

static inline bool gart_iova_range_invalid(struct gart_device *gart,
					   unsigned long iova, size_t bytes)
{
	return unlikely(iova < gart->iovmm_base || bytes != GART_PAGE_SIZE ||
			iova + bytes > gart->iovmm_end);
}

static inline bool gart_pte_valid(struct gart_device *gart, unsigned long iova)
{
	return !!(gart_read_pte(gart, iova) & GART_ENTRY_PHYS_ADDR_VALID);
}

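/*
 * The GART provides a single translation context, so only one domain can be
 * active at a time; attaching a device while a different domain is active
 * fails with -EBUSY.
 */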
static int gart_iommu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct gart_device *gart = gart_handle;
	int ret = 0;

	spin_lock(&gart->dom_lock);

	if (gart->active_domain && gart->active_domain != domain) {
		ret = -EBUSY;
	} else if (dev_iommu_priv_get(dev) != domain) {
		dev_iommu_priv_set(dev, domain);
		gart->active_domain = domain;
		gart->active_devices++;
	}

	spin_unlock(&gart->dom_lock);

	return ret;
}

static void gart_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct gart_device *gart = gart_handle;

	spin_lock(&gart->dom_lock);

	if (dev_iommu_priv_get(dev) == domain) {
		dev_iommu_priv_set(dev, NULL);

		if (--gart->active_devices == 0)
			gart->active_domain = NULL;
	}

	spin_unlock(&gart->dom_lock);
}

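/*
 * Only unmanaged domains are supported, with the geometry fixed to the GART
 * aperture discovered at probe time.
 */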
static struct iommu_domain *gart_iommu_domain_alloc(unsigned type)
{
	struct iommu_domain *domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (domain) {
		domain->geometry.aperture_start = gart_handle->iovmm_base;
		domain->geometry.aperture_end = gart_handle->iovmm_end - 1;
		domain->geometry.force_aperture = true;
	}

	return domain;
}

static void gart_iommu_domain_free(struct iommu_domain *domain)
{
	WARN_ON(gart_handle->active_domain == domain);
	kfree(domain);
}

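/*
 * The __gart_iommu_map()/__gart_iommu_unmap() helpers must be called with
 * pte_lock held. With gart_debug set, overwriting a live entry or unmapping
 * an already-invalid one is reported and refused.
 */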
static inline int __gart_iommu_map(struct gart_device *gart, unsigned long iova,
				   unsigned long pa)
{
	if (unlikely(gart_debug && gart_pte_valid(gart, iova))) {
		dev_err(gart->dev, "Page entry is in-use\n");
		return -EINVAL;
	}

	gart_set_pte(gart, iova, GART_ENTRY_PHYS_ADDR_VALID | pa);

	return 0;
}

static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t pa, size_t bytes, int prot, gfp_t gfp)
{
	struct gart_device *gart = gart_handle;
	int ret;

	if (gart_iova_range_invalid(gart, iova, bytes))
		return -EINVAL;

	spin_lock(&gart->pte_lock);
	ret = __gart_iommu_map(gart, iova, (unsigned long)pa);
	spin_unlock(&gart->pte_lock);

	return ret;
}

static inline int __gart_iommu_unmap(struct gart_device *gart,
				     unsigned long iova)
{
	if (unlikely(gart_debug && !gart_pte_valid(gart, iova))) {
		dev_err(gart->dev, "Page entry is invalid\n");
		return -EINVAL;
	}

	gart_set_pte(gart, iova, 0);

	return 0;
}

static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t bytes, struct iommu_iotlb_gather *gather)
{
	struct gart_device *gart = gart_handle;
	int err;

	if (gart_iova_range_invalid(gart, iova, bytes))
		return 0;

	spin_lock(&gart->pte_lock);
	err = __gart_iommu_unmap(gart, iova);
	spin_unlock(&gart->pte_lock);

	return err ? 0 : bytes;
}

static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct gart_device *gart = gart_handle;
	unsigned long pte;

	if (gart_iova_range_invalid(gart, iova, GART_PAGE_SIZE))
		return -EINVAL;

	spin_lock(&gart->pte_lock);
	pte = gart_read_pte(gart, iova);
	spin_unlock(&gart->pte_lock);

	return pte & GART_PAGE_MASK;
}

static bool gart_iommu_capable(enum iommu_cap cap)
{
	return false;
}

static struct iommu_device *gart_iommu_probe_device(struct device *dev)
{
	if (!dev_iommu_fwspec_get(dev))
		return ERR_PTR(-ENODEV);

	return &gart_handle->iommu;
}

static void gart_iommu_release_device(struct device *dev)
{
}

static int gart_iommu_of_xlate(struct device *dev,
			       struct of_phandle_args *args)
{
	return 0;
}

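/*
 * The ->iotlb_sync_map and ->iotlb_sync callbacks read back GART_CONFIG so
 * that the relaxed PTE writes are guaranteed to have reached the hardware
 * (see the FLUSH_GART_REGS() comment above) before DMA proceeds.
 */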
static void gart_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
				size_t size)
{
	FLUSH_GART_REGS(gart_handle);
}

static void gart_iommu_sync(struct iommu_domain *domain,
			    struct iommu_iotlb_gather *gather)
{
	size_t length = gather->end - gather->start + 1;

	gart_iommu_sync_map(domain, gather->start, length);
}

static const struct iommu_ops gart_iommu_ops = {
	.capable	= gart_iommu_capable,
	.domain_alloc	= gart_iommu_domain_alloc,
	.domain_free	= gart_iommu_domain_free,
	.attach_dev	= gart_iommu_attach_dev,
	.detach_dev	= gart_iommu_detach_dev,
	.probe_device	= gart_iommu_probe_device,
	.release_device	= gart_iommu_release_device,
	.device_group	= generic_device_group,
	.map		= gart_iommu_map,
	.unmap		= gart_iommu_unmap,
	.iova_to_phys	= gart_iommu_iova_to_phys,
	.pgsize_bitmap	= GART_IOMMU_PGSIZES,
	.of_xlate	= gart_iommu_of_xlate,
	.iotlb_sync_map	= gart_iommu_sync_map,
	.iotlb_sync	= gart_iommu_sync,
};

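/*
 * Since the page table lives in hardware registers, suspend copies every PTE
 * into the savedata buffer and resume programs them back via do_gart_setup().
 */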
int tegra_gart_suspend(struct gart_device *gart)
{
	u32 *data = gart->savedata;
	unsigned long iova;

	/*
	 * All GART users shall be suspended at this point. Disable
	 * address translation to trap all GART accesses as invalid
	 * memory accesses.
	 */
	writel_relaxed(0, gart->regs + GART_CONFIG);
	FLUSH_GART_REGS(gart);

	for_each_gart_pte(gart, iova)
		*(data++) = gart_read_pte(gart, iova);

	return 0;
}

int tegra_gart_resume(struct gart_device *gart)
{
	do_gart_setup(gart, gart->savedata);

	return 0;
}

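/*
 * Instantiated by the Tegra memory-controller driver, which passes its device
 * and register mapping in @mc; the GART registers start at GART_REG_BASE
 * within that register space, and the second MEM resource of the device
 * describes the IOVA aperture.
 */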
struct gart_device *tegra_gart_probe(struct device *dev, struct tegra_mc *mc)
{
	struct gart_device *gart;
	struct resource *res;
	int err;

	BUILD_BUG_ON(PAGE_SHIFT != GART_PAGE_SHIFT);

	/* the GART memory aperture is required */
	res = platform_get_resource(to_platform_device(dev), IORESOURCE_MEM, 1);
	if (!res) {
		dev_err(dev, "Memory aperture resource unavailable\n");
		return ERR_PTR(-ENXIO);
	}

	gart = kzalloc(sizeof(*gart), GFP_KERNEL);
	if (!gart)
		return ERR_PTR(-ENOMEM);

	gart_handle = gart;

	gart->dev = dev;
	gart->regs = mc->regs + GART_REG_BASE;
	gart->iovmm_base = res->start;
	gart->iovmm_end = res->end + 1;
	spin_lock_init(&gart->pte_lock);
	spin_lock_init(&gart->dom_lock);

	do_gart_setup(gart, NULL);

	err = iommu_device_sysfs_add(&gart->iommu, dev, NULL, "gart");
	if (err)
		goto free_gart;

	err = iommu_device_register(&gart->iommu, &gart_iommu_ops, dev);
	if (err)
		goto remove_sysfs;

	gart->savedata = vmalloc(resource_size(res) / GART_PAGE_SIZE *
				 sizeof(u32));
	if (!gart->savedata) {
		err = -ENOMEM;
		goto unregister_iommu;
	}

	return gart;

unregister_iommu:
	iommu_device_unregister(&gart->iommu);
remove_sysfs:
	iommu_device_sysfs_remove(&gart->iommu);
free_gart:
	kfree(gart);

	return ERR_PTR(err);
}

module_param(gart_debug, bool, 0644);
MODULE_PARM_DESC(gart_debug, "Enable GART debugging");