// SPDX-License-Identifier: GPL-2.0
/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/export.h>
#include <linux/log2.h>
#include <linux/of_device.h>
#include <linux/dma-map-ops.h>
#include <asm/iommu-common.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"
#include "kernel.h"

#include "pci_sun4v.h"

#define DRIVER_NAME	"pci_sun4v"
#define PFX		DRIVER_NAME ": "

static unsigned long vpci_major;
static unsigned long vpci_minor;

struct vpci_version {
	unsigned long major;
	unsigned long minor;
};

/* Ordered from largest major to lowest */
static struct vpci_version vpci_versions[] = {
	{ .major = 2, .minor = 0 },
	{ .major = 1, .minor = 1 },
};

static unsigned long vatu_major = 1;
static unsigned long vatu_minor = 1;

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

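/* DMA mappings are set up through the hypervisor, one hypercall per batch
 * of pages.  Each CPU keeps a page-sized list of physical page addresses
 * (PGLIST_NENTS entries) which is filled by iommu_batch_add() and handed
 * to the hypervisor by iommu_batch_flush().
 */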
struct iommu_batch {
	struct device	*dev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
static int iommu_batch_initialized;

/* Interrupts must be disabled.  */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	p->dev		= dev;
	p->prot		= prot;
	p->entry	= entry;
	p->npages	= 0;
}

static inline bool iommu_use_atu(struct iommu *iommu, u64 mask)
{
	return iommu->atu && mask > DMA_BIT_MASK(32);
}

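/* Flush the pending batch to the hypervisor.  Pre-2.0 VPCI only understands
 * the READ and WRITE attributes, and the legacy pci_iommu_map call is used
 * unless the DMA mask selects the ATU (see iommu_use_atu()).
 */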
/* Interrupts must be disabled.  */
static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
{
	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
	u64 *pglist = p->pglist;
	u64 index_count;
	unsigned long devhandle = pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	unsigned long npages = p->npages;
	unsigned long iotsb_num;
	unsigned long ret;
	long num;

	/* VPCI maj=1, min=[0,1] only supports read and write */
	if (vpci_major < 2)
		prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);

	while (npages != 0) {
		if (!iommu_use_atu(pbm->iommu, mask)) {
			num = pci_sun4v_iommu_map(devhandle,
						  HV_PCI_TSBID(0, entry),
						  npages,
						  prot,
						  __pa(pglist));
			if (unlikely(num < 0)) {
				pr_err_ratelimited("%s: IOMMU map of [%08lx:%08llx:%lx:%lx:%lx] failed with status %ld\n",
						   __func__,
						   devhandle,
						   HV_PCI_TSBID(0, entry),
						   npages, prot, __pa(pglist),
						   num);
				return -1;
			}
		} else {
			index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry);
			iotsb_num = pbm->iommu->atu->iotsb->iotsb_num;
			ret = pci_sun4v_iotsb_map(devhandle,
						  iotsb_num,
						  index_count,
						  prot,
						  __pa(pglist),
						  &num);
			if (unlikely(ret != HV_EOK)) {
				pr_err_ratelimited("%s: ATU map of [%08lx:%lx:%llx:%lx:%lx] failed with status %ld\n",
						   __func__,
						   devhandle, iotsb_num,
						   index_count, prot,
						   __pa(pglist), ret);
				return -1;
			}
		}
		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}

static inline void iommu_batch_new_entry(unsigned long entry, u64 mask)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	if (p->entry + p->npages == entry)
		return;
	if (p->entry != ~0UL)
		iommu_batch_flush(p, mask);
	p->entry = entry;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_add(u64 phys_page, u64 mask)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return iommu_batch_flush(p, mask);

	return 0;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_end(u64 mask)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return iommu_batch_flush(p, mask);
}

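/* Coherent allocations are mapped through either the legacy 32-bit IOMMU
 * table or the ATU table, chosen from the device's coherent DMA mask.
 */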
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp,
				   unsigned long attrs)
{
	u64 mask;
	unsigned long flags, order, first_page, npages, n;
	unsigned long prot = 0;
	struct iommu *iommu;
	struct iommu_map_table *tbl;
	struct page *page;
	void *ret;
	long entry;
	int nid;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	if (attrs & DMA_ATTR_WEAK_ORDERING)
		prot = HV_PCI_MAP_ATTR_RELAXED_ORDER;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;
	mask = dev->coherent_dma_mask;
	if (!iommu_use_atu(iommu, mask))
		tbl = &iommu->tbl;
	else
		tbl = &iommu->atu->tbl;

	entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
				      (unsigned long)(-1), 0);

	if (unlikely(entry == IOMMU_ERROR_CODE))
		goto range_alloc_fail;

	*dma_addrp = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	iommu_batch_start(dev,
			  (HV_PCI_MAP_ATTR_READ | prot |
			   HV_PCI_MAP_ATTR_WRITE),
			  entry);

	for (n = 0; n < npages; n++) {
		long err = iommu_batch_add(first_page + (n * PAGE_SIZE), mask);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(iommu_batch_end(mask) < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	local_irq_restore(flags);
	iommu_tbl_range_free(tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);

range_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}

unsigned long dma_4v_iotsb_bind(unsigned long devhandle,
				unsigned long iotsb_num,
				struct pci_bus *bus_dev)
{
	struct pci_dev *pdev;
	unsigned long err;
	unsigned int bus;
	unsigned int device;
	unsigned int fun;

	list_for_each_entry(pdev, &bus_dev->devices, bus_list) {
		if (pdev->subordinate) {
			/* No need to bind pci bridge */
			dma_4v_iotsb_bind(devhandle, iotsb_num,
					  pdev->subordinate);
		} else {
			bus = bus_dev->number;
			device = PCI_SLOT(pdev->devfn);
			fun = PCI_FUNC(pdev->devfn);
			err = pci_sun4v_iotsb_bind(devhandle, iotsb_num,
						   HV_PCI_DEVICE_BUILD(bus,
								       device,
								       fun));

			/* If the bind fails for one device it is going to
			 * fail for the rest of the devices as well, because
			 * they all share the IOTSB.  So on failure simply
			 * return the error.
			 */
			if (err)
				return err;
		}
	}

	return 0;
}

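/* Tear down a mapping.  DVMA addresses at or below the 32-bit boundary
 * belong to the legacy IOMMU, anything above it to the ATU IOTSB.
 */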
static void dma_4v_iommu_demap(struct device *dev, unsigned long devhandle,
			       dma_addr_t dvma, unsigned long iotsb_num,
			       unsigned long entry, unsigned long npages)
{
	unsigned long num, flags;
	unsigned long ret;

	local_irq_save(flags);
	do {
		if (dvma <= DMA_BIT_MASK(32)) {
			num = pci_sun4v_iommu_demap(devhandle,
						    HV_PCI_TSBID(0, entry),
						    npages);
		} else {
			ret = pci_sun4v_iotsb_demap(devhandle, iotsb_num,
						    entry, npages, &num);
			if (unlikely(ret != HV_EOK)) {
				pr_err_ratelimited("pci_iotsb_demap() failed with error: %ld\n",
						   ret);
			}
		}
		entry += num;
		npages -= num;
	} while (npages != 0);
	local_irq_restore(flags);
}

static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
				 dma_addr_t dvma, unsigned long attrs)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	unsigned long order, npages, entry;
	unsigned long iotsb_num;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	atu = iommu->atu;
	devhandle = pbm->devhandle;

	if (!iommu_use_atu(iommu, dvma)) {
		tbl = &iommu->tbl;
		iotsb_num = 0; /* we don't care for legacy iommu */
	} else {
		tbl = &atu->tbl;
		iotsb_num = atu->iotsb->iotsb_num;
	}
	entry = ((dvma - tbl->table_map_base) >> IO_PAGE_SHIFT);
	dma_4v_iommu_demap(dev, devhandle, dvma, iotsb_num, entry, npages);
	iommu_tbl_range_free(tbl, dvma, npages, IOMMU_ERROR_CODE);
	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction,
				  unsigned long attrs)
{
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	u64 mask;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	unsigned long prot;
	dma_addr_t bus_addr, ret;
	long entry;

	iommu = dev->archdata.iommu;
	atu = iommu->atu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	oaddr = (unsigned long)(page_address(page) + offset);
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	mask = *dev->dma_mask;
	if (!iommu_use_atu(iommu, mask))
		tbl = &iommu->tbl;
	else
		tbl = &atu->tbl;

	entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
				      (unsigned long)(-1), 0);

	if (unlikely(entry == IOMMU_ERROR_CODE))
		goto bad;

	bus_addr = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	if (attrs & DMA_ATTR_WEAK_ORDERING)
		prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = iommu_batch_add(base_paddr, mask);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(iommu_batch_end(mask) < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_MAPPING_ERROR;

iommu_map_fail:
	local_irq_restore(flags);
	iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
	return DMA_MAPPING_ERROR;
}

static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction,
			      unsigned long attrs)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	unsigned long npages;
	unsigned long iotsb_num;
	long entry;
	u32 devhandle;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	atu = iommu->atu;
	devhandle = pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	if (bus_addr <= DMA_BIT_MASK(32)) {
		iotsb_num = 0; /* we don't care for legacy iommu */
		tbl = &iommu->tbl;
	} else {
		iotsb_num = atu->iotsb->iotsb_num;
		tbl = &atu->tbl;
	}
	entry = (bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT;
	dma_4v_iommu_demap(dev, devhandle, bus_addr, iotsb_num, entry, npages);
	iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
}

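/* Map a scatterlist.  IOMMU entries are allocated per segment, and
 * physically contiguous DMA ranges are merged subject to the device's
 * maximum segment size and segment boundary constraints.
 */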
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 unsigned long attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	u64 mask;
	unsigned long base_shift;
	long err;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	if (nelems == 0 || !iommu)
		return -EINVAL;
	atu = iommu->atu;

	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	if (attrs & DMA_ATTR_WEAK_ORDERING)
		prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, ~0UL);

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = dma_get_seg_boundary_nr_pages(dev, IO_PAGE_SHIFT);

	mask = *dev->dma_mask;
	if (!iommu_use_atu(iommu, mask))
		tbl = &iommu->tbl;
	else
		tbl = &atu->tbl;

	base_shift = tbl->table_map_base >> IO_PAGE_SHIFT;

	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_tbl_range_alloc(dev, tbl, npages,
					      &handle, (unsigned long)(-1), 0);

		/* Handle failure */
		if (unlikely(entry == IOMMU_ERROR_CODE)) {
			pr_err_ratelimited("iommu_alloc failed, iommu %p paddr %lx npages %lx\n",
					   tbl, paddr, npages);
			goto iommu_map_failed;
		}

		iommu_batch_new_entry(entry, mask);

		/* Convert entry to a dma_addr_t */
		dma_addr = tbl->table_map_base + (entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			err = iommu_batch_add(paddr, mask);
			if (unlikely(err < 0L))
				goto iommu_map_failed;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	err = iommu_batch_end(mask);

	if (unlikely(err < 0L))
		goto iommu_map_failed;

	local_irq_restore(flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);
			iommu_tbl_range_free(tbl, vaddr, npages,
					     IOMMU_ERROR_CODE);
			/* XXX demap? XXX */
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	local_irq_restore(flags);

	return -EINVAL;
}

static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    unsigned long attrs)
{
	struct pci_pbm_info *pbm;
	struct scatterlist *sg;
	struct iommu *iommu;
	struct atu *atu;
	unsigned long flags, entry;
	unsigned long iotsb_num;
	u32 devhandle;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	atu = iommu->atu;
	devhandle = pbm->devhandle;

	local_irq_save(flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages;
		struct iommu_map_table *tbl;
		unsigned long shift = IO_PAGE_SHIFT;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);

		if (dma_handle <= DMA_BIT_MASK(32)) {
			iotsb_num = 0; /* we don't care for legacy iommu */
			tbl = &iommu->tbl;
		} else {
			iotsb_num = atu->iotsb->iotsb_num;
			tbl = &atu->tbl;
		}
		entry = ((dma_handle - tbl->table_map_base) >> shift);
		dma_4v_iommu_demap(dev, devhandle, dma_handle, iotsb_num,
				   entry, npages);
		iommu_tbl_range_free(tbl, dma_handle, npages,
				     IOMMU_ERROR_CODE);
		sg = sg_next(sg);
	}

	local_irq_restore(flags);
}

static int dma_4v_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;

	if (ali_sound_dma_hack(dev, device_mask))
		return 1;
	if (device_mask < iommu->dma_addr_mask)
		return 0;
	return 1;
}

static const struct dma_map_ops sun4v_dma_ops = {
	.alloc				= dma_4v_alloc_coherent,
	.free				= dma_4v_free_coherent,
	.map_page			= dma_4v_map_page,
	.unmap_page			= dma_4v_unmap_page,
	.map_sg				= dma_4v_map_sg,
	.unmap_sg			= dma_4v_unmap_sg,
	.dma_supported			= dma_4v_supported,
};

static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
{
	struct property *prop;
	struct device_node *dp;

	dp = pbm->op->dev.of_node;
	prop = of_find_property(dp, "66mhz-capable", NULL);
	pbm->is_66mhz_capable = (prop != NULL);
	pbm->pci_bus = pci_scan_one_pbm(pbm, parent);

	/* XXX register error interrupt handlers XXX */
}

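/* Count the IOMMU entries already set up by the firmware (OBP).  Mappings
 * that point at memory the kernel owns are torn down; the rest are marked
 * used so they are never handed out again.
 */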
static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
					    struct iommu_map_table *iommu)
{
	struct iommu_pool *pool;
	unsigned long i, pool_nr, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (pool_nr = 0; pool_nr < iommu->nr_pools; pool_nr++) {
		pool = &(iommu->pools[pool_nr]);
		for (i = pool->start; i <= pool->end; i++) {
			unsigned long ret, io_attrs, ra;

			ret = pci_sun4v_iommu_getmap(devhandle,
						     HV_PCI_TSBID(0, i),
						     &io_attrs, &ra);
			if (ret == HV_EOK) {
				if (page_in_phys_avail(ra)) {
					pci_sun4v_iommu_demap(devhandle,
							      HV_PCI_TSBID(0,
									   i), 1);
				} else {
					cnt++;
					__set_bit(i, iommu->map);
				}
			}
		}
	}
	return cnt;
}

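/* Allocate and register the IOTSB used by the ATU: one 8-byte IOTTE per
 * IO page of the ATU range, then bind every device below the PBM's root
 * bus to it.
 */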
static int pci_sun4v_atu_alloc_iotsb(struct pci_pbm_info *pbm)
{
	struct atu *atu = pbm->iommu->atu;
	struct atu_iotsb *iotsb;
	void *table;
	u64 table_size;
	u64 iotsb_num;
	unsigned long order;
	unsigned long err;

	iotsb = kzalloc(sizeof(*iotsb), GFP_KERNEL);
	if (!iotsb) {
		err = -ENOMEM;
		goto out_err;
	}
	atu->iotsb = iotsb;

	/* calculate size of IOTSB */
	table_size = (atu->size / IO_PAGE_SIZE) * 8;
	order = get_order(table_size);
	table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!table) {
		err = -ENOMEM;
		goto table_failed;
	}
	iotsb->table = table;
	iotsb->ra = __pa(table);
	iotsb->dvma_size = atu->size;
	iotsb->dvma_base = atu->base;
	iotsb->table_size = table_size;
	iotsb->page_size = IO_PAGE_SIZE;

	/* configure and register IOTSB with HV */
	err = pci_sun4v_iotsb_conf(pbm->devhandle,
				   iotsb->ra,
				   iotsb->table_size,
				   iotsb->page_size,
				   iotsb->dvma_base,
				   &iotsb_num);
	if (err) {
		pr_err(PFX "pci_iotsb_conf failed error: %ld\n", err);
		goto iotsb_conf_failed;
	}
	iotsb->iotsb_num = iotsb_num;

	err = dma_4v_iotsb_bind(pbm->devhandle, iotsb_num, pbm->pci_bus);
	if (err) {
		pr_err(PFX "pci_iotsb_bind failed error: %ld\n", err);
		goto iotsb_conf_failed;
	}

	return 0;

iotsb_conf_failed:
	free_pages((unsigned long)table, order);
table_failed:
	kfree(iotsb);
out_err:
	return err;
}

static int pci_sun4v_atu_init(struct pci_pbm_info *pbm)
{
	struct atu *atu = pbm->iommu->atu;
	unsigned long err;
	const u64 *ranges;
	u64 map_size, num_iotte;
	u64 dma_mask;
	const u32 *page_size;
	int len;

	ranges = of_get_property(pbm->op->dev.of_node, "iommu-address-ranges",
				 &len);
	if (!ranges) {
		pr_err(PFX "No iommu-address-ranges\n");
		return -EINVAL;
	}

	page_size = of_get_property(pbm->op->dev.of_node, "iommu-pagesizes",
				    NULL);
	if (!page_size) {
		pr_err(PFX "No iommu-pagesizes\n");
		return -EINVAL;
	}

	/* There are 4 iommu-address-ranges supported. Each range is a pair
	 * of {base, size}. ranges[0] and ranges[1] are 32-bit address space,
	 * while ranges[2] and ranges[3] are 64-bit space. We want to use the
	 * 64-bit address ranges to support 64-bit addressing. Because the
	 * 'size' of ranges[2] and ranges[3] is the same, either one can be
	 * selected for mapping. However, since that size is too large for
	 * the OS to allocate an IOTSB for, we use a fixed size of 32G
	 * (ATU_64_SPACE_SIZE), which is more than enough for all PCIe
	 * devices to share.
	 */
	atu->ranges = (struct atu_ranges *)ranges;
	atu->base = atu->ranges[3].base;
	atu->size = ATU_64_SPACE_SIZE;

	/* Create IOTSB */
	err = pci_sun4v_atu_alloc_iotsb(pbm);
	if (err) {
		pr_err(PFX "Error creating ATU IOTSB\n");
		return err;
	}

	/* Create ATU iommu map.
	 * One bit represents one iotte in IOTSB table.
	 */
	dma_mask = (roundup_pow_of_two(atu->size) - 1UL);
	num_iotte = atu->size / IO_PAGE_SIZE;
	map_size = num_iotte / 8;
	atu->tbl.table_map_base = atu->base;
	atu->dma_addr_mask = dma_mask;
	atu->tbl.map = kzalloc(map_size, GFP_KERNEL);
	if (!atu->tbl.map)
		return -ENOMEM;

	iommu_tbl_pool_init(&atu->tbl, num_iotte, IO_PAGE_SHIFT,
			    NULL, false /* no large_pool */,
			    0 /* default npools */,
			    false /* want span boundary checking */);

	return 0;
}

static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
	struct iommu *iommu = pbm->iommu;
	unsigned long num_tsb_entries, sz;
	u32 dma_mask, dma_offset;
	const u32 *vdma;

	vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL);
	if (!vdma)
		vdma = vdma_default;

	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
		printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n",
		       vdma[0], vdma[1]);
		return -EINVAL;
	}

	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->tbl.table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map. */
	sz = (num_tsb_entries + 7) / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->tbl.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->tbl.map) {
		printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
			    NULL, false /* no large_pool */,
			    0 /* default npools */,
			    false /* want span boundary checking */);
	sz = probe_existing_entries(pbm, &iommu->tbl);
	if (sz)
		printk("%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);

	return 0;
}

#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
	u64		version_type;
#define MSIQ_VERSION_MASK		0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT		32
#define MSIQ_TYPE_MASK			0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT			0
#define MSIQ_TYPE_NONE			0x00
#define MSIQ_TYPE_MSG			0x01
#define MSIQ_TYPE_MSI32			0x02
#define MSIQ_TYPE_MSI64			0x03
#define MSIQ_TYPE_INTX			0x08
#define MSIQ_TYPE_NONE2			0xff

	u64		intx_sysino;
	u64		reserved1;
	u64		stick;
	u64		req_id;  /* bus/device/func */
#define MSIQ_REQID_BUS_MASK		0xff00UL
#define MSIQ_REQID_BUS_SHIFT		8
#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT		3
#define MSIQ_REQID_FUNC_MASK		0x0007UL
#define MSIQ_REQID_FUNC_SHIFT		0

	u64		msi_address;

	/* The format of this value is message type dependent.
	 * For MSI bits 15:0 are the data from the MSI packet.
	 * For MSI-X bits 31:0 are the data from the MSI packet.
	 * For MSG, the message code and message routing code where:
	 *	bits 39:32 is the bus/device/fn of the msg target-id
	 *	bits 18:16 is the message routing code
	 *	bits 7:0 is the message code
	 * For INTx the low order 2-bits are:
	 *	00 - INTA
	 *	01 - INTB
	 *	10 - INTC
	 *	11 - INTD
	 */
	u64		msi_data;

	u64		reserved2;
};

static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long *head)
{
	unsigned long err, limit;

	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -ENXIO;

	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	if (unlikely(*head >= limit))
		return -EFBIG;

	return 0;
}

static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
				 unsigned long msiqid, unsigned long *head,
				 unsigned long *msi)
{
	struct pci_sun4v_msiq_entry *ep;
	unsigned long err, type;

	/* Note: void pointer arithmetic, 'head' is a byte offset */
	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
				 (pbm->msiq_ent_count *
				  sizeof(struct pci_sun4v_msiq_entry))) +
	      *head);

	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
		return 0;

	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	*msi = ep->msi_data;

	err = pci_sun4v_msi_setstate(pbm->devhandle,
				     ep->msi_data /* msi_num */,
				     HV_MSISTATE_IDLE);
	if (unlikely(err))
		return -ENXIO;

	/* Clear the entry.  */
	ep->version_type &= ~MSIQ_TYPE_MASK;

	(*head) += sizeof(struct pci_sun4v_msiq_entry);
	if (*head >=
	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
		*head = 0;

	return 1;
}

static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long head)
{
	unsigned long err;

	err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -EINVAL;

	return 0;
}

static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			       unsigned long msi, int is_msi64)
{
	if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
				  (is_msi64 ?
				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
		return -ENXIO;
	if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
		return -ENXIO;
	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
		return -ENXIO;
	return 0;
}

static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
	unsigned long err, msiqid;

	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
	if (err)
		return -ENXIO;

	pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

	return 0;
}

static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long err, base = __pa(pages + (i * q_size));
		unsigned long ret1, ret2;

		err = pci_sun4v_msiq_conf(pbm->devhandle,
					  pbm->msiq_first + i,
					  base, pbm->msiq_ent_count);
		if (err) {
			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
			       err);
			goto h_error;
		}

		err = pci_sun4v_msiq_info(pbm->devhandle,
					  pbm->msiq_first + i,
					  &ret1, &ret2);
		if (err) {
			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
			       err);
			goto h_error;
		}
		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
			printk(KERN_ERR "MSI: Bogus qconf "
			       "expected[%lx:%x] got[%lx:%lx]\n",
			       base, pbm->msiq_ent_count,
			       ret1, ret2);
			goto h_error;
		}
	}

	return 0;

h_error:
	free_pages(pages, order);
	return -EINVAL;
}

static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = pbm->msiq_first + i;

		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
	}

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);

	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}

static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
				    unsigned long msiqid,
				    unsigned long devino)
{
	unsigned int irq = sun4v_build_irq(pbm->devhandle, devino);

	if (!irq)
		return -ENOMEM;

	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
		return -EINVAL;
	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
		return -EINVAL;

	return irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
	.get_head	=	pci_sun4v_get_head,
	.dequeue_msi	=	pci_sun4v_dequeue_msi,
	.set_head	=	pci_sun4v_set_head,
	.msi_setup	=	pci_sun4v_msi_setup,
	.msi_teardown	=	pci_sun4v_msi_teardown,
	.msiq_alloc	=	pci_sun4v_msiq_alloc,
	.msiq_free	=	pci_sun4v_msiq_free,
	.msiq_build_irq	=	pci_sun4v_msiq_build_irq,
};

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

static int pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
			      struct platform_device *op, u32 devhandle)
{
	struct device_node *dp = op->dev.of_node;
	int err;

	pbm->numa_node = of_node_to_nid(dp);

	pbm->pci_ops = &sun4v_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->op = op;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk("%s: SUN4V PCI Bus Module\n", pbm->name);
	printk("%s: On NUMA node %d\n", pbm->name, pbm->numa_node);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);

	err = pci_sun4v_iommu_init(pbm);
	if (err)
		return err;

	pci_sun4v_msi_init(pbm);

	pci_sun4v_scan_bus(pbm, &op->dev);

	/* If atu_init fails it is not a complete failure; we can
	 * still continue using the legacy iommu.
	 */
	if (pbm->iommu->atu) {
		err = pci_sun4v_atu_init(pbm);
		if (err) {
			kfree(pbm->iommu->atu);
			pbm->iommu->atu = NULL;
			pr_err(PFX "ATU init failed, err=%d\n", err);
		}
	}

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	return 0;
}

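/* Probe: negotiate the VPCI (and, when available, ATU) hypervisor API
 * groups once, allocate the per-cpu batch page lists, then set up the
 * PBM, its IOMMU and, if supported, the ATU.
 */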
static int pci_sun4v_probe(struct platform_device *op)
{
	const struct linux_prom64_registers *regs;
	static int hvapi_negotiated = 0;
	struct pci_pbm_info *pbm;
	struct device_node *dp;
	struct iommu *iommu;
	struct atu *atu;
	u32 devhandle;
	int i, err = -ENODEV;
	static bool hv_atu = true;

	dp = op->dev.of_node;

	if (!hvapi_negotiated++) {
		for (i = 0; i < ARRAY_SIZE(vpci_versions); i++) {
			vpci_major = vpci_versions[i].major;
			vpci_minor = vpci_versions[i].minor;

			err = sun4v_hvapi_register(HV_GRP_PCI, vpci_major,
						   &vpci_minor);
			if (!err)
				break;
		}

		if (err) {
			pr_err(PFX "Could not register hvapi, err=%d\n", err);
			return err;
		}
		pr_info(PFX "Registered hvapi major[%lu] minor[%lu]\n",
			vpci_major, vpci_minor);

		err = sun4v_hvapi_register(HV_GRP_ATU, vatu_major, &vatu_minor);
		if (err) {
			/* don't return an error if we fail to register the
			 * ATU group, but ATU hcalls won't be available.
			 */
			hv_atu = false;
		} else {
			pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n",
				vatu_major, vatu_minor);
		}

		dma_ops = &sun4v_dma_ops;
	}

	regs = of_get_property(dp, "reg", NULL);
	err = -ENODEV;
	if (!regs) {
		printk(KERN_ERR PFX "Could not find config registers\n");
		goto out_err;
	}
	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	err = -ENOMEM;
	if (!iommu_batch_initialized) {
		for_each_possible_cpu(i) {
			unsigned long page = get_zeroed_page(GFP_KERNEL);

			if (!page)
				goto out_err;

			per_cpu(iommu_batch, i).pglist = (u64 *) page;
		}
		iommu_batch_initialized = 1;
	}

	pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
	if (!pbm) {
		printk(KERN_ERR PFX "Could not allocate pci_pbm_info\n");
		goto out_err;
	}

	iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
	if (!iommu) {
		printk(KERN_ERR PFX "Could not allocate pbm iommu\n");
		goto out_free_controller;
	}

	pbm->iommu = iommu;
	iommu->atu = NULL;
	if (hv_atu) {
		atu = kzalloc(sizeof(*atu), GFP_KERNEL);
		if (!atu)
			pr_err(PFX "Could not allocate atu\n");
		else
			iommu->atu = atu;
	}

	err = pci_sun4v_pbm_init(pbm, op, devhandle);
	if (err)
		goto out_free_iommu;

	dev_set_drvdata(&op->dev, pbm);

	return 0;

out_free_iommu:
	kfree(iommu->atu);
	kfree(pbm->iommu);

out_free_controller:
	kfree(pbm);

out_err:
	return err;
}

static const struct of_device_id pci_sun4v_match[] = {
	{
		.name = "pci",
		.compatible = "SUNW,sun4v-pci",
	},
	{},
};

static struct platform_driver pci_sun4v_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = pci_sun4v_match,
	},
	.probe		= pci_sun4v_probe,
};

static int __init pci_sun4v_init(void)
{
	return platform_driver_register(&pci_sun4v_driver);
}

subsys_initcall(pci_sun4v_init);