/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/export.h>
#include <linux/log2.h>
#include <linux/of_device.h>
#include <linux/iommu-common.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

#define DRIVER_NAME	"pci_sun4v"
#define PFX		DRIVER_NAME ": "

static unsigned long vpci_major;
static unsigned long vpci_minor;

struct vpci_version {
	unsigned long major;
	unsigned long minor;
};

/* Ordered from largest major to lowest */
static struct vpci_version vpci_versions[] = {
	{ .major = 2, .minor = 0 },
	{ .major = 1, .minor = 1 },
};

static unsigned long vatu_major = 1;
static unsigned long vatu_minor = 1;

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

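/* DMA mappings are programmed into the IOMMU via hypervisor calls.  To
 * amortize the cost of those traps, translations are staged in a
 * per-cpu page list and handed to the hypervisor in batches of up to
 * PGLIST_NENTS entries at a time (see iommu_batch_flush() below).
 */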
struct iommu_batch {
	struct device	*dev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
static int iommu_batch_initialized;

/* Interrupts must be disabled.  */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	p->dev		= dev;
	p->prot		= prot;
	p->entry	= entry;
	p->npages	= 0;
}

/* Interrupts must be disabled.  */
static long iommu_batch_flush(struct iommu_batch *p)
{
	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
	unsigned long devhandle = pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	u64 *pglist = p->pglist;
	unsigned long npages = p->npages;

	/* VPCI maj=1, min=[0,1] only supports read and write */
	if (vpci_major < 2)
		prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);

	while (npages != 0) {
		long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot, __pa(pglist));
		if (unlikely(num < 0)) {
			if (printk_ratelimit())
				printk("iommu_batch_flush: IOMMU map of "
				       "[%08lx:%08llx:%lx:%lx:%lx] failed with "
				       "status %ld\n",
				       devhandle, HV_PCI_TSBID(0, entry),
				       npages, prot, __pa(pglist), num);
			return -1;
		}

		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}

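/* Flush the pending batch if the next mapping would not extend it
 * contiguously.  An entry of ~0UL marks a batch that has not been
 * given a starting entry yet (the scatterlist path starts batches
 * lazily this way).
 */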
static inline void iommu_batch_new_entry(unsigned long entry)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	if (p->entry + p->npages == entry)
		return;
	if (p->entry != ~0UL)
		iommu_batch_flush(p);
	p->entry = entry;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_add(u64 phys_page)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return iommu_batch_flush(p);

	return 0;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_end(void)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return iommu_batch_flush(p);
}

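/* Coherent allocations: grab real pages, reserve a contiguous run of
 * IOTSB entries from the per-device map table, then program the
 * translations through the batching machinery above.
 */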
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp,
				   unsigned long attrs)
{
	unsigned long flags, order, first_page, npages, n;
	unsigned long prot = 0;
	struct iommu *iommu;
	struct page *page;
	void *ret;
	long entry;
	int nid;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	if (attrs & DMA_ATTR_WEAK_ORDERING)
		prot = HV_PCI_MAP_ATTR_RELAXED_ORDER;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
				      (unsigned long)(-1), 0);

	if (unlikely(entry == IOMMU_ERROR_CODE))
		goto range_alloc_fail;

	*dma_addrp = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	iommu_batch_start(dev,
			  (HV_PCI_MAP_ATTR_READ | prot |
			   HV_PCI_MAP_ATTR_WRITE),
			  entry);

	for (n = 0; n < npages; n++) {
		long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	/* Re-enable interrupts before unwinding; the failed map attempt
	 * left them disabled.
	 */
	local_irq_restore(flags);
	iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);

range_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}

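/* Walk every function below @bus_dev (descending through bridges) and
 * bind it to the shared IOTSB identified by @iotsb_num, so that its
 * DMA is translated through the ATU.
 */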
static unsigned long dma_4v_iotsb_bind(unsigned long devhandle,
				       unsigned long iotsb_num,
				       struct pci_bus *bus_dev)
{
	struct pci_dev *pdev;
	unsigned long err;
	unsigned int bus;
	unsigned int device;
	unsigned int fun;

	list_for_each_entry(pdev, &bus_dev->devices, bus_list) {
		if (pdev->subordinate) {
			/* No need to bind pci bridges themselves, just
			 * the devices behind them.
			 */
			dma_4v_iotsb_bind(devhandle, iotsb_num,
					  pdev->subordinate);
		} else {
			bus = bus_dev->number;
			device = PCI_SLOT(pdev->devfn);
			fun = PCI_FUNC(pdev->devfn);
			err = pci_sun4v_iotsb_bind(devhandle, iotsb_num,
						   HV_PCI_DEVICE_BUILD(bus,
								       device,
								       fun));

			/* If bind fails for one device it is going to
			 * fail for the rest of the devices too, because
			 * they share the IOTSB.  So in case of failure
			 * simply return the error.
			 */
			if (err)
				return err;
		}
	}

	return 0;
}

static void dma_4v_iommu_demap(void *demap_arg, unsigned long entry,
			       unsigned long npages)
{
	u32 devhandle = *(u32 *)demap_arg;
	unsigned long num, flags;

	local_irq_save(flags);
	do {
		num = pci_sun4v_iommu_demap(devhandle,
					    HV_PCI_TSBID(0, entry),
					    npages);

		entry += num;
		npages -= num;
	} while (npages != 0);
	local_irq_restore(flags);
}

static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
				 dma_addr_t dvma, unsigned long attrs)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;
	entry = ((dvma - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
	dma_4v_iommu_demap(&devhandle, entry, npages);
	iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);
	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction,
				  unsigned long attrs)
{
	struct iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;

	iommu = dev->archdata.iommu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	oaddr = (unsigned long)(page_address(page) + offset);
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
				      (unsigned long)(-1), 0);

	if (unlikely(entry == IOMMU_ERROR_CODE))
		goto bad;

	bus_addr = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	if (attrs & DMA_ATTR_WEAK_ORDERING)
		prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = iommu_batch_add(base_paddr);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;

iommu_map_fail:
	/* Re-enable interrupts before unwinding. */
	local_irq_restore(flags);
	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
	return DMA_ERROR_CODE;
}

static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction,
			      unsigned long attrs)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;
	entry = (bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT;
	dma_4v_iommu_demap(&devhandle, entry, npages);
	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
}

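/* Scatterlist mapping.  Each segment gets its own IOTSB range; adjacent
 * segments are merged back together when the allocator happens to hand
 * out contiguous DMA addresses, subject to the device's maximum segment
 * size and segment boundary constraints.
 */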
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 unsigned long attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct iommu *iommu;
	unsigned long base_shift;
	long err;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	if (nelems == 0 || !iommu)
		return 0;

	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	if (attrs & DMA_ATTR_WEAK_ORDERING)
		prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, ~0UL);

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
	base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
					      &handle, (unsigned long)(-1), 0);

		/* Handle failure */
		if (unlikely(entry == IOMMU_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		iommu_batch_new_entry(entry);

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			err = iommu_batch_add(paddr);
			if (unlikely(err < 0L))
				goto iommu_map_failed;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	err = iommu_batch_end();

	if (unlikely(err < 0L))
		goto iommu_map_failed;

	local_irq_restore(flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);
			iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
					     IOMMU_ERROR_CODE);
			/* XXX demap? XXX */
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	local_irq_restore(flags);

	return 0;
}

static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    unsigned long attrs)
{
	struct pci_pbm_info *pbm;
	struct scatterlist *sg;
	struct iommu *iommu;
	unsigned long flags, entry;
	u32 devhandle;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	local_irq_save(flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages;
		struct iommu_map_table *tbl = &iommu->tbl;
		unsigned long shift = IO_PAGE_SHIFT;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
		entry = ((dma_handle - tbl->table_map_base) >> shift);
		dma_4v_iommu_demap(&devhandle, entry, npages);
		iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
				     IOMMU_ERROR_CODE);
		sg = sg_next(sg);
	}

	local_irq_restore(flags);
}

static struct dma_map_ops sun4v_dma_ops = {
	.alloc			= dma_4v_alloc_coherent,
	.free			= dma_4v_free_coherent,
	.map_page		= dma_4v_map_page,
	.unmap_page		= dma_4v_unmap_page,
	.map_sg			= dma_4v_map_sg,
	.unmap_sg		= dma_4v_unmap_sg,
};

static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
{
	struct property *prop;
	struct device_node *dp;

	dp = pbm->op->dev.of_node;
	prop = of_find_property(dp, "66mhz-capable", NULL);
	pbm->is_66mhz_capable = (prop != NULL);
	pbm->pci_bus = pci_scan_one_pbm(pbm, parent);

	/* XXX register error interrupt handlers XXX */
}

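/* Scan the IOTSB for translations the firmware (OBP) left behind and
 * import them into the allocator bitmap so they are never handed out
 * again.  Entries that point at RAM the kernel may use are demapped
 * instead of imported.
 */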
static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
					    struct iommu_map_table *iommu)
{
	struct iommu_pool *pool;
	unsigned long i, pool_nr, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (pool_nr = 0; pool_nr < iommu->nr_pools; pool_nr++) {
		pool = &(iommu->pools[pool_nr]);
		for (i = pool->start; i <= pool->end; i++) {
			unsigned long ret, io_attrs, ra;

			ret = pci_sun4v_iommu_getmap(devhandle,
						     HV_PCI_TSBID(0, i),
						     &io_attrs, &ra);
			if (ret == HV_EOK) {
				if (page_in_phys_avail(ra)) {
					pci_sun4v_iommu_demap(devhandle,
							      HV_PCI_TSBID(0,
							      i), 1);
				} else {
					cnt++;
					__set_bit(i, iommu->map);
				}
			}
		}
	}
	return cnt;
}

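/* Allocate and register an IOTSB (I/O Translation Storage Buffer) for
 * the ATU with the hypervisor.  The table needs one 8-byte IOTTE per
 * IO_PAGE_SIZE page of the DVMA range it covers.
 */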
static int pci_sun4v_atu_alloc_iotsb(struct pci_pbm_info *pbm)
{
	struct atu *atu = pbm->iommu->atu;
	struct atu_iotsb *iotsb;
	void *table;
	u64 table_size;
	u64 iotsb_num;
	unsigned long order;
	unsigned long err;

	iotsb = kzalloc(sizeof(*iotsb), GFP_KERNEL);
	if (!iotsb) {
		err = -ENOMEM;
		goto out_err;
	}
	atu->iotsb = iotsb;

	/* calculate size of IOTSB */
	table_size = (atu->size / IO_PAGE_SIZE) * 8;
	order = get_order(table_size);
	table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!table) {
		err = -ENOMEM;
		goto table_failed;
	}
	iotsb->table = table;
	iotsb->ra = __pa(table);
	iotsb->dvma_size = atu->size;
	iotsb->dvma_base = atu->base;
	iotsb->table_size = table_size;
	iotsb->page_size = IO_PAGE_SIZE;

	/* configure and register IOTSB with HV */
	err = pci_sun4v_iotsb_conf(pbm->devhandle,
				   iotsb->ra,
				   iotsb->table_size,
				   iotsb->page_size,
				   iotsb->dvma_base,
				   &iotsb_num);
	if (err) {
		pr_err(PFX "pci_iotsb_conf failed error: %ld\n", err);
		goto iotsb_conf_failed;
	}
	iotsb->iotsb_num = iotsb_num;

	err = dma_4v_iotsb_bind(pbm->devhandle, iotsb_num, pbm->pci_bus);
	if (err) {
		pr_err(PFX "pci_iotsb_bind failed error: %ld\n", err);
		goto iotsb_conf_failed;
	}

	return 0;

iotsb_conf_failed:
	free_pages((unsigned long)table, order);
table_failed:
	kfree(iotsb);
out_err:
	return err;
}

static int pci_sun4v_atu_init(struct pci_pbm_info *pbm)
{
	struct atu *atu = pbm->iommu->atu;
	unsigned long err;
	const u64 *ranges;
	u64 map_size, num_iotte;
	u64 dma_mask;
	const u32 *page_size;
	int len;

	ranges = of_get_property(pbm->op->dev.of_node, "iommu-address-ranges",
				 &len);
	if (!ranges) {
		pr_err(PFX "No iommu-address-ranges\n");
		return -EINVAL;
	}

	page_size = of_get_property(pbm->op->dev.of_node, "iommu-pagesizes",
				    NULL);
	if (!page_size) {
		pr_err(PFX "No iommu-pagesizes\n");
		return -EINVAL;
	}

	/* There are 4 iommu-address-ranges supported.  Each range is a
	 * {base, size} pair.  ranges[0] and ranges[1] are 32-bit address
	 * space while ranges[2] and ranges[3] are 64-bit space.  We want
	 * to use the 64-bit address ranges to support 64-bit addressing.
	 * Because the 'size' of ranges[2] and ranges[3] is the same, we
	 * can select either of them for mapping.  However, since that
	 * size is too large for the OS to allocate an IOTSB for, we use
	 * a fixed 32G size (ATU_64_SPACE_SIZE), which is more than
	 * enough for all PCIe devices to share.
	 */
	atu->ranges = (struct atu_ranges *)ranges;
	atu->base = atu->ranges[3].base;
	atu->size = ATU_64_SPACE_SIZE;

	/* Create IOTSB */
	err = pci_sun4v_atu_alloc_iotsb(pbm);
	if (err) {
		pr_err(PFX "Error creating ATU IOTSB\n");
		return err;
	}

	/* Create ATU iommu map.
	 * One bit represents one iotte in the IOTSB table.
	 */
	dma_mask = (roundup_pow_of_two(atu->size) - 1UL);
	num_iotte = atu->size / IO_PAGE_SIZE;
	map_size = num_iotte / 8;
	atu->tbl.table_map_base = atu->base;
	atu->dma_addr_mask = dma_mask;
	atu->tbl.map = kzalloc(map_size, GFP_KERNEL);
	if (!atu->tbl.map)
		return -ENOMEM;

	iommu_tbl_pool_init(&atu->tbl, num_iotte, IO_PAGE_SHIFT,
			    NULL, false /* no large_pool */,
			    0 /* default npools */,
			    false /* want span boundary checking */);

	return 0;
}

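/* Set up the legacy (32-bit DVMA) IOMMU for one PBM.  The "virtual-dma"
 * OBP property supplies the {base, size} of the DVMA window; the
 * allocator bitmap gets one bit per TSB entry.
 */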
static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
	struct iommu *iommu = pbm->iommu;
	unsigned long num_tsb_entries, sz;
	u32 dma_mask, dma_offset;
	const u32 *vdma;

	vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL);
	if (!vdma)
		vdma = vdma_default;

	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
		printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n",
		       vdma[0], vdma[1]);
		return -EINVAL;
	}

	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->tbl.table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map. */
	sz = (num_tsb_entries + 7) / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->tbl.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->tbl.map) {
		printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
			    NULL, false /* no large_pool */,
			    0 /* default npools */,
			    false /* want span boundary checking */);
	sz = probe_existing_entries(pbm, &iommu->tbl);
	if (sz)
		printk("%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);

	return 0;
}

#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
	u64		version_type;
#define MSIQ_VERSION_MASK		0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT		32
#define MSIQ_TYPE_MASK			0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT			0
#define MSIQ_TYPE_NONE			0x00
#define MSIQ_TYPE_MSG			0x01
#define MSIQ_TYPE_MSI32			0x02
#define MSIQ_TYPE_MSI64			0x03
#define MSIQ_TYPE_INTX			0x08
#define MSIQ_TYPE_NONE2			0xff

	u64		intx_sysino;
	u64		reserved1;
	u64		stick;
	u64		req_id;  /* bus/device/func */
#define MSIQ_REQID_BUS_MASK		0xff00UL
#define MSIQ_REQID_BUS_SHIFT		8
#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT		3
#define MSIQ_REQID_FUNC_MASK		0x0007UL
#define MSIQ_REQID_FUNC_SHIFT		0

	u64		msi_address;

	/* The format of this value is message type dependent.
	 * For MSI bits 15:0 are the data from the MSI packet.
	 * For MSI-X bits 31:0 are the data from the MSI packet.
	 * For MSG, the message code and message routing code where:
	 *	bits 39:32 is the bus/device/fn of the msg target-id
	 *	bits 18:16 is the message routing code
	 *	bits 7:0 is the message code
	 * For INTx the low order 2-bits are:
	 *	00 - INTA
	 *	01 - INTB
	 *	10 - INTC
	 *	11 - INTD
	 */
	u64		msi_data;

	u64		reserved2;
};

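/* MSI event queue helpers.  The hypervisor maintains a head offset per
 * queue; we read entries at the head, hand the MSI number back to the
 * generic sparc64 MSI layer, and advance the head (in byte units).
 */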
static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long *head)
{
	unsigned long err, limit;

	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -ENXIO;

	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	if (unlikely(*head >= limit))
		return -EFBIG;

	return 0;
}

static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
				 unsigned long msiqid, unsigned long *head,
				 unsigned long *msi)
{
	struct pci_sun4v_msiq_entry *ep;
	unsigned long err, type;

	/* Note: void pointer arithmetic, 'head' is a byte offset  */
	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
				 (pbm->msiq_ent_count *
				  sizeof(struct pci_sun4v_msiq_entry))) +
	      *head);

	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
		return 0;

	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	*msi = ep->msi_data;

	err = pci_sun4v_msi_setstate(pbm->devhandle,
				     ep->msi_data /* msi_num */,
				     HV_MSISTATE_IDLE);
	if (unlikely(err))
		return -ENXIO;

	/* Clear the entry.  */
	ep->version_type &= ~MSIQ_TYPE_MASK;

	(*head) += sizeof(struct pci_sun4v_msiq_entry);
	if (*head >=
	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
		*head = 0;

	return 1;
}

static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long head)
{
	unsigned long err;

	err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -EINVAL;

	return 0;
}

static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			       unsigned long msi, int is_msi64)
{
	if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
				  (is_msi64 ?
				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
		return -ENXIO;
	if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
		return -ENXIO;
	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
		return -ENXIO;
	return 0;
}

static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
	unsigned long err, msiqid;

	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
	if (err)
		return -ENXIO;

	pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

	return 0;
}

static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long err, base = __pa(pages + (i * q_size));
		unsigned long ret1, ret2;

		err = pci_sun4v_msiq_conf(pbm->devhandle,
					  pbm->msiq_first + i,
					  base, pbm->msiq_ent_count);
		if (err) {
			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
			       err);
			goto h_error;
		}

		err = pci_sun4v_msiq_info(pbm->devhandle,
					  pbm->msiq_first + i,
					  &ret1, &ret2);
		if (err) {
			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
			       err);
			goto h_error;
		}
		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
			printk(KERN_ERR "MSI: Bogus qconf "
			       "expected[%lx:%x] got[%lx:%lx]\n",
			       base, pbm->msiq_ent_count,
			       ret1, ret2);
			goto h_error;
		}
	}

	return 0;

h_error:
	free_pages(pages, order);
	return -EINVAL;
}

static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = pbm->msiq_first + i;

		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
	}

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);

	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}

static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
				    unsigned long msiqid,
				    unsigned long devino)
{
	unsigned int irq = sun4v_build_irq(pbm->devhandle, devino);

	if (!irq)
		return -ENOMEM;

	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
		return -EINVAL;
	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
		return -EINVAL;

	return irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
	.get_head	= pci_sun4v_get_head,
	.dequeue_msi	= pci_sun4v_dequeue_msi,
	.set_head	= pci_sun4v_set_head,
	.msi_setup	= pci_sun4v_msi_setup,
	.msi_teardown	= pci_sun4v_msi_teardown,
	.msiq_alloc	= pci_sun4v_msiq_alloc,
	.msiq_free	= pci_sun4v_msiq_free,
	.msiq_build_irq	= pci_sun4v_msiq_build_irq,
};

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

static int pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
			      struct platform_device *op, u32 devhandle)
{
	struct device_node *dp = op->dev.of_node;
	int err;

	pbm->numa_node = of_node_to_nid(dp);

	pbm->pci_ops = &sun4v_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->op = op;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk("%s: SUN4V PCI Bus Module\n", pbm->name);
	printk("%s: On NUMA node %d\n", pbm->name, pbm->numa_node);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);

	err = pci_sun4v_iommu_init(pbm);
	if (err)
		return err;

	pci_sun4v_msi_init(pbm);

	pci_sun4v_scan_bus(pbm, &op->dev);

	/* If ATU init fails, it's not a complete failure; we can still
	 * continue using the legacy IOMMU.
	 */
	if (pbm->iommu->atu) {
		err = pci_sun4v_atu_init(pbm);
		if (err) {
			kfree(pbm->iommu->atu);
			pbm->iommu->atu = NULL;
			pr_err(PFX "ATU init failed, err=%d\n", err);
		}
	}

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	return 0;
}

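/* Probe: negotiate the VPCI hypervisor API group (newest version
 * first), optionally the ATU group, then allocate the per-cpu batch
 * page lists and bring up the PBM.
 */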
static int pci_sun4v_probe(struct platform_device *op)
{
	const struct linux_prom64_registers *regs;
	static int hvapi_negotiated = 0;
	struct pci_pbm_info *pbm;
	struct device_node *dp;
	struct iommu *iommu;
	struct atu *atu;
	u32 devhandle;
	int i, err = -ENODEV;
	static bool hv_atu = true;

	dp = op->dev.of_node;

	if (!hvapi_negotiated++) {
		for (i = 0; i < ARRAY_SIZE(vpci_versions); i++) {
			vpci_major = vpci_versions[i].major;
			vpci_minor = vpci_versions[i].minor;

			err = sun4v_hvapi_register(HV_GRP_PCI, vpci_major,
						   &vpci_minor);
			if (!err)
				break;
		}

		if (err) {
			pr_err(PFX "Could not register hvapi, err=%d\n", err);
			return err;
		}
		pr_info(PFX "Registered hvapi major[%lu] minor[%lu]\n",
			vpci_major, vpci_minor);

		err = sun4v_hvapi_register(HV_GRP_ATU, vatu_major, &vatu_minor);
		if (err) {
			/* don't return an error if we fail to register the
			 * ATU group, but ATU hcalls won't be available.
			 */
			hv_atu = false;
			pr_err(PFX "Could not register hvapi ATU err=%d\n",
			       err);
		} else {
			pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n",
				vatu_major, vatu_minor);
		}

		dma_ops = &sun4v_dma_ops;
	}

	regs = of_get_property(dp, "reg", NULL);
	err = -ENODEV;
	if (!regs) {
		printk(KERN_ERR PFX "Could not find config registers\n");
		goto out_err;
	}
	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	err = -ENOMEM;
	if (!iommu_batch_initialized) {
		for_each_possible_cpu(i) {
			unsigned long page = get_zeroed_page(GFP_KERNEL);

			if (!page)
				goto out_err;

			per_cpu(iommu_batch, i).pglist = (u64 *) page;
		}
		iommu_batch_initialized = 1;
	}

	pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
	if (!pbm) {
		printk(KERN_ERR PFX "Could not allocate pci_pbm_info\n");
		goto out_err;
	}

	iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
	if (!iommu) {
		printk(KERN_ERR PFX "Could not allocate pbm iommu\n");
		goto out_free_controller;
	}

	pbm->iommu = iommu;
	iommu->atu = NULL;
	if (hv_atu) {
		atu = kzalloc(sizeof(*atu), GFP_KERNEL);
		if (!atu)
			pr_err(PFX "Could not allocate atu\n");
		else
			iommu->atu = atu;
	}

	err = pci_sun4v_pbm_init(pbm, op, devhandle);
	if (err)
		goto out_free_iommu;

	dev_set_drvdata(&op->dev, pbm);

	return 0;

out_free_iommu:
	kfree(iommu->atu);
	kfree(pbm->iommu);

out_free_controller:
	kfree(pbm);

out_err:
	return err;
}

static const struct of_device_id pci_sun4v_match[] = {
	{
		.name = "pci",
		.compatible = "SUNW,sun4v-pci",
	},
	{},
};

static struct platform_driver pci_sun4v_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = pci_sun4v_match,
	},
	.probe		= pci_sun4v_probe,
};

static int __init pci_sun4v_init(void)
{
	return platform_driver_register(&pci_sun4v_driver);
}

subsys_initcall(pci_sun4v_init);