// SPDX-License-Identifier: GPL-2.0
/*
 * Volume Management Device driver
 * Copyright (c) 2015, Intel Corporation.
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/srcu.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

#include <asm/irqdomain.h>
#include <asm/device.h>
#include <asm/msi.h>
#include <asm/msidef.h>

#define VMD_CFGBAR	0
#define VMD_MEMBAR1	2
#define VMD_MEMBAR2	4

#define PCI_REG_VMCAP		0x40
#define BUS_RESTRICT_CAP(vmcap)	(vmcap & 0x1)
#define PCI_REG_VMCONFIG	0x44
#define BUS_RESTRICT_CFG(vmcfg)	((vmcfg >> 8) & 0x3)
#define PCI_REG_VMLOCK		0x70
#define MB2_SHADOW_EN(vmlock)	(vmlock & 0x2)

enum vmd_features {
	/*
	 * Device may contain registers that hint at the physical location of
	 * the MEMBARs, allowing proper address translation during resource
	 * assignment to enable guest virtualization.
	 */
	VMD_FEAT_HAS_MEMBAR_SHADOW	= (1 << 0),

	/*
	 * Device may provide root port configuration information which limits
	 * bus numbering.
	 */
	VMD_FEAT_HAS_BUS_RESTRICTIONS	= (1 << 1),
};

/*
 * Lock for manipulating VMD IRQ lists.
 */
static DEFINE_RAW_SPINLOCK(list_lock);

/**
 * struct vmd_irq - private data to map driver IRQ to the VMD shared vector
 * @node:	list item for parent traversal.
 * @irq:	back pointer to parent.
 * @enabled:	true if driver enabled IRQ
 * @virq:	the virtual IRQ value provided to the requesting driver.
 *
 * Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped to
 * a VMD IRQ using this structure.
 */
struct vmd_irq {
	struct list_head	node;
	struct vmd_irq_list	*irq;
	bool			enabled;
	unsigned int		virq;
};

/**
 * struct vmd_irq_list - list of driver requested IRQs mapping to a VMD vector
 * @irq_list:	the list of IRQs the VMD vector demuxes to.
 * @srcu:	SRCU struct for local synchronization.
 * @count:	number of child IRQs assigned to this vector; used to track
 *		sharing.
 */
struct vmd_irq_list {
	struct list_head	irq_list;
	struct srcu_struct	srcu;
	unsigned int		count;
};

struct vmd_dev {
	struct pci_dev		*dev;

	spinlock_t		cfg_lock;
	char __iomem		*cfgbar;

	int msix_count;
	struct vmd_irq_list	*irqs;

	struct pci_sysdata	sysdata;
	struct resource		resources[3];
	struct irq_domain	*irq_domain;
	struct pci_bus		*bus;

	struct dma_map_ops	dma_ops;
	struct dma_domain	dma_domain;
};

static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus)
{
	return container_of(bus->sysdata, struct vmd_dev, sysdata);
}

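/*
 * The position of an IRQ list within vmd->irqs is also the MSI-X table
 * index that vmd_compose_msi_msg() encodes as the destination ID.
 */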
static inline unsigned int index_from_irqs(struct vmd_dev *vmd,
					   struct vmd_irq_list *irqs)
{
	return irqs - vmd->irqs;
}

/*
 * Drivers managing a device in a VMD domain allocate their own IRQs as before,
 * but the MSI entry for the hardware it's driving will be programmed with a
 * destination ID for the VMD MSI-X table. The VMD muxes interrupts in its
 * domain into one of its own, and the VMD driver de-muxes these for the
 * handlers sharing that VMD IRQ. The vmd irq_domain provides the operations
 * and irq_chip to set this up.
 */
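/*
 * For example, with msidef.h's encoding and MSI-X table index 3, the message
 * composed below would carry address 0xfee03000: the fixed MSI base address
 * with destination ID 3, steering the child's interrupt to VMD's fourth
 * vector rather than to the child device's own entry.
 */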
static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct vmd_irq *vmdirq = data->chip_data;
	struct vmd_irq_list *irq = vmdirq->irq;
	struct vmd_dev *vmd = irq_data_get_irq_handler_data(data);

	msg->address_hi = MSI_ADDR_BASE_HI;
	msg->address_lo = MSI_ADDR_BASE_LO |
			  MSI_ADDR_DEST_ID(index_from_irqs(vmd, irq));
	msg->data = 0;
}

/*
 * We rely on MSI_FLAG_USE_DEF_CHIP_OPS to set the IRQ mask/unmask ops.
 */
static void vmd_irq_enable(struct irq_data *data)
{
	struct vmd_irq *vmdirq = data->chip_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&list_lock, flags);
	WARN_ON(vmdirq->enabled);
	list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
	vmdirq->enabled = true;
	raw_spin_unlock_irqrestore(&list_lock, flags);

	data->chip->irq_unmask(data);
}

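/*
 * Mask the vector before unlinking so the demux handler cannot race with a
 * disappearing child IRQ; final teardown still waits in vmd_msi_free().
 */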
static void vmd_irq_disable(struct irq_data *data)
{
	struct vmd_irq *vmdirq = data->chip_data;
	unsigned long flags;

	data->chip->irq_mask(data);

	raw_spin_lock_irqsave(&list_lock, flags);
	if (vmdirq->enabled) {
		list_del_rcu(&vmdirq->node);
		vmdirq->enabled = false;
	}
	raw_spin_unlock_irqrestore(&list_lock, flags);
}

/*
 * XXX: Stubbed until we develop an acceptable way to avoid conflicts with
 * other devices sharing the same vector.
 */
static int vmd_irq_set_affinity(struct irq_data *data,
				const struct cpumask *dest, bool force)
{
	return -EINVAL;
}

static struct irq_chip vmd_msi_controller = {
	.name			= "VMD-MSI",
	.irq_enable		= vmd_irq_enable,
	.irq_disable		= vmd_irq_disable,
	.irq_compose_msi_msg	= vmd_compose_msi_msg,
	.irq_set_affinity	= vmd_irq_set_affinity,
};

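/*
 * All child MSIs share hwirq 0; which child handler actually runs is decided
 * at demux time in vmd_irq(), not by the hwirq number.
 */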
static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info,
				     msi_alloc_info_t *arg)
{
	return 0;
}

/*
 * XXX: We can be even smarter selecting the best IRQ once we solve the
 * affinity problem.
 */
static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *desc)
{
	int i, best = 1;
	unsigned long flags;

	if (vmd->msix_count == 1)
		return &vmd->irqs[0];

	/*
	 * White list for fast-interrupt handlers. All others will share the
	 * "slow" interrupt vector.
	 */
	switch (msi_desc_to_pci_dev(desc)->class) {
	case PCI_CLASS_STORAGE_EXPRESS:
		break;
	default:
		return &vmd->irqs[0];
	}

	raw_spin_lock_irqsave(&list_lock, flags);
	for (i = 1; i < vmd->msix_count; i++)
		if (vmd->irqs[i].count < vmd->irqs[best].count)
			best = i;
	vmd->irqs[best].count++;
	raw_spin_unlock_irqrestore(&list_lock, flags);

	return &vmd->irqs[best];
}

static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
			unsigned int virq, irq_hw_number_t hwirq,
			msi_alloc_info_t *arg)
{
	struct msi_desc *desc = arg->desc;
	struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus);
	struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);
	unsigned int index, vector;

	if (!vmdirq)
		return -ENOMEM;

	INIT_LIST_HEAD(&vmdirq->node);
	vmdirq->irq = vmd_next_irq(vmd, desc);
	vmdirq->virq = virq;
	index = index_from_irqs(vmd, vmdirq->irq);
	vector = pci_irq_vector(vmd->dev, index);

	irq_domain_set_info(domain, virq, vector, info->chip, vmdirq,
			    handle_untracked_irq, vmd, NULL);
	return 0;
}

static void vmd_msi_free(struct irq_domain *domain,
			struct msi_domain_info *info, unsigned int virq)
{
	struct vmd_irq *vmdirq = irq_get_chip_data(virq);
	unsigned long flags;

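	/* Wait for any in-flight vmd_irq() users of vmdirq to drain */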
	synchronize_srcu(&vmdirq->irq->srcu);

	/* XXX: Potential optimization to rebalance */
	raw_spin_lock_irqsave(&list_lock, flags);
	vmdirq->irq->count--;
	raw_spin_unlock_irqrestore(&list_lock, flags);

	kfree(vmdirq);
}

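/*
 * Cap the request at the number of VMD vectors; a positive return tells the
 * MSI core how many vectors are actually available to retry with.
 */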
static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev,
			   int nvec, msi_alloc_info_t *arg)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = vmd_from_bus(pdev->bus);

	if (nvec > vmd->msix_count)
		return vmd->msix_count;

	memset(arg, 0, sizeof(*arg));
	return 0;
}

static void vmd_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
{
	arg->desc = desc;
}

static struct msi_domain_ops vmd_msi_domain_ops = {
	.get_hwirq	= vmd_get_hwirq,
	.msi_init	= vmd_msi_init,
	.msi_free	= vmd_msi_free,
	.msi_prepare	= vmd_msi_prepare,
	.set_desc	= vmd_set_desc,
};

static struct msi_domain_info vmd_msi_domain_info = {
	.flags		= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
			  MSI_FLAG_PCI_MSIX,
	.ops		= &vmd_msi_domain_ops,
	.chip		= &vmd_msi_controller,
};

/*
 * VMD replaces the requester ID with its own. DMA mappings for devices in a
 * VMD domain need to be mapped for the VMD, not the device requiring
 * the mapping.
 */
static struct device *to_vmd_dev(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = vmd_from_bus(pdev->bus);

	return &vmd->dev->dev;
}

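/*
 * Each wrapper below substitutes the VMD endpoint for the child device and
 * forwards to the corresponding dma_*_attrs() helper, so mappings are made
 * against the requester ID the IOMMU will actually see.
 */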
static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr,
		       gfp_t flag, unsigned long attrs)
{
	return dma_alloc_attrs(to_vmd_dev(dev), size, addr, flag, attrs);
}

static void vmd_free(struct device *dev, size_t size, void *vaddr,
		     dma_addr_t addr, unsigned long attrs)
{
	return dma_free_attrs(to_vmd_dev(dev), size, vaddr, addr, attrs);
}

static int vmd_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t addr, size_t size,
		    unsigned long attrs)
{
	return dma_mmap_attrs(to_vmd_dev(dev), vma, cpu_addr, addr, size,
			      attrs);
}

static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt,
			   void *cpu_addr, dma_addr_t addr, size_t size,
			   unsigned long attrs)
{
	return dma_get_sgtable_attrs(to_vmd_dev(dev), sgt, cpu_addr, addr, size,
				     attrs);
}

static dma_addr_t vmd_map_page(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       unsigned long attrs)
{
	return dma_map_page_attrs(to_vmd_dev(dev), page, offset, size, dir,
				  attrs);
}

static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size,
			   enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_page_attrs(to_vmd_dev(dev), addr, size, dir, attrs);
}

static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		      enum dma_data_direction dir, unsigned long attrs)
{
	return dma_map_sg_attrs(to_vmd_dev(dev), sg, nents, dir, attrs);
}

static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			 enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_sg_attrs(to_vmd_dev(dev), sg, nents, dir, attrs);
}

static void vmd_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
				    size_t size, enum dma_data_direction dir)
{
	dma_sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir);
}

static void vmd_sync_single_for_device(struct device *dev, dma_addr_t addr,
				       size_t size, enum dma_data_direction dir)
{
	dma_sync_single_for_device(to_vmd_dev(dev), addr, size, dir);
}

static void vmd_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction dir)
{
	dma_sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir);
}

static void vmd_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir)
{
	dma_sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir);
}

static int vmd_dma_supported(struct device *dev, u64 mask)
{
	return dma_supported(to_vmd_dev(dev), mask);
}

static u64 vmd_get_required_mask(struct device *dev)
{
	return dma_get_required_mask(to_vmd_dev(dev));
}

static void vmd_teardown_dma_ops(struct vmd_dev *vmd)
{
	struct dma_domain *domain = &vmd->dma_domain;

	if (get_dma_ops(&vmd->dev->dev))
		del_dma_domain(domain);
}

#define ASSIGN_VMD_DMA_OPS(source, dest, fn)	\
	do {					\
		if (source->fn)			\
			dest->fn = vmd_##fn;	\
	} while (0)

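/*
 * Mirror only the callbacks the parent device's dma_map_ops actually
 * implements; the rest stay NULL so the dma-mapping core keeps its default
 * behavior for those operations.
 */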
static void vmd_setup_dma_ops(struct vmd_dev *vmd)
{
	const struct dma_map_ops *source = get_dma_ops(&vmd->dev->dev);
	struct dma_map_ops *dest = &vmd->dma_ops;
	struct dma_domain *domain = &vmd->dma_domain;

	domain->domain_nr = vmd->sysdata.domain;
	domain->dma_ops = dest;

	if (!source)
		return;
	ASSIGN_VMD_DMA_OPS(source, dest, alloc);
	ASSIGN_VMD_DMA_OPS(source, dest, free);
	ASSIGN_VMD_DMA_OPS(source, dest, mmap);
	ASSIGN_VMD_DMA_OPS(source, dest, get_sgtable);
	ASSIGN_VMD_DMA_OPS(source, dest, map_page);
	ASSIGN_VMD_DMA_OPS(source, dest, unmap_page);
	ASSIGN_VMD_DMA_OPS(source, dest, map_sg);
	ASSIGN_VMD_DMA_OPS(source, dest, unmap_sg);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_cpu);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_device);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_cpu);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device);
	ASSIGN_VMD_DMA_OPS(source, dest, dma_supported);
	ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask);
	add_dma_domain(domain);
}
#undef ASSIGN_VMD_DMA_OPS

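/*
 * Config space is laid out ECAM-style within the CFGBAR: bits 27:20 select
 * the bus, 19:12 the devfn and 11:0 the register, so e.g. bus 1, devfn 1,
 * reg 0x10 resolves to cfgbar + 0x101010.
 */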
static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
				  unsigned int devfn, int reg, int len)
{
	char __iomem *addr = vmd->cfgbar +
			     (bus->number << 20) + (devfn << 12) + reg;

	if ((addr - vmd->cfgbar) + len >=
	    resource_size(&vmd->dev->resource[VMD_CFGBAR]))
		return NULL;

	return addr;
}

/*
 * CPU may deadlock if config space is not serialized on some versions of this
 * hardware, so all config space access is done under a spinlock.
 */
static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg,
			int len, u32 *value)
{
	struct vmd_dev *vmd = vmd_from_bus(bus);
	char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
	unsigned long flags;
	int ret = 0;

	if (!addr)
		return -EFAULT;

	spin_lock_irqsave(&vmd->cfg_lock, flags);
	switch (len) {
	case 1:
		*value = readb(addr);
		break;
	case 2:
		*value = readw(addr);
		break;
	case 4:
		*value = readl(addr);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&vmd->cfg_lock, flags);
	return ret;
}

/*
 * VMD h/w converts non-posted config writes to posted memory writes. The
 * read-back in this function forces the completion so it returns only after
 * the config space was written, as expected.
 */
static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg,
			 int len, u32 value)
{
	struct vmd_dev *vmd = vmd_from_bus(bus);
	char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
	unsigned long flags;
	int ret = 0;

	if (!addr)
		return -EFAULT;

	spin_lock_irqsave(&vmd->cfg_lock, flags);
	switch (len) {
	case 1:
		writeb(value, addr);
		readb(addr);
		break;
	case 2:
		writew(value, addr);
		readw(addr);
		break;
	case 4:
		writel(value, addr);
		readl(addr);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&vmd->cfg_lock, flags);
	return ret;
}

static struct pci_ops vmd_ops = {
	.read		= vmd_pci_read,
	.write		= vmd_pci_write,
};

static void vmd_attach_resources(struct vmd_dev *vmd)
{
	vmd->dev->resource[VMD_MEMBAR1].child = &vmd->resources[1];
	vmd->dev->resource[VMD_MEMBAR2].child = &vmd->resources[2];
}

static void vmd_detach_resources(struct vmd_dev *vmd)
{
	vmd->dev->resource[VMD_MEMBAR1].child = NULL;
	vmd->dev->resource[VMD_MEMBAR2].child = NULL;
}

/*
 * VMD domains start at 0x10000 to not clash with ACPI _SEG domains.
 * Per ACPI r6.0, sec 6.5.6, _SEG returns an integer, of which the lower
 * 16 bits are the PCI Segment Group (domain) number. Other bits are
 * currently reserved.
 */
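/*
 * With only firmware-described host bridges present (all in segments below
 * 0x10000), the scan below yields 0x10000 for the first VMD instance.
 */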
static int vmd_find_free_domain(void)
{
	int domain = 0xffff;
	struct pci_bus *bus = NULL;

	while ((bus = pci_find_next_bus(bus)) != NULL)
		domain = max_t(int, domain, pci_domain_nr(bus));
	return domain + 1;
}

static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
{
	struct pci_sysdata *sd = &vmd->sysdata;
	struct fwnode_handle *fn;
	struct resource *res;
	u32 upper_bits;
	unsigned long flags;
	LIST_HEAD(resources);
	resource_size_t offset[2] = {0};
	resource_size_t membar2_offset = 0x2000, busn_start = 0;
	struct pci_bus *child;

	/*
	 * Shadow registers may exist in certain VMD device ids which allow
	 * guests to correctly assign host physical addresses to the root ports
	 * and child devices. These registers will either return the host value
	 * or 0, depending on an enable bit in the VMD device.
	 */
	if (features & VMD_FEAT_HAS_MEMBAR_SHADOW) {
		u32 vmlock;
		int ret;

		membar2_offset = 0x2018;
		ret = pci_read_config_dword(vmd->dev, PCI_REG_VMLOCK, &vmlock);
		if (ret || vmlock == ~0)
			return -ENODEV;

		if (MB2_SHADOW_EN(vmlock)) {
			void __iomem *membar2;

			membar2 = pci_iomap(vmd->dev, VMD_MEMBAR2, 0);
			if (!membar2)
				return -ENOMEM;
			offset[0] = vmd->dev->resource[VMD_MEMBAR1].start -
					readq(membar2 + 0x2008);
			offset[1] = vmd->dev->resource[VMD_MEMBAR2].start -
					readq(membar2 + 0x2010);
			pci_iounmap(vmd->dev, membar2);
		}
	}

	/*
	 * Certain VMD devices may have a root port configuration option which
	 * limits the bus range to 0-127 or 128-255.
	 */
	if (features & VMD_FEAT_HAS_BUS_RESTRICTIONS) {
		u32 vmcap, vmconfig;

		pci_read_config_dword(vmd->dev, PCI_REG_VMCAP, &vmcap);
		pci_read_config_dword(vmd->dev, PCI_REG_VMCONFIG, &vmconfig);
		if (BUS_RESTRICT_CAP(vmcap) &&
		    (BUS_RESTRICT_CFG(vmconfig) == 0x1))
			busn_start = 128;
	}

	res = &vmd->dev->resource[VMD_CFGBAR];
	vmd->resources[0] = (struct resource) {
		.name  = "VMD CFGBAR",
		.start = busn_start,
		.end   = busn_start + (resource_size(res) >> 20) - 1,
		.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED,
	};

	/*
	 * If the window is below 4GB, clear IORESOURCE_MEM_64 so we can
	 * put 32-bit resources in the window.
	 *
	 * There's no hardware reason why a 64-bit window *couldn't*
	 * contain a 32-bit resource, but pbus_size_mem() computes the
	 * bridge window size assuming a 64-bit window will contain no
	 * 32-bit resources. __pci_assign_resource() enforces that
	 * artificial restriction to make sure everything will fit.
	 *
	 * The only way we could use a 64-bit non-prefetchable MEMBAR is
	 * if its address is <4GB so that we can convert it to a 32-bit
	 * resource. To be visible to the host OS, all VMD endpoints must
	 * be initially configured by platform BIOS, which includes setting
	 * up these resources. We can assume the device is configured
	 * according to the platform needs.
	 */
	res = &vmd->dev->resource[VMD_MEMBAR1];
	upper_bits = upper_32_bits(res->end);
	flags = res->flags & ~IORESOURCE_SIZEALIGN;
	if (!upper_bits)
		flags &= ~IORESOURCE_MEM_64;
	vmd->resources[1] = (struct resource) {
		.name  = "VMD MEMBAR1",
		.start = res->start,
		.end   = res->end,
		.flags = flags,
		.parent = res,
	};

	res = &vmd->dev->resource[VMD_MEMBAR2];
	upper_bits = upper_32_bits(res->end);
	flags = res->flags & ~IORESOURCE_SIZEALIGN;
	if (!upper_bits)
		flags &= ~IORESOURCE_MEM_64;
	vmd->resources[2] = (struct resource) {
		.name  = "VMD MEMBAR2",
		.start = res->start + membar2_offset,
		.end   = res->end,
		.flags = flags,
		.parent = res,
	};

	sd->vmd_domain = true;
	sd->domain = vmd_find_free_domain();
	if (sd->domain < 0)
		return sd->domain;

	sd->node = pcibus_to_node(vmd->dev->bus);

	fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain);
	if (!fn)
		return -ENODEV;

	vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info,
						    x86_vector_domain);
	irq_domain_free_fwnode(fn);
	if (!vmd->irq_domain)
		return -ENODEV;

	pci_add_resource(&resources, &vmd->resources[0]);
	pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
	pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]);

	vmd->bus = pci_create_root_bus(&vmd->dev->dev, busn_start, &vmd_ops,
				       sd, &resources);
	if (!vmd->bus) {
		pci_free_resource_list(&resources);
		irq_domain_remove(vmd->irq_domain);
		return -ENODEV;
	}

	vmd_attach_resources(vmd);
	vmd_setup_dma_ops(vmd);
	dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);

	pci_scan_child_bus(vmd->bus);
	pci_assign_unassigned_bus_resources(vmd->bus);

	/*
	 * VMD root buses are virtual and don't return true on pci_is_pcie()
	 * and will fail pcie_bus_configure_settings() early. It can instead be
	 * run on each of the real root ports.
	 */
	list_for_each_entry(child, &vmd->bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(vmd->bus);

	WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
			       "domain"), "Can't create symlink to domain\n");
	return 0;
}

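/*
 * Demux a VMD vector: under SRCU, replay the interrupt into every child IRQ
 * that vmd_irq_enable() linked onto this vector's list.
 */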
static irqreturn_t vmd_irq(int irq, void *data)
{
	struct vmd_irq_list *irqs = data;
	struct vmd_irq *vmdirq;
	int idx;

	idx = srcu_read_lock(&irqs->srcu);
	list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node)
		generic_handle_irq(vmdirq->virq);
	srcu_read_unlock(&irqs->srcu, idx);

	return IRQ_HANDLED;
}

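/*
 * Each bus consumes 1MB of CFGBAR (256 devfn x 4K), so a window smaller
 * than 1MB cannot host even a single bus.
 */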
static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct vmd_dev *vmd;
	int i, err;

	if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20))
		return -ENOMEM;

	vmd = devm_kzalloc(&dev->dev, sizeof(*vmd), GFP_KERNEL);
	if (!vmd)
		return -ENOMEM;

	vmd->dev = dev;
	err = pcim_enable_device(dev);
	if (err < 0)
		return err;

	vmd->cfgbar = pcim_iomap(dev, VMD_CFGBAR, 0);
	if (!vmd->cfgbar)
		return -ENOMEM;

	pci_set_master(dev);
	if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32)))
		return -ENODEV;

	vmd->msix_count = pci_msix_vec_count(dev);
	if (vmd->msix_count < 0)
		return -ENODEV;

	vmd->msix_count = pci_alloc_irq_vectors(dev, 1, vmd->msix_count,
					PCI_IRQ_MSIX);
	if (vmd->msix_count < 0)
		return vmd->msix_count;

	vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs),
				 GFP_KERNEL);
	if (!vmd->irqs)
		return -ENOMEM;

	for (i = 0; i < vmd->msix_count; i++) {
		err = init_srcu_struct(&vmd->irqs[i].srcu);
		if (err)
			return err;

		INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
		err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i),
				       vmd_irq, IRQF_NO_THREAD,
				       "vmd", &vmd->irqs[i]);
		if (err)
			return err;
	}

	spin_lock_init(&vmd->cfg_lock);
	pci_set_drvdata(dev, vmd);
	err = vmd_enable_domain(vmd, (unsigned long) id->driver_data);
	if (err)
		return err;

	dev_info(&vmd->dev->dev, "Bound to PCI domain %04x\n",
		 vmd->sysdata.domain);
	return 0;
}

static void vmd_cleanup_srcu(struct vmd_dev *vmd)
{
	int i;

	for (i = 0; i < vmd->msix_count; i++)
		cleanup_srcu_struct(&vmd->irqs[i].srcu);
}

static void vmd_remove(struct pci_dev *dev)
{
	struct vmd_dev *vmd = pci_get_drvdata(dev);

	sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
	pci_stop_root_bus(vmd->bus);
	pci_remove_root_bus(vmd->bus);
	vmd_cleanup_srcu(vmd);
	vmd_teardown_dma_ops(vmd);
	vmd_detach_resources(vmd);
	irq_domain_remove(vmd->irq_domain);
}

#ifdef CONFIG_PM_SLEEP
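/*
 * Release the shared vectors across suspend and re-request them on resume so
 * no handler runs against the device while it is powered down.
 */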
static int vmd_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = pci_get_drvdata(pdev);
	int i;

	for (i = 0; i < vmd->msix_count; i++)
		devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]);

	pci_save_state(pdev);
	return 0;
}

static int vmd_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = pci_get_drvdata(pdev);
	int err, i;

	for (i = 0; i < vmd->msix_count; i++) {
		err = devm_request_irq(dev, pci_irq_vector(pdev, i),
				       vmd_irq, IRQF_NO_THREAD,
				       "vmd", &vmd->irqs[i]);
		if (err)
			return err;
	}

	pci_restore_state(pdev);
	return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume);

static const struct pci_device_id vmd_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_201D),},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0),
		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW |
				VMD_FEAT_HAS_BUS_RESTRICTIONS,},
	{0,}
};
MODULE_DEVICE_TABLE(pci, vmd_ids);

static struct pci_driver vmd_drv = {
	.name		= "vmd",
	.id_table	= vmd_ids,
	.probe		= vmd_probe,
	.remove		= vmd_remove,
	.driver		= {
		.pm	= &vmd_dev_pm_ops,
	},
};
module_pci_driver(vmd_drv);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.6");