// SPDX-License-Identifier: GPL-2.0
/*
 * Volume Management Device driver
 * Copyright (c) 2015, Intel Corporation.
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/srcu.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

#include <asm/irqdomain.h>
#include <asm/device.h>
#include <asm/msi.h>
#include <asm/msidef.h>

#define VMD_CFGBAR	0
#define VMD_MEMBAR1	2
#define VMD_MEMBAR2	4

#define PCI_REG_VMCAP		0x40
#define BUS_RESTRICT_CAP(vmcap)	(vmcap & 0x1)
#define PCI_REG_VMCONFIG	0x44
#define BUS_RESTRICT_CFG(vmcfg)	((vmcfg >> 8) & 0x3)
#define PCI_REG_VMLOCK		0x70
#define MB2_SHADOW_EN(vmlock)	(vmlock & 0x2)

enum vmd_features {
	/*
	 * Device may contain registers which hint at the physical location
	 * of the MEMBARs, in order to allow proper address translation
	 * during resource assignment to enable guest virtualization.
	 */
	VMD_FEAT_HAS_MEMBAR_SHADOW	= (1 << 0),

	/*
	 * Device may provide root port configuration information which
	 * limits bus numbering.
	 */
	VMD_FEAT_HAS_BUS_RESTRICTIONS	= (1 << 1),
};

/*
 * Lock for manipulating VMD IRQ lists.
 */
static DEFINE_RAW_SPINLOCK(list_lock);

/**
 * struct vmd_irq - private data to map driver IRQ to the VMD shared vector
 * @node:	list item for parent traversal.
 * @irq:	back pointer to parent.
 * @enabled:	true if driver enabled IRQ
 * @virq:	the virtual IRQ value provided to the requesting driver.
 *
 * Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped to
 * a VMD IRQ using this structure.
 */
struct vmd_irq {
	struct list_head	node;
	struct vmd_irq_list	*irq;
	bool			enabled;
	unsigned int		virq;
};

/**
 * struct vmd_irq_list - list of driver requested IRQs mapping to a VMD vector
 * @irq_list:	the list of child IRQs the VMD vector demuxes to.
 * @srcu:	SRCU struct for local synchronization.
 * @count:	number of child IRQs assigned to this vector; used to track
 *		sharing.
 */
struct vmd_irq_list {
	struct list_head	irq_list;
	struct srcu_struct	srcu;
	unsigned int		count;
};

struct vmd_dev {
	struct pci_dev		*dev;

	spinlock_t		cfg_lock;
	char __iomem		*cfgbar;

	int msix_count;
	struct vmd_irq_list	*irqs;

	struct pci_sysdata	sysdata;
	struct resource		resources[3];
	struct irq_domain	*irq_domain;
	struct pci_bus		*bus;

	struct dma_map_ops	dma_ops;
	struct dma_domain	dma_domain;
};

static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus)
{
	return container_of(bus->sysdata, struct vmd_dev, sysdata);
}

static inline unsigned int index_from_irqs(struct vmd_dev *vmd,
					   struct vmd_irq_list *irqs)
{
	return irqs - vmd->irqs;
}

/*
 * Drivers managing a device in a VMD domain allocate their own IRQs as before,
 * but the MSI entry for the hardware it's driving will be programmed with a
 * destination ID for the VMD MSI-X table. The VMD muxes interrupts in its
 * domain into one of its own, and the VMD driver de-muxes these for the
 * handlers sharing that VMD IRQ. The vmd irq_domain provides the operations
 * and irq_chip to set this up.
 */
static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct vmd_irq *vmdirq = data->chip_data;
	struct vmd_irq_list *irq = vmdirq->irq;
	struct vmd_dev *vmd = irq_data_get_irq_handler_data(data);

	msg->address_hi = MSI_ADDR_BASE_HI;
	msg->address_lo = MSI_ADDR_BASE_LO |
			  MSI_ADDR_DEST_ID(index_from_irqs(vmd, irq));
	msg->data = 0;
}
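
/*
 * Worked example: a child IRQ attached to the VMD vector at index 3 gets
 * address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_DEST_ID(3), so MSI writes from
 * that device land on entry 3 of the VMD MSI-X table, and vmd_irq() below
 * demuxes to every child IRQ registered on that vector.
 */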

/*
 * We rely on MSI_FLAG_USE_DEF_CHIP_OPS to set the IRQ mask/unmask ops.
 */
static void vmd_irq_enable(struct irq_data *data)
{
	struct vmd_irq *vmdirq = data->chip_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&list_lock, flags);
	WARN_ON(vmdirq->enabled);
	list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
	vmdirq->enabled = true;
	raw_spin_unlock_irqrestore(&list_lock, flags);

	data->chip->irq_unmask(data);
}

static void vmd_irq_disable(struct irq_data *data)
{
	struct vmd_irq *vmdirq = data->chip_data;
	unsigned long flags;

	data->chip->irq_mask(data);

	raw_spin_lock_irqsave(&list_lock, flags);
	if (vmdirq->enabled) {
		list_del_rcu(&vmdirq->node);
		vmdirq->enabled = false;
	}
	raw_spin_unlock_irqrestore(&list_lock, flags);
}

/*
 * XXX: Stubbed until we develop acceptable way to not create conflicts with
 * other devices sharing the same vector.
 */
static int vmd_irq_set_affinity(struct irq_data *data,
				const struct cpumask *dest, bool force)
{
	return -EINVAL;
}

static struct irq_chip vmd_msi_controller = {
	.name			= "VMD-MSI",
	.irq_enable		= vmd_irq_enable,
	.irq_disable		= vmd_irq_disable,
	.irq_compose_msi_msg	= vmd_compose_msi_msg,
	.irq_set_affinity	= vmd_irq_set_affinity,
};

static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info,
				     msi_alloc_info_t *arg)
{
	return 0;
}

/*
 * XXX: We can be even smarter selecting the best IRQ once we solve the
 * affinity problem.
 */
static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *desc)
{
	int i, best = 1;
	unsigned long flags;

	if (vmd->msix_count == 1)
		return &vmd->irqs[0];

	/*
	 * White list for fast-interrupt handlers. All others will share the
	 * "slow" interrupt vector.
	 */
	switch (msi_desc_to_pci_dev(desc)->class) {
	case PCI_CLASS_STORAGE_EXPRESS:
		break;
	default:
		return &vmd->irqs[0];
	}

	raw_spin_lock_irqsave(&list_lock, flags);
	for (i = 1; i < vmd->msix_count; i++)
		if (vmd->irqs[i].count < vmd->irqs[best].count)
			best = i;
	vmd->irqs[best].count++;
	raw_spin_unlock_irqrestore(&list_lock, flags);

	return &vmd->irqs[best];
}
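
/*
 * In effect, all non-NVMe children share "slow" vector 0, while devices with
 * class PCI_CLASS_STORAGE_EXPRESS are spread across vectors 1 through
 * msix_count - 1, each new allocation going to the vector with the lowest
 * @count.
 */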

static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
			unsigned int virq, irq_hw_number_t hwirq,
			msi_alloc_info_t *arg)
{
	struct msi_desc *desc = arg->desc;
	struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus);
	struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);
	unsigned int index, vector;

	if (!vmdirq)
		return -ENOMEM;

	INIT_LIST_HEAD(&vmdirq->node);
	vmdirq->irq = vmd_next_irq(vmd, desc);
	vmdirq->virq = virq;
	index = index_from_irqs(vmd, vmdirq->irq);
	vector = pci_irq_vector(vmd->dev, index);

	irq_domain_set_info(domain, virq, vector, info->chip, vmdirq,
			    handle_untracked_irq, vmd, NULL);
	return 0;
}

static void vmd_msi_free(struct irq_domain *domain,
			struct msi_domain_info *info, unsigned int virq)
{
	struct vmd_irq *vmdirq = irq_get_chip_data(virq);
	unsigned long flags;

	synchronize_srcu(&vmdirq->irq->srcu);

	/* XXX: Potential optimization to rebalance */
	raw_spin_lock_irqsave(&list_lock, flags);
	vmdirq->irq->count--;
	raw_spin_unlock_irqrestore(&list_lock, flags);

	kfree(vmdirq);
}

static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev,
			   int nvec, msi_alloc_info_t *arg)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = vmd_from_bus(pdev->bus);

	if (nvec > vmd->msix_count)
		return vmd->msix_count;

	memset(arg, 0, sizeof(*arg));
	return 0;
}
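
/*
 * Returning a positive value from ->msi_prepare() reports how many vectors
 * are actually available; as far as we can tell, the MSI core propagates it
 * so that a child's pci_alloc_irq_vectors() can retry with the smaller count
 * instead of failing outright.
 */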

static void vmd_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
{
	arg->desc = desc;
}

static struct msi_domain_ops vmd_msi_domain_ops = {
	.get_hwirq	= vmd_get_hwirq,
	.msi_init	= vmd_msi_init,
	.msi_free	= vmd_msi_free,
	.msi_prepare	= vmd_msi_prepare,
	.set_desc	= vmd_set_desc,
};

static struct msi_domain_info vmd_msi_domain_info = {
	.flags		= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
			  MSI_FLAG_PCI_MSIX,
	.ops		= &vmd_msi_domain_ops,
	.chip		= &vmd_msi_controller,
};
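
/*
 * MSI_FLAG_USE_DEF_CHIP_OPS supplies the default irq_mask/irq_unmask
 * callbacks that vmd_irq_enable() and vmd_irq_disable() invoke through
 * data->chip, and MSI_FLAG_PCI_MSIX advertises MSI-X support for children
 * of this domain.
 */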

/*
 * VMD replaces the requester ID with its own. DMA mappings for devices in a
 * VMD domain need to be mapped for the VMD, not the device requiring
 * the mapping.
 */
static struct device *to_vmd_dev(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = vmd_from_bus(pdev->bus);

	return &vmd->dev->dev;
}

static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr,
		       gfp_t flag, unsigned long attrs)
{
	return dma_alloc_attrs(to_vmd_dev(dev), size, addr, flag, attrs);
}

static void vmd_free(struct device *dev, size_t size, void *vaddr,
		     dma_addr_t addr, unsigned long attrs)
{
	return dma_free_attrs(to_vmd_dev(dev), size, vaddr, addr, attrs);
}

static int vmd_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t addr, size_t size,
		    unsigned long attrs)
{
	return dma_mmap_attrs(to_vmd_dev(dev), vma, cpu_addr, addr, size,
			      attrs);
}

static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt,
			   void *cpu_addr, dma_addr_t addr, size_t size,
			   unsigned long attrs)
{
	return dma_get_sgtable_attrs(to_vmd_dev(dev), sgt, cpu_addr, addr, size,
				     attrs);
}

static dma_addr_t vmd_map_page(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       unsigned long attrs)
{
	return dma_map_page_attrs(to_vmd_dev(dev), page, offset, size, dir,
				  attrs);
}

static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size,
			   enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_page_attrs(to_vmd_dev(dev), addr, size, dir, attrs);
}

static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		      enum dma_data_direction dir, unsigned long attrs)
{
	return dma_map_sg_attrs(to_vmd_dev(dev), sg, nents, dir, attrs);
}

static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			 enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_sg_attrs(to_vmd_dev(dev), sg, nents, dir, attrs);
}

static void vmd_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
				    size_t size, enum dma_data_direction dir)
{
	dma_sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir);
}

static void vmd_sync_single_for_device(struct device *dev, dma_addr_t addr,
				       size_t size, enum dma_data_direction dir)
{
	dma_sync_single_for_device(to_vmd_dev(dev), addr, size, dir);
}

static void vmd_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction dir)
{
	dma_sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir);
}

static void vmd_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir)
{
	dma_sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir);
}

static int vmd_dma_supported(struct device *dev, u64 mask)
{
	return dma_supported(to_vmd_dev(dev), mask);
}

static u64 vmd_get_required_mask(struct device *dev)
{
	return dma_get_required_mask(to_vmd_dev(dev));
}

static void vmd_teardown_dma_ops(struct vmd_dev *vmd)
{
	struct dma_domain *domain = &vmd->dma_domain;

	if (get_dma_ops(&vmd->dev->dev))
		del_dma_domain(domain);
}

#define ASSIGN_VMD_DMA_OPS(source, dest, fn)	\
	do {					\
		if (source->fn)			\
			dest->fn = vmd_##fn;	\
	} while (0)

static void vmd_setup_dma_ops(struct vmd_dev *vmd)
{
	const struct dma_map_ops *source = get_dma_ops(&vmd->dev->dev);
	struct dma_map_ops *dest = &vmd->dma_ops;
	struct dma_domain *domain = &vmd->dma_domain;

	domain->domain_nr = vmd->sysdata.domain;
	domain->dma_ops = dest;

	if (!source)
		return;
	ASSIGN_VMD_DMA_OPS(source, dest, alloc);
	ASSIGN_VMD_DMA_OPS(source, dest, free);
	ASSIGN_VMD_DMA_OPS(source, dest, mmap);
	ASSIGN_VMD_DMA_OPS(source, dest, get_sgtable);
	ASSIGN_VMD_DMA_OPS(source, dest, map_page);
	ASSIGN_VMD_DMA_OPS(source, dest, unmap_page);
	ASSIGN_VMD_DMA_OPS(source, dest, map_sg);
	ASSIGN_VMD_DMA_OPS(source, dest, unmap_sg);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_cpu);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_device);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_cpu);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device);
	ASSIGN_VMD_DMA_OPS(source, dest, dma_supported);
	ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask);
	add_dma_domain(domain);
}
#undef ASSIGN_VMD_DMA_OPS
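
/*
 * Every vmd_* DMA op above just replays the call against the VMD endpoint
 * via to_vmd_dev(). An op is copied only when the underlying dma_map_ops
 * implements it, so presence checks on vmd->dma_ops mirror the real ops.
 */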

static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
				  unsigned int devfn, int reg, int len)
{
	char __iomem *addr = vmd->cfgbar +
			     (bus->number << 20) + (devfn << 12) + reg;

	if ((addr - vmd->cfgbar) + len >=
	    resource_size(&vmd->dev->resource[VMD_CFGBAR]))
		return NULL;

	return addr;
}
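
/*
 * The CFGBAR is laid out ECAM-style: 1MB per bus (bus->number << 20) and
 * 4KB of config space per function (devfn << 12), which is also why
 * vmd_enable_domain() sizes the bus range as resource_size(CFGBAR) >> 20.
 */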

/*
 * CPU may deadlock if config space is not serialized on some versions of this
 * hardware, so all config space access is done under a spinlock.
 */
static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg,
			int len, u32 *value)
{
	struct vmd_dev *vmd = vmd_from_bus(bus);
	char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
	unsigned long flags;
	int ret = 0;

	if (!addr)
		return -EFAULT;

	spin_lock_irqsave(&vmd->cfg_lock, flags);
	switch (len) {
	case 1:
		*value = readb(addr);
		break;
	case 2:
		*value = readw(addr);
		break;
	case 4:
		*value = readl(addr);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&vmd->cfg_lock, flags);
	return ret;
}

/*
 * VMD h/w converts non-posted config writes to posted memory writes. The
 * read-back in this function forces the completion so it returns only after
 * the config space has been written, as expected.
 */
static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg,
			 int len, u32 value)
{
	struct vmd_dev *vmd = vmd_from_bus(bus);
	char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
	unsigned long flags;
	int ret = 0;

	if (!addr)
		return -EFAULT;

	spin_lock_irqsave(&vmd->cfg_lock, flags);
	switch (len) {
	case 1:
		writeb(value, addr);
		readb(addr);
		break;
	case 2:
		writew(value, addr);
		readw(addr);
		break;
	case 4:
		writel(value, addr);
		readl(addr);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&vmd->cfg_lock, flags);
	return ret;
}

static struct pci_ops vmd_ops = {
	.read		= vmd_pci_read,
	.write		= vmd_pci_write,
};

static void vmd_attach_resources(struct vmd_dev *vmd)
{
	vmd->dev->resource[VMD_MEMBAR1].child = &vmd->resources[1];
	vmd->dev->resource[VMD_MEMBAR2].child = &vmd->resources[2];
}

static void vmd_detach_resources(struct vmd_dev *vmd)
{
	vmd->dev->resource[VMD_MEMBAR1].child = NULL;
	vmd->dev->resource[VMD_MEMBAR2].child = NULL;
}

/*
 * VMD domains start at 0x10000 to not clash with ACPI _SEG domains.
 * Per ACPI r6.0, sec 6.5.6, _SEG returns an integer, of which the lower
 * 16 bits are the PCI Segment Group (domain) number. Other bits are
 * currently reserved.
 */
static int vmd_find_free_domain(void)
{
	int domain = 0xffff;
	struct pci_bus *bus = NULL;

	while ((bus = pci_find_next_bus(bus)) != NULL)
		domain = max_t(int, domain, pci_domain_nr(bus));
	return domain + 1;
}
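
/*
 * For example, on a host where every existing bus is in an ACPI segment
 * (domain < 0x10000), the scan leaves domain at 0xffff and the first VMD
 * instance claims 0x10000; a second VMD would then claim 0x10001.
 */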

static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
{
	struct pci_sysdata *sd = &vmd->sysdata;
	struct fwnode_handle *fn;
	struct resource *res;
	u32 upper_bits;
	unsigned long flags;
	LIST_HEAD(resources);
	resource_size_t offset[2] = {0};
	resource_size_t membar2_offset = 0x2000, busn_start = 0;
	struct pci_bus *child;

	/*
	 * Shadow registers may exist in certain VMD device ids which allow
	 * guests to correctly assign host physical addresses to the root ports
	 * and child devices. These registers will either return the host value
	 * or 0, depending on an enable bit in the VMD device.
	 */
	if (features & VMD_FEAT_HAS_MEMBAR_SHADOW) {
		u32 vmlock;
		int ret;

		membar2_offset = 0x2018;
		ret = pci_read_config_dword(vmd->dev, PCI_REG_VMLOCK, &vmlock);
		if (ret || vmlock == ~0)
			return -ENODEV;

		if (MB2_SHADOW_EN(vmlock)) {
			void __iomem *membar2;

			membar2 = pci_iomap(vmd->dev, VMD_MEMBAR2, 0);
			if (!membar2)
				return -ENOMEM;
			offset[0] = vmd->dev->resource[VMD_MEMBAR1].start -
					readq(membar2 + 0x2008);
			offset[1] = vmd->dev->resource[VMD_MEMBAR2].start -
					readq(membar2 + 0x2010);
			pci_iounmap(vmd->dev, membar2);
		}
	}
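
	/*
	 * offset[] now holds the guest-to-host delta for each MEMBAR (zero
	 * on bare metal or when the shadow is disabled). It is applied below
	 * via pci_add_resource_offset() so that child BARs are programmed
	 * with the host physical addresses the VMD hardware decodes.
	 */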

	/*
	 * Certain VMD devices may have a root port configuration option which
	 * limits the bus range to 0-127 or 128-255.
	 */
	if (features & VMD_FEAT_HAS_BUS_RESTRICTIONS) {
		u32 vmcap, vmconfig;

		pci_read_config_dword(vmd->dev, PCI_REG_VMCAP, &vmcap);
		pci_read_config_dword(vmd->dev, PCI_REG_VMCONFIG, &vmconfig);
		if (BUS_RESTRICT_CAP(vmcap) &&
		    (BUS_RESTRICT_CFG(vmconfig) == 0x1))
			busn_start = 128;
	}

	res = &vmd->dev->resource[VMD_CFGBAR];
	vmd->resources[0] = (struct resource) {
		.name  = "VMD CFGBAR",
		.start = busn_start,
		.end   = busn_start + (resource_size(res) >> 20) - 1,
		.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED,
	};

	/*
	 * If the window is below 4GB, clear IORESOURCE_MEM_64 so we can
	 * put 32-bit resources in the window.
	 *
	 * There's no hardware reason why a 64-bit window *couldn't*
	 * contain a 32-bit resource, but pbus_size_mem() computes the
	 * bridge window size assuming a 64-bit window will contain no
	 * 32-bit resources. __pci_assign_resource() enforces that
	 * artificial restriction to make sure everything will fit.
	 *
	 * The only way we could use a 64-bit non-prefetchable MEMBAR is
	 * if its address is <4GB so that we can convert it to a 32-bit
	 * resource. To be visible to the host OS, all VMD endpoints must
	 * be initially configured by platform BIOS, which includes setting
	 * up these resources. We can assume the device is configured
	 * according to the platform needs.
	 */
	res = &vmd->dev->resource[VMD_MEMBAR1];
	upper_bits = upper_32_bits(res->end);
	flags = res->flags & ~IORESOURCE_SIZEALIGN;
	if (!upper_bits)
		flags &= ~IORESOURCE_MEM_64;
	vmd->resources[1] = (struct resource) {
		.name  = "VMD MEMBAR1",
		.start = res->start,
		.end   = res->end,
		.flags = flags,
		.parent = res,
	};

	res = &vmd->dev->resource[VMD_MEMBAR2];
	upper_bits = upper_32_bits(res->end);
	flags = res->flags & ~IORESOURCE_SIZEALIGN;
	if (!upper_bits)
		flags &= ~IORESOURCE_MEM_64;
	vmd->resources[2] = (struct resource) {
		.name  = "VMD MEMBAR2",
		.start = res->start + membar2_offset,
		.end   = res->end,
		.flags = flags,
		.parent = res,
	};

	sd->vmd_domain = true;
	sd->domain = vmd_find_free_domain();
	if (sd->domain < 0)
		return sd->domain;

	sd->node = pcibus_to_node(vmd->dev->bus);

	fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain);
	if (!fn)
		return -ENODEV;

	vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info,
						    x86_vector_domain);
	irq_domain_free_fwnode(fn);
	if (!vmd->irq_domain)
		return -ENODEV;

	pci_add_resource(&resources, &vmd->resources[0]);
	pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
	pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]);

	vmd->bus = pci_create_root_bus(&vmd->dev->dev, busn_start, &vmd_ops,
				       sd, &resources);
	if (!vmd->bus) {
		pci_free_resource_list(&resources);
		irq_domain_remove(vmd->irq_domain);
		return -ENODEV;
	}

	vmd_attach_resources(vmd);
	vmd_setup_dma_ops(vmd);
	dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);

	pci_scan_child_bus(vmd->bus);
	pci_assign_unassigned_bus_resources(vmd->bus);

	/*
	 * VMD root buses are virtual and don't return true on pci_is_pcie()
	 * and will fail pcie_bus_configure_settings() early. It can instead be
	 * run on each of the real root ports.
	 */
	list_for_each_entry(child, &vmd->bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(vmd->bus);

	WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
			       "domain"), "Can't create symlink to domain\n");
	return 0;
}

static irqreturn_t vmd_irq(int irq, void *data)
{
	struct vmd_irq_list *irqs = data;
	struct vmd_irq *vmdirq;
	int idx;

	idx = srcu_read_lock(&irqs->srcu);
	list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node)
		generic_handle_irq(vmdirq->virq);
	srcu_read_unlock(&irqs->srcu, idx);

	return IRQ_HANDLED;
}
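
/*
 * Each vmd_irq_list has its own srcu_struct so that vmd_msi_free() can
 * synchronize_srcu() against only the vector its vmd_irq hangs off before
 * freeing it, rather than waiting out a global grace period while the other
 * vectors keep demuxing.
 */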

static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct vmd_dev *vmd;
	int i, err;

	if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20))
		return -ENOMEM;

	vmd = devm_kzalloc(&dev->dev, sizeof(*vmd), GFP_KERNEL);
	if (!vmd)
		return -ENOMEM;

	vmd->dev = dev;
	err = pcim_enable_device(dev);
	if (err < 0)
		return err;

	vmd->cfgbar = pcim_iomap(dev, VMD_CFGBAR, 0);
	if (!vmd->cfgbar)
		return -ENOMEM;

	pci_set_master(dev);
	if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32)))
		return -ENODEV;

	vmd->msix_count = pci_msix_vec_count(dev);
	if (vmd->msix_count < 0)
		return -ENODEV;

	vmd->msix_count = pci_alloc_irq_vectors(dev, 1, vmd->msix_count,
					PCI_IRQ_MSIX);
	if (vmd->msix_count < 0)
		return vmd->msix_count;

	vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs),
				 GFP_KERNEL);
	if (!vmd->irqs)
		return -ENOMEM;

	for (i = 0; i < vmd->msix_count; i++) {
		err = init_srcu_struct(&vmd->irqs[i].srcu);
		if (err)
			return err;

		INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
		err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i),
				       vmd_irq, IRQF_NO_THREAD,
				       "vmd", &vmd->irqs[i]);
		if (err)
			return err;
	}

	spin_lock_init(&vmd->cfg_lock);
	pci_set_drvdata(dev, vmd);
	err = vmd_enable_domain(vmd, (unsigned long) id->driver_data);
	if (err)
		return err;

	dev_info(&vmd->dev->dev, "Bound to PCI domain %04x\n",
		 vmd->sysdata.domain);
	return 0;
}

static void vmd_cleanup_srcu(struct vmd_dev *vmd)
{
	int i;

	for (i = 0; i < vmd->msix_count; i++)
		cleanup_srcu_struct(&vmd->irqs[i].srcu);
}

static void vmd_remove(struct pci_dev *dev)
{
	struct vmd_dev *vmd = pci_get_drvdata(dev);

	sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
	pci_stop_root_bus(vmd->bus);
	pci_remove_root_bus(vmd->bus);
	vmd_cleanup_srcu(vmd);
	vmd_teardown_dma_ops(vmd);
	vmd_detach_resources(vmd);
	irq_domain_remove(vmd->irq_domain);
}

#ifdef CONFIG_PM_SLEEP
static int vmd_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = pci_get_drvdata(pdev);
	int i;

	for (i = 0; i < vmd->msix_count; i++)
		devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]);

	pci_save_state(pdev);
	return 0;
}

static int vmd_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = pci_get_drvdata(pdev);
	int err, i;

	for (i = 0; i < vmd->msix_count; i++) {
		err = devm_request_irq(dev, pci_irq_vector(pdev, i),
				       vmd_irq, IRQF_NO_THREAD,
				       "vmd", &vmd->irqs[i]);
		if (err)
			return err;
	}

	pci_restore_state(pdev);
	return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume);

static const struct pci_device_id vmd_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_201D),},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0),
		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW |
				VMD_FEAT_HAS_BUS_RESTRICTIONS,},
	{0,}
};
MODULE_DEVICE_TABLE(pci, vmd_ids);

static struct pci_driver vmd_drv = {
	.name		= "vmd",
	.id_table	= vmd_ids,
	.probe		= vmd_probe,
	.remove		= vmd_remove,
	.driver		= {
		.pm	= &vmd_dev_pm_ops,
	},
};
module_pci_driver(vmd_drv);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.6");