/*
 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc. All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/vgaarb.h>

#include "vfio_pci_private.h"

#define DRIVER_VERSION  "0.2"
#define DRIVER_AUTHOR   "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC     "VFIO PCI - User Level meta-driver"

static char ids[1024] __initdata;
module_param_string(ids, ids, sizeof(ids), 0);
MODULE_PARM_DESC(ids, "Initial PCI IDs to add to the vfio driver, format is \"vendor:device[:subvendor[:subdevice[:class[:class_mask]]]]\" and multiple comma separated entries can be specified");
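/*
 * For illustration only (hypothetical IDs): booting with
 * "vfio-pci.ids=10de:13ba,10de:0fbc" would make vfio-pci claim both
 * functions of a GPU at probe time.  Omitted trailing fields default to
 * PCI_ANY_ID for the subsystem IDs and to a zero class/class_mask (see
 * vfio_pci_fill_ids() below).
 */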

static bool nointxmask;
module_param_named(nointxmask, nointxmask, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(nointxmask,
		  "Disable support for PCI 2.3 style INTx masking.  If this resolves problems for specific devices, report lspci -vvvxxx to linux-pci@vger.kernel.org so the device can be fixed automatically via the broken_intx_masking flag.");

#ifdef CONFIG_VFIO_PCI_VGA
static bool disable_vga;
module_param(disable_vga, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vga, "Disable VGA resource access through vfio-pci");
#endif

static bool disable_idle_d3;
module_param(disable_idle_d3, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_idle_d3,
		 "Disable using the PCI D3 low power state for idle, unused devices");

static DEFINE_MUTEX(driver_lock);

static inline bool vfio_vga_disabled(void)
{
#ifdef CONFIG_VFIO_PCI_VGA
	return disable_vga;
#else
	return true;
#endif
}

/*
 * Our VGA arbiter participation is limited since we don't know anything
 * about the device itself.  However, if the device is the only VGA device
 * downstream of a bridge and VFIO VGA support is disabled, then we can
 * safely return legacy VGA IO and memory as not decoded since the user
 * has no way to get to it and routing can be disabled externally at the
 * bridge.
 */
static unsigned int vfio_pci_set_vga_decode(void *opaque, bool single_vga)
{
	struct vfio_pci_device *vdev = opaque;
	struct pci_dev *tmp = NULL, *pdev = vdev->pdev;
	unsigned char max_busnr;
	unsigned int decodes;

	if (single_vga || !vfio_vga_disabled() || pci_is_root_bus(pdev->bus))
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
		       VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;

	max_busnr = pci_bus_max_busnr(pdev->bus);
	decodes = VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;

	while ((tmp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, tmp)) != NULL) {
		if (tmp == pdev ||
		    pci_domain_nr(tmp->bus) != pci_domain_nr(pdev->bus) ||
		    pci_is_root_bus(tmp->bus))
			continue;

		if (tmp->bus->number >= pdev->bus->number &&
		    tmp->bus->number <= max_busnr) {
			pci_dev_put(tmp);
			decodes |= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
			break;
		}
	}

	return decodes;
}

static inline bool vfio_pci_is_vga(struct pci_dev *pdev)
{
	return (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
}

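/*
 * Determine per-BAR whether the BAR can safely be handed to userspace via
 * mmap.  As a hedged example: a 2KB BAR that starts on a page boundary is
 * still mmap'able because the dummy resource added below reserves the rest
 * of its page, so no hot-added device's BAR can later be assigned into
 * that same page.
 */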
static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev)
{
	struct resource *res;
	int bar;
	struct vfio_pci_dummy_resource *dummy_res;

	INIT_LIST_HEAD(&vdev->dummy_resources_list);

	for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
		res = vdev->pdev->resource + bar;

		if (!IS_ENABLED(CONFIG_VFIO_PCI_MMAP))
			goto no_mmap;

		if (!(res->flags & IORESOURCE_MEM))
			goto no_mmap;

		/*
		 * The PCI core shouldn't set up a resource with a
		 * type but zero size.  But there may be bugs that
		 * cause us to do that.
		 */
		if (!resource_size(res))
			goto no_mmap;

		if (resource_size(res) >= PAGE_SIZE) {
			vdev->bar_mmap_supported[bar] = true;
			continue;
		}

		if (!(res->start & ~PAGE_MASK)) {
			/*
			 * Add a dummy resource to reserve the remainder
			 * of the exclusive page in case a hot-added
			 * device's BAR is assigned into it.
			 */
			dummy_res = kzalloc(sizeof(*dummy_res), GFP_KERNEL);
			if (dummy_res == NULL)
				goto no_mmap;

			dummy_res->resource.name = "vfio sub-page reserved";
			dummy_res->resource.start = res->end + 1;
			dummy_res->resource.end = res->start + PAGE_SIZE - 1;
			dummy_res->resource.flags = res->flags;
			if (request_resource(res->parent,
					     &dummy_res->resource)) {
				kfree(dummy_res);
				goto no_mmap;
			}
			dummy_res->index = bar;
			list_add(&dummy_res->res_next,
				 &vdev->dummy_resources_list);
			vdev->bar_mmap_supported[bar] = true;
			continue;
		}
		/*
		 * We don't handle the case where the BAR is not page
		 * aligned, because we can't expect the BAR to be
		 * assigned to the same location within a page in the
		 * guest when we pass it through.  Such a BAR is also
		 * hard to access from userspace because we have no way
		 * to report the BAR's offset within its page.
		 */
no_mmap:
		vdev->bar_mmap_supported[bar] = false;
	}
}

static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev);
static void vfio_pci_disable(struct vfio_pci_device *vdev);

/*
 * INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
 * _and_ the ability to detect when the device is asserting INTx via PCI_STATUS.
 * If a device implements the former but not the latter we would typically
 * expect broken_intx_masking to be set and require an exclusive interrupt.
 * However, since we do have control of the device's ability to assert INTx,
 * we can instead pretend that the device does not implement INTx, virtualizing
 * the pin register to report zero and maintaining DisINTx set on the host.
 */
static bool vfio_pci_nointx(struct pci_dev *pdev)
{
	switch (pdev->vendor) {
	case PCI_VENDOR_ID_INTEL:
		switch (pdev->device) {
		/* All i40e (XL710/X710) 10/20/40GbE NICs */
		case 0x1572:
		case 0x1574:
		case 0x1580 ... 0x1581:
		case 0x1583 ... 0x1589:
		case 0x37d0 ... 0x37d2:
			return true;
		default:
			return false;
		}
	}

	return false;
}

static int vfio_pci_enable(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	int ret;
	u16 cmd;
	u8 msix_pos;

	pci_set_power_state(pdev, PCI_D0);

	/* Don't allow our initial saved state to include busmaster */
	pci_clear_master(pdev);

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	vdev->reset_works = (pci_reset_function(pdev) == 0);
	pci_save_state(pdev);
	vdev->pci_saved_state = pci_store_saved_state(pdev);
	if (!vdev->pci_saved_state)
		pr_debug("%s: Couldn't store %s saved state\n",
			 __func__, dev_name(&pdev->dev));

	if (likely(!nointxmask)) {
		if (vfio_pci_nointx(pdev)) {
			dev_info(&pdev->dev, "Masking broken INTx support\n");
			vdev->nointx = true;
			pci_intx(pdev, 0);
		} else
			vdev->pci_2_3 = pci_intx_mask_supported(pdev);
	}

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	if (vdev->pci_2_3 && (cmd & PCI_COMMAND_INTX_DISABLE)) {
		cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(pdev, PCI_COMMAND, cmd);
	}

	ret = vfio_config_init(vdev);
	if (ret) {
		kfree(vdev->pci_saved_state);
		vdev->pci_saved_state = NULL;
		pci_disable_device(pdev);
		return ret;
	}

	msix_pos = pdev->msix_cap;
	if (msix_pos) {
		u16 flags;
		u32 table;

		pci_read_config_word(pdev, msix_pos + PCI_MSIX_FLAGS, &flags);
		pci_read_config_dword(pdev, msix_pos + PCI_MSIX_TABLE, &table);

		vdev->msix_bar = table & PCI_MSIX_TABLE_BIR;
		vdev->msix_offset = table & PCI_MSIX_TABLE_OFFSET;
		vdev->msix_size = ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) * 16;
	} else
		vdev->msix_bar = 0xFF;

	if (!vfio_vga_disabled() && vfio_pci_is_vga(pdev))
		vdev->has_vga = true;

	if (vfio_pci_is_vga(pdev) &&
	    pdev->vendor == PCI_VENDOR_ID_INTEL &&
	    IS_ENABLED(CONFIG_VFIO_PCI_IGD)) {
		ret = vfio_pci_igd_init(vdev);
		if (ret) {
			dev_warn(&vdev->pdev->dev,
				 "Failed to setup Intel IGD regions\n");
			vfio_pci_disable(vdev);
			return ret;
		}
	}

	vfio_pci_probe_mmaps(vdev);

	return 0;
}

static void vfio_pci_disable(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_dummy_resource *dummy_res, *tmp;
	int i, bar;

	/* Stop the device from further DMA */
	pci_clear_master(pdev);

	vfio_pci_set_irqs_ioctl(vdev, VFIO_IRQ_SET_DATA_NONE |
				VFIO_IRQ_SET_ACTION_TRIGGER,
				vdev->irq_type, 0, 0, NULL);

	vdev->virq_disabled = false;

	for (i = 0; i < vdev->num_regions; i++)
		vdev->region[i].ops->release(vdev, &vdev->region[i]);

	vdev->num_regions = 0;
	kfree(vdev->region);
	vdev->region = NULL; /* don't krealloc a freed pointer */

	vfio_config_free(vdev);

	for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
		if (!vdev->barmap[bar])
			continue;
		pci_iounmap(pdev, vdev->barmap[bar]);
		pci_release_selected_regions(pdev, 1 << bar);
		vdev->barmap[bar] = NULL;
	}

	list_for_each_entry_safe(dummy_res, tmp,
				 &vdev->dummy_resources_list, res_next) {
		list_del(&dummy_res->res_next);
		release_resource(&dummy_res->resource);
		kfree(dummy_res);
	}

	vdev->needs_reset = true;

	/*
	 * If we have saved state, restore it.  If we can reset the device,
	 * even better.  Resetting with current state seems better than
	 * nothing, but saving and restoring current state without reset
	 * is just busy work.
	 */
	if (pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state)) {
		pr_info("%s: Couldn't reload %s saved state\n",
			__func__, dev_name(&pdev->dev));

		if (!vdev->reset_works)
			goto out;

		pci_save_state(pdev);
	}

	/*
	 * Disable INTx and MSI, presumably to avoid spurious interrupts
	 * during reset.  Stolen from pci_reset_function()
	 */
	pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);

	/*
	 * Try to reset the device.  The success of this is dependent on
	 * being able to lock the device, which is not always possible.
	 */
	if (vdev->reset_works && !pci_try_reset_function(pdev))
		vdev->needs_reset = false;

	pci_restore_state(pdev);
out:
	pci_disable_device(pdev);

	vfio_pci_try_bus_reset(vdev);

	if (!disable_idle_d3)
		pci_set_power_state(pdev, PCI_D3hot);
}

static void vfio_pci_release(void *device_data)
{
	struct vfio_pci_device *vdev = device_data;

	mutex_lock(&driver_lock);

	if (!(--vdev->refcnt)) {
		vfio_spapr_pci_eeh_release(vdev->pdev);
		vfio_pci_disable(vdev);
	}

	mutex_unlock(&driver_lock);

	module_put(THIS_MODULE);
}

static int vfio_pci_open(void *device_data)
{
	struct vfio_pci_device *vdev = device_data;
	int ret = 0;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	mutex_lock(&driver_lock);

	if (!vdev->refcnt) {
		ret = vfio_pci_enable(vdev);
		if (ret)
			goto error;

		vfio_spapr_pci_eeh_open(vdev->pdev);
	}
	vdev->refcnt++;
error:
	mutex_unlock(&driver_lock);
	if (ret)
		module_put(THIS_MODULE);
	return ret;
}

static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
{
	if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) {
		u8 pin;
		pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin);
		if (IS_ENABLED(CONFIG_VFIO_PCI_INTX) && !vdev->nointx && pin)
			return 1;

	} else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) {
		u8 pos;
		u16 flags;

		pos = vdev->pdev->msi_cap;
		if (pos) {
			pci_read_config_word(vdev->pdev,
					     pos + PCI_MSI_FLAGS, &flags);
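			/*
			 * The Multiple Message Capable field is a
			 * power-of-two encoding; e.g. MMC=3 means the
			 * function may request up to 8 vectors.
			 */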
			return 1 << ((flags & PCI_MSI_FLAGS_QMASK) >> 1);
		}
	} else if (irq_type == VFIO_PCI_MSIX_IRQ_INDEX) {
		u8 pos;
		u16 flags;

		pos = vdev->pdev->msix_cap;
		if (pos) {
			pci_read_config_word(vdev->pdev,
					     pos + PCI_MSIX_FLAGS, &flags);

			return (flags & PCI_MSIX_FLAGS_QSIZE) + 1;
		}
	} else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX) {
		if (pci_is_pcie(vdev->pdev))
			return 1;
	} else if (irq_type == VFIO_PCI_REQ_IRQ_INDEX) {
		return 1;
	}

	return 0;
}

static int vfio_pci_count_devs(struct pci_dev *pdev, void *data)
{
	(*(int *)data)++;
	return 0;
}

struct vfio_pci_fill_info {
	int max;
	int cur;
	struct vfio_pci_dependent_device *devices;
};

static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_fill_info *fill = data;
	struct iommu_group *iommu_group;

	if (fill->cur == fill->max)
		return -EAGAIN; /* Something changed, try again */

	iommu_group = iommu_group_get(&pdev->dev);
	if (!iommu_group)
		return -EPERM; /* Cannot reset non-isolated devices */

	fill->devices[fill->cur].group_id = iommu_group_id(iommu_group);
	fill->devices[fill->cur].segment = pci_domain_nr(pdev->bus);
	fill->devices[fill->cur].bus = pdev->bus->number;
	fill->devices[fill->cur].devfn = pdev->devfn;
	fill->cur++;
	iommu_group_put(iommu_group);
	return 0;
}

struct vfio_pci_group_entry {
	struct vfio_group *group;
	int id;
};

struct vfio_pci_group_info {
	int count;
	struct vfio_pci_group_entry *groups;
};

static int vfio_pci_validate_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_group_info *info = data;
	struct iommu_group *group;
	int id, i;

	group = iommu_group_get(&pdev->dev);
	if (!group)
		return -EPERM;

	id = iommu_group_id(group);

	for (i = 0; i < info->count; i++)
		if (info->groups[i].id == id)
			break;

	iommu_group_put(group);

	return (i == info->count) ? -EINVAL : 0;
}

static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot)
{
	for (; pdev; pdev = pdev->bus->self)
		if (pdev->bus == slot->bus)
			return (pdev->slot == slot);
	return false;
}

struct vfio_pci_walk_info {
	int (*fn)(struct pci_dev *, void *data);
	void *data;
	struct pci_dev *pdev;
	bool slot;
	int ret;
};

static int vfio_pci_walk_wrapper(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_walk_info *walk = data;

	if (!walk->slot || vfio_pci_dev_below_slot(pdev, walk->pdev->slot))
		walk->ret = walk->fn(pdev, walk->data);

	return walk->ret;
}

static int vfio_pci_for_each_slot_or_bus(struct pci_dev *pdev,
					 int (*fn)(struct pci_dev *,
						   void *data), void *data,
					 bool slot)
{
	struct vfio_pci_walk_info walk = {
		.fn = fn, .data = data, .pdev = pdev, .slot = slot, .ret = 0,
	};

	pci_walk_bus(pdev->bus, vfio_pci_walk_wrapper, &walk);

	return walk.ret;
}

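/*
 * Build a sparse mmap capability describing which parts of the MSI-X BAR
 * may be mmap'd.  As a hedged example: a 16KB BAR whose MSI-X table sits
 * in the second page yields two areas, page 0 and pages 2-3, leaving a
 * hole over the table itself.
 */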
static int msix_sparse_mmap_cap(struct vfio_pci_device *vdev,
				struct vfio_info_cap *caps)
{
	struct vfio_region_info_cap_sparse_mmap *sparse;
	size_t end, size;
	int nr_areas = 2, i = 0, ret;

	end = pci_resource_len(vdev->pdev, vdev->msix_bar);

	/* If MSI-X table is aligned to the start or end, only one area */
	if (((vdev->msix_offset & PAGE_MASK) == 0) ||
	    (PAGE_ALIGN(vdev->msix_offset + vdev->msix_size) >= end))
		nr_areas = 1;

	size = sizeof(*sparse) + (nr_areas * sizeof(*sparse->areas));

	sparse = kzalloc(size, GFP_KERNEL);
	if (!sparse)
		return -ENOMEM;

	sparse->nr_areas = nr_areas;

	if (vdev->msix_offset & PAGE_MASK) {
		sparse->areas[i].offset = 0;
		sparse->areas[i].size = vdev->msix_offset & PAGE_MASK;
		i++;
	}

	if (PAGE_ALIGN(vdev->msix_offset + vdev->msix_size) < end) {
		sparse->areas[i].offset = PAGE_ALIGN(vdev->msix_offset +
						     vdev->msix_size);
		sparse->areas[i].size = end - sparse->areas[i].offset;
		i++;
	}

	ret = vfio_info_add_capability(caps, VFIO_REGION_INFO_CAP_SPARSE_MMAP,
				       sparse);
	kfree(sparse);

	return ret;
}

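/*
 * Device-specific region providers (the Intel IGD support enabled in
 * vfio_pci_enable() is one in-tree caller) hook in through this helper.
 * A hypothetical caller might do, for illustration only:
 *
 *	ret = vfio_pci_register_dev_region(vdev,
 *			VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
 *			VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
 *			&my_regops, size, VFIO_REGION_INFO_FLAG_READ, data);
 *
 * where my_regops is a hypothetical struct vfio_pci_regops providing at
 * least .rw and .release callbacks.
 */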
int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
				 unsigned int type, unsigned int subtype,
				 const struct vfio_pci_regops *ops,
				 size_t size, u32 flags, void *data)
{
	struct vfio_pci_region *region;

	region = krealloc(vdev->region,
			  (vdev->num_regions + 1) * sizeof(*region),
			  GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	vdev->region = region;
	vdev->region[vdev->num_regions].type = type;
	vdev->region[vdev->num_regions].subtype = subtype;
	vdev->region[vdev->num_regions].ops = ops;
	vdev->region[vdev->num_regions].size = size;
	vdev->region[vdev->num_regions].flags = flags;
	vdev->region[vdev->num_regions].data = data;

	vdev->num_regions++;

	return 0;
}

static long vfio_pci_ioctl(void *device_data,
			   unsigned int cmd, unsigned long arg)
{
	struct vfio_pci_device *vdev = device_data;
	unsigned long minsz;

	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = VFIO_DEVICE_FLAGS_PCI;

		if (vdev->reset_works)
			info.flags |= VFIO_DEVICE_FLAGS_RESET;

		info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions;
		info.num_irqs = VFIO_PCI_NUM_IRQS;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct pci_dev *pdev = vdev->pdev;
		struct vfio_region_info info;
		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
		int i, ret;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_CONFIG_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = pdev->cfg_size;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			break;
		case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = pci_resource_len(pdev, info.index);
			if (!info.size) {
				info.flags = 0;
				break;
			}

			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			if (vdev->bar_mmap_supported[info.index]) {
				info.flags |= VFIO_REGION_INFO_FLAG_MMAP;
				if (info.index == vdev->msix_bar) {
					ret = msix_sparse_mmap_cap(vdev, &caps);
					if (ret)
						return ret;
				}
			}

			break;
		case VFIO_PCI_ROM_REGION_INDEX:
		{
			void __iomem *io;
			size_t size;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.flags = 0;

			/* Report the BAR size, not the ROM size */
			info.size = pci_resource_len(pdev, info.index);
			if (!info.size) {
				/* Shadow ROMs appear as PCI option ROMs */
				if (pdev->resource[PCI_ROM_RESOURCE].flags &
				    IORESOURCE_ROM_SHADOW)
					info.size = 0x20000;
				else
					break;
			}

			/* Is it really there? */
			io = pci_map_rom(pdev, &size);
			if (!io || !size) {
				info.size = 0;
				break;
			}
			pci_unmap_rom(pdev, io);

			info.flags = VFIO_REGION_INFO_FLAG_READ;
			break;
		}
		case VFIO_PCI_VGA_REGION_INDEX:
			if (!vdev->has_vga)
				return -EINVAL;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0xc0000;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;

			break;
		default:
		{
			struct vfio_region_info_cap_type cap_type;

			if (info.index >=
			    VFIO_PCI_NUM_REGIONS + vdev->num_regions)
				return -EINVAL;

			i = info.index - VFIO_PCI_NUM_REGIONS;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = vdev->region[i].size;
			info.flags = vdev->region[i].flags;

			cap_type.type = vdev->region[i].type;
			cap_type.subtype = vdev->region[i].subtype;

			ret = vfio_info_add_capability(&caps,
						       VFIO_REGION_INFO_CAP_TYPE,
						       &cap_type);
			if (ret)
				return ret;
		}
		}

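		/*
		 * If the capability chain doesn't fit in the user's buffer,
		 * report the required argsz and a zero cap_offset; userspace
		 * is expected to retry the ioctl with a larger buffer.
		 */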
		if (caps.size) {
			info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
			if (info.argsz < sizeof(info) + caps.size) {
				info.argsz = sizeof(info) + caps.size;
				info.cap_offset = 0;
			} else {
				vfio_info_cap_shift(&caps, sizeof(info));
				if (copy_to_user((void __user *)arg +
						 sizeof(info), caps.buf,
						 caps.size)) {
					kfree(caps.buf);
					return -EFAULT;
				}
				info.cap_offset = sizeof(info);
			}

			kfree(caps.buf);
		}

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
		case VFIO_PCI_REQ_IRQ_INDEX:
			break;
		case VFIO_PCI_ERR_IRQ_INDEX:
			if (pci_is_pcie(vdev->pdev))
				break;
			/* fall through to return an error */
		default:
			return -EINVAL;
		}

		info.flags = VFIO_IRQ_INFO_EVENTFD;

		info.count = vfio_pci_get_irq_count(vdev, info.index);

		if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
			info.flags |= (VFIO_IRQ_INFO_MASKABLE |
				       VFIO_IRQ_INFO_AUTOMASKED);
		else
			info.flags |= VFIO_IRQ_INFO_NORESIZE;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;
		size_t size;
		u8 *data = NULL;
		int max, ret = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz || hdr.index >= VFIO_PCI_NUM_IRQS ||
		    hdr.count >= (U32_MAX - hdr.start) ||
		    hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
				  VFIO_IRQ_SET_ACTION_TYPE_MASK))
			return -EINVAL;

		max = vfio_pci_get_irq_count(vdev, hdr.index);
		if (hdr.start >= max || hdr.start + hdr.count > max)
			return -EINVAL;

		switch (hdr.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
		case VFIO_IRQ_SET_DATA_NONE:
			size = 0;
			break;
		case VFIO_IRQ_SET_DATA_BOOL:
			size = sizeof(uint8_t);
			break;
		case VFIO_IRQ_SET_DATA_EVENTFD:
			size = sizeof(int32_t);
			break;
		default:
			return -EINVAL;
		}

		if (size) {
			if (hdr.argsz - minsz < hdr.count * size)
				return -EINVAL;

			data = memdup_user((void __user *)(arg + minsz),
					   hdr.count * size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		mutex_lock(&vdev->igate);

		ret = vfio_pci_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
					      hdr.start, hdr.count, data);

		mutex_unlock(&vdev->igate);
		kfree(data);

		return ret;

	} else if (cmd == VFIO_DEVICE_RESET) {
		return vdev->reset_works ?
			pci_try_reset_function(vdev->pdev) : -EINVAL;

	} else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
		struct vfio_pci_hot_reset_info hdr;
		struct vfio_pci_fill_info fill = { 0 };
		struct vfio_pci_dependent_device *devices = NULL;
		bool slot = false;
		int ret = 0;

		minsz = offsetofend(struct vfio_pci_hot_reset_info, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz)
			return -EINVAL;

		hdr.flags = 0;

		/* Can we do a slot or bus reset or neither? */
		if (!pci_probe_reset_slot(vdev->pdev->slot))
			slot = true;
		else if (pci_probe_reset_bus(vdev->pdev->bus))
			return -ENODEV;

		/* How many devices are affected? */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_count_devs,
						    &fill.max, slot);
		if (ret)
			return ret;

		WARN_ON(!fill.max); /* Should always be at least one */

		/*
		 * If there's enough space, fill it now, otherwise return
		 * -ENOSPC and the number of devices affected.
		 */
		if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) {
			ret = -ENOSPC;
			hdr.count = fill.max;
			goto reset_info_exit;
		}

		devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL);
		if (!devices)
			return -ENOMEM;

		fill.devices = devices;

		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_fill_devs,
						    &fill, slot);

		/*
		 * If a device was removed between counting and filling,
		 * we may come up short of fill.max.  If a device was
		 * added, we'll have a return of -EAGAIN above.
		 */
		if (!ret)
			hdr.count = fill.cur;

reset_info_exit:
		if (copy_to_user((void __user *)arg, &hdr, minsz))
			ret = -EFAULT;

		if (!ret) {
			if (copy_to_user((void __user *)(arg + minsz), devices,
					 hdr.count * sizeof(*devices)))
				ret = -EFAULT;
		}

		kfree(devices);
		return ret;

	} else if (cmd == VFIO_DEVICE_PCI_HOT_RESET) {
		struct vfio_pci_hot_reset hdr;
		int32_t *group_fds;
		struct vfio_pci_group_entry *groups;
		struct vfio_pci_group_info info;
		bool slot = false;
		int i, count = 0, ret = 0;

		minsz = offsetofend(struct vfio_pci_hot_reset, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz || hdr.flags)
			return -EINVAL;

		/* Can we do a slot or bus reset or neither? */
		if (!pci_probe_reset_slot(vdev->pdev->slot))
			slot = true;
		else if (pci_probe_reset_bus(vdev->pdev->bus))
			return -ENODEV;

		/*
		 * We can't let userspace give us an arbitrarily large
		 * buffer to copy, so verify how many we think there
		 * could be.  Note groups can have multiple devices so
		 * one group per device is the max.
		 */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_count_devs,
						    &count, slot);
		if (ret)
			return ret;

		/* Somewhere between 1 and count is OK */
		if (!hdr.count || hdr.count > count)
			return -EINVAL;

		group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
		groups = kcalloc(hdr.count, sizeof(*groups), GFP_KERNEL);
		if (!group_fds || !groups) {
			kfree(group_fds);
			kfree(groups);
			return -ENOMEM;
		}

		if (copy_from_user(group_fds, (void __user *)(arg + minsz),
				   hdr.count * sizeof(*group_fds))) {
			kfree(group_fds);
			kfree(groups);
			return -EFAULT;
		}

		/*
		 * For each group_fd, get the group through the vfio external
		 * user interface and store the group and iommu ID.  This
		 * ensures the group is held across the reset.
		 */
		for (i = 0; i < hdr.count; i++) {
			struct vfio_group *group;
			struct fd f = fdget(group_fds[i]);
			if (!f.file) {
				ret = -EBADF;
				break;
			}

			group = vfio_group_get_external_user(f.file);
			fdput(f);
			if (IS_ERR(group)) {
				ret = PTR_ERR(group);
				break;
			}

			groups[i].group = group;
			groups[i].id = vfio_external_user_iommu_id(group);
		}

		kfree(group_fds);

		/* release reference to groups on error */
		if (ret)
			goto hot_reset_release;

		info.count = hdr.count;
		info.groups = groups;

		/*
		 * Test whether all the affected devices are contained
		 * by the set of groups provided by the user.
		 */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_validate_devs,
						    &info, slot);
		if (!ret)
			/* User has access, do the reset */
			ret = slot ? pci_try_reset_slot(vdev->pdev->slot) :
				     pci_try_reset_bus(vdev->pdev->bus);

hot_reset_release:
		for (i--; i >= 0; i--)
			vfio_group_put_external_user(groups[i].group);

		kfree(groups);
		return ret;
	}

	return -ENOTTY;
}

static ssize_t vfio_pci_rw(void *device_data, char __user *buf,
			   size_t count, loff_t *ppos, bool iswrite)
{
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	struct vfio_pci_device *vdev = device_data;

	if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
		return -EINVAL;

	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		return vfio_pci_config_rw(vdev, buf, count, ppos, iswrite);

	case VFIO_PCI_ROM_REGION_INDEX:
		if (iswrite)
			return -EINVAL;
		return vfio_pci_bar_rw(vdev, buf, count, ppos, false);

	case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
		return vfio_pci_bar_rw(vdev, buf, count, ppos, iswrite);

	case VFIO_PCI_VGA_REGION_INDEX:
		return vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite);
	default:
		index -= VFIO_PCI_NUM_REGIONS;
		return vdev->region[index].ops->rw(vdev, buf,
						   count, ppos, iswrite);
	}

	return -EINVAL;
}

static ssize_t vfio_pci_read(void *device_data, char __user *buf,
			     size_t count, loff_t *ppos)
{
	if (!count)
		return 0;

	return vfio_pci_rw(device_data, buf, count, ppos, false);
}

static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	if (!count)
		return 0;

	return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true);
}

static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
{
	struct vfio_pci_device *vdev = device_data;
	struct pci_dev *pdev = vdev->pdev;
	unsigned int index;
	u64 phys_len, req_len, pgoff, req_start;
	int ret;

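	/*
	 * The region index is encoded in the upper bits of the file offset
	 * (see VFIO_PCI_OFFSET_SHIFT), so recover the index and the page
	 * offset within the region from vm_pgoff.
	 */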
	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (index >= VFIO_PCI_ROM_REGION_INDEX)
		return -EINVAL;
	if (!vdev->bar_mmap_supported[index])
		return -EINVAL;

	phys_len = PAGE_ALIGN(pci_resource_len(pdev, index));
	req_len = vma->vm_end - vma->vm_start;
	pgoff = vma->vm_pgoff &
		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	req_start = pgoff << PAGE_SHIFT;

	if (req_start + req_len > phys_len)
		return -EINVAL;

	if (index == vdev->msix_bar) {
		/*
		 * Disallow mmaps overlapping the MSI-X table; users don't
		 * get to touch this directly.  We could find somewhere
		 * else to map the overlap, but page granularity is only
		 * a recommendation, not a requirement, so the user needs
		 * to know which bits are real.  Requiring them to mmap
		 * around the table makes that clear.
		 */

		/* If neither entirely above nor below, then it overlaps */
		if (!(req_start >= vdev->msix_offset + vdev->msix_size ||
		      req_start + req_len <= vdev->msix_offset))
			return -EINVAL;
	}

	/*
	 * Even though we don't make use of the barmap for the mmap,
	 * we need to request the region and the barmap tracks that.
	 */
	if (!vdev->barmap[index]) {
		ret = pci_request_selected_regions(pdev,
						   1 << index, "vfio-pci");
		if (ret)
			return ret;

		vdev->barmap[index] = pci_iomap(pdev, index, 0);
	}

	vma->vm_private_data = vdev;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       req_len, vma->vm_page_prot);
}

static void vfio_pci_request(void *device_data, unsigned int count)
{
	struct vfio_pci_device *vdev = device_data;

	mutex_lock(&vdev->igate);

	if (vdev->req_trigger) {
		if (!(count % 10))
			dev_notice_ratelimited(&vdev->pdev->dev,
				"Relaying device request to user (#%u)\n",
				count);
		eventfd_signal(vdev->req_trigger, 1);
	} else if (count == 0) {
		dev_warn(&vdev->pdev->dev,
			"No device request channel registered, blocked until released by user\n");
	}

	mutex_unlock(&vdev->igate);
}

static const struct vfio_device_ops vfio_pci_ops = {
	.name		= "vfio-pci",
	.open		= vfio_pci_open,
	.release	= vfio_pci_release,
	.ioctl		= vfio_pci_ioctl,
	.read		= vfio_pci_read,
	.write		= vfio_pci_write,
	.mmap		= vfio_pci_mmap,
	.request	= vfio_pci_request,
};

static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct vfio_pci_device *vdev;
	struct iommu_group *group;
	int ret;

	if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
		return -EINVAL;

	group = vfio_iommu_group_get(&pdev->dev);
	if (!group)
		return -EINVAL;

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev) {
		vfio_iommu_group_put(group, &pdev->dev);
		return -ENOMEM;
	}

	vdev->pdev = pdev;
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	mutex_init(&vdev->igate);
	spin_lock_init(&vdev->irqlock);

	ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
	if (ret) {
		vfio_iommu_group_put(group, &pdev->dev);
		kfree(vdev);
		return ret;
	}

	if (vfio_pci_is_vga(pdev)) {
		vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode);
		vga_set_legacy_decoding(pdev,
					vfio_pci_set_vga_decode(vdev, false));
	}

	if (!disable_idle_d3) {
		/*
		 * pci-core sets the device power state to an unknown value at
		 * bootup and after being removed from a driver.  The only
		 * transition it allows from this unknown state is to D0, which
		 * typically happens when a driver calls pci_enable_device().
		 * We're not ready to enable the device yet, but we do want to
		 * be able to get to D3.  Therefore first do a D0 transition
		 * before going to D3.
		 */
		pci_set_power_state(pdev, PCI_D0);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return ret;
}

static void vfio_pci_remove(struct pci_dev *pdev)
{
	struct vfio_pci_device *vdev;

	vdev = vfio_del_group_dev(&pdev->dev);
	if (!vdev)
		return;

	vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
	kfree(vdev->region);
	kfree(vdev);

	if (vfio_pci_is_vga(pdev)) {
		vga_client_register(pdev, NULL, NULL, NULL);
		vga_set_legacy_decoding(pdev,
				VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
				VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
	}

	if (!disable_idle_d3)
		pci_set_power_state(pdev, PCI_D0);
}

static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct vfio_pci_device *vdev;
	struct vfio_device *device;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (device == NULL)
		return PCI_ERS_RESULT_DISCONNECT;

	vdev = vfio_device_data(device);
	if (vdev == NULL) {
		vfio_device_put(device);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	mutex_lock(&vdev->igate);

	if (vdev->err_trigger)
		eventfd_signal(vdev->err_trigger, 1);

	mutex_unlock(&vdev->igate);

	vfio_device_put(device);

	return PCI_ERS_RESULT_CAN_RECOVER;
}

static const struct pci_error_handlers vfio_err_handlers = {
	.error_detected = vfio_pci_aer_err_detected,
};

static struct pci_driver vfio_pci_driver = {
	.name		= "vfio-pci",
	.id_table	= NULL, /* only dynamic ids */
	.probe		= vfio_pci_probe,
	.remove		= vfio_pci_remove,
	.err_handler	= &vfio_err_handlers,
};

struct vfio_devices {
	struct vfio_device **devices;
	int cur_index;
	int max_index;
};

static int vfio_pci_get_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_devices *devs = data;
	struct vfio_device *device;

	if (devs->cur_index == devs->max_index)
		return -ENOSPC;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (!device)
		return -EINVAL;

	if (pci_dev_driver(pdev) != &vfio_pci_driver) {
		vfio_device_put(device);
		return -EBUSY;
	}

	devs->devices[devs->cur_index++] = device;
	return 0;
}

/*
 * Attempt to do a bus/slot reset if there are devices affected by a reset for
 * this device that have needs_reset set and all of the affected devices are
 * unused (!refcnt).  Callers are required to hold driver_lock when calling
 * this to prevent device opens and concurrent bus reset attempts.  We
 * prevent device unbinds by acquiring and holding a reference to the
 * vfio_device.
 *
 * NB: vfio-core considers a group to be viable even if some devices are
 * bound to drivers like pci-stub or pcieport.  Here we require all devices
 * to be bound to vfio_pci since that's the only way we can be sure they
 * stay put.
 */
static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev)
{
	struct vfio_devices devs = { .cur_index = 0 };
	int i = 0, ret = -EINVAL;
	bool needs_reset = false, slot = false;
	struct vfio_pci_device *tmp;

	if (!pci_probe_reset_slot(vdev->pdev->slot))
		slot = true;
	else if (pci_probe_reset_bus(vdev->pdev->bus))
		return;

	if (vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
					  &i, slot) || !i)
		return;

	devs.max_index = i;
	devs.devices = kcalloc(i, sizeof(struct vfio_device *), GFP_KERNEL);
	if (!devs.devices)
		return;

	if (vfio_pci_for_each_slot_or_bus(vdev->pdev,
					  vfio_pci_get_devs, &devs, slot))
		goto put_devs;

	for (i = 0; i < devs.cur_index; i++) {
		tmp = vfio_device_data(devs.devices[i]);
		if (tmp->needs_reset)
			needs_reset = true;
		if (tmp->refcnt)
			goto put_devs;
	}

	if (needs_reset)
		ret = slot ? pci_try_reset_slot(vdev->pdev->slot) :
			     pci_try_reset_bus(vdev->pdev->bus);

put_devs:
	for (i = 0; i < devs.cur_index; i++) {
		tmp = vfio_device_data(devs.devices[i]);
		if (!ret)
			tmp->needs_reset = false;

		if (!tmp->refcnt && !disable_idle_d3)
			pci_set_power_state(tmp->pdev, PCI_D3hot);

		vfio_device_put(devs.devices[i]);
	}

	kfree(devs.devices);
}

static void __exit vfio_pci_cleanup(void)
{
	pci_unregister_driver(&vfio_pci_driver);
	vfio_pci_uninit_perm_bits();
}

static void __init vfio_pci_fill_ids(void)
{
	char *p, *id;
	int rc;

	/* no ids passed actually */
	if (ids[0] == '\0')
		return;

	/* add ids specified in the module parameter */
	p = ids;
	while ((id = strsep(&p, ","))) {
		unsigned int vendor, device, subvendor = PCI_ANY_ID,
			subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
		int fields;

		if (!strlen(id))
			continue;

		fields = sscanf(id, "%x:%x:%x:%x:%x:%x",
				&vendor, &device, &subvendor, &subdevice,
				&class, &class_mask);

		if (fields < 2) {
			pr_warn("invalid id string \"%s\"\n", id);
			continue;
		}

		rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
				   subvendor, subdevice, class, class_mask, 0);
		if (rc)
			pr_warn("failed to add dynamic id [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x (%d)\n",
				vendor, device, subvendor, subdevice,
				class, class_mask, rc);
		else
			pr_info("add [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x\n",
				vendor, device, subvendor, subdevice,
				class, class_mask);
	}
}
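
/*
 * Note: besides the ids= module parameter parsed above, devices can also
 * be bound at runtime through the standard driver-core dynamic ID
 * interface, e.g. (illustrative shell command):
 *
 *	echo "8086 1572" > /sys/bus/pci/drivers/vfio-pci/new_id
 */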

static int __init vfio_pci_init(void)
{
	int ret;

	/* Allocate shared config space permission data used by all devices */
	ret = vfio_pci_init_perm_bits();
	if (ret)
		return ret;

	/* Register and scan for devices */
	ret = pci_register_driver(&vfio_pci_driver);
	if (ret)
		goto out_driver;

	vfio_pci_fill_ids();

	return 0;

out_driver:
	vfio_pci_uninit_perm_bits();
	return ret;
}

module_init(vfio_pci_init);
module_exit(vfio_pci_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);