blob: b2f0eb4067cbfded05c5d9971767164af278c766 [file] [log] [blame]
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _DRIVERS_VIRTIO_VIRTIO_PCI_COMMON_H
#define _DRIVERS_VIRTIO_VIRTIO_PCI_COMMON_H
/*
 * Virtio PCI driver - APIs for common functionality for all device versions
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 */
18
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>

/* Per-virtqueue bookkeeping kept by the PCI transport, one instance per
 * created virtqueue. Linked into virtio_pci_device's virtqueues list so
 * interrupt handlers can find the queues to service. */
struct virtio_pci_vq_info {
	/* the actual virtqueue */
	struct virtqueue *vq;

	/* the list node for the virtqueues list */
	struct list_head node;

	/* MSI-X vector (or none) */
	unsigned msix_vector;
};
41
/* Our device structure */
struct virtio_pci_device {
	struct virtio_device vdev;
	struct pci_dev *pci_dev;

	/* In legacy mode, these two point to within ->legacy. */
	/* Where to read and clear interrupt */
	u8 __iomem *isr;

	/* Modern only fields */
	/* The IO mapping for the PCI config space (non-legacy mode) */
	struct virtio_pci_common_cfg __iomem *common;
	/* Device-specific data (non-legacy mode) */
	void __iomem *device;
	/* Base of vq notifications (non-legacy mode). */
	void __iomem *notify_base;

	/* So we can sanity-check accesses. */
	size_t notify_len;
	size_t device_len;

	/* Capability for when we need to map notifications per-vq. */
	int notify_map_cap;

	/* Multiply queue_notify_off by this value. (non-legacy mode). */
	u32 notify_offset_multiplier;

	/* NOTE(review): presumably a mask of the BARs used by the modern
	 * device interface — confirm against virtio_pci_modern.c. */
	int modern_bars;

	/* Legacy only field */
	/* the IO mapping for the PCI config space */
	void __iomem *ioaddr;

	/* a list of queues so we can dispatch IRQs */
	spinlock_t lock;
	struct list_head virtqueues;

	/* array of all queues for house-keeping */
	struct virtio_pci_vq_info **vqs;

	/* MSI-X support */
	int msix_enabled;
	int intx_enabled;
	cpumask_var_t *msix_affinity_masks;
	/* Name strings for interrupts. This size should be enough,
	 * and I'm too lazy to allocate each name separately. */
	char (*msix_names)[256];
	/* Number of available vectors */
	unsigned msix_vectors;
	/* Vectors allocated, excluding per-vq vectors if any */
	unsigned msix_used_vectors;

	/* Whether we have vector per vq */
	bool per_vq_vectors;

	/* Transport hooks. NOTE(review): these appear to be filled in by the
	 * legacy/modern probe paths declared at the bottom of this header —
	 * confirm in virtio_pci_legacy.c / virtio_pci_modern.c. */
	struct virtqueue *(*setup_vq)(struct virtio_pci_device *vp_dev,
				      struct virtio_pci_vq_info *info,
				      unsigned idx,
				      void (*callback)(struct virtqueue *vq),
				      const char *name,
				      bool ctx,
				      u16 msix_vec);
	void (*del_vq)(struct virtio_pci_vq_info *info);

	u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector);
};
108
/* Constants for MSI-X */
/* Use first vector for configuration changes, second and the rest for
 * virtqueues. Thus, we need at least 2 vectors for MSI. */
enum {
	VP_MSIX_CONFIG_VECTOR = 0,
	VP_MSIX_VQ_VECTOR = 1,
};
116
Michael S. Tsirkin38eb4a22014-12-07 18:41:16 +0200117/* Convert a generic virtio device to our structure */
118static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
119{
120 return container_of(vdev, struct virtio_pci_device, vdev);
121}
122
/* wait for pending irq handlers */
void vp_synchronize_vectors(struct virtio_device *vdev);
/* the notify function used when creating a virt queue */
bool vp_notify(struct virtqueue *vq);
/* the config->del_vqs() implementation */
void vp_del_vqs(struct virtio_device *vdev);
/* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		struct virtqueue *vqs[], vq_callback_t *callbacks[],
		const char * const names[], const bool *ctx,
		struct irq_affinity *desc);
/* the config->bus_name() implementation */
const char *vp_bus_name(struct virtio_device *vdev);

/* Setup the affinity for a virtqueue:
 * - force the affinity for per vq vector
 * - OR over all affinities for shared MSI
 * - ignore the affinity request if we're using INTX
 */
int vp_set_vq_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask);

/* the config->get_vq_affinity() implementation */
const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index);
144
#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY)
int virtio_pci_legacy_probe(struct virtio_pci_device *);
void virtio_pci_legacy_remove(struct virtio_pci_device *);
#else
/* Legacy transport compiled out: probing reports "no such device" so the
 * caller falls through without a legacy setup, and remove is a no-op. */
static inline int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
{
	return -ENODEV;
}
static inline void virtio_pci_legacy_remove(struct virtio_pci_device *vp_dev)
{
}
#endif
/* Modern (virtio 1.0+) transport probe/remove, always built in. */
int virtio_pci_modern_probe(struct virtio_pci_device *);
void virtio_pci_modern_remove(struct virtio_pci_device *);

#endif