// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio PCI driver - common functionality for all device versions
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 */

#include "virtio_pci_common.h"

static bool force_legacy = false;

#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY)
module_param(force_legacy, bool, 0444);
MODULE_PARM_DESC(force_legacy,
		 "Force legacy mode for transitional virtio 1 devices");
#endif

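/*
 * Note on vp_disable_cbs()/vp_enable_cbs(): with INTx, callback delivery
 * is gated by the intx_soft_enabled flag checked in vp_interrupt(); with
 * MSI-X, each vector is instead masked at the interrupt-chip level. All
 * MSI-X handlers below are requested with IRQF_NO_AUTOEN, so they start
 * disabled and the disable_irq()/enable_irq() calls stay balanced.
 */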
/* disable irq handlers */
void vp_disable_cbs(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	int i;

	if (vp_dev->intx_enabled) {
		/*
		 * The below synchronize_irq() guarantees that any
		 * handler for this interrupt line that runs after it
		 * completes will see intx_soft_enabled == false.
		 */
		WRITE_ONCE(vp_dev->intx_soft_enabled, false);
		synchronize_irq(vp_dev->pci_dev->irq);
	}

	for (i = 0; i < vp_dev->msix_vectors; ++i)
		disable_irq(pci_irq_vector(vp_dev->pci_dev, i));
}

/* enable irq handlers */
void vp_enable_cbs(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	int i;

	if (vp_dev->intx_enabled) {
		disable_irq(vp_dev->pci_dev->irq);
		/*
		 * The above disable_irq() provides TSO (total store
		 * order) and as such promotes the below store to a
		 * store-release.
		 */
		WRITE_ONCE(vp_dev->intx_soft_enabled, true);
		enable_irq(vp_dev->pci_dev->irq);
		return;
	}

	for (i = 0; i < vp_dev->msix_vectors; ++i)
		enable_irq(pci_irq_vector(vp_dev->pci_dev, i));
}

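/*
 * vq->priv holds the ioremapped address of this queue's notification
 * register, filled in by the transport-specific setup_vq() (see the
 * legacy/modern transport files).
 */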
/* the notify function used when creating a virt queue */
bool vp_notify(struct virtqueue *vq)
{
	/* we write the queue's selector into the notification register to
	 * signal the other end */
	iowrite16(vq->index, (void __iomem *)vq->priv);
	return true;
}

/* Handle a configuration change: Tell driver if it wants to know. */
static irqreturn_t vp_config_changed(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;

	virtio_config_changed(&vp_dev->vdev);
	return IRQ_HANDLED;
}

/* Notify all virtqueues on an interrupt. */
static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	struct virtio_pci_vq_info *info;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;

	spin_lock_irqsave(&vp_dev->lock, flags);
	list_for_each_entry(info, &vp_dev->virtqueues, node) {
		if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&vp_dev->lock, flags);

	return ret;
}

/* A small wrapper to also acknowledge the interrupt when it's handled.
 * I really need an EIO hook for the vring so I can ack the interrupt once we
 * know that we'll be handling the IRQ but before we invoke the callback since
 * the callback may notify the host which results in the host attempting to
 * raise an interrupt that we would then mask once we acknowledged the
 * interrupt. */
static irqreturn_t vp_interrupt(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	u8 isr;

	if (!READ_ONCE(vp_dev->intx_soft_enabled))
		return IRQ_NONE;

	/* reading the ISR has the effect of also clearing it so it's very
	 * important to save off the value. */
	isr = ioread8(vp_dev->isr);

	/* It's definitely not us if the ISR was not high */
	if (!isr)
		return IRQ_NONE;

	/* Configuration change?  Tell driver if it wants to know. */
	if (isr & VIRTIO_PCI_ISR_CONFIG)
		vp_config_changed(irq, opaque);

	return vp_vring_interrupt(irq, opaque);
}

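/*
 * MSI-X vector layout: the first vector carries configuration-change
 * interrupts; with per_vq_vectors each vq that has a callback then gets
 * its own vector, otherwise one further vector is shared by all vqs.
 */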
static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
				   bool per_vq_vectors, struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	const char *name = dev_name(&vp_dev->vdev.dev);
	unsigned flags = PCI_IRQ_MSIX;
	unsigned i, v;
	int err = -ENOMEM;

	vp_dev->msix_vectors = nvectors;

	vp_dev->msix_names = kmalloc_array(nvectors,
					   sizeof(*vp_dev->msix_names),
					   GFP_KERNEL);
	if (!vp_dev->msix_names)
		goto error;
	vp_dev->msix_affinity_masks
		= kcalloc(nvectors, sizeof(*vp_dev->msix_affinity_masks),
			  GFP_KERNEL);
	if (!vp_dev->msix_affinity_masks)
		goto error;
	for (i = 0; i < nvectors; ++i)
		if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
				       GFP_KERNEL))
			goto error;

	if (desc) {
		flags |= PCI_IRQ_AFFINITY;
		desc->pre_vectors++; /* virtio config vector */
	}
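	/*
	 * With PCI_IRQ_AFFINITY the allocated vectors are spread across
	 * CPUs; bumping desc->pre_vectors above keeps the config vector
	 * out of that spreading.
	 */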

	err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
					     nvectors, flags, desc);
	if (err < 0)
		goto error;
	vp_dev->msix_enabled = 1;

	/* Set the vector used for configuration */
	v = vp_dev->msix_used_vectors;
	snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
		 "%s-config", name);
	err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
			  vp_config_changed, IRQF_NO_AUTOEN,
			  vp_dev->msix_names[v],
			  vp_dev);
	if (err)
		goto error;
	++vp_dev->msix_used_vectors;

	v = vp_dev->config_vector(vp_dev, v);
	/* Verify we had enough resources to assign the vector */
	if (v == VIRTIO_MSI_NO_VECTOR) {
		err = -EBUSY;
		goto error;
	}

	if (!per_vq_vectors) {
		/* Shared vector for all VQs */
		v = vp_dev->msix_used_vectors;
		snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
			 "%s-virtqueues", name);
		err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
				  vp_vring_interrupt, IRQF_NO_AUTOEN,
				  vp_dev->msix_names[v],
				  vp_dev);
		if (err)
			goto error;
		++vp_dev->msix_used_vectors;
	}
	return 0;
error:
	return err;
}

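/*
 * Queues with a callback are linked into vp_dev->virtqueues so that
 * vp_vring_interrupt() can scan them when a shared interrupt fires;
 * callback-less queues get an empty list node and are never scanned.
 */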
static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name,
				     bool ctx,
				     u16 msix_vec)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
	struct virtqueue *vq;
	unsigned long flags;

	/* fill out our structure that represents an active queue */
	if (!info)
		return ERR_PTR(-ENOMEM);

	vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, ctx,
			      msix_vec);
	if (IS_ERR(vq))
		goto out_info;

	info->vq = vq;
	if (callback) {
		spin_lock_irqsave(&vp_dev->lock, flags);
		list_add(&info->node, &vp_dev->virtqueues);
		spin_unlock_irqrestore(&vp_dev->lock, flags);
	} else {
		INIT_LIST_HEAD(&info->node);
	}

	vp_dev->vqs[index] = info;
	return vq;

out_info:
	kfree(info);
	return vq;
}

static void vp_del_vq(struct virtqueue *vq)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
	unsigned long flags;

	spin_lock_irqsave(&vp_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vp_dev->lock, flags);

	vp_dev->del_vq(info);
	kfree(info);
}

/* the config->del_vqs() implementation */
void vp_del_vqs(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtqueue *vq, *n;
	int i;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
		if (vp_dev->per_vq_vectors) {
			int v = vp_dev->vqs[vq->index]->msix_vector;

			if (v != VIRTIO_MSI_NO_VECTOR) {
				int irq = pci_irq_vector(vp_dev->pci_dev, v);

				irq_set_affinity_hint(irq, NULL);
				free_irq(irq, vq);
			}
		}
		vp_del_vq(vq);
	}
	vp_dev->per_vq_vectors = false;

	if (vp_dev->intx_enabled) {
		free_irq(vp_dev->pci_dev->irq, vp_dev);
		vp_dev->intx_enabled = 0;
	}

	for (i = 0; i < vp_dev->msix_used_vectors; ++i)
		free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);

	if (vp_dev->msix_affinity_masks) {
		for (i = 0; i < vp_dev->msix_vectors; i++)
			if (vp_dev->msix_affinity_masks[i])
				free_cpumask_var(vp_dev->msix_affinity_masks[i]);
	}

	if (vp_dev->msix_enabled) {
		/* Disable the vector used for configuration */
		vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);

		pci_free_irq_vectors(vp_dev->pci_dev);
		vp_dev->msix_enabled = 0;
	}

	vp_dev->msix_vectors = 0;
	vp_dev->msix_used_vectors = 0;
	kfree(vp_dev->msix_names);
	vp_dev->msix_names = NULL;
	kfree(vp_dev->msix_affinity_masks);
	vp_dev->msix_affinity_masks = NULL;
	kfree(vp_dev->vqs);
	vp_dev->vqs = NULL;
}

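/*
 * Worker for the two MSI-X strategies tried by vp_find_vqs():
 * per_vq_vectors=true asks for one vector per callback vq plus one for
 * config changes, per_vq_vectors=false asks for just two vectors total.
 */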
static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
		struct virtqueue *vqs[], vq_callback_t *callbacks[],
		const char * const names[], bool per_vq_vectors,
		const bool *ctx,
		struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u16 msix_vec;
	int i, err, nvectors, allocated_vectors, queue_idx = 0;

	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
	if (!vp_dev->vqs)
		return -ENOMEM;

	if (per_vq_vectors) {
		/* Best option: one for change interrupt, one per vq. */
		nvectors = 1;
		for (i = 0; i < nvqs; ++i)
			if (names[i] && callbacks[i])
				++nvectors;
	} else {
		/* Second best: one for change, shared for all vqs. */
		nvectors = 2;
	}

	err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors,
				      per_vq_vectors ? desc : NULL);
	if (err)
		goto error_find;

	vp_dev->per_vq_vectors = per_vq_vectors;
	allocated_vectors = vp_dev->msix_used_vectors;
	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		if (!callbacks[i])
			msix_vec = VIRTIO_MSI_NO_VECTOR;
		else if (vp_dev->per_vq_vectors)
			msix_vec = allocated_vectors++;
		else
			msix_vec = VP_MSIX_VQ_VECTOR;
		vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
				     ctx ? ctx[i] : false,
				     msix_vec);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto error_find;
		}

		if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
			continue;

		/* allocate per-vq irq if available and necessary */
		snprintf(vp_dev->msix_names[msix_vec],
			 sizeof *vp_dev->msix_names,
			 "%s-%s",
			 dev_name(&vp_dev->vdev.dev), names[i]);
		err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
				  vring_interrupt, IRQF_NO_AUTOEN,
				  vp_dev->msix_names[msix_vec],
				  vqs[i]);
		if (err)
			goto error_find;
	}
	return 0;

error_find:
	vp_del_vqs(vdev);
	return err;
}

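/*
 * Last-resort fallback: one shared INTx line. The handler must read ISR
 * to tell config changes from vring interrupts, which is why
 * vp_interrupt() is registered with IRQF_SHARED here.
 */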
static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
		struct virtqueue *vqs[], vq_callback_t *callbacks[],
		const char * const names[], const bool *ctx)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	int i, err, queue_idx = 0;

	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
	if (!vp_dev->vqs)
		return -ENOMEM;

	err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
			  dev_name(&vdev->dev), vp_dev);
	if (err)
		goto out_del_vqs;

	vp_dev->intx_enabled = 1;
	vp_dev->per_vq_vectors = false;
	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}
		vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
				     ctx ? ctx[i] : false,
				     VIRTIO_MSI_NO_VECTOR);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto out_del_vqs;
		}
	}

	return 0;
out_del_vqs:
	vp_del_vqs(vdev);
	return err;
}

/* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		struct virtqueue *vqs[], vq_callback_t *callbacks[],
		const char * const names[], const bool *ctx,
		struct irq_affinity *desc)
{
	int err;

	/* Try MSI-X with one vector per queue. */
	err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true, ctx, desc);
	if (!err)
		return 0;
	/* Fallback: MSI-X with one vector for config, one shared for queues. */
	err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, ctx, desc);
	if (!err)
		return 0;
	/* Finally fall back to regular interrupts. */
	return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names, ctx);
}

const char *vp_bus_name(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	return pci_name(vp_dev->pci_dev);
}

/* Setup the affinity for a virtqueue:
 * - force the affinity for per vq vector
 * - OR over all affinities for shared MSI
 * - ignore the affinity request if we're using INTX
 */
int vp_set_vq_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
{
	struct virtio_device *vdev = vq->vdev;
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
	struct cpumask *mask;
	unsigned int irq;

	if (!vq->callback)
		return -EINVAL;

	if (vp_dev->msix_enabled) {
		mask = vp_dev->msix_affinity_masks[info->msix_vector];
		irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
		if (!cpu_mask)
			irq_set_affinity_hint(irq, NULL);
		else {
			cpumask_copy(mask, cpu_mask);
			irq_set_affinity_hint(irq, mask);
		}
	}
	return 0;
}

const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	if (!vp_dev->per_vq_vectors ||
	    vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR)
		return NULL;

	return pci_irq_get_affinity(vp_dev->pci_dev,
				    vp_dev->vqs[index]->msix_vector);
}

#ifdef CONFIG_PM_SLEEP
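/*
 * System sleep support: freeze quiesces the device through the virtio
 * core before disabling the PCI device; restore re-enables the device,
 * reclaims bus mastering and replays the saved virtio state.
 */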
static int virtio_pci_freeze(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	int ret;

	ret = virtio_device_freeze(&vp_dev->vdev);

	if (!ret)
		pci_disable_device(pci_dev);
	return ret;
}

static int virtio_pci_restore(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	int ret;

	ret = pci_enable_device(pci_dev);
	if (ret)
		return ret;

	pci_set_master(pci_dev);
	return virtio_device_restore(&vp_dev->vdev);
}

static const struct dev_pm_ops virtio_pci_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore)
};
#endif

/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
static const struct pci_device_id virtio_pci_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_REDHAT_QUMRANET, PCI_ANY_ID) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);

static void virtio_pci_release_dev(struct device *_d)
{
	struct virtio_device *vdev = dev_to_virtio(_d);
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* As struct device is a kobject, it's not safe to
	 * free the memory (including the reference counter itself)
	 * until its release callback has run. */
	kfree(vp_dev);
}

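/*
 * Probe order: modern (virtio 1.x) first, falling back to the legacy
 * 0.9.x layout on -ENODEV; force_legacy flips that order, but still
 * tries modern when BAR0 can't be mapped as an I/O region.
 */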
static int virtio_pci_probe(struct pci_dev *pci_dev,
			    const struct pci_device_id *id)
{
	struct virtio_pci_device *vp_dev, *reg_dev = NULL;
	int rc;

	/* allocate our structure and fill it out */
	vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
	if (!vp_dev)
		return -ENOMEM;

	pci_set_drvdata(pci_dev, vp_dev);
	vp_dev->vdev.dev.parent = &pci_dev->dev;
	vp_dev->vdev.dev.release = virtio_pci_release_dev;
	vp_dev->pci_dev = pci_dev;
	INIT_LIST_HEAD(&vp_dev->virtqueues);
	spin_lock_init(&vp_dev->lock);

	/* enable the device */
	rc = pci_enable_device(pci_dev);
	if (rc)
		goto err_enable_device;

	if (force_legacy) {
		rc = virtio_pci_legacy_probe(vp_dev);
		/* Also try modern mode if we can't map BAR0 (no IO space). */
		if (rc == -ENODEV || rc == -ENOMEM)
			rc = virtio_pci_modern_probe(vp_dev);
		if (rc)
			goto err_probe;
	} else {
		rc = virtio_pci_modern_probe(vp_dev);
		if (rc == -ENODEV)
			rc = virtio_pci_legacy_probe(vp_dev);
		if (rc)
			goto err_probe;
	}

	pci_set_master(pci_dev);

	vp_dev->is_legacy = vp_dev->ldev.ioaddr ? true : false;

	rc = register_virtio_device(&vp_dev->vdev);
	reg_dev = vp_dev;
	if (rc)
		goto err_register;

	return 0;

err_register:
	if (vp_dev->is_legacy)
		virtio_pci_legacy_remove(vp_dev);
	else
		virtio_pci_modern_remove(vp_dev);
err_probe:
	pci_disable_device(pci_dev);
err_enable_device:
	if (reg_dev)
		put_device(&vp_dev->vdev.dev);
	else
		kfree(vp_dev);
	return rc;
}

static void virtio_pci_remove(struct pci_dev *pci_dev)
{
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	struct device *dev = get_device(&vp_dev->vdev.dev);

	/*
	 * Device is marked broken on surprise removal so that virtio upper
	 * layers can abort any ongoing operation.
	 */
	if (!pci_device_is_present(pci_dev))
		virtio_break_device(&vp_dev->vdev);

	pci_disable_sriov(pci_dev);

	unregister_virtio_device(&vp_dev->vdev);

	if (vp_dev->is_legacy)
		virtio_pci_legacy_remove(vp_dev);
	else
		virtio_pci_modern_remove(vp_dev);

	pci_disable_device(pci_dev);
	put_device(dev);
}

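/*
 * SR-IOV can only be toggled while the PF is bound and DRIVER_OK, the
 * device offered VIRTIO_F_SR_IOV, and no VFs are assigned to guests;
 * num_vfs == 0 disables SR-IOV, a positive count enables that many VFs.
 */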
static int virtio_pci_sriov_configure(struct pci_dev *pci_dev, int num_vfs)
{
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	struct virtio_device *vdev = &vp_dev->vdev;
	int ret;

	if (!(vdev->config->get_status(vdev) & VIRTIO_CONFIG_S_DRIVER_OK))
		return -EBUSY;

	if (!__virtio_test_bit(vdev, VIRTIO_F_SR_IOV))
		return -EINVAL;

	if (pci_vfs_assigned(pci_dev))
		return -EPERM;

	if (num_vfs == 0) {
		pci_disable_sriov(pci_dev);
		return 0;
	}

	ret = pci_enable_sriov(pci_dev, num_vfs);
	if (ret < 0)
		return ret;

	return num_vfs;
}

static struct pci_driver virtio_pci_driver = {
	.name		= "virtio-pci",
	.id_table	= virtio_pci_id_table,
	.probe		= virtio_pci_probe,
	.remove		= virtio_pci_remove,
#ifdef CONFIG_PM_SLEEP
	.driver.pm	= &virtio_pci_pm_ops,
#endif
	.sriov_configure = virtio_pci_sriov_configure,
};

module_pci_driver(virtio_pci_driver);

MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>");
MODULE_DESCRIPTION("virtio-pci");
MODULE_LICENSE("GPL");
MODULE_VERSION("1");