// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio PCI driver - modern (virtio 1.0) device support
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 */

#include <linux/delay.h>
#define VIRTIO_PCI_NO_LEGACY
#define VIRTIO_RING_NO_LEGACY
#include "virtio_pci_common.h"

/*
 * Type-safe wrappers for io accesses.
 * Use these to enforce at compile time the following spec requirement:
 *
 * The driver MUST access each field using the “natural” access
 * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses
 * for 16-bit fields and 8-bit accesses for 8-bit fields.
 */
static inline u8 vp_ioread8(const u8 __iomem *addr)
{
        return ioread8(addr);
}

static inline u16 vp_ioread16(const __le16 __iomem *addr)
{
        return ioread16(addr);
}

static inline u32 vp_ioread32(const __le32 __iomem *addr)
{
        return ioread32(addr);
}

static inline void vp_iowrite8(u8 value, u8 __iomem *addr)
{
        iowrite8(value, addr);
}

static inline void vp_iowrite16(u16 value, __le16 __iomem *addr)
{
        iowrite16(value, addr);
}

static inline void vp_iowrite32(u32 value, __le32 __iomem *addr)
{
        iowrite32(value, addr);
}

static void vp_iowrite64_twopart(u64 val,
                                 __le32 __iomem *lo, __le32 __iomem *hi)
{
        vp_iowrite32((u32)val, lo);
        vp_iowrite32(val >> 32, hi);
}

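/*
 * 64-bit fields in the common config are laid out as adjacent _lo/_hi
 * 32-bit pairs, so a 64-bit value is written as two 32-bit accesses, e.g.
 * (as done for the virtqueue addresses below):
 *
 *      vp_iowrite64_twopart(desc_addr, &cfg->queue_desc_lo,
 *                           &cfg->queue_desc_hi);
 */
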
static void __iomem *map_capability(struct pci_dev *dev, int off,
                                    size_t minlen,
                                    u32 align,
                                    u32 start, u32 size,
                                    size_t *len)
{
        u8 bar;
        u32 offset, length;
        void __iomem *p;

        pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap,
                                                 bar),
                             &bar);
        pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset),
                              &offset);
        pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length),
                              &length);

        if (length <= start) {
                dev_err(&dev->dev,
                        "virtio_pci: bad capability len %u (>%u expected)\n",
                        length, start);
                return NULL;
        }

        if (length - start < minlen) {
                dev_err(&dev->dev,
                        "virtio_pci: bad capability len %u (>=%zu expected)\n",
                        length, minlen);
                return NULL;
        }

        length -= start;

        if (start + offset < offset) {
                dev_err(&dev->dev,
                        "virtio_pci: map wrap-around %u+%u\n",
                        start, offset);
                return NULL;
        }

        offset += start;

        if (offset & (align - 1)) {
                dev_err(&dev->dev,
                        "virtio_pci: offset %u not aligned to %u\n",
                        offset, align);
                return NULL;
        }

        if (length > size)
                length = size;

        if (len)
                *len = length;

        if (minlen + offset < minlen ||
            minlen + offset > pci_resource_len(dev, bar)) {
                dev_err(&dev->dev,
                        "virtio_pci: map virtio %zu@%u "
                        "out of range on bar %i length %lu\n",
                        minlen, offset,
                        bar, (unsigned long)pci_resource_len(dev, bar));
                return NULL;
        }

        p = pci_iomap_range(dev, bar, offset, length);
        if (!p)
                dev_err(&dev->dev,
                        "virtio_pci: unable to map virtio %u@%u on bar %i\n",
                        length, offset, bar);
        return p;
}

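/*
 * For reference, the bar/offset/length fields read above come from the
 * generic virtio PCI capability layout (a sketch of struct virtio_pci_cap
 * from <uapi/linux/virtio_pci.h>):
 *
 *      struct virtio_pci_cap {
 *              __u8 cap_vndr;          // Generic PCI field: PCI_CAP_ID_VNDR
 *              __u8 cap_next;          // Generic PCI field: next ptr
 *              __u8 cap_len;           // Generic PCI field: capability length
 *              __u8 cfg_type;          // Identifies the structure
 *              __u8 bar;               // Where to find it
 *              __u8 id;                // Multiple capabilities of the same type
 *              __u8 padding[2];        // Pad to full dword
 *              __le32 offset;          // Offset within bar
 *              __le32 length;          // Length of the structure, in bytes
 *      };
 */
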
/*
 * vp_modern_get_features - get features from device
 * @mdev: the modern virtio-pci device
 *
 * Returns the features read from the device
 */
static u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev)
{
        struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
        u64 features;

        vp_iowrite32(0, &cfg->device_feature_select);
        features = vp_ioread32(&cfg->device_feature);
        vp_iowrite32(1, &cfg->device_feature_select);
        features |= ((u64)vp_ioread32(&cfg->device_feature) << 32);

        return features;
}

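/*
 * The feature bits are exposed through a 32-bit window: writing N to
 * device_feature_select makes device_feature hold feature bits
 * [32*N, 32*N + 31]. That is why the 64-bit value is assembled from two
 * select/read pairs above, mirrored by the select/write pairs in
 * vp_modern_set_features() below.
 */
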
/* virtio config->get_features() implementation */
static u64 vp_get_features(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);

        return vp_modern_get_features(&vp_dev->mdev);
}

static void vp_transport_features(struct virtio_device *vdev, u64 features)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        struct pci_dev *pci_dev = vp_dev->pci_dev;

        if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) &&
            pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV))
                __virtio_set_bit(vdev, VIRTIO_F_SR_IOV);
}

/*
 * vp_modern_set_features - set features to device
 * @mdev: the modern virtio-pci device
 * @features: the features to set to the device
 */
static void vp_modern_set_features(struct virtio_pci_modern_device *mdev,
                                   u64 features)
{
        struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

        vp_iowrite32(0, &cfg->guest_feature_select);
        vp_iowrite32((u32)features, &cfg->guest_feature);
        vp_iowrite32(1, &cfg->guest_feature_select);
        vp_iowrite32(features >> 32, &cfg->guest_feature);
}

/*
 * vp_modern_queue_vector - set the MSI-X vector for a specific virtqueue
 * @mdev: the modern virtio-pci device
 * @index: queue index
 * @vector: the MSI-X vector to use for this queue
 *
 * Returns the vector read back from the device; VIRTIO_MSI_NO_VECTOR
 * means the device could not allocate it.
 */
static u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev,
                                  u16 index, u16 vector)
{
        struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

        vp_iowrite16(index, &cfg->queue_select);
        vp_iowrite16(vector, &cfg->queue_msix_vector);
        /* Flush the write out to device */
        return vp_ioread16(&cfg->queue_msix_vector);
}

/*
 * vp_modern_queue_address - set the virtqueue address
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 * @desc_addr: address of the descriptor area
 * @driver_addr: address of the driver area
 * @device_addr: address of the device area
 */
static void vp_modern_queue_address(struct virtio_pci_modern_device *mdev,
                                    u16 index, u64 desc_addr, u64 driver_addr,
                                    u64 device_addr)
{
        struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

        vp_iowrite16(index, &cfg->queue_select);

        vp_iowrite64_twopart(desc_addr, &cfg->queue_desc_lo,
                             &cfg->queue_desc_hi);
        vp_iowrite64_twopart(driver_addr, &cfg->queue_avail_lo,
                             &cfg->queue_avail_hi);
        vp_iowrite64_twopart(device_addr, &cfg->queue_used_lo,
                             &cfg->queue_used_hi);
}

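/*
 * As elsewhere in this file, per-virtqueue registers follow a common
 * select/access protocol: write the index to queue_select first, after
 * which the queue_* fields of the common config all refer to the
 * selected queue.
 */
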
/*
 * vp_modern_set_queue_enable - enable/disable a virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 * @enable: whether the virtqueue should be enabled
 */
static void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev,
                                       u16 index, bool enable)
{
        vp_iowrite16(index, &mdev->common->queue_select);
        vp_iowrite16(enable, &mdev->common->queue_enable);
}

/*
 * vp_modern_get_queue_enable - check whether a virtqueue is enabled
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 *
 * Returns whether the virtqueue is enabled or not
 */
static bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev,
                                       u16 index)
{
        vp_iowrite16(index, &mdev->common->queue_select);

        return vp_ioread16(&mdev->common->queue_enable);
}

/*
 * vp_modern_set_queue_size - set size for a virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 * @size: the size of the virtqueue
 */
static void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev,
                                     u16 index, u16 size)
{
        vp_iowrite16(index, &mdev->common->queue_select);
        vp_iowrite16(size, &mdev->common->queue_size);
}

/*
 * vp_modern_get_queue_size - get size for a virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 *
 * Returns the size of the virtqueue
 */
static u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev,
                                    u16 index)
{
        vp_iowrite16(index, &mdev->common->queue_select);

        return vp_ioread16(&mdev->common->queue_size);
}

/*
 * vp_modern_get_num_queues - get the number of virtqueues
 * @mdev: the modern virtio-pci device
 *
 * Returns the number of virtqueues
 */
static u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev)
{
        return vp_ioread16(&mdev->common->num_queues);
}

/* virtio config->finalize_features() implementation */
static int vp_finalize_features(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        u64 features = vdev->features;

        /* Give virtio_ring a chance to accept features. */
        vring_transport_features(vdev);

        /* Give virtio_pci a chance to accept features. */
        vp_transport_features(vdev, features);

        if (!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
                dev_err(&vdev->dev, "virtio: device uses modern interface "
                        "but does not have VIRTIO_F_VERSION_1\n");
                return -EINVAL;
        }

        vp_modern_set_features(&vp_dev->mdev, vdev->features);

        return 0;
}

/* virtio config->get() implementation */
static void vp_get(struct virtio_device *vdev, unsigned offset,
                   void *buf, unsigned len)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
        void __iomem *device = mdev->device;
        u8 b;
        __le16 w;
        __le32 l;

        BUG_ON(offset + len > mdev->device_len);

        switch (len) {
        case 1:
                b = ioread8(device + offset);
                memcpy(buf, &b, sizeof b);
                break;
        case 2:
                w = cpu_to_le16(ioread16(device + offset));
                memcpy(buf, &w, sizeof w);
                break;
        case 4:
                l = cpu_to_le32(ioread32(device + offset));
                memcpy(buf, &l, sizeof l);
                break;
        case 8:
                l = cpu_to_le32(ioread32(device + offset));
                memcpy(buf, &l, sizeof l);
                l = cpu_to_le32(ioread32(device + offset + sizeof l));
                memcpy(buf + sizeof l, &l, sizeof l);
                break;
        default:
                BUG();
        }
}

/* the config->set() implementation.  it's symmetric to the config->get()
 * implementation */
static void vp_set(struct virtio_device *vdev, unsigned offset,
                   const void *buf, unsigned len)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
        void __iomem *device = mdev->device;
        u8 b;
        __le16 w;
        __le32 l;

        BUG_ON(offset + len > mdev->device_len);

        switch (len) {
        case 1:
                memcpy(&b, buf, sizeof b);
                iowrite8(b, device + offset);
                break;
        case 2:
                memcpy(&w, buf, sizeof w);
                iowrite16(le16_to_cpu(w), device + offset);
                break;
        case 4:
                memcpy(&l, buf, sizeof l);
                iowrite32(le32_to_cpu(l), device + offset);
                break;
        case 8:
                memcpy(&l, buf, sizeof l);
                iowrite32(le32_to_cpu(l), device + offset);
                memcpy(&l, buf + sizeof l, sizeof l);
                iowrite32(le32_to_cpu(l), device + offset + sizeof l);
                break;
        default:
                BUG();
        }
}

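/*
 * Neither 8-byte path above is atomic: the two 32-bit accesses can race
 * with a device-side config update. This is what config_generation
 * (below) exists for; generic readers such as __virtio_cread_many() in
 * <linux/virtio_config.h> retry along the lines of:
 *
 *      do {
 *              gen = vdev->config->generation(vdev);
 *              ... copy the multi-byte field(s) ...
 *      } while (gen != vdev->config->generation(vdev));
 */
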
/*
 * vp_modern_generation - get the device generation
 * @mdev: the modern virtio-pci device
 *
 * Returns the generation read from device
 */
static u32 vp_modern_generation(struct virtio_pci_modern_device *mdev)
{
        struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

        return vp_ioread8(&cfg->config_generation);
}

static u32 vp_generation(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);

        return vp_modern_generation(&vp_dev->mdev);
}

/*
 * vp_modern_get_status - get the device status
 * @mdev: the modern virtio-pci device
 *
 * Returns the status read from device
 */
static u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev)
{
        struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

        return vp_ioread8(&cfg->device_status);
}

/* config->{get,set}_status() implementations */
static u8 vp_get_status(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);

        return vp_modern_get_status(&vp_dev->mdev);
}

/*
 * vp_modern_set_status - set status to device
 * @mdev: the modern virtio-pci device
 * @status: the status to set to the device
 */
static void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
                                 u8 status)
{
        struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

        vp_iowrite8(status, &cfg->device_status);
}

static void vp_set_status(struct virtio_device *vdev, u8 status)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);

        /* We should never be setting status to 0. */
        BUG_ON(status == 0);
        vp_modern_set_status(&vp_dev->mdev, status);
}

static void vp_reset(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        struct virtio_pci_modern_device *mdev = &vp_dev->mdev;

        /* 0 status means a reset. */
        vp_modern_set_status(mdev, 0);
        /* After writing 0 to device_status, the driver MUST wait for a read of
         * device_status to return 0 before reinitializing the device.
         * This will flush out the status write, and flush in device writes,
         * including MSI-X interrupts, if any.
         */
        while (vp_modern_get_status(mdev))
                msleep(1);
        /* Flush pending VQ/configuration callbacks. */
        vp_synchronize_vectors(vdev);
}

/*
 * vp_modern_config_vector - set the vector for config interrupt
 * @mdev: the modern virtio-pci device
 * @vector: the config vector
 *
 * Returns the config vector read from the device
 */
static u16 vp_modern_config_vector(struct virtio_pci_modern_device *mdev,
                                   u16 vector)
{
        struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

        /* Setup the vector used for configuration events */
        vp_iowrite16(vector, &cfg->msix_config);
        /* Verify we had enough resources to assign the vector */
        /* Will also flush the write out to device */
        return vp_ioread16(&cfg->msix_config);
}

static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
{
        return vp_modern_config_vector(&vp_dev->mdev, vector);
}

static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
                                  struct virtio_pci_vq_info *info,
                                  unsigned index,
                                  void (*callback)(struct virtqueue *vq),
                                  const char *name,
                                  bool ctx,
                                  u16 msix_vec)
{
        struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
        struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
        struct virtqueue *vq;
        u16 num, off;
        int err;

        if (index >= vp_modern_get_num_queues(mdev))
                return ERR_PTR(-ENOENT);

        /* Select the queue we're interested in */
        vp_iowrite16(index, &cfg->queue_select);

        /* Check if queue is either not available or already active. */
        num = vp_modern_get_queue_size(mdev, index);
        if (!num || vp_modern_get_queue_enable(mdev, index))
                return ERR_PTR(-ENOENT);

        if (num & (num - 1)) {
                dev_warn(&vp_dev->pci_dev->dev, "bad queue size %u", num);
                return ERR_PTR(-EINVAL);
        }

        /* get offset of notification word for this vq */
        off = vp_ioread16(&cfg->queue_notify_off);

        info->msix_vector = msix_vec;

        /* create the vring */
        vq = vring_create_virtqueue(index, num,
                                    SMP_CACHE_BYTES, &vp_dev->vdev,
                                    true, true, ctx,
                                    vp_notify, callback, name);
        if (!vq)
                return ERR_PTR(-ENOMEM);

        /* activate the queue */
        vp_modern_set_queue_size(mdev, index, virtqueue_get_vring_size(vq));
        vp_modern_queue_address(mdev, index, virtqueue_get_desc_addr(vq),
                                virtqueue_get_avail_addr(vq),
                                virtqueue_get_used_addr(vq));

        if (mdev->notify_base) {
                /* offset should not wrap */
                if ((u64)off * mdev->notify_offset_multiplier + 2
                    > mdev->notify_len) {
                        dev_warn(&mdev->pci_dev->dev,
                                 "bad notification offset %u (x %u) "
                                 "for queue %u > %zd",
                                 off, mdev->notify_offset_multiplier,
                                 index, mdev->notify_len);
                        err = -EINVAL;
                        goto err_map_notify;
                }
                vq->priv = (void __force *)mdev->notify_base +
                           off * mdev->notify_offset_multiplier;
        } else {
                vq->priv = (void __force *)map_capability(mdev->pci_dev,
                                          mdev->notify_map_cap, 2, 2,
                                          off * mdev->notify_offset_multiplier, 2,
                                          NULL);
        }

        if (!vq->priv) {
                err = -ENOMEM;
                goto err_map_notify;
        }

        if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
                msix_vec = vp_modern_queue_vector(mdev, index, msix_vec);
                if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
                        err = -EBUSY;
                        goto err_assign_vector;
                }
        }

        return vq;

err_assign_vector:
        if (!mdev->notify_base)
                pci_iounmap(mdev->pci_dev, (void __iomem __force *)vq->priv);
err_map_notify:
        vring_del_virtqueue(vq);
        return ERR_PTR(err);
}

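/*
 * A sketch of the bring-up sequence implemented by setup_vq() above and
 * vp_modern_find_vqs() below, following the virtio 1.0 spec:
 *
 *      1. write the index to queue_select;
 *      2. check queue_size and queue_enable (skip if unavailable/active);
 *      3. allocate the vring, then write its size and the
 *         desc/avail/used addresses;
 *      4. map the notification address and, with MSI-X, assign a vector;
 *      5. only once every queue is set up, write 1 to queue_enable.
 */
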
static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
                              struct virtqueue *vqs[],
                              vq_callback_t *callbacks[],
                              const char * const names[], const bool *ctx,
                              struct irq_affinity *desc)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        struct virtqueue *vq;
        int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names, ctx, desc);

        if (rc)
                return rc;

        /* Select and activate all queues. Has to be done last: once we do
         * this, there's no way to go back except reset (the spec does not
         * allow clearing queue_enable once it has been set).
         */
        list_for_each_entry(vq, &vdev->vqs, list)
                vp_modern_set_queue_enable(&vp_dev->mdev, vq->index, true);

        return 0;
}

static void del_vq(struct virtio_pci_vq_info *info)
{
        struct virtqueue *vq = info->vq;
        struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
        struct virtio_pci_modern_device *mdev = &vp_dev->mdev;

        if (vp_dev->msix_enabled)
                vp_modern_queue_vector(mdev, vq->index,
                                       VIRTIO_MSI_NO_VECTOR);

        if (!mdev->notify_base)
                pci_iounmap(mdev->pci_dev, (void __force __iomem *)vq->priv);

        vring_del_virtqueue(vq);
}

static int virtio_pci_find_shm_cap(struct pci_dev *dev, u8 required_id,
                                   u8 *bar, u64 *offset, u64 *len)
{
        int pos;

        for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR); pos > 0;
             pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
                u8 type, cap_len, id;
                u32 tmp32;
                u64 res_offset, res_length;

                pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
                                                         cfg_type), &type);
                if (type != VIRTIO_PCI_CAP_SHARED_MEMORY_CFG)
                        continue;

                pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
                                                         cap_len), &cap_len);
                if (cap_len != sizeof(struct virtio_pci_cap64)) {
                        dev_err(&dev->dev, "%s: shm cap with bad size offset:"
                                " %d size: %d\n", __func__, pos, cap_len);
                        continue;
                }

                pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
                                                         id), &id);
                if (id != required_id)
                        continue;

                /* Type and ID match, looks good */
                pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
                                                         bar), bar);

                /* Read the lower 32 bits of length and offset */
                pci_read_config_dword(dev, pos + offsetof(struct virtio_pci_cap,
                                                          offset), &tmp32);
                res_offset = tmp32;
                pci_read_config_dword(dev, pos + offsetof(struct virtio_pci_cap,
                                                          length), &tmp32);
                res_length = tmp32;

                /* and now the top half */
                pci_read_config_dword(dev,
                                      pos + offsetof(struct virtio_pci_cap64,
                                                     offset_hi), &tmp32);
                res_offset |= ((u64)tmp32) << 32;
                pci_read_config_dword(dev,
                                      pos + offsetof(struct virtio_pci_cap64,
                                                     length_hi), &tmp32);
                res_length |= ((u64)tmp32) << 32;

                *offset = res_offset;
                *len = res_length;

                return pos;
        }
        return 0;
}

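/*
 * The high halves above come from the 64-bit capability variant (a
 * sketch of struct virtio_pci_cap64 from <uapi/linux/virtio_pci.h>):
 *
 *      struct virtio_pci_cap64 {
 *              struct virtio_pci_cap cap;
 *              __le32 offset_hi;
 *              __le32 length_hi;
 *      };
 */
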
static bool vp_get_shm_region(struct virtio_device *vdev,
                              struct virtio_shm_region *region, u8 id)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        struct pci_dev *pci_dev = vp_dev->pci_dev;
        u8 bar;
        u64 offset, len;
        phys_addr_t phys_addr;
        size_t bar_len;

        if (!virtio_pci_find_shm_cap(pci_dev, id, &bar, &offset, &len))
                return false;

        phys_addr = pci_resource_start(pci_dev, bar);
        bar_len = pci_resource_len(pci_dev, bar);

        if ((offset + len) < offset) {
                dev_err(&pci_dev->dev, "%s: cap offset+len overflow detected\n",
                        __func__);
                return false;
        }

        if (offset + len > bar_len) {
                dev_err(&pci_dev->dev, "%s: bar shorter than cap offset+len\n",
                        __func__);
                return false;
        }

        region->len = len;
        region->addr = (u64) phys_addr + offset;

        return true;
}

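/*
 * A usage sketch from a caller's side (virtio-fs, for instance, locates
 * its DAX window roughly this way; the id constant is the caller's own):
 *
 *      struct virtio_shm_region region;
 *
 *      if (!virtio_get_shm_region(vdev, &region, VIRTIO_FS_SHMCAP_ID_CACHE))
 *              return -ENXIO;
 *      // region.addr and region.len now describe the shared memory window
 */
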
static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
        .get = NULL,
        .set = NULL,
        .generation = vp_generation,
        .get_status = vp_get_status,
        .set_status = vp_set_status,
        .reset = vp_reset,
        .find_vqs = vp_modern_find_vqs,
        .del_vqs = vp_del_vqs,
        .get_features = vp_get_features,
        .finalize_features = vp_finalize_features,
        .bus_name = vp_bus_name,
        .set_vq_affinity = vp_set_vq_affinity,
        .get_vq_affinity = vp_get_vq_affinity,
        .get_shm_region = vp_get_shm_region,
};

static const struct virtio_config_ops virtio_pci_config_ops = {
        .get = vp_get,
        .set = vp_set,
        .generation = vp_generation,
        .get_status = vp_get_status,
        .set_status = vp_set_status,
        .reset = vp_reset,
        .find_vqs = vp_modern_find_vqs,
        .del_vqs = vp_del_vqs,
        .get_features = vp_get_features,
        .finalize_features = vp_finalize_features,
        .bus_name = vp_bus_name,
        .set_vq_affinity = vp_set_vq_affinity,
        .get_vq_affinity = vp_get_vq_affinity,
        .get_shm_region = vp_get_shm_region,
};

/**
 * virtio_pci_find_capability - walk capabilities to find device info.
 * @dev: the pci device
 * @cfg_type: the VIRTIO_PCI_CAP_* value we seek
 * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO.
 * @bars: the bitmask of BARs
 *
 * Returns offset of the capability, or 0.
 */
static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type,
                                             u32 ioresource_types, int *bars)
{
        int pos;

        for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
             pos > 0;
             pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
                u8 type, bar;

                pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
                                                         cfg_type),
                                     &type);
                pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
                                                         bar),
                                     &bar);

                /* Ignore structures with reserved BAR values */
                if (bar > 0x5)
                        continue;

                if (type == cfg_type) {
                        if (pci_resource_len(dev, bar) &&
                            pci_resource_flags(dev, bar) & ioresource_types) {
                                *bars |= (1 << bar);
                                return pos;
                        }
                }
        }
        return 0;
}

/* This is part of the ABI. Don't screw with it. */
static inline void check_offsets(void)
{
        /* Note: disk space was harmed in compilation of this function. */
        BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR !=
                     offsetof(struct virtio_pci_cap, cap_vndr));
        BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT !=
                     offsetof(struct virtio_pci_cap, cap_next));
        BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN !=
                     offsetof(struct virtio_pci_cap, cap_len));
        BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE !=
                     offsetof(struct virtio_pci_cap, cfg_type));
        BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR !=
                     offsetof(struct virtio_pci_cap, bar));
        BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET !=
                     offsetof(struct virtio_pci_cap, offset));
        BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH !=
                     offsetof(struct virtio_pci_cap, length));
        BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT !=
                     offsetof(struct virtio_pci_notify_cap,
                              notify_off_multiplier));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT !=
                     offsetof(struct virtio_pci_common_cfg,
                              device_feature_select));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF !=
                     offsetof(struct virtio_pci_common_cfg, device_feature));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT !=
                     offsetof(struct virtio_pci_common_cfg,
                              guest_feature_select));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF !=
                     offsetof(struct virtio_pci_common_cfg, guest_feature));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX !=
                     offsetof(struct virtio_pci_common_cfg, msix_config));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ !=
                     offsetof(struct virtio_pci_common_cfg, num_queues));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS !=
                     offsetof(struct virtio_pci_common_cfg, device_status));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION !=
                     offsetof(struct virtio_pci_common_cfg, config_generation));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT !=
                     offsetof(struct virtio_pci_common_cfg, queue_select));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE !=
                     offsetof(struct virtio_pci_common_cfg, queue_size));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX !=
                     offsetof(struct virtio_pci_common_cfg, queue_msix_vector));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE !=
                     offsetof(struct virtio_pci_common_cfg, queue_enable));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF !=
                     offsetof(struct virtio_pci_common_cfg, queue_notify_off));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO !=
                     offsetof(struct virtio_pci_common_cfg, queue_desc_lo));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI !=
                     offsetof(struct virtio_pci_common_cfg, queue_desc_hi));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO !=
                     offsetof(struct virtio_pci_common_cfg, queue_avail_lo));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI !=
                     offsetof(struct virtio_pci_common_cfg, queue_avail_hi));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO !=
                     offsetof(struct virtio_pci_common_cfg, queue_used_lo));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI !=
                     offsetof(struct virtio_pci_common_cfg, queue_used_hi));
}

/*
 * vp_modern_probe - probe the modern virtio pci device
 * @mdev: the modern virtio-pci device
 *
 * Note that the caller is required to enable the PCI device before
 * calling this function.
 *
 * Returns 0 on success, or a negative errno on failure
 */
static int vp_modern_probe(struct virtio_pci_modern_device *mdev)
{
        struct pci_dev *pci_dev = mdev->pci_dev;
        int err, common, isr, notify, device;
        u32 notify_length;
        u32 notify_offset;

        check_offsets();

        mdev->pci_dev = pci_dev;

        /* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */
        if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f)
                return -ENODEV;

        if (pci_dev->device < 0x1040) {
                /* Transitional devices: use the PCI subsystem device id as
                 * virtio device id, same as legacy driver always did.
                 */
                mdev->id.device = pci_dev->subsystem_device;
        } else {
                /* Modern devices: simply use PCI device id, but start from 0x1040. */
                mdev->id.device = pci_dev->device - 0x1040;
        }
        mdev->id.vendor = pci_dev->subsystem_vendor;

        /* check for a common config: if not, use legacy mode (bar 0). */
        common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
                                            IORESOURCE_IO | IORESOURCE_MEM,
                                            &mdev->modern_bars);
        if (!common) {
                dev_info(&pci_dev->dev,
                         "virtio_pci: leaving for legacy driver\n");
                return -ENODEV;
        }

        /* If common is there, these should be too... */
        isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG,
                                         IORESOURCE_IO | IORESOURCE_MEM,
                                         &mdev->modern_bars);
        notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG,
                                            IORESOURCE_IO | IORESOURCE_MEM,
                                            &mdev->modern_bars);
        if (!isr || !notify) {
                dev_err(&pci_dev->dev,
                        "virtio_pci: missing capabilities %i/%i/%i\n",
                        common, isr, notify);
                return -EINVAL;
        }

        err = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
        if (err)
                err = dma_set_mask_and_coherent(&pci_dev->dev,
                                                DMA_BIT_MASK(32));
        if (err)
                dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");

        /* Device capability is only mandatory for devices that have
         * device-specific configuration.
         */
        device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG,
                                            IORESOURCE_IO | IORESOURCE_MEM,
                                            &mdev->modern_bars);

        err = pci_request_selected_regions(pci_dev, mdev->modern_bars,
                                           "virtio-pci-modern");
        if (err)
                return err;

        err = -EINVAL;
        mdev->common = map_capability(pci_dev, common,
                                      sizeof(struct virtio_pci_common_cfg), 4,
                                      0, sizeof(struct virtio_pci_common_cfg),
                                      NULL);
        if (!mdev->common)
                goto err_map_common;
        mdev->isr = map_capability(pci_dev, isr, sizeof(u8), 1,
                                   0, 1,
                                   NULL);
        if (!mdev->isr)
                goto err_map_isr;

        /* Read notify_off_multiplier from config space. */
        pci_read_config_dword(pci_dev,
                              notify + offsetof(struct virtio_pci_notify_cap,
                                                notify_off_multiplier),
                              &mdev->notify_offset_multiplier);
        /* Read notify length and offset from config space. */
        pci_read_config_dword(pci_dev,
                              notify + offsetof(struct virtio_pci_notify_cap,
                                                cap.length),
                              &notify_length);

        pci_read_config_dword(pci_dev,
                              notify + offsetof(struct virtio_pci_notify_cap,
                                                cap.offset),
                              &notify_offset);

        /* We don't know how many VQs we'll map ahead of time.
         * If notify length is small, map it all now.
         * Otherwise, map each VQ individually later.
         */
        if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) {
                mdev->notify_base = map_capability(pci_dev, notify, 2, 2,
                                                   0, notify_length,
                                                   &mdev->notify_len);
                if (!mdev->notify_base)
                        goto err_map_notify;
        } else {
                mdev->notify_map_cap = notify;
        }

        /* Again, we don't know how much we should map, but PAGE_SIZE
         * is more than enough for all existing devices.
         */
        if (device) {
                mdev->device = map_capability(pci_dev, device, 0, 4,
                                              0, PAGE_SIZE,
                                              &mdev->device_len);
                if (!mdev->device)
                        goto err_map_device;
        }

        return 0;

err_map_device:
        if (mdev->notify_base)
                pci_iounmap(pci_dev, mdev->notify_base);
err_map_notify:
        pci_iounmap(pci_dev, mdev->isr);
err_map_isr:
        pci_iounmap(pci_dev, mdev->common);
err_map_common:
        return err;
}

/* the PCI probing function */
int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
{
        struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
        struct pci_dev *pci_dev = vp_dev->pci_dev;
        int err;

        mdev->pci_dev = pci_dev;

        err = vp_modern_probe(mdev);
        if (err)
                return err;

        if (mdev->device)
                vp_dev->vdev.config = &virtio_pci_config_ops;
        else
                vp_dev->vdev.config = &virtio_pci_config_nodev_ops;

        vp_dev->config_vector = vp_config_vector;
        vp_dev->setup_vq = setup_vq;
        vp_dev->del_vq = del_vq;
        vp_dev->isr = mdev->isr;
        vp_dev->vdev.id = mdev->id;

        return 0;
}

/*
 * vp_modern_remove - remove and clean up the modern virtio pci device
 * @mdev: the modern virtio-pci device
 */
static void vp_modern_remove(struct virtio_pci_modern_device *mdev)
{
        struct pci_dev *pci_dev = mdev->pci_dev;

        if (mdev->device)
                pci_iounmap(pci_dev, mdev->device);
        if (mdev->notify_base)
                pci_iounmap(pci_dev, mdev->notify_base);
        pci_iounmap(pci_dev, mdev->isr);
        pci_iounmap(pci_dev, mdev->common);
        pci_release_selected_regions(pci_dev, mdev->modern_bars);
}

void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
{
        struct virtio_pci_modern_device *mdev = &vp_dev->mdev;

        vp_modern_remove(mdev);
}