// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/virtio_pci_modern.h>
#include <linux/module.h>
#include <linux/pci.h>

/*
 * vp_modern_map_capability - map a part of a virtio pci capability
 * @mdev: the modern virtio-pci device
 * @off: offset of the capability
 * @minlen: minimal length of the capability
 * @align: alignment requirement
 * @start: start offset within the capability
 * @size: map size
 * @len: the length that is actually mapped
 *
 * Returns the io address for the part of the capability
 */
static void __iomem *
vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off,
			 size_t minlen, u32 align, u32 start, u32 size,
			 size_t *len)
{
	struct pci_dev *dev = mdev->pci_dev;
	u8 bar;
	u32 offset, length;
	void __iomem *p;

	pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap,
						 bar),
			     &bar);
	pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset),
			      &offset);
	pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length),
			      &length);

	if (length <= start) {
		dev_err(&dev->dev,
			"virtio_pci: bad capability len %u (>%u expected)\n",
			length, start);
		return NULL;
	}

	if (length - start < minlen) {
		dev_err(&dev->dev,
			"virtio_pci: bad capability len %u (>=%zu expected)\n",
			length, minlen);
		return NULL;
	}

	length -= start;

	if (start + offset < offset) {
		dev_err(&dev->dev,
			"virtio_pci: map wrap-around %u+%u\n",
			start, offset);
		return NULL;
	}

	offset += start;

	if (offset & (align - 1)) {
		dev_err(&dev->dev,
			"virtio_pci: offset %u not aligned to %u\n",
			offset, align);
		return NULL;
	}

	if (length > size)
		length = size;

	if (len)
		*len = length;

	if (minlen + offset < minlen ||
	    minlen + offset > pci_resource_len(dev, bar)) {
		dev_err(&dev->dev,
			"virtio_pci: map virtio %zu@%u "
			"out of range on bar %i length %lu\n",
			minlen, offset,
			bar, (unsigned long)pci_resource_len(dev, bar));
		return NULL;
	}

	p = pci_iomap_range(dev, bar, offset, length);
	if (!p)
		dev_err(&dev->dev,
			"virtio_pci: unable to map virtio %u@%u on bar %i\n",
			length, offset, bar);
	return p;
}
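
/*
 * Illustrative sketch (not compiled, not part of the original file): a
 * hypothetical caller mapping the first 4 bytes of a capability found at
 * config-space offset 'cap'. The helper validates the capability length
 * against @minlen/@start, checks alignment and BAR bounds, and only then
 * calls pci_iomap_range().
 */
#if 0
static void __iomem *
example_map_capability(struct virtio_pci_modern_device *mdev, int cap)
{
	/* minlen = 4, align = 4, start = 0, size = 4, mapped length unused */
	return vp_modern_map_capability(mdev, cap, 4, 4, 0, 4, NULL);
}
#endif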

/**
 * virtio_pci_find_capability - walk capabilities to find device info.
 * @dev: the pci device
 * @cfg_type: the VIRTIO_PCI_CAP_* value we seek
 * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO.
 * @bars: the bitmask of BARs, updated with the BAR the capability uses
 *
 * Returns offset of the capability, or 0.
 */
static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type,
					     u32 ioresource_types, int *bars)
{
	int pos;

	for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
	     pos > 0;
	     pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
		u8 type, bar;
		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 cfg_type),
				     &type);
		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 bar),
				     &bar);

		/* Ignore structures with reserved BAR values */
		if (bar > 0x5)
			continue;

		if (type == cfg_type) {
			if (pci_resource_len(dev, bar) &&
			    pci_resource_flags(dev, bar) & ioresource_types) {
				*bars |= (1 << bar);
				return pos;
			}
		}
	}
	return 0;
}

/* This is part of the ABI. Don't screw with it. */
static inline void check_offsets(void)
{
	/* Note: disk space was harmed in compilation of this function. */
	BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR !=
		     offsetof(struct virtio_pci_cap, cap_vndr));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT !=
		     offsetof(struct virtio_pci_cap, cap_next));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN !=
		     offsetof(struct virtio_pci_cap, cap_len));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE !=
		     offsetof(struct virtio_pci_cap, cfg_type));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR !=
		     offsetof(struct virtio_pci_cap, bar));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET !=
		     offsetof(struct virtio_pci_cap, offset));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH !=
		     offsetof(struct virtio_pci_cap, length));
	BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT !=
		     offsetof(struct virtio_pci_notify_cap,
			      notify_off_multiplier));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT !=
		     offsetof(struct virtio_pci_common_cfg,
			      device_feature_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF !=
		     offsetof(struct virtio_pci_common_cfg, device_feature));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT !=
		     offsetof(struct virtio_pci_common_cfg,
			      guest_feature_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF !=
		     offsetof(struct virtio_pci_common_cfg, guest_feature));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX !=
		     offsetof(struct virtio_pci_common_cfg, msix_config));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ !=
		     offsetof(struct virtio_pci_common_cfg, num_queues));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS !=
		     offsetof(struct virtio_pci_common_cfg, device_status));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION !=
		     offsetof(struct virtio_pci_common_cfg, config_generation));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT !=
		     offsetof(struct virtio_pci_common_cfg, queue_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE !=
		     offsetof(struct virtio_pci_common_cfg, queue_size));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX !=
		     offsetof(struct virtio_pci_common_cfg, queue_msix_vector));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE !=
		     offsetof(struct virtio_pci_common_cfg, queue_enable));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF !=
		     offsetof(struct virtio_pci_common_cfg, queue_notify_off));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_desc_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_desc_hi));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_avail_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_avail_hi));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_used_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_used_hi));
}

/*
 * vp_modern_probe: probe the modern virtio pci device, note that the
 * caller is required to enable the PCI device before calling this function.
 * @mdev: the modern virtio-pci device
 *
 * Returns 0 on success, a negative error code on failure
 */
int vp_modern_probe(struct virtio_pci_modern_device *mdev)
{
	struct pci_dev *pci_dev = mdev->pci_dev;
	int err, common, isr, notify, device;
	u32 notify_length;
	u32 notify_offset;

	check_offsets();

	mdev->pci_dev = pci_dev;

	/* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */
	if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f)
		return -ENODEV;

	if (pci_dev->device < 0x1040) {
		/* Transitional devices: use the PCI subsystem device id as
		 * virtio device id, same as legacy driver always did.
		 */
		mdev->id.device = pci_dev->subsystem_device;
	} else {
		/* Modern devices: simply use PCI device id, but start from 0x1040. */
		mdev->id.device = pci_dev->device - 0x1040;
	}
	mdev->id.vendor = pci_dev->subsystem_vendor;

	/* check for a common config: if not, use legacy mode (bar 0). */
	common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM,
					    &mdev->modern_bars);
	if (!common) {
		dev_info(&pci_dev->dev,
			 "virtio_pci: leaving for legacy driver\n");
		return -ENODEV;
	}

	/* If common is there, these should be too... */
	isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG,
					 IORESOURCE_IO | IORESOURCE_MEM,
					 &mdev->modern_bars);
	notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM,
					    &mdev->modern_bars);
	if (!isr || !notify) {
		dev_err(&pci_dev->dev,
			"virtio_pci: missing capabilities %i/%i/%i\n",
			common, isr, notify);
		return -EINVAL;
	}

	err = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
	if (err)
		err = dma_set_mask_and_coherent(&pci_dev->dev,
						DMA_BIT_MASK(32));
	if (err)
		dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");

	/* Device capability is only mandatory for devices that have
	 * device-specific configuration.
	 */
	device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM,
					    &mdev->modern_bars);

	err = pci_request_selected_regions(pci_dev, mdev->modern_bars,
					   "virtio-pci-modern");
	if (err)
		return err;

	err = -EINVAL;
	mdev->common = vp_modern_map_capability(mdev, common,
				      sizeof(struct virtio_pci_common_cfg), 4,
				      0, sizeof(struct virtio_pci_common_cfg),
				      NULL);
	if (!mdev->common)
		goto err_map_common;
	mdev->isr = vp_modern_map_capability(mdev, isr, sizeof(u8), 1,
					     0, 1,
					     NULL);
	if (!mdev->isr)
		goto err_map_isr;

	/* Read notify_off_multiplier from config space. */
	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						notify_off_multiplier),
			      &mdev->notify_offset_multiplier);
	/* Read notify length and offset from config space. */
	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						cap.length),
			      &notify_length);

	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						cap.offset),
			      &notify_offset);

	/* We don't know how many VQs we'll map ahead of time.
	 * If notify length is small, map it all now.
	 * Otherwise, map each VQ individually later.
	 */
	if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) {
		mdev->notify_base = vp_modern_map_capability(mdev, notify,
							     2, 2,
							     0, notify_length,
							     &mdev->notify_len);
		if (!mdev->notify_base)
			goto err_map_notify;
	} else {
		mdev->notify_map_cap = notify;
	}

	/* Again, we don't know how much we should map, but PAGE_SIZE
	 * is more than enough for all existing devices.
	 */
	if (device) {
		mdev->device = vp_modern_map_capability(mdev, device, 0, 4,
							0, PAGE_SIZE,
							&mdev->device_len);
		if (!mdev->device)
			goto err_map_device;
	}

	return 0;

err_map_device:
	if (mdev->notify_base)
		pci_iounmap(pci_dev, mdev->notify_base);
err_map_notify:
	pci_iounmap(pci_dev, mdev->isr);
err_map_isr:
	pci_iounmap(pci_dev, mdev->common);
err_map_common:
	return err;
}
EXPORT_SYMBOL_GPL(vp_modern_probe);
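
/*
 * Illustrative sketch (not compiled): the calling sequence a hypothetical
 * driver built on this library would use. As noted above, the PCI device
 * must be enabled before vp_modern_probe() is called; vp_modern_remove()
 * (below) undoes the mappings at teardown.
 */
#if 0
static int example_driver_probe(struct pci_dev *pci_dev,
				struct virtio_pci_modern_device *mdev)
{
	int err;

	err = pci_enable_device(pci_dev);
	if (err)
		return err;

	mdev->pci_dev = pci_dev;
	err = vp_modern_probe(mdev);
	if (err)
		pci_disable_device(pci_dev);
	return err;
}
#endif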

/*
 * vp_modern_remove - remove and cleanup the modern virtio pci device
 * @mdev: the modern virtio-pci device
 */
void vp_modern_remove(struct virtio_pci_modern_device *mdev)
{
	struct pci_dev *pci_dev = mdev->pci_dev;

	if (mdev->device)
		pci_iounmap(pci_dev, mdev->device);
	if (mdev->notify_base)
		pci_iounmap(pci_dev, mdev->notify_base);
	pci_iounmap(pci_dev, mdev->isr);
	pci_iounmap(pci_dev, mdev->common);
	pci_release_selected_regions(pci_dev, mdev->modern_bars);
}
EXPORT_SYMBOL_GPL(vp_modern_remove);

/*
 * vp_modern_get_features - get features from device
 * @mdev: the modern virtio-pci device
 *
 * Returns the features read from the device
 */
u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
	u64 features;

	vp_iowrite32(0, &cfg->device_feature_select);
	features = vp_ioread32(&cfg->device_feature);
	vp_iowrite32(1, &cfg->device_feature_select);
	features |= ((u64)vp_ioread32(&cfg->device_feature) << 32);

	return features;
}
EXPORT_SYMBOL_GPL(vp_modern_get_features);

/*
 * vp_modern_set_features - set features to device
 * @mdev: the modern virtio-pci device
 * @features: the features to set to the device
 */
void vp_modern_set_features(struct virtio_pci_modern_device *mdev,
			    u64 features)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	vp_iowrite32(0, &cfg->guest_feature_select);
	vp_iowrite32((u32)features, &cfg->guest_feature);
	vp_iowrite32(1, &cfg->guest_feature_select);
	vp_iowrite32(features >> 32, &cfg->guest_feature);
}
EXPORT_SYMBOL_GPL(vp_modern_set_features);
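
/*
 * Illustrative sketch (not compiled): minimal feature negotiation with the
 * two helpers above. VIRTIO_F_VERSION_1 comes from
 * <uapi/linux/virtio_config.h>; the driver-supported mask used here is a
 * hypothetical example.
 */
#if 0
static int example_negotiate_features(struct virtio_pci_modern_device *mdev)
{
	u64 offered = vp_modern_get_features(mdev);
	u64 supported = BIT_ULL(VIRTIO_F_VERSION_1);	/* driver's mask */

	/* A modern device must offer VIRTIO_F_VERSION_1. */
	if (!(offered & BIT_ULL(VIRTIO_F_VERSION_1)))
		return -EINVAL;

	/* Ack only the intersection of device and driver features. */
	vp_modern_set_features(mdev, offered & supported);
	return 0;
}
#endif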

/*
 * vp_modern_generation - get the device generation
 * @mdev: the modern virtio-pci device
 *
 * Returns the generation read from device
 */
u32 vp_modern_generation(struct virtio_pci_modern_device *mdev)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	return vp_ioread8(&cfg->config_generation);
}
EXPORT_SYMBOL_GPL(vp_modern_generation);

/*
 * vp_modern_get_status - get the device status
 * @mdev: the modern virtio-pci device
 *
 * Returns the status read from the device
 */
u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	return vp_ioread8(&cfg->device_status);
}
EXPORT_SYMBOL_GPL(vp_modern_get_status);

/*
 * vp_modern_set_status - set status to device
 * @mdev: the modern virtio-pci device
 * @status: the status to set to the device
 */
void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
			  u8 status)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	vp_iowrite8(status, &cfg->device_status);
}
EXPORT_SYMBOL_GPL(vp_modern_set_status);

/*
 * vp_modern_queue_vector - set the MSIX vector for a specific virtqueue
 * @mdev: the modern virtio-pci device
 * @index: queue index
 * @vector: the MSIX vector to use for this virtqueue
 *
 * Returns the vector read back from the device
 */
u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev,
			   u16 index, u16 vector)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	vp_iowrite16(index, &cfg->queue_select);
	vp_iowrite16(vector, &cfg->queue_msix_vector);
	/* Flush the write out to device */
	return vp_ioread16(&cfg->queue_msix_vector);
}
EXPORT_SYMBOL_GPL(vp_modern_queue_vector);

/*
 * vp_modern_config_vector - set the vector for config interrupt
 * @mdev: the modern virtio-pci device
 * @vector: the config vector
 *
 * Returns the config vector read from the device
 */
u16 vp_modern_config_vector(struct virtio_pci_modern_device *mdev,
			    u16 vector)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	/* Setup the vector used for configuration events */
	vp_iowrite16(vector, &cfg->msix_config);
	/* Verify we had enough resources to assign the vector */
	/* Will also flush the write out to device */
	return vp_ioread16(&cfg->msix_config);
}
EXPORT_SYMBOL_GPL(vp_modern_config_vector);
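
/*
 * Illustrative sketch (not compiled): both vector helpers return the value
 * read back from the device, so a caller detects vector-allocation failure
 * by comparing against what it wrote; devices report VIRTIO_MSI_NO_VECTOR
 * (from <uapi/linux/virtio_pci.h>) when they could not reserve the vector.
 */
#if 0
static int example_set_config_vector(struct virtio_pci_modern_device *mdev,
				     u16 vector)
{
	if (vp_modern_config_vector(mdev, vector) != vector)
		return -EBUSY;
	return 0;
}
#endif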

/*
 * vp_modern_queue_address - set the virtqueue address
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 * @desc_addr: address of the descriptor area
 * @driver_addr: address of the driver area
 * @device_addr: address of the device area
 */
void vp_modern_queue_address(struct virtio_pci_modern_device *mdev,
			     u16 index, u64 desc_addr, u64 driver_addr,
			     u64 device_addr)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	vp_iowrite16(index, &cfg->queue_select);

	vp_iowrite64_twopart(desc_addr, &cfg->queue_desc_lo,
			     &cfg->queue_desc_hi);
	vp_iowrite64_twopart(driver_addr, &cfg->queue_avail_lo,
			     &cfg->queue_avail_hi);
	vp_iowrite64_twopart(device_addr, &cfg->queue_used_lo,
			     &cfg->queue_used_hi);
}
EXPORT_SYMBOL_GPL(vp_modern_queue_address);
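
/*
 * Illustrative sketch (not compiled): a typical virtqueue bring-up built
 * from the helpers in this file. The DMA addresses are assumed to come
 * from a previously allocated vring; the ordering (size and addresses
 * before enable) follows the virtio spec.
 */
#if 0
static void example_setup_vq(struct virtio_pci_modern_device *mdev, u16 index,
			     u16 num, u64 desc, u64 driver, u64 device)
{
	vp_modern_set_queue_size(mdev, index, num);
	vp_modern_queue_address(mdev, index, desc, driver, device);
	vp_modern_set_queue_enable(mdev, index, true);
}
#endif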

/*
 * vp_modern_set_queue_enable - enable/disable a virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 * @enable: whether the virtqueue should be enabled
 */
void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev,
				u16 index, bool enable)
{
	vp_iowrite16(index, &mdev->common->queue_select);
	vp_iowrite16(enable, &mdev->common->queue_enable);
}
EXPORT_SYMBOL_GPL(vp_modern_set_queue_enable);

/*
 * vp_modern_get_queue_enable - check whether a virtqueue is enabled
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 *
 * Returns whether a virtqueue is enabled or not
 */
bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev,
				u16 index)
{
	vp_iowrite16(index, &mdev->common->queue_select);

	return vp_ioread16(&mdev->common->queue_enable);
}
EXPORT_SYMBOL_GPL(vp_modern_get_queue_enable);

/*
 * vp_modern_set_queue_size - set size for a virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 * @size: the size of the virtqueue
 */
void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev,
			      u16 index, u16 size)
{
	vp_iowrite16(index, &mdev->common->queue_select);
	vp_iowrite16(size, &mdev->common->queue_size);
}
EXPORT_SYMBOL_GPL(vp_modern_set_queue_size);

/*
 * vp_modern_get_queue_size - get size for a virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 *
 * Returns the size of the virtqueue
 */
u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev,
			     u16 index)
{
	vp_iowrite16(index, &mdev->common->queue_select);

	return vp_ioread16(&mdev->common->queue_size);
}
EXPORT_SYMBOL_GPL(vp_modern_get_queue_size);

/*
 * vp_modern_get_num_queues - get the number of virtqueues
 * @mdev: the modern virtio-pci device
 *
 * Returns the number of virtqueues
 */
u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev)
{
	return vp_ioread16(&mdev->common->num_queues);
}
EXPORT_SYMBOL_GPL(vp_modern_get_num_queues);

/*
 * vp_modern_get_queue_notify_off - get notification offset for a virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 *
 * Returns the notification offset for a virtqueue
 */
static u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev,
					  u16 index)
{
	vp_iowrite16(index, &mdev->common->queue_select);

	return vp_ioread16(&mdev->common->queue_notify_off);
}

/*
 * vp_modern_map_vq_notify - map notification area for a specific virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 *
 * Returns the address of the notification area
 */
void *vp_modern_map_vq_notify(struct virtio_pci_modern_device *mdev,
			      u16 index)
{
	u16 off = vp_modern_get_queue_notify_off(mdev, index);

	if (mdev->notify_base) {
		/* offset should not wrap */
		if ((u64)off * mdev->notify_offset_multiplier + 2
			> mdev->notify_len) {
			dev_warn(&mdev->pci_dev->dev,
				 "bad notification offset %u (x %u) "
				 "for queue %u > %zd",
				 off, mdev->notify_offset_multiplier,
				 index, mdev->notify_len);
			return NULL;
		}
		return (void __force *)mdev->notify_base +
			off * mdev->notify_offset_multiplier;
	} else {
		return (void __force *)vp_modern_map_capability(mdev,
				       mdev->notify_map_cap, 2, 2,
				       off * mdev->notify_offset_multiplier, 2,
				       NULL);
	}
}
EXPORT_SYMBOL_GPL(vp_modern_map_vq_notify);
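
/*
 * Illustrative sketch (not compiled): once the notification area is
 * mapped, a driver kicks the device by writing the queue index to it
 * (the split-ring notification format from the virtio spec).
 */
#if 0
static void example_kick_vq(struct virtio_pci_modern_device *mdev, u16 index)
{
	void *addr = vp_modern_map_vq_notify(mdev, index);

	if (addr)
		vp_iowrite16(index, (void __force __iomem *)addr);
}
#endif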

MODULE_VERSION("0.1");
MODULE_DESCRIPTION("Modern Virtio PCI Device");
MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
MODULE_LICENSE("GPL");