virtio_pci: Use the DMA API if enabled
This switches to vring_create_virtqueue, simplifying the driver and
adding DMA API support.
This fixes virtio-pci on platforms and buses that have IOMMUs. This
will break the experimental QEMU Q35 IOMMU support until QEMU is
fixed. In exchange, it fixes physical virtio hardware as well as
virtio-pci running under Xen.
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
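For reviewers, the heart of the change is replacing the driver's own ring
allocation with vring_create_virtqueue(), which lets the vring core obtain
the ring memory through the DMA API when the transport needs it, and then
programming the device with the ring's DMA address rather than a
virt_to_phys() result. A rough before/after sketch of setup_vq() (trimmed;
the boolean-argument comments are my reading of vring_create_virtqueue()'s
weak_barriers and may_reduce_num parameters, not part of the hunk below):

	/* Before: the driver allocated the ring itself and handed the
	 * guest-physical address straight to the device, bypassing any
	 * IOMMU that sits in the path.
	 */
	info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
	iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
		  vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
	vq = vring_new_virtqueue(index, info->num, VIRTIO_PCI_VRING_ALIGN,
				 &vp_dev->vdev, true, info->queue,
				 vp_notify, callback, name);

	/* After: the vring core allocates the ring (via the DMA API when
	 * appropriate) and the device is programmed with the ring's DMA
	 * address.
	 */
	vq = vring_create_virtqueue(index, num, VIRTIO_PCI_VRING_ALIGN,
				    &vp_dev->vdev,
				    true,   /* weak_barriers */
				    false,  /* may_reduce_num */
				    vp_notify, callback, name);
	iowrite32(virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
		  vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);

One consequence, visible in the error paths below, is that the driver no
longer frees ring pages itself: once the queue has been deactivated,
vring_del_virtqueue() releases whatever vring_create_virtqueue() allocated.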
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index 48bc979..8c4e6178 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -119,7 +119,6 @@
u16 msix_vec)
{
struct virtqueue *vq;
- unsigned long size;
u16 num;
int err;
@@ -131,27 +130,19 @@
if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN))
return ERR_PTR(-ENOENT);
- info->num = num;
info->msix_vector = msix_vec;
- size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN));
- info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO);
- if (info->queue == NULL)
+ /* create the vring */
+ vq = vring_create_virtqueue(index, num,
+ VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev,
+ true, false, vp_notify, callback, name);
+ if (!vq)
return ERR_PTR(-ENOMEM);
/* activate the queue */
- iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
+ iowrite32(virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
- /* create the vring */
- vq = vring_new_virtqueue(index, info->num,
- VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev,
- true, info->queue, vp_notify, callback, name);
- if (!vq) {
- err = -ENOMEM;
- goto out_activate_queue;
- }
-
vq->priv = (void __force *)vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;
if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
@@ -159,17 +150,15 @@
msix_vec = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
err = -EBUSY;
- goto out_assign;
+ goto out_deactivate;
}
}
return vq;
-out_assign:
- vring_del_virtqueue(vq);
-out_activate_queue:
+out_deactivate:
iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
- free_pages_exact(info->queue, size);
+ vring_del_virtqueue(vq);
return ERR_PTR(err);
}
@@ -177,7 +166,6 @@
{
struct virtqueue *vq = info->vq;
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
- unsigned long size;
iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
@@ -188,13 +176,10 @@
ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);
}
- vring_del_virtqueue(vq);
-
/* Select and deactivate the queue */
iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
- size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN));
- free_pages_exact(info->queue, size);
+ vring_del_virtqueue(vq);
}
static const struct virtio_config_ops virtio_pci_config_ops = {
@@ -227,6 +212,13 @@
return -ENODEV;
}
+ rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
+ if (rc)
+ rc = dma_set_mask_and_coherent(&pci_dev->dev,
+ DMA_BIT_MASK(32));
+ if (rc)
+ dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
+
rc = pci_request_region(pci_dev, 0, "virtio-pci-legacy");
if (rc)
return rc;