// SPDX-License-Identifier: GPL-2.0-only
/*
 * VIRTIO-based driver for vDPA device
 *
 * Copyright (c) 2020, Red Hat. All rights reserved.
 * Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include <linux/virtio.h>
#include <linux/vdpa.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#define MOD_VERSION "0.1"
#define MOD_AUTHOR "Jason Wang <jasowang@redhat.com>"
#define MOD_DESC "vDPA bus driver for virtio devices"
#define MOD_LICENSE "GPL v2"

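/*
 * Glue between a vDPA device and the virtio device it exposes: the
 * virtio core only sees the embedded struct virtio_device, and
 * container_of() gets back to the wrapper.
 */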
struct virtio_vdpa_device {
	struct virtio_device vdev;
	struct vdpa_device *vdpa;
	u64 features;

	/* The lock to protect virtqueue list */
	spinlock_t lock;
	/* List of virtio_vdpa_vq_info */
	struct list_head virtqueues;
};

struct virtio_vdpa_vq_info {
	/* the actual virtqueue */
	struct virtqueue *vq;

	/* the list node for the virtqueues list */
	struct list_head node;
};

static inline struct virtio_vdpa_device *
to_virtio_vdpa_device(struct virtio_device *dev)
{
	return container_of(dev, struct virtio_vdpa_device, vdev);
}

static struct vdpa_device *vd_get_vdpa(struct virtio_device *vdev)
{
	return to_virtio_vdpa_device(vdev)->vdpa;
}

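/*
 * Config space accessors: reads go through the vdpa_get_config()
 * helper, writes are passed straight to the parent driver's
 * set_config op.
 */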
static void virtio_vdpa_get(struct virtio_device *vdev, unsigned int offset,
			    void *buf, unsigned int len)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);

	vdpa_get_config(vdpa, offset, buf, len);
}

static void virtio_vdpa_set(struct virtio_device *vdev, unsigned int offset,
			    const void *buf, unsigned int len)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);
	const struct vdpa_config_ops *ops = vdpa->config;

	ops->set_config(vdpa, offset, buf, len);
}

static u32 virtio_vdpa_generation(struct virtio_device *vdev)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);
	const struct vdpa_config_ops *ops = vdpa->config;

	if (ops->get_generation)
		return ops->get_generation(vdpa);

	return 0;
}

static u8 virtio_vdpa_get_status(struct virtio_device *vdev)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);
	const struct vdpa_config_ops *ops = vdpa->config;

	return ops->get_status(vdpa);
}

static void virtio_vdpa_set_status(struct virtio_device *vdev, u8 status)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);
	const struct vdpa_config_ops *ops = vdpa->config;

	ops->set_status(vdpa, status);
}

static void virtio_vdpa_reset(struct virtio_device *vdev)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);

	vdpa_reset(vdpa);
}

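/* Kick the device: called by virtio_ring when buffers are made available */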
static bool virtio_vdpa_notify(struct virtqueue *vq)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vq->vdev);
	const struct vdpa_config_ops *ops = vdpa->config;

	ops->kick_vq(vdpa, vq->index);

	return true;
}

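/*
 * Callback trampolines, invoked by the vDPA parent driver (typically
 * from its interrupt handlers): forward config-changed and used-buffer
 * notifications to the virtio core.
 */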
static irqreturn_t virtio_vdpa_config_cb(void *private)
{
	struct virtio_vdpa_device *vd_dev = private;

	virtio_config_changed(&vd_dev->vdev);

	return IRQ_HANDLED;
}

static irqreturn_t virtio_vdpa_virtqueue_cb(void *private)
{
	struct virtio_vdpa_vq_info *info = private;

	return vring_interrupt(0, info->vq);
}

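/*
 * Create a virtqueue and plumb it into the vDPA device: allocate the
 * vring, program its descriptor, driver (avail) and device (used)
 * area addresses into the device, install the used-buffer callback
 * and finally mark the queue ready.
 */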
static struct virtqueue *
virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
		     void (*callback)(struct virtqueue *vq),
		     const char *name, bool ctx)
{
	struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);
	const struct vdpa_config_ops *ops = vdpa->config;
	struct virtio_vdpa_vq_info *info;
	struct vdpa_callback cb;
	struct virtqueue *vq;
	u64 desc_addr, driver_addr, device_addr;
	unsigned long flags;
	u32 align, num;
	int err;

	if (!name)
		return NULL;

	/* Queue shouldn't already be set up. */
	if (ops->get_vq_ready(vdpa, index))
		return ERR_PTR(-ENOENT);

	/* Allocate and fill out our active queue description */
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	num = ops->get_vq_num_max(vdpa);
	if (num == 0) {
		err = -ENOENT;
		goto error_new_virtqueue;
	}

	/* Create the vring */
	align = ops->get_vq_align(vdpa);
	vq = vring_create_virtqueue(index, num, align, vdev,
				    true, true, ctx,
				    virtio_vdpa_notify, callback, name);
	if (!vq) {
		err = -ENOMEM;
		goto error_new_virtqueue;
	}

	/* Set up the virtqueue callback */
	cb.callback = virtio_vdpa_virtqueue_cb;
	cb.private = info;
	ops->set_vq_cb(vdpa, index, &cb);
	ops->set_vq_num(vdpa, index, virtqueue_get_vring_size(vq));

	desc_addr = virtqueue_get_desc_addr(vq);
	driver_addr = virtqueue_get_avail_addr(vq);
	device_addr = virtqueue_get_used_addr(vq);

	if (ops->set_vq_address(vdpa, index,
				desc_addr, driver_addr,
				device_addr)) {
		err = -EINVAL;
		goto err_vq;
	}

	ops->set_vq_ready(vdpa, index, 1);

	vq->priv = info;
	info->vq = vq;

	spin_lock_irqsave(&vd_dev->lock, flags);
	list_add(&info->node, &vd_dev->virtqueues);
	spin_unlock_irqrestore(&vd_dev->lock, flags);

	return vq;

err_vq:
	vring_del_virtqueue(vq);
error_new_virtqueue:
	ops->set_vq_ready(vdpa, index, 0);
	/* The vDPA driver should make sure the vq is stopped here */
	WARN_ON(ops->get_vq_ready(vdpa, index));
	kfree(info);
	return ERR_PTR(err);
}

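/*
 * Tear down a single virtqueue: unlink it from the bookkeeping list,
 * deactivate the queue in the device, then free the vring and the
 * per-queue info.
 */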
static void virtio_vdpa_del_vq(struct virtqueue *vq)
{
	struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vq->vdev);
	struct vdpa_device *vdpa = vd_dev->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct virtio_vdpa_vq_info *info = vq->priv;
	unsigned int index = vq->index;
	unsigned long flags;

	spin_lock_irqsave(&vd_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vd_dev->lock, flags);

	/* Select and deactivate the queue (best effort) */
	ops->set_vq_ready(vdpa, index, 0);

	vring_del_virtqueue(vq);

	kfree(info);
}

static void virtio_vdpa_del_vqs(struct virtio_device *vdev)
{
	struct virtqueue *vq, *n;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		virtio_vdpa_del_vq(vq);
}

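/*
 * Set up all requested virtqueues. Slots with a NULL name are skipped
 * and consume no device queue index; the config-changed callback is
 * registered once every queue is live.
 */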
static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
				struct virtqueue *vqs[],
				vq_callback_t *callbacks[],
				const char * const names[],
				const bool *ctx,
				struct irq_affinity *desc)
{
	struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_callback cb;
	int i, err, queue_idx = 0;

	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		vqs[i] = virtio_vdpa_setup_vq(vdev, queue_idx++,
					      callbacks[i], names[i], ctx ?
					      ctx[i] : false);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto err_setup_vq;
		}
	}

	cb.callback = virtio_vdpa_config_cb;
	cb.private = vd_dev;
	ops->set_config_cb(vdpa, &cb);

	return 0;

err_setup_vq:
	virtio_vdpa_del_vqs(vdev);
	return err;
}

static u64 virtio_vdpa_get_features(struct virtio_device *vdev)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);
	const struct vdpa_config_ops *ops = vdpa->config;

	return ops->get_features(vdpa);
}

static int virtio_vdpa_finalize_features(struct virtio_device *vdev)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	return vdpa_set_features(vdpa, vdev->features);
}

static const char *virtio_vdpa_bus_name(struct virtio_device *vdev)
{
	struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
	struct vdpa_device *vdpa = vd_dev->vdpa;

	return dev_name(&vdpa->dev);
}

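/* virtio_config_ops backed by the parent device's vdpa_config_ops */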
static const struct virtio_config_ops virtio_vdpa_config_ops = {
	.get = virtio_vdpa_get,
	.set = virtio_vdpa_set,
	.generation = virtio_vdpa_generation,
	.get_status = virtio_vdpa_get_status,
	.set_status = virtio_vdpa_set_status,
	.reset = virtio_vdpa_reset,
	.find_vqs = virtio_vdpa_find_vqs,
	.del_vqs = virtio_vdpa_del_vqs,
	.get_features = virtio_vdpa_get_features,
	.finalize_features = virtio_vdpa_finalize_features,
	.bus_name = virtio_vdpa_bus_name,
};

static void virtio_vdpa_release_dev(struct device *_d)
{
	struct virtio_device *vdev =
	       container_of(_d, struct virtio_device, dev);
	struct virtio_vdpa_device *vd_dev =
	       container_of(vdev, struct virtio_vdpa_device, vdev);

	kfree(vd_dev);
}

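/*
 * Bus probe: wrap the vDPA device in a virtio device and register it
 * with the virtio core. The device the vDPA parent uses for DMA also
 * becomes the parent of the virtio device, so vring allocations are
 * mapped against the right device.
 */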
static int virtio_vdpa_probe(struct vdpa_device *vdpa)
{
	const struct vdpa_config_ops *ops = vdpa->config;
	struct virtio_vdpa_device *vd_dev, *reg_dev = NULL;
	int ret = -EINVAL;

	vd_dev = kzalloc(sizeof(*vd_dev), GFP_KERNEL);
	if (!vd_dev)
		return -ENOMEM;

	vd_dev->vdev.dev.parent = vdpa_get_dma_dev(vdpa);
	vd_dev->vdev.dev.release = virtio_vdpa_release_dev;
	vd_dev->vdev.config = &virtio_vdpa_config_ops;
	vd_dev->vdpa = vdpa;
	INIT_LIST_HEAD(&vd_dev->virtqueues);
	spin_lock_init(&vd_dev->lock);

	vd_dev->vdev.id.device = ops->get_device_id(vdpa);
	if (vd_dev->vdev.id.device == 0)
		goto err;

	vd_dev->vdev.id.vendor = ops->get_vendor_id(vdpa);
	ret = register_virtio_device(&vd_dev->vdev);
	reg_dev = vd_dev;
	if (ret)
		goto err;

	vdpa_set_drvdata(vdpa, vd_dev);

	return 0;

err:
	if (reg_dev)
		put_device(&vd_dev->vdev.dev);
	else
		kfree(vd_dev);
	return ret;
}

static void virtio_vdpa_remove(struct vdpa_device *vdpa)
{
	struct virtio_vdpa_device *vd_dev = vdpa_get_drvdata(vdpa);

	unregister_virtio_device(&vd_dev->vdev);
}

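/*
 * No ID table here: virtio device/vendor IDs are read back in probe()
 * above, and ID matching happens at the virtio layer once the device
 * is registered.
 */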
static struct vdpa_driver virtio_vdpa_driver = {
	.driver = {
		.name = "virtio_vdpa",
	},
	.probe = virtio_vdpa_probe,
	.remove = virtio_vdpa_remove,
};

module_vdpa_driver(virtio_vdpa_driver);

MODULE_VERSION(MOD_VERSION);
MODULE_LICENSE(MOD_LICENSE);
MODULE_AUTHOR(MOD_AUTHOR);
MODULE_DESCRIPTION(MOD_DESC);