// SPDX-License-Identifier: GPL-2.0
/*
 * Virtio driver for the paravirtualized IOMMU
 *
 * Copyright (C) 2019 Arm Limited
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/amba/bus.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/freezer.h>
#include <linux/interval_tree.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ids.h>
#include <linux/wait.h>

#include <uapi/linux/virtio_iommu.h>

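/*
 * Fallback MSI doorbell window: when the device doesn't advertise a bypass MSI
 * region of its own, viommu_get_resv_regions() reports this software-mapped
 * IOVA range instead.
 */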
#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

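/*
 * The request virtqueue carries commands (ATTACH, MAP, UNMAP, PROBE) that the
 * driver sends and waits on synchronously. The event virtqueue carries
 * device-reported faults, handled by viommu_event_handler().
 */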
#define VIOMMU_REQUEST_VQ		0
#define VIOMMU_EVENT_VQ			1
#define VIOMMU_NR_VQS			2

struct viommu_dev {
	struct iommu_device		iommu;
	struct device			*dev;
	struct virtio_device		*vdev;

	struct ida			domain_ids;

	struct virtqueue		*vqs[VIOMMU_NR_VQS];
	spinlock_t			request_lock;
	struct list_head		requests;
	void				*evts;

	/* Device configuration */
	struct iommu_domain_geometry	geometry;
	u64				pgsize_bitmap;
	u32				first_domain;
	u32				last_domain;
	/* Supported MAP flags */
	u32				map_flags;
	u32				probe_size;
};

struct viommu_mapping {
	phys_addr_t			paddr;
	struct interval_tree_node	iova;
	u32				flags;
};

struct viommu_domain {
	struct iommu_domain		domain;
	struct viommu_dev		*viommu;
	struct mutex			mutex; /* protects viommu pointer */
	unsigned int			id;
	u32				map_flags;

	spinlock_t			mappings_lock;
	struct rb_root_cached		mappings;

	unsigned long			nr_endpoints;
};

struct viommu_endpoint {
	struct device			*dev;
	struct viommu_dev		*viommu;
	struct viommu_domain		*vdomain;
	struct list_head		resv_regions;
};

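/*
 * A queued request: buf[] holds the full message. The first @write_offset
 * bytes are device-readable, the remainder (ending with a struct
 * virtio_iommu_req_tail) is device-writable. When @writeback is set, the
 * device-written part is copied back to the caller's buffer on completion.
 */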
struct viommu_request {
	struct list_head		list;
	void				*writeback;
	unsigned int			write_offset;
	unsigned int			len;
	char				buf[];
};

#define VIOMMU_FAULT_RESV_MASK		0xffffff00

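/*
 * An event buffer overlays the start of the fault record: a buffer with any
 * VIOMMU_FAULT_RESV_MASK bit set in @head doesn't look like a valid fault and
 * is ignored by viommu_event_handler().
 */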
struct viommu_event {
	union {
		u32			head;
		struct virtio_iommu_fault fault;
	};
};

#define to_viommu_domain(domain)	\
	container_of(domain, struct viommu_domain, domain)

static int viommu_get_req_errno(void *buf, size_t len)
{
	struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);

	switch (tail->status) {
	case VIRTIO_IOMMU_S_OK:
		return 0;
	case VIRTIO_IOMMU_S_UNSUPP:
		return -ENOSYS;
	case VIRTIO_IOMMU_S_INVAL:
		return -EINVAL;
	case VIRTIO_IOMMU_S_RANGE:
		return -ERANGE;
	case VIRTIO_IOMMU_S_NOENT:
		return -ENOENT;
	case VIRTIO_IOMMU_S_FAULT:
		return -EFAULT;
	case VIRTIO_IOMMU_S_NOMEM:
		return -ENOMEM;
	case VIRTIO_IOMMU_S_IOERR:
	case VIRTIO_IOMMU_S_DEVERR:
	default:
		return -EIO;
	}
}

static void viommu_set_req_status(void *buf, size_t len, int status)
{
	struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);

	tail->status = status;
}

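/*
 * Return the offset of the device-writable part of the request: everything
 * from there on, including the tail, is filled in by the device. A PROBE
 * request also has a device-writable properties area before the tail.
 */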
static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu,
					  struct virtio_iommu_req_head *req,
					  size_t len)
{
	size_t tail_size = sizeof(struct virtio_iommu_req_tail);

	if (req->type == VIRTIO_IOMMU_T_PROBE)
		return len - viommu->probe_size - tail_size;

	return len - tail_size;
}

/*
 * __viommu_sync_req - Complete all in-flight requests
 *
 * Wait for all added requests to complete. When this function returns, all
 * requests that were in-flight at the time of the call have completed.
 */
static int __viommu_sync_req(struct viommu_dev *viommu)
{
	unsigned int len;
	size_t write_len;
	struct viommu_request *req;
	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];

	assert_spin_locked(&viommu->request_lock);

	virtqueue_kick(vq);

	while (!list_empty(&viommu->requests)) {
		len = 0;
		req = virtqueue_get_buf(vq, &len);
		if (!req)
			continue;

		if (!len)
			viommu_set_req_status(req->buf, req->len,
					      VIRTIO_IOMMU_S_IOERR);

		write_len = req->len - req->write_offset;
		if (req->writeback && len == write_len)
			memcpy(req->writeback, req->buf + req->write_offset,
			       write_len);

		list_del(&req->list);
		kfree(req);
	}

	return 0;
}

static int viommu_sync_req(struct viommu_dev *viommu)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);
	ret = __viommu_sync_req(viommu);
	if (ret)
		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
	spin_unlock_irqrestore(&viommu->request_lock, flags);

	return ret;
}


/*
 * __viommu_add_request - Add one request to the queue
 * @buf: pointer to the request buffer
 * @len: length of the request buffer
 * @writeback: copy data back to the buffer when the request completes.
 *
 * Add a request to the queue. Only synchronize the queue if it's already full.
 * Otherwise don't kick the queue nor wait for requests to complete.
 *
 * When @writeback is true, data written by the device, including the request
 * status, is copied into @buf after the request completes. This is unsafe if
 * the caller allocates @buf on stack and drops the lock between add_req() and
 * sync_req().
 *
 * Return 0 if the request was successfully added to the queue.
 */
static int __viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len,
			    bool writeback)
{
	int ret;
	off_t write_offset;
	struct viommu_request *req;
	struct scatterlist top_sg, bottom_sg;
	struct scatterlist *sg[2] = { &top_sg, &bottom_sg };
	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];

	assert_spin_locked(&viommu->request_lock);

	write_offset = viommu_get_write_desc_offset(viommu, buf, len);
	if (write_offset <= 0)
		return -EINVAL;

	req = kzalloc(sizeof(*req) + len, GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	req->len = len;
	if (writeback) {
		req->writeback = buf + write_offset;
		req->write_offset = write_offset;
	}
	memcpy(&req->buf, buf, write_offset);

	sg_init_one(&top_sg, req->buf, write_offset);
	sg_init_one(&bottom_sg, req->buf + write_offset, len - write_offset);

	ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		/* If the queue is full, sync and retry */
		if (!__viommu_sync_req(viommu))
			ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
	}
	if (ret)
		goto err_free;

	list_add_tail(&req->list, &viommu->requests);
	return 0;

err_free:
	kfree(req);
	return ret;
}

static int viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);
	ret = __viommu_add_req(viommu, buf, len, false);
	if (ret)
		dev_dbg(viommu->dev, "could not add request: %d\n", ret);
	spin_unlock_irqrestore(&viommu->request_lock, flags);

	return ret;
}

/*
 * Send a request and wait for it to complete. Return the request status (as an
 * errno)
 */
static int viommu_send_req_sync(struct viommu_dev *viommu, void *buf,
				size_t len)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);

	ret = __viommu_add_req(viommu, buf, len, true);
	if (ret) {
		dev_dbg(viommu->dev, "could not add request (%d)\n", ret);
		goto out_unlock;
	}

	ret = __viommu_sync_req(viommu);
	if (ret) {
		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
		/* Fall-through (get the actual request status) */
	}

	ret = viommu_get_req_errno(buf, len);
out_unlock:
	spin_unlock_irqrestore(&viommu->request_lock, flags);
	return ret;
}
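
/*
 * Illustrative sketch (not a driver entry point): a MAP request built the same
 * way viommu_replay_mappings() builds one, sent synchronously. The device
 * fills in the trailing virtio_iommu_req_tail, which viommu_get_req_errno()
 * converts into an errno:
 *
 *	struct virtio_iommu_req_map map = {
 *		.head.type	= VIRTIO_IOMMU_T_MAP,
 *		.domain		= cpu_to_le32(vdomain->id),
 *		.virt_start	= cpu_to_le64(iova),
 *		.virt_end	= cpu_to_le64(iova + size - 1),
 *		.phys_start	= cpu_to_le64(paddr),
 *		.flags		= cpu_to_le32(VIRTIO_IOMMU_MAP_F_READ),
 *	};
 *	ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
 */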

/*
 * viommu_add_mapping - add a mapping to the internal tree
 *
 * On success, return 0. Otherwise return -ENOMEM.
 */
static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova,
			      phys_addr_t paddr, size_t size, u32 flags)
{
	unsigned long irqflags;
	struct viommu_mapping *mapping;

	mapping = kzalloc(sizeof(*mapping), GFP_ATOMIC);
	if (!mapping)
		return -ENOMEM;

	mapping->paddr		= paddr;
	mapping->iova.start	= iova;
	mapping->iova.last	= iova + size - 1;
	mapping->flags		= flags;

	spin_lock_irqsave(&vdomain->mappings_lock, irqflags);
	interval_tree_insert(&mapping->iova, &vdomain->mappings);
	spin_unlock_irqrestore(&vdomain->mappings_lock, irqflags);

	return 0;
}

/*
 * viommu_del_mappings - remove mappings from the internal tree
 *
 * @vdomain: the domain
 * @iova: start of the range
 * @size: size of the range. A size of 0 corresponds to the entire address
 *	space.
 *
 * On success, returns the number of unmapped bytes (>= size)
 */
static size_t viommu_del_mappings(struct viommu_domain *vdomain,
				  unsigned long iova, size_t size)
{
	size_t unmapped = 0;
	unsigned long flags;
	unsigned long last = iova + size - 1;
	struct viommu_mapping *mapping = NULL;
	struct interval_tree_node *node, *next;

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	next = interval_tree_iter_first(&vdomain->mappings, iova, last);
	while (next) {
		node = next;
		mapping = container_of(node, struct viommu_mapping, iova);
		next = interval_tree_iter_next(node, iova, last);

		/* Trying to split a mapping? */
		if (mapping->iova.start < iova)
			break;

		/*
		 * Virtio-iommu doesn't allow UNMAP to split a mapping created
		 * with a single MAP request, so remove the full mapping.
		 */
		unmapped += mapping->iova.last - mapping->iova.start + 1;

		interval_tree_remove(node, &vdomain->mappings);
		kfree(mapping);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return unmapped;
}

/*
 * viommu_replay_mappings - re-send MAP requests
 *
 * When reattaching a domain that was previously detached from all endpoints,
 * mappings were deleted from the device. Re-create the mappings available in
 * the internal tree.
 */
static int viommu_replay_mappings(struct viommu_domain *vdomain)
{
	int ret = 0;
	unsigned long flags;
	struct viommu_mapping *mapping;
	struct interval_tree_node *node;
	struct virtio_iommu_req_map map;

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	node = interval_tree_iter_first(&vdomain->mappings, 0, -1UL);
	while (node) {
		mapping = container_of(node, struct viommu_mapping, iova);
		map = (struct virtio_iommu_req_map) {
			.head.type	= VIRTIO_IOMMU_T_MAP,
			.domain		= cpu_to_le32(vdomain->id),
			.virt_start	= cpu_to_le64(mapping->iova.start),
			.virt_end	= cpu_to_le64(mapping->iova.last),
			.phys_start	= cpu_to_le64(mapping->paddr),
			.flags		= cpu_to_le32(mapping->flags),
		};

		ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
		if (ret)
			break;

		node = interval_tree_iter_next(node, 0, -1UL);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return ret;
}

static int viommu_add_resv_mem(struct viommu_endpoint *vdev,
			       struct virtio_iommu_probe_resv_mem *mem,
			       size_t len)
{
	size_t size;
	u64 start64, end64;
	phys_addr_t start, end;
	struct iommu_resv_region *region = NULL;
	unsigned long prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	start = start64 = le64_to_cpu(mem->start);
	end = end64 = le64_to_cpu(mem->end);
	size = end64 - start64 + 1;

	/* Catch any overflow, including the unlikely end64 - start64 + 1 = 0 */
	if (start != start64 || end != end64 || size < end64 - start64)
		return -EOVERFLOW;

	if (len < sizeof(*mem))
		return -EINVAL;

	switch (mem->subtype) {
	default:
		dev_warn(vdev->dev, "unknown resv mem subtype 0x%x\n",
			 mem->subtype);
		/* Fall-through */
	case VIRTIO_IOMMU_RESV_MEM_T_RESERVED:
		region = iommu_alloc_resv_region(start, size, 0,
						 IOMMU_RESV_RESERVED);
		break;
	case VIRTIO_IOMMU_RESV_MEM_T_MSI:
		region = iommu_alloc_resv_region(start, size, prot,
						 IOMMU_RESV_MSI);
		break;
	}
	if (!region)
		return -ENOMEM;

	list_add(&region->list, &vdev->resv_regions);
	return 0;
}

static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev)
{
	int ret;
	u16 type, len;
	size_t cur = 0;
	size_t probe_len;
	struct virtio_iommu_req_probe *probe;
	struct virtio_iommu_probe_property *prop;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);

	if (!fwspec->num_ids)
		return -EINVAL;

	probe_len = sizeof(*probe) + viommu->probe_size +
		    sizeof(struct virtio_iommu_req_tail);
	probe = kzalloc(probe_len, GFP_KERNEL);
	if (!probe)
		return -ENOMEM;

	probe->head.type = VIRTIO_IOMMU_T_PROBE;
	/*
	 * For now, assume that properties of an endpoint that outputs multiple
	 * IDs are consistent. Only probe the first one.
	 */
	probe->endpoint = cpu_to_le32(fwspec->ids[0]);

	ret = viommu_send_req_sync(viommu, probe, probe_len);
	if (ret)
		goto out_free;

	prop = (void *)probe->properties;
	type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK;

	while (type != VIRTIO_IOMMU_PROBE_T_NONE &&
	       cur < viommu->probe_size) {
		len = le16_to_cpu(prop->length) + sizeof(*prop);

		switch (type) {
		case VIRTIO_IOMMU_PROBE_T_RESV_MEM:
			ret = viommu_add_resv_mem(vdev, (void *)prop, len);
			break;
		default:
			dev_err(dev, "unknown viommu prop 0x%x\n", type);
		}

		if (ret)
			dev_err(dev, "failed to parse viommu prop 0x%x\n", type);

		cur += len;
		if (cur >= viommu->probe_size)
			break;

		prop = (void *)probe->properties + cur;
		type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK;
	}

out_free:
	kfree(probe);
	return ret;
}

static int viommu_fault_handler(struct viommu_dev *viommu,
				struct virtio_iommu_fault *fault)
{
	char *reason_str;

	u8 reason	= fault->reason;
	u32 flags	= le32_to_cpu(fault->flags);
	u32 endpoint	= le32_to_cpu(fault->endpoint);
	u64 address	= le64_to_cpu(fault->address);

	switch (reason) {
	case VIRTIO_IOMMU_FAULT_R_DOMAIN:
		reason_str = "domain";
		break;
	case VIRTIO_IOMMU_FAULT_R_MAPPING:
		reason_str = "page";
		break;
	case VIRTIO_IOMMU_FAULT_R_UNKNOWN:
	default:
		reason_str = "unknown";
		break;
	}

	/* TODO: find EP by ID and report_iommu_fault */
	if (flags & VIRTIO_IOMMU_FAULT_F_ADDRESS)
		dev_err_ratelimited(viommu->dev, "%s fault from EP %u at %#llx [%s%s%s]\n",
				    reason_str, endpoint, address,
				    flags & VIRTIO_IOMMU_FAULT_F_READ ? "R" : "",
				    flags & VIRTIO_IOMMU_FAULT_F_WRITE ? "W" : "",
				    flags & VIRTIO_IOMMU_FAULT_F_EXEC ? "X" : "");
	else
		dev_err_ratelimited(viommu->dev, "%s fault from EP %u\n",
				    reason_str, endpoint);
	return 0;
}

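/*
 * Drain the event queue: log each fault and immediately re-post the buffer so
 * the device always has somewhere to write the next event.
 */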
static void viommu_event_handler(struct virtqueue *vq)
{
	int ret;
	unsigned int len;
	struct scatterlist sg[1];
	struct viommu_event *evt;
	struct viommu_dev *viommu = vq->vdev->priv;

	while ((evt = virtqueue_get_buf(vq, &len)) != NULL) {
		if (len > sizeof(*evt)) {
			dev_err(viommu->dev,
				"invalid event buffer (len %u > %zu)\n",
				len, sizeof(*evt));
		} else if (!(evt->head & VIOMMU_FAULT_RESV_MASK)) {
			viommu_fault_handler(viommu, &evt->fault);
		}

		sg_init_one(sg, evt, sizeof(*evt));
		ret = virtqueue_add_inbuf(vq, sg, 1, evt, GFP_ATOMIC);
		if (ret)
			dev_err(viommu->dev, "could not add event buffer\n");
	}

	virtqueue_kick(vq);
}

/* IOMMU API */

static struct iommu_domain *viommu_domain_alloc(unsigned type)
{
	struct viommu_domain *vdomain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
	if (!vdomain)
		return NULL;

	mutex_init(&vdomain->mutex);
	spin_lock_init(&vdomain->mappings_lock);
	vdomain->mappings = RB_ROOT_CACHED;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&vdomain->domain)) {
		kfree(vdomain);
		return NULL;
	}

	return &vdomain->domain;
}

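/*
 * Bind the domain to a specific viommu instance. Deferred until the first
 * attach, because viommu_domain_alloc() has no device, hence no viommu, to
 * work with.
 */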
static int viommu_domain_finalise(struct viommu_endpoint *vdev,
				  struct iommu_domain *domain)
{
	int ret;
	unsigned long viommu_page_size;
	struct viommu_dev *viommu = vdev->viommu;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	viommu_page_size = 1UL << __ffs(viommu->pgsize_bitmap);
	if (viommu_page_size > PAGE_SIZE) {
		dev_err(vdev->dev,
			"granule 0x%lx larger than system page size 0x%lx\n",
			viommu_page_size, PAGE_SIZE);
		return -EINVAL;
	}

	ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
			      viommu->last_domain, GFP_KERNEL);
	if (ret < 0)
		return ret;

	vdomain->id		= (unsigned int)ret;

	domain->pgsize_bitmap	= viommu->pgsize_bitmap;
	domain->geometry	= viommu->geometry;

	vdomain->map_flags	= viommu->map_flags;
	vdomain->viommu		= viommu;

	return 0;
}

static void viommu_domain_free(struct iommu_domain *domain)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	iommu_put_dma_cookie(domain);

	/* Free all remaining mappings (size 2^64) */
	viommu_del_mappings(vdomain, 0, 0);

	if (vdomain->viommu)
		ida_free(&vdomain->viommu->domain_ids, vdomain->id);

	kfree(vdomain);
}

static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int i;
	int ret = 0;
	struct virtio_iommu_req_attach req;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	mutex_lock(&vdomain->mutex);
	if (!vdomain->viommu) {
		/*
		 * Properly initialize the domain now that we know which viommu
		 * owns it.
		 */
		ret = viommu_domain_finalise(vdev, domain);
	} else if (vdomain->viommu != vdev->viommu) {
		dev_err(dev, "cannot attach to foreign vIOMMU\n");
		ret = -EXDEV;
	}
	mutex_unlock(&vdomain->mutex);

	if (ret)
		return ret;

	/*
	 * In the virtio-iommu device, when attaching the endpoint to a new
	 * domain, it is detached from the old one and, if as a result the
	 * old domain isn't attached to any endpoint, all mappings are removed
	 * from the old domain and it is freed.
	 *
	 * In the driver the old domain still exists, and its mappings will be
	 * recreated if it gets reattached to an endpoint. Otherwise it will be
	 * freed explicitly.
	 *
	 * vdev->vdomain is protected by group->mutex
	 */
	if (vdev->vdomain)
		vdev->vdomain->nr_endpoints--;

	req = (struct virtio_iommu_req_attach) {
		.head.type	= VIRTIO_IOMMU_T_ATTACH,
		.domain		= cpu_to_le32(vdomain->id),
	};

	for (i = 0; i < fwspec->num_ids; i++) {
		req.endpoint = cpu_to_le32(fwspec->ids[i]);

		ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req));
		if (ret)
			return ret;
	}

	if (!vdomain->nr_endpoints) {
		/*
		 * This endpoint is the first to be attached to the domain.
		 * Replay existing mappings (e.g. SW MSI).
		 */
		ret = viommu_replay_mappings(vdomain);
		if (ret)
			return ret;
	}

	vdomain->nr_endpoints++;
	vdev->vdomain = vdomain;

	return 0;
}

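/*
 * Record the mapping in the internal tree before sending it to the device, so
 * that viommu_replay_mappings() can restore it after a reattach. The tree
 * entry is removed again if the MAP request fails.
 */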
static int viommu_map(struct iommu_domain *domain, unsigned long iova,
		      phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	int ret;
	u32 flags;
	struct virtio_iommu_req_map map;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	flags = (prot & IOMMU_READ ? VIRTIO_IOMMU_MAP_F_READ : 0) |
		(prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) |
		(prot & IOMMU_MMIO ? VIRTIO_IOMMU_MAP_F_MMIO : 0);

	if (flags & ~vdomain->map_flags)
		return -EINVAL;

	ret = viommu_add_mapping(vdomain, iova, paddr, size, flags);
	if (ret)
		return ret;

	map = (struct virtio_iommu_req_map) {
		.head.type	= VIRTIO_IOMMU_T_MAP,
		.domain		= cpu_to_le32(vdomain->id),
		.virt_start	= cpu_to_le64(iova),
		.phys_start	= cpu_to_le64(paddr),
		.virt_end	= cpu_to_le64(iova + size - 1),
		.flags		= cpu_to_le32(flags),
	};

	if (!vdomain->nr_endpoints)
		return 0;

	ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
	if (ret)
		viommu_del_mappings(vdomain, iova, size);

	return ret;
}

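/*
 * UNMAP requests are only added to the request queue here; they are sent and
 * awaited in viommu_iotlb_sync(), so that several unmaps in a row can share a
 * single queue kick.
 */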
static size_t viommu_unmap(struct iommu_domain *domain, unsigned long iova,
			   size_t size, struct iommu_iotlb_gather *gather)
{
	int ret = 0;
	size_t unmapped;
	struct virtio_iommu_req_unmap unmap;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	unmapped = viommu_del_mappings(vdomain, iova, size);
	if (unmapped < size)
		return 0;

	/* Device already removed all mappings after detach. */
	if (!vdomain->nr_endpoints)
		return unmapped;

	unmap = (struct virtio_iommu_req_unmap) {
		.head.type	= VIRTIO_IOMMU_T_UNMAP,
		.domain		= cpu_to_le32(vdomain->id),
		.virt_start	= cpu_to_le64(iova),
		.virt_end	= cpu_to_le64(iova + unmapped - 1),
	};

	ret = viommu_add_req(vdomain->viommu, &unmap, sizeof(unmap));
	return ret ? 0 : unmapped;
}

static phys_addr_t viommu_iova_to_phys(struct iommu_domain *domain,
				       dma_addr_t iova)
{
	u64 paddr = 0;
	unsigned long flags;
	struct viommu_mapping *mapping;
	struct interval_tree_node *node;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	node = interval_tree_iter_first(&vdomain->mappings, iova, iova);
	if (node) {
		mapping = container_of(node, struct viommu_mapping, iova);
		paddr = mapping->paddr + (iova - mapping->iova.start);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return paddr;
}

static void viommu_iotlb_sync(struct iommu_domain *domain,
			      struct iommu_iotlb_gather *gather)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	viommu_sync_req(vdomain->viommu);
}

static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
{
	struct iommu_resv_region *entry, *new_entry, *msi = NULL;
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	list_for_each_entry(entry, &vdev->resv_regions, list) {
		if (entry->type == IOMMU_RESV_MSI)
			msi = entry;

		new_entry = kmemdup(entry, sizeof(*entry), GFP_KERNEL);
		if (!new_entry)
			return;
		list_add_tail(&new_entry->list, head);
	}

	/*
	 * If the device didn't register any bypass MSI window, add a
	 * software-mapped region.
	 */
	if (!msi) {
		msi = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
					      prot, IOMMU_RESV_SW_MSI);
		if (!msi)
			return;

		list_add_tail(&msi->list, head);
	}

	iommu_dma_get_resv_regions(dev, head);
}

static struct iommu_ops viommu_ops;
static struct virtio_driver virtio_iommu_drv;

static int viommu_match_node(struct device *dev, const void *data)
{
	return dev->parent->fwnode == data;
}

static struct viommu_dev *viommu_get_by_fwnode(struct fwnode_handle *fwnode)
{
	struct device *dev = driver_find_device(&virtio_iommu_drv.driver, NULL,
						fwnode, viommu_match_node);
	put_device(dev);

	return dev ? dev_to_virtio(dev)->priv : NULL;
}

static struct iommu_device *viommu_probe_device(struct device *dev)
{
	int ret;
	struct viommu_endpoint *vdev;
	struct viommu_dev *viommu = NULL;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (!fwspec || fwspec->ops != &viommu_ops)
		return ERR_PTR(-ENODEV);

	viommu = viommu_get_by_fwnode(fwspec->iommu_fwnode);
	if (!viommu)
		return ERR_PTR(-ENODEV);

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev)
		return ERR_PTR(-ENOMEM);

	vdev->dev = dev;
	vdev->viommu = viommu;
	INIT_LIST_HEAD(&vdev->resv_regions);
	dev_iommu_priv_set(dev, vdev);

	if (viommu->probe_size) {
		/* Get additional information for this endpoint */
		ret = viommu_probe_endpoint(viommu, dev);
		if (ret)
			goto err_free_dev;
	}

	return &viommu->iommu;

err_free_dev:
	generic_iommu_put_resv_regions(dev, &vdev->resv_regions);
	kfree(vdev);

	return ERR_PTR(ret);
}

static void viommu_release_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct viommu_endpoint *vdev;

	if (!fwspec || fwspec->ops != &viommu_ops)
		return;

	vdev = dev_iommu_priv_get(dev);

	generic_iommu_put_resv_regions(dev, &vdev->resv_regions);
	kfree(vdev);
}

static struct iommu_group *viommu_device_group(struct device *dev)
{
	if (dev_is_pci(dev))
		return pci_device_group(dev);
	else
		return generic_device_group(dev);
}

static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	return iommu_fwspec_add_ids(dev, args->args, 1);
}

static struct iommu_ops viommu_ops = {
	.domain_alloc		= viommu_domain_alloc,
	.domain_free		= viommu_domain_free,
	.attach_dev		= viommu_attach_dev,
	.map			= viommu_map,
	.unmap			= viommu_unmap,
	.iova_to_phys		= viommu_iova_to_phys,
	.iotlb_sync		= viommu_iotlb_sync,
	.probe_device		= viommu_probe_device,
	.release_device		= viommu_release_device,
	.device_group		= viommu_device_group,
	.get_resv_regions	= viommu_get_resv_regions,
	.put_resv_regions	= generic_iommu_put_resv_regions,
	.of_xlate		= viommu_of_xlate,
};

static int viommu_init_vqs(struct viommu_dev *viommu)
{
	struct virtio_device *vdev = dev_to_virtio(viommu->dev);
	const char *names[] = { "request", "event" };
	vq_callback_t *callbacks[] = {
		NULL, /* No async requests */
		viommu_event_handler,
	};

	return virtio_find_vqs(vdev, VIOMMU_NR_VQS, viommu->vqs, callbacks,
			       names, NULL);
}

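/*
 * Pre-post one event buffer per free descriptor so the device can report a
 * fault at any time; the buffers are devm-allocated and recycled by
 * viommu_event_handler().
 */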
static int viommu_fill_evtq(struct viommu_dev *viommu)
{
	int i, ret;
	struct scatterlist sg[1];
	struct viommu_event *evts;
	struct virtqueue *vq = viommu->vqs[VIOMMU_EVENT_VQ];
	size_t nr_evts = vq->num_free;

	viommu->evts = evts = devm_kmalloc_array(viommu->dev, nr_evts,
						 sizeof(*evts), GFP_KERNEL);
	if (!evts)
		return -ENOMEM;

	for (i = 0; i < nr_evts; i++) {
		sg_init_one(sg, &evts[i], sizeof(*evts));
		ret = virtqueue_add_inbuf(vq, sg, 1, &evts[i], GFP_KERNEL);
		if (ret)
			return ret;
	}

	return 0;
}

static int viommu_probe(struct virtio_device *vdev)
{
	struct device *parent_dev = vdev->dev.parent;
	struct viommu_dev *viommu = NULL;
	struct device *dev = &vdev->dev;
	u64 input_start = 0;
	u64 input_end = -1UL;
	int ret;

	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
	    !virtio_has_feature(vdev, VIRTIO_IOMMU_F_MAP_UNMAP))
		return -ENODEV;

	viommu = devm_kzalloc(dev, sizeof(*viommu), GFP_KERNEL);
	if (!viommu)
		return -ENOMEM;

	spin_lock_init(&viommu->request_lock);
	ida_init(&viommu->domain_ids);
	viommu->dev = dev;
	viommu->vdev = vdev;
	INIT_LIST_HEAD(&viommu->requests);

	ret = viommu_init_vqs(viommu);
	if (ret)
		return ret;

	virtio_cread_le(vdev, struct virtio_iommu_config, page_size_mask,
			&viommu->pgsize_bitmap);

	if (!viommu->pgsize_bitmap) {
		ret = -EINVAL;
		goto err_free_vqs;
	}

	viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
	viommu->last_domain = ~0U;

	/* Optional features */
	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
				struct virtio_iommu_config, input_range.start,
				&input_start);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
				struct virtio_iommu_config, input_range.end,
				&input_end);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
				struct virtio_iommu_config, domain_range.start,
				&viommu->first_domain);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
				struct virtio_iommu_config, domain_range.end,
				&viommu->last_domain);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_PROBE,
				struct virtio_iommu_config, probe_size,
				&viommu->probe_size);

	viommu->geometry = (struct iommu_domain_geometry) {
		.aperture_start	= input_start,
		.aperture_end	= input_end,
		.force_aperture	= true,
	};

	if (virtio_has_feature(vdev, VIRTIO_IOMMU_F_MMIO))
		viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO;

	viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;

	virtio_device_ready(vdev);

	/* Populate the event queue with buffers */
	ret = viommu_fill_evtq(viommu);
	if (ret)
		goto err_free_vqs;

	ret = iommu_device_sysfs_add(&viommu->iommu, dev, NULL, "%s",
				     virtio_bus_name(vdev));
	if (ret)
		goto err_free_vqs;

	iommu_device_set_ops(&viommu->iommu, &viommu_ops);
	iommu_device_set_fwnode(&viommu->iommu, parent_dev->fwnode);

	iommu_device_register(&viommu->iommu);

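	/*
	 * Claim each bus type we may translate for, unless another IOMMU
	 * driver already did.
	 */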
#ifdef CONFIG_PCI
	if (pci_bus_type.iommu_ops != &viommu_ops) {
		ret = bus_set_iommu(&pci_bus_type, &viommu_ops);
		if (ret)
			goto err_unregister;
	}
#endif
#ifdef CONFIG_ARM_AMBA
	if (amba_bustype.iommu_ops != &viommu_ops) {
		ret = bus_set_iommu(&amba_bustype, &viommu_ops);
		if (ret)
			goto err_unregister;
	}
#endif
	if (platform_bus_type.iommu_ops != &viommu_ops) {
		ret = bus_set_iommu(&platform_bus_type, &viommu_ops);
		if (ret)
			goto err_unregister;
	}

	vdev->priv = viommu;

	dev_info(dev, "input address: %u bits\n",
		 order_base_2(viommu->geometry.aperture_end));
	dev_info(dev, "page mask: %#llx\n", viommu->pgsize_bitmap);

	return 0;

err_unregister:
	iommu_device_sysfs_remove(&viommu->iommu);
	iommu_device_unregister(&viommu->iommu);
err_free_vqs:
	vdev->config->del_vqs(vdev);

	return ret;
}

static void viommu_remove(struct virtio_device *vdev)
{
	struct viommu_dev *viommu = vdev->priv;

	iommu_device_sysfs_remove(&viommu->iommu);
	iommu_device_unregister(&viommu->iommu);

	/* Stop all virtqueues */
	vdev->config->reset(vdev);
	vdev->config->del_vqs(vdev);

	dev_info(&vdev->dev, "device removed\n");
}

static void viommu_config_changed(struct virtio_device *vdev)
{
	dev_warn(&vdev->dev, "config changed\n");
}

static unsigned int features[] = {
	VIRTIO_IOMMU_F_MAP_UNMAP,
	VIRTIO_IOMMU_F_INPUT_RANGE,
	VIRTIO_IOMMU_F_DOMAIN_RANGE,
	VIRTIO_IOMMU_F_PROBE,
	VIRTIO_IOMMU_F_MMIO,
};

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_IOMMU, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static struct virtio_driver virtio_iommu_drv = {
	.driver.name		= KBUILD_MODNAME,
	.driver.owner		= THIS_MODULE,
	.id_table		= id_table,
	.feature_table		= features,
	.feature_table_size	= ARRAY_SIZE(features),
	.probe			= viommu_probe,
	.remove			= viommu_remove,
	.config_changed		= viommu_config_changed,
};

module_virtio_driver(virtio_iommu_drv);

MODULE_DESCRIPTION("Virtio IOMMU driver");
MODULE_AUTHOR("Jean-Philippe Brucker <jean-philippe.brucker@arm.com>");
MODULE_LICENSE("GPL v2");