// SPDX-License-Identifier: GPL-2.0
/*
 * Virtio driver for the paravirtualized IOMMU
 *
 * Copyright (C) 2018 Arm Limited
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/amba/bus.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/freezer.h>
#include <linux/interval_tree.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ids.h>
#include <linux/wait.h>

#include <uapi/linux/virtio_iommu.h>

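/*
 * IOVA window reserved for software-managed MSIs. It is reported to the IOMMU
 * core as an IOMMU_RESV_SW_MSI region in viommu_get_resv_regions(), so the DMA
 * layer maps MSI doorbells inside it, and those mappings are pushed to the
 * device along with the others when a domain is first attached (see
 * viommu_replay_mappings()).
 */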
#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

#define VIOMMU_REQUEST_VQ		0
#define VIOMMU_NR_VQS			1

struct viommu_dev {
	struct iommu_device		iommu;
	struct device			*dev;
	struct virtio_device		*vdev;

	struct ida			domain_ids;

	struct virtqueue		*vqs[VIOMMU_NR_VQS];
	spinlock_t			request_lock;
	struct list_head		requests;

	/* Device configuration */
	struct iommu_domain_geometry	geometry;
	u64				pgsize_bitmap;
	u8				domain_bits;
};

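/*
 * A single IOVA->PA mapping. Mappings are kept in a per-domain interval tree
 * so that iova_to_phys() can resolve them and so that they can be replayed
 * with MAP requests when the domain is reattached to an endpoint.
 */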
struct viommu_mapping {
	phys_addr_t			paddr;
	struct interval_tree_node	iova;
	u32				flags;
};

struct viommu_domain {
	struct iommu_domain		domain;
	struct viommu_dev		*viommu;
	struct mutex			mutex; /* protects viommu pointer */
	unsigned int			id;

	spinlock_t			mappings_lock;
	struct rb_root_cached		mappings;

	unsigned long			nr_endpoints;
};

struct viommu_endpoint {
	struct viommu_dev		*viommu;
	struct viommu_domain		*vdomain;
};

struct viommu_request {
	struct list_head		list;
	void				*writeback;
	unsigned int			write_offset;
	unsigned int			len;
	char				buf[];
};

#define to_viommu_domain(domain)	\
	container_of(domain, struct viommu_domain, domain)

static int viommu_get_req_errno(void *buf, size_t len)
{
	struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);

	switch (tail->status) {
	case VIRTIO_IOMMU_S_OK:
		return 0;
	case VIRTIO_IOMMU_S_UNSUPP:
		return -ENOSYS;
	case VIRTIO_IOMMU_S_INVAL:
		return -EINVAL;
	case VIRTIO_IOMMU_S_RANGE:
		return -ERANGE;
	case VIRTIO_IOMMU_S_NOENT:
		return -ENOENT;
	case VIRTIO_IOMMU_S_FAULT:
		return -EFAULT;
	case VIRTIO_IOMMU_S_IOERR:
	case VIRTIO_IOMMU_S_DEVERR:
	default:
		return -EIO;
	}
}

static void viommu_set_req_status(void *buf, size_t len, int status)
{
	struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);

	tail->status = status;
}

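/*
 * A request buffer is made of a device-readable part (head and payload)
 * followed by a device-writable part, which for all current request types is
 * just the tail carrying the status:
 *
 *	[ head | payload ]   [ tail ]
 *	device-readable      device-writable
 *
 * The offset of the writable part therefore only depends on the total length.
 * @viommu and @req are currently unused; presumably they would allow computing
 * a per-request-type offset if a request ever grows a larger writable part.
 */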
static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu,
					  struct virtio_iommu_req_head *req,
					  size_t len)
{
	size_t tail_size = sizeof(struct virtio_iommu_req_tail);

	return len - tail_size;
}

/*
 * __viommu_sync_req - Complete all in-flight requests
 *
 * Wait for all added requests to complete. When this function returns, all
 * requests that were in-flight at the time of the call have completed.
 */
static int __viommu_sync_req(struct viommu_dev *viommu)
{
	int ret = 0;
	unsigned int len;
	size_t write_len;
	struct viommu_request *req;
	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];

	assert_spin_locked(&viommu->request_lock);

	virtqueue_kick(vq);

	while (!list_empty(&viommu->requests)) {
		len = 0;
		req = virtqueue_get_buf(vq, &len);
		if (!req)
			continue;

		if (!len)
			viommu_set_req_status(req->buf, req->len,
					      VIRTIO_IOMMU_S_IOERR);

		write_len = req->len - req->write_offset;
		if (req->writeback && len == write_len)
			memcpy(req->writeback, req->buf + req->write_offset,
			       write_len);

		list_del(&req->list);
		kfree(req);
	}

	return ret;
}

static int viommu_sync_req(struct viommu_dev *viommu)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);
	ret = __viommu_sync_req(viommu);
	if (ret)
		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
	spin_unlock_irqrestore(&viommu->request_lock, flags);

	return ret;
}

/*
 * __viommu_add_req - Add one request to the queue
 * @buf: pointer to the request buffer
 * @len: length of the request buffer
 * @writeback: copy data back to the buffer when the request completes.
 *
 * Add a request to the queue. Only synchronize the queue if it's already full.
 * Otherwise don't kick the queue nor wait for requests to complete.
 *
 * When @writeback is true, data written by the device, including the request
 * status, is copied into @buf after the request completes. This is unsafe if
 * the caller allocates @buf on stack and drops the lock between add_req() and
 * sync_req().
 *
 * Return 0 if the request was successfully added to the queue.
 */
static int __viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len,
			    bool writeback)
{
	int ret;
	off_t write_offset;
	struct viommu_request *req;
	struct scatterlist top_sg, bottom_sg;
	struct scatterlist *sg[2] = { &top_sg, &bottom_sg };
	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];

	assert_spin_locked(&viommu->request_lock);

	write_offset = viommu_get_write_desc_offset(viommu, buf, len);
	if (write_offset <= 0)
		return -EINVAL;

	req = kzalloc(sizeof(*req) + len, GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	req->len = len;
	if (writeback) {
		req->writeback = buf + write_offset;
		req->write_offset = write_offset;
	}
	memcpy(&req->buf, buf, write_offset);

	sg_init_one(&top_sg, req->buf, write_offset);
	sg_init_one(&bottom_sg, req->buf + write_offset, len - write_offset);

	ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		/* If the queue is full, sync and retry */
		if (!__viommu_sync_req(viommu))
			ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
	}
	if (ret)
		goto err_free;

	list_add_tail(&req->list, &viommu->requests);
	return 0;

err_free:
	kfree(req);
	return ret;
}

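/*
 * Add a request to the queue without waiting for it to complete. Unless the
 * queue is full, the request is not kicked to the device here; callers rely on
 * a later viommu_sync_req() to push it out and wait. viommu_unmap() uses this
 * to batch UNMAP requests until the core calls iotlb_sync().
 */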
static int viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);
	ret = __viommu_add_req(viommu, buf, len, false);
	if (ret)
		dev_dbg(viommu->dev, "could not add request: %d\n", ret);
	spin_unlock_irqrestore(&viommu->request_lock, flags);

	return ret;
}

/*
 * Send a request and wait for it to complete. Return the request status (as an
 * errno).
 */
static int viommu_send_req_sync(struct viommu_dev *viommu, void *buf,
				size_t len)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);

	ret = __viommu_add_req(viommu, buf, len, true);
	if (ret) {
		dev_dbg(viommu->dev, "could not add request (%d)\n", ret);
		goto out_unlock;
	}

	ret = __viommu_sync_req(viommu);
	if (ret) {
		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
		/* Fall-through (get the actual request status) */
	}

	ret = viommu_get_req_errno(buf, len);
out_unlock:
	spin_unlock_irqrestore(&viommu->request_lock, flags);
	return ret;
}

/*
 * viommu_add_mapping - add a mapping to the internal tree
 *
 * On success, return 0. Otherwise return -ENOMEM.
 */
static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova,
			      phys_addr_t paddr, size_t size, u32 flags)
{
	unsigned long irqflags;
	struct viommu_mapping *mapping;

	mapping = kzalloc(sizeof(*mapping), GFP_ATOMIC);
	if (!mapping)
		return -ENOMEM;

	mapping->paddr		= paddr;
	mapping->iova.start	= iova;
	mapping->iova.last	= iova + size - 1;
	mapping->flags		= flags;

	spin_lock_irqsave(&vdomain->mappings_lock, irqflags);
	interval_tree_insert(&mapping->iova, &vdomain->mappings);
	spin_unlock_irqrestore(&vdomain->mappings_lock, irqflags);

	return 0;
}

/*
 * viommu_del_mappings - remove mappings from the internal tree
 *
 * @vdomain: the domain
 * @iova: start of the range
 * @size: size of the range. A size of 0 corresponds to the entire address
 *	space.
 *
 * On success, returns the number of unmapped bytes (>= size)
 */
static size_t viommu_del_mappings(struct viommu_domain *vdomain,
				  unsigned long iova, size_t size)
{
	size_t unmapped = 0;
	unsigned long flags;
	unsigned long last = iova + size - 1;
	struct viommu_mapping *mapping = NULL;
	struct interval_tree_node *node, *next;

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	next = interval_tree_iter_first(&vdomain->mappings, iova, last);
	while (next) {
		node = next;
		mapping = container_of(node, struct viommu_mapping, iova);
		next = interval_tree_iter_next(node, iova, last);

		/* Trying to split a mapping? */
		if (mapping->iova.start < iova)
			break;

		/*
		 * Virtio-iommu doesn't allow UNMAP to split a mapping created
		 * with a single MAP request, so remove the full mapping.
		 */
		unmapped += mapping->iova.last - mapping->iova.start + 1;

		interval_tree_remove(node, &vdomain->mappings);
		kfree(mapping);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return unmapped;
}

/*
 * viommu_replay_mappings - re-send MAP requests
 *
 * When reattaching a domain that was previously detached from all endpoints,
 * mappings were deleted from the device. Re-create the mappings available in
 * the internal tree.
 */
static int viommu_replay_mappings(struct viommu_domain *vdomain)
{
	int ret = 0;
	unsigned long flags;
	struct viommu_mapping *mapping;
	struct interval_tree_node *node;
	struct virtio_iommu_req_map map;

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	node = interval_tree_iter_first(&vdomain->mappings, 0, -1UL);
	while (node) {
		mapping = container_of(node, struct viommu_mapping, iova);
		map = (struct virtio_iommu_req_map) {
			.head.type	= VIRTIO_IOMMU_T_MAP,
			.domain		= cpu_to_le32(vdomain->id),
			.virt_start	= cpu_to_le64(mapping->iova.start),
			.virt_end	= cpu_to_le64(mapping->iova.last),
			.phys_start	= cpu_to_le64(mapping->paddr),
			.flags		= cpu_to_le32(mapping->flags),
		};

		ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
		if (ret)
			break;

		node = interval_tree_iter_next(node, 0, -1UL);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return ret;
}

/* IOMMU API */

static struct iommu_domain *viommu_domain_alloc(unsigned type)
{
	struct viommu_domain *vdomain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
	if (!vdomain)
		return NULL;

	mutex_init(&vdomain->mutex);
	spin_lock_init(&vdomain->mappings_lock);
	vdomain->mappings = RB_ROOT_CACHED;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&vdomain->domain)) {
		kfree(vdomain);
		return NULL;
	}

	return &vdomain->domain;
}

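/*
 * Finalisation is deferred to the first attach, since domain_alloc() doesn't
 * know which viommu instance will back the domain. Allocate a domain ID within
 * the range advertised by the device and inherit its page size bitmap and
 * input address geometry.
 */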
static int viommu_domain_finalise(struct viommu_dev *viommu,
				  struct iommu_domain *domain)
{
	int ret;
	struct viommu_domain *vdomain = to_viommu_domain(domain);
	unsigned int max_domain = viommu->domain_bits > 31 ? ~0 :
				  (1U << viommu->domain_bits) - 1;

	vdomain->viommu		= viommu;

	domain->pgsize_bitmap	= viommu->pgsize_bitmap;
	domain->geometry	= viommu->geometry;

	ret = ida_alloc_max(&viommu->domain_ids, max_domain, GFP_KERNEL);
	if (ret >= 0)
		vdomain->id = (unsigned int)ret;

	return ret > 0 ? 0 : ret;
}

static void viommu_domain_free(struct iommu_domain *domain)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	iommu_put_dma_cookie(domain);

	/* Free all remaining mappings (size 2^64) */
	viommu_del_mappings(vdomain, 0, 0);

	if (vdomain->viommu)
		ida_free(&vdomain->viommu->domain_ids, vdomain->id);

	kfree(vdomain);
}

static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int i;
	int ret = 0;
	struct virtio_iommu_req_attach req;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct viommu_endpoint *vdev = fwspec->iommu_priv;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	mutex_lock(&vdomain->mutex);
	if (!vdomain->viommu) {
		/*
		 * Properly initialize the domain now that we know which viommu
		 * owns it.
		 */
		ret = viommu_domain_finalise(vdev->viommu, domain);
	} else if (vdomain->viommu != vdev->viommu) {
		dev_err(dev, "cannot attach to foreign vIOMMU\n");
		ret = -EXDEV;
	}
	mutex_unlock(&vdomain->mutex);

	if (ret)
		return ret;

	/*
	 * In the virtio-iommu device, when attaching the endpoint to a new
	 * domain, it is detached from the old one and, if as a result the
	 * old domain isn't attached to any endpoint, all mappings are removed
	 * from the old domain and it is freed.
	 *
	 * In the driver the old domain still exists, and its mappings will be
	 * recreated if it gets reattached to an endpoint. Otherwise it will be
	 * freed explicitly.
	 *
	 * vdev->vdomain is protected by group->mutex
	 */
	if (vdev->vdomain)
		vdev->vdomain->nr_endpoints--;

	req = (struct virtio_iommu_req_attach) {
		.head.type	= VIRTIO_IOMMU_T_ATTACH,
		.domain		= cpu_to_le32(vdomain->id),
	};

	for (i = 0; i < fwspec->num_ids; i++) {
		req.endpoint = cpu_to_le32(fwspec->ids[i]);

		ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req));
		if (ret)
			return ret;
	}

	if (!vdomain->nr_endpoints) {
		/*
		 * This endpoint is the first to be attached to the domain.
		 * Replay existing mappings (e.g. SW MSI).
		 */
		ret = viommu_replay_mappings(vdomain);
		if (ret)
			return ret;
	}

	vdomain->nr_endpoints++;
	vdev->vdomain = vdomain;

	return 0;
}

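/*
 * Record the mapping in the internal tree before talking to the device, so it
 * can be resolved by iova_to_phys() and replayed on a later attach. The MAP
 * request itself is only sent when the domain has endpoints attached; until
 * then the device holds no state for this domain and the mapping will be
 * replayed by the first attach.
 */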
static int viommu_map(struct iommu_domain *domain, unsigned long iova,
		      phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	int flags;
	struct virtio_iommu_req_map map;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	flags = (prot & IOMMU_READ ? VIRTIO_IOMMU_MAP_F_READ : 0) |
		(prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) |
		(prot & IOMMU_MMIO ? VIRTIO_IOMMU_MAP_F_MMIO : 0);

	ret = viommu_add_mapping(vdomain, iova, paddr, size, flags);
	if (ret)
		return ret;

	map = (struct virtio_iommu_req_map) {
		.head.type	= VIRTIO_IOMMU_T_MAP,
		.domain		= cpu_to_le32(vdomain->id),
		.virt_start	= cpu_to_le64(iova),
		.phys_start	= cpu_to_le64(paddr),
		.virt_end	= cpu_to_le64(iova + size - 1),
		.flags		= cpu_to_le32(flags),
	};

	if (!vdomain->nr_endpoints)
		return 0;

	ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
	if (ret)
		viommu_del_mappings(vdomain, iova, size);

	return ret;
}

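/*
 * Remove the range from the internal tree first. If no endpoint is attached,
 * the device has already dropped its mappings and there is nothing more to do.
 * Otherwise queue an UNMAP request covering what was actually removed, without
 * kicking it: the core calls iotlb_sync() afterwards, which lets consecutive
 * unmaps be batched on the request queue.
 */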
static size_t viommu_unmap(struct iommu_domain *domain, unsigned long iova,
			   size_t size)
{
	int ret = 0;
	size_t unmapped;
	struct virtio_iommu_req_unmap unmap;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	unmapped = viommu_del_mappings(vdomain, iova, size);
	if (unmapped < size)
		return 0;

	/* Device already removed all mappings after detach. */
	if (!vdomain->nr_endpoints)
		return unmapped;

	unmap = (struct virtio_iommu_req_unmap) {
		.head.type	= VIRTIO_IOMMU_T_UNMAP,
		.domain		= cpu_to_le32(vdomain->id),
		.virt_start	= cpu_to_le64(iova),
		.virt_end	= cpu_to_le64(iova + unmapped - 1),
	};

	ret = viommu_add_req(vdomain->viommu, &unmap, sizeof(unmap));
	return ret ? 0 : unmapped;
}

static phys_addr_t viommu_iova_to_phys(struct iommu_domain *domain,
				       dma_addr_t iova)
{
	u64 paddr = 0;
	unsigned long flags;
	struct viommu_mapping *mapping;
	struct interval_tree_node *node;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	node = interval_tree_iter_first(&vdomain->mappings, iova, iova);
	if (node) {
		mapping = container_of(node, struct viommu_mapping, iova);
		paddr = mapping->paddr + (iova - mapping->iova.start);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return paddr;
}

static void viommu_iotlb_sync(struct iommu_domain *domain)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	viommu_sync_req(vdomain->viommu);
}

static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
{
	struct iommu_resv_region *region;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, prot,
					 IOMMU_RESV_SW_MSI);
	if (!region)
		return;

	list_add_tail(&region->list, head);
	iommu_dma_get_resv_regions(dev, head);
}

static void viommu_put_resv_regions(struct device *dev, struct list_head *head)
{
	struct iommu_resv_region *entry, *next;

	list_for_each_entry_safe(entry, next, head, list)
		kfree(entry);
}

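/*
 * Forward declarations: viommu_get_by_fwnode() needs the virtio driver
 * structure to look up a probed viommu instance, and viommu_add_device()
 * checks fwspec->ops against viommu_ops. Both are defined further down.
 */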
static struct iommu_ops viommu_ops;
static struct virtio_driver virtio_iommu_drv;

static int viommu_match_node(struct device *dev, void *data)
{
	return dev->parent->fwnode == data;
}

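/*
 * Find the viommu instance behind an IOMMU fwnode. The fwnode registered for
 * the IOMMU is that of the viommu's parent (the virtio transport device), so
 * match on dev->parent->fwnode and return the viommu stored in the virtio
 * device's priv pointer, or NULL if it hasn't been probed yet.
 */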
static struct viommu_dev *viommu_get_by_fwnode(struct fwnode_handle *fwnode)
{
	struct device *dev = driver_find_device(&virtio_iommu_drv.driver, NULL,
						fwnode, viommu_match_node);
	put_device(dev);

	return dev ? dev_to_virtio(dev)->priv : NULL;
}

static int viommu_add_device(struct device *dev)
{
	int ret;
	struct iommu_group *group;
	struct viommu_endpoint *vdev;
	struct viommu_dev *viommu = NULL;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (!fwspec || fwspec->ops != &viommu_ops)
		return -ENODEV;

	viommu = viommu_get_by_fwnode(fwspec->iommu_fwnode);
	if (!viommu)
		return -ENODEV;

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev)
		return -ENOMEM;

	vdev->viommu = viommu;
	fwspec->iommu_priv = vdev;

	ret = iommu_device_link(&viommu->iommu, dev);
	if (ret)
		goto err_free_dev;

	/*
	 * Last step creates a default domain and attaches to it. Everything
	 * must be ready.
	 */
	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto err_unlink_dev;
	}

	iommu_group_put(group);

	return PTR_ERR_OR_ZERO(group);

err_unlink_dev:
	iommu_device_unlink(&viommu->iommu, dev);
err_free_dev:
	kfree(vdev);

	return ret;
}

static void viommu_remove_device(struct device *dev)
{
	struct viommu_endpoint *vdev;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (!fwspec || fwspec->ops != &viommu_ops)
		return;

	vdev = fwspec->iommu_priv;

	iommu_group_remove_device(dev);
	iommu_device_unlink(&vdev->viommu->iommu, dev);
	kfree(vdev);
}

static struct iommu_group *viommu_device_group(struct device *dev)
{
	if (dev_is_pci(dev))
		return pci_device_group(dev);
	else
		return generic_device_group(dev);
}

static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	return iommu_fwspec_add_ids(dev, args->args, 1);
}

static struct iommu_ops viommu_ops = {
	.domain_alloc		= viommu_domain_alloc,
	.domain_free		= viommu_domain_free,
	.attach_dev		= viommu_attach_dev,
	.map			= viommu_map,
	.unmap			= viommu_unmap,
	.iova_to_phys		= viommu_iova_to_phys,
	.iotlb_sync		= viommu_iotlb_sync,
	.add_device		= viommu_add_device,
	.remove_device		= viommu_remove_device,
	.device_group		= viommu_device_group,
	.get_resv_regions	= viommu_get_resv_regions,
	.put_resv_regions	= viommu_put_resv_regions,
	.of_xlate		= viommu_of_xlate,
};

static int viommu_init_vqs(struct viommu_dev *viommu)
{
	struct virtio_device *vdev = dev_to_virtio(viommu->dev);
	const char *name = "request";
	void *ret;

	ret = virtio_find_single_vq(vdev, NULL, name);
	if (IS_ERR(ret)) {
		dev_err(viommu->dev, "cannot find VQ\n");
		return PTR_ERR(ret);
	}

	viommu->vqs[VIOMMU_REQUEST_VQ] = ret;

	return 0;
}

static int viommu_probe(struct virtio_device *vdev)
{
	struct device *parent_dev = vdev->dev.parent;
	struct viommu_dev *viommu = NULL;
	struct device *dev = &vdev->dev;
	u64 input_start = 0;
	u64 input_end = -1UL;
	int ret;

	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
	    !virtio_has_feature(vdev, VIRTIO_IOMMU_F_MAP_UNMAP))
		return -ENODEV;

	viommu = devm_kzalloc(dev, sizeof(*viommu), GFP_KERNEL);
	if (!viommu)
		return -ENOMEM;

	spin_lock_init(&viommu->request_lock);
	ida_init(&viommu->domain_ids);
	viommu->dev = dev;
	viommu->vdev = vdev;
	INIT_LIST_HEAD(&viommu->requests);

	ret = viommu_init_vqs(viommu);
	if (ret)
		return ret;

	virtio_cread(vdev, struct virtio_iommu_config, page_size_mask,
		     &viommu->pgsize_bitmap);

	if (!viommu->pgsize_bitmap) {
		ret = -EINVAL;
		goto err_free_vqs;
	}

	viommu->domain_bits = 32;

	/* Optional features */
	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
			     struct virtio_iommu_config, input_range.start,
			     &input_start);

	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
			     struct virtio_iommu_config, input_range.end,
			     &input_end);

	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_BITS,
			     struct virtio_iommu_config, domain_bits,
			     &viommu->domain_bits);

	viommu->geometry = (struct iommu_domain_geometry) {
		.aperture_start	= input_start,
		.aperture_end	= input_end,
		.force_aperture	= true,
	};

	viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;

	virtio_device_ready(vdev);

	ret = iommu_device_sysfs_add(&viommu->iommu, dev, NULL, "%s",
				     virtio_bus_name(vdev));
	if (ret)
		goto err_free_vqs;

	iommu_device_set_ops(&viommu->iommu, &viommu_ops);
	iommu_device_set_fwnode(&viommu->iommu, parent_dev->fwnode);

	iommu_device_register(&viommu->iommu);

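	/*
	 * Register our ops with each bus type we can serve. Only the first
	 * virtio-iommu instance needs to do this, hence the iommu_ops checks;
	 * endpoints probed afterwards reach the right instance through the
	 * fwnode recorded in their fwspec (see viommu_get_by_fwnode()).
	 */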
#ifdef CONFIG_PCI
	if (pci_bus_type.iommu_ops != &viommu_ops) {
		pci_request_acs();
		ret = bus_set_iommu(&pci_bus_type, &viommu_ops);
		if (ret)
			goto err_unregister;
	}
#endif
#ifdef CONFIG_ARM_AMBA
	if (amba_bustype.iommu_ops != &viommu_ops) {
		ret = bus_set_iommu(&amba_bustype, &viommu_ops);
		if (ret)
			goto err_unregister;
	}
#endif
	if (platform_bus_type.iommu_ops != &viommu_ops) {
		ret = bus_set_iommu(&platform_bus_type, &viommu_ops);
		if (ret)
			goto err_unregister;
	}

	vdev->priv = viommu;

	dev_info(dev, "input address: %u bits\n",
		 order_base_2(viommu->geometry.aperture_end));
	dev_info(dev, "page mask: %#llx\n", viommu->pgsize_bitmap);

	return 0;

err_unregister:
	iommu_device_sysfs_remove(&viommu->iommu);
	iommu_device_unregister(&viommu->iommu);
err_free_vqs:
	vdev->config->del_vqs(vdev);

	return ret;
}

static void viommu_remove(struct virtio_device *vdev)
{
	struct viommu_dev *viommu = vdev->priv;

	iommu_device_sysfs_remove(&viommu->iommu);
	iommu_device_unregister(&viommu->iommu);

	/* Stop all virtqueues */
	vdev->config->reset(vdev);
	vdev->config->del_vqs(vdev);

	dev_info(&vdev->dev, "device removed\n");
}

static void viommu_config_changed(struct virtio_device *vdev)
{
	dev_warn(&vdev->dev, "config changed\n");
}

static unsigned int features[] = {
	VIRTIO_IOMMU_F_MAP_UNMAP,
	VIRTIO_IOMMU_F_DOMAIN_BITS,
	VIRTIO_IOMMU_F_INPUT_RANGE,
};

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_IOMMU, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static struct virtio_driver virtio_iommu_drv = {
	.driver.name		= KBUILD_MODNAME,
	.driver.owner		= THIS_MODULE,
	.id_table		= id_table,
	.feature_table		= features,
	.feature_table_size	= ARRAY_SIZE(features),
	.probe			= viommu_probe,
	.remove			= viommu_remove,
	.config_changed		= viommu_config_changed,
};

module_virtio_driver(virtio_iommu_drv);

MODULE_DESCRIPTION("Virtio IOMMU driver");
MODULE_AUTHOR("Jean-Philippe Brucker <jean-philippe.brucker@arm.com>");
MODULE_LICENSE("GPL v2");