/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>

static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
{
}

/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: bus.
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus. Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up. With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */
int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops)
{
        if (bus->iommu_ops != NULL)
                return -EBUSY;

        /*
         * Set the default pgsize values, which retain the existing
         * IOMMU API behavior: drivers will be called to map
         * regions that are sized/aligned to order of 4KiB pages.
         *
         * This will be removed once all drivers are migrated.
         */
        if (!ops->pgsize_bitmap)
                ops->pgsize_bitmap = ~0xFFFUL;

        bus->iommu_ops = ops;

        /* Do IOMMU specific setup for this bus-type */
        iommu_bus_init(bus, ops);

        return 0;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);
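
/*
 * Illustrative sketch, not part of this file: how an IOMMU driver might
 * register its callbacks for a bus with bus_set_iommu(). The "foo" driver,
 * its ops instance and the chosen page sizes are made up; the SZ_* size
 * macros are assumed to be available, as in contemporary IOMMU drivers.
 */
#if 0
static struct iommu_ops foo_iommu_ops = {
        /* .domain_init, .attach_dev, .map, .unmap, ... filled by the driver */

        /* hardware supports 4KiB, 2MiB and 1GiB pages */
        .pgsize_bitmap  = SZ_4K | SZ_2M | SZ_1G,
};

static int __init foo_iommu_init(void)
{
        /* fails with -EBUSY if another driver already claimed this bus */
        return bus_set_iommu(&platform_bus_type, &foo_iommu_ops);
}
#endif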

bool iommu_present(struct bus_type *bus)
{
        return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
                                        iommu_fault_handler_t handler)
{
        BUG_ON(!domain);

        domain->handler = handler;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
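
/*
 * Illustrative sketch, not part of this file: registering a fault handler
 * on a domain. The handler below and its report-and-give-up policy are made
 * up; the argument list is assumed to match iommu_fault_handler_t as
 * declared in <linux/iommu.h> (domain, faulting device, faulting iova,
 * flags).
 */
#if 0
static int foo_fault_handler(struct iommu_domain *domain, struct device *dev,
                             unsigned long iova, int flags)
{
        dev_err(dev, "iommu fault at iova 0x%lx (flags 0x%x)\n", iova, flags);

        /* non-zero: the fault could not be handled here */
        return -ENOSYS;
}

static void foo_setup_fault_reporting(struct iommu_domain *domain)
{
        iommu_set_fault_handler(domain, foo_fault_handler);
}
#endif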

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
        struct iommu_domain *domain;
        int ret;

        if (bus == NULL || bus->iommu_ops == NULL)
                return NULL;

        domain = kmalloc(sizeof(*domain), GFP_KERNEL);
        if (!domain)
                return NULL;

        domain->ops = bus->iommu_ops;

        ret = domain->ops->domain_init(domain);
        if (ret)
                goto out_free;

        return domain;

out_free:
        kfree(domain);

        return NULL;
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
        if (likely(domain->ops->domain_destroy != NULL))
                domain->ops->domain_destroy(domain);

        kfree(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
        if (unlikely(domain->ops->attach_dev == NULL))
                return -ENODEV;

        return domain->ops->attach_dev(domain, dev);
}
EXPORT_SYMBOL_GPL(iommu_attach_device);

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
        if (unlikely(domain->ops->detach_dev == NULL))
                return;

        domain->ops->detach_dev(domain, dev);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);
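
/*
 * Illustrative sketch, not part of this file: the typical lifecycle of a
 * domain for a single device, built only from the calls defined above.
 * "dev" stands for some device whose bus has iommu_ops registered; the
 * helper names are made up and error handling is reduced to a minimum.
 */
#if 0
static struct iommu_domain *foo_take_device(struct device *dev)
{
        struct iommu_domain *domain;

        if (!iommu_present(dev->bus))
                return NULL;

        domain = iommu_domain_alloc(dev->bus);
        if (!domain)
                return NULL;

        if (iommu_attach_device(domain, dev)) {
                iommu_domain_free(domain);
                return NULL;
        }

        return domain;
}

static void foo_release_device(struct iommu_domain *domain, struct device *dev)
{
        iommu_detach_device(domain, dev);
        iommu_domain_free(domain);
}
#endif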

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
                               unsigned long iova)
{
        if (unlikely(domain->ops->iova_to_phys == NULL))
                return 0;

        return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

int iommu_domain_has_cap(struct iommu_domain *domain,
                         unsigned long cap)
{
        if (unlikely(domain->ops->domain_has_cap == NULL))
                return 0;

        return domain->ops->domain_has_cap(domain, cap);
}
EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
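
/*
 * Illustrative sketch, not part of this file: querying a domain capability.
 * It assumes IOMMU_CAP_CACHE_COHERENCY from <linux/iommu.h>; the wrapper
 * and its "snooping" wording are made up.
 */
#if 0
static bool foo_domain_snoops(struct iommu_domain *domain)
{
        /* non-zero means the IOMMU enforces cache coherency for DMA */
        return iommu_domain_has_cap(domain, IOMMU_CAP_CACHE_COHERENCY) != 0;
}
#endif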

int iommu_map(struct iommu_domain *domain, unsigned long iova,
              phys_addr_t paddr, size_t size, int prot)
{
        unsigned long orig_iova = iova;
        unsigned int min_pagesz;
        size_t orig_size = size;
        int ret = 0;

        if (unlikely(domain->ops->map == NULL))
                return -ENODEV;

        /* find out the minimum page size supported */
        min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

        /*
         * both the virtual address and the physical one, as well as
         * the size of the mapping, must be aligned (at least) to the
         * size of the smallest page supported by the hardware
         */
        if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
                pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
                        "0x%x\n", iova, (unsigned long)paddr,
                        (unsigned long)size, min_pagesz);
                return -EINVAL;
        }

        pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
                                (unsigned long)paddr, (unsigned long)size);

        while (size) {
                unsigned long pgsize, addr_merge = iova | paddr;
                unsigned int pgsize_idx;

                /* Max page size that still fits into 'size' */
                pgsize_idx = __fls(size);

                /* need to consider alignment requirements ? */
                if (likely(addr_merge)) {
                        /* Max page size allowed by both iova and paddr */
                        unsigned int align_pgsize_idx = __ffs(addr_merge);

                        pgsize_idx = min(pgsize_idx, align_pgsize_idx);
                }

                /* build a mask of acceptable page sizes */
                pgsize = (1UL << (pgsize_idx + 1)) - 1;

                /* throw away page sizes not supported by the hardware */
                pgsize &= domain->ops->pgsize_bitmap;

                /* make sure we're still sane */
                BUG_ON(!pgsize);

                /* pick the biggest page */
                pgsize_idx = __fls(pgsize);
                pgsize = 1UL << pgsize_idx;

                pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
                                        (unsigned long)paddr, pgsize);

                ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
                if (ret)
                        break;

                iova += pgsize;
                paddr += pgsize;
                size -= pgsize;
        }

        /* unroll mapping in case something went wrong */
        if (ret)
                iommu_unmap(domain, orig_iova, orig_size - size);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);
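
/*
 * Illustrative sketch, not part of this file: the page-size selection done
 * by one iteration of the loop in iommu_map() above, pulled out into a
 * stand-alone helper. For iova = 0x201000, paddr = 0x401000, size = 0x3000
 * and a bitmap of 4KiB | 2MiB it picks 4KiB, because both addresses are
 * only 4KiB-aligned. The helper name is made up.
 */
#if 0
static unsigned long foo_pick_pgsize(unsigned long iova, phys_addr_t paddr,
                                     size_t size, unsigned long pgsize_bitmap)
{
        unsigned long pgsize, addr_merge = iova | paddr;
        unsigned int pgsize_idx;

        /* largest power of two that still fits into 'size' */
        pgsize_idx = __fls(size);

        /* largest power of two that both iova and paddr are aligned to */
        if (addr_merge)
                pgsize_idx = min(pgsize_idx, (unsigned int)__ffs(addr_merge));

        /* keep only the sizes the hardware supports ... */
        pgsize = ((1UL << (pgsize_idx + 1)) - 1) & pgsize_bitmap;

        /* ... and take the biggest one of those */
        return pgsize ? 1UL << __fls(pgsize) : 0;
}
#endif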

size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
        size_t unmapped_page, unmapped = 0;
        unsigned int min_pagesz;

        if (unlikely(domain->ops->unmap == NULL))
                return -ENODEV;

        /* find out the minimum page size supported */
        min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

        /*
         * The virtual address, as well as the size of the mapping, must be
         * aligned (at least) to the size of the smallest page supported
         * by the hardware
         */
        if (!IS_ALIGNED(iova | size, min_pagesz)) {
                pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
                        iova, (unsigned long)size, min_pagesz);
                return -EINVAL;
        }

        pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
                                (unsigned long)size);

        /*
         * Keep iterating until we either unmap 'size' bytes (or more)
         * or we hit an area that isn't mapped.
         */
        while (unmapped < size) {
                size_t left = size - unmapped;

                unmapped_page = domain->ops->unmap(domain, iova, left);
                if (!unmapped_page)
                        break;

                pr_debug("unmapped: iova 0x%lx size %lx\n", iova,
                                (unsigned long)unmapped_page);

                iova += unmapped_page;
                unmapped += unmapped_page;
        }

        return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);
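
/*
 * Illustrative sketch, not part of this file: mapping a physically
 * contiguous buffer into a domain and tearing the mapping down again,
 * using only iommu_map(), iommu_iova_to_phys() and iommu_unmap() as
 * defined above. The helper names and the chosen protection flags are
 * made up; IOMMU_READ and IOMMU_WRITE come from <linux/iommu.h>.
 */
#if 0
static int foo_map_buffer(struct iommu_domain *domain, unsigned long iova,
                          phys_addr_t paddr, size_t size)
{
        int ret;

        /* iova, paddr and size must be aligned to the minimum page size */
        ret = iommu_map(domain, iova, paddr, size, IOMMU_READ | IOMMU_WRITE);
        if (ret)
                return ret;

        /* sanity check: the translation should now be in place */
        WARN_ON(iommu_iova_to_phys(domain, iova) != paddr);

        return 0;
}

static void foo_unmap_buffer(struct iommu_domain *domain, unsigned long iova,
                             size_t size)
{
        /* iommu_unmap() returns the number of bytes actually unmapped */
        if (iommu_unmap(domain, iova, size) != size)
                pr_warn("partial unmap at iova 0x%lx\n", iova);
}
#endif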