Jean-Philippe Brucker | fa83433 | 2019-10-02 12:42:41 -0700 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | /* |
| 3 | * I/O Address Space ID allocator. There is one global IOASID space, split into |
| 4 | * subsets. Users create a subset with DECLARE_IOASID_SET, then allocate and |
Jean-Philippe Brucker | cb4789b | 2020-11-06 16:50:47 +0100 | [diff] [blame] | 5 | * free IOASIDs with ioasid_alloc and ioasid_put. |
Jean-Philippe Brucker | fa83433 | 2019-10-02 12:42:41 -0700 | [diff] [blame] | 6 | */ |
| 7 | #include <linux/ioasid.h> |
| 8 | #include <linux/module.h> |
| 9 | #include <linux/slab.h> |
| 10 | #include <linux/spinlock.h> |
| 11 | #include <linux/xarray.h> |
| 12 | |
/*
 * struct ioasid_data - Per-IOASID bookkeeping, stored as the entry in the
 * active allocator's XArray.
 * @id:      the allocated IOASID value
 * @set:     the set this IOASID belongs to (ownership checked by ioasid_find())
 * @private: caller-supplied data, read under RCU by ioasid_find()
 * @rcu:     for kfree_rcu(), so RCU readers never see freed memory
 * @refs:    reference count; the IOASID is released when it drops to zero
 *           (see ioasid_get()/ioasid_put())
 */
struct ioasid_data {
	ioasid_t id;
	struct ioasid_set *set;
	void *private;
	struct rcu_head rcu;
	refcount_t refs;
};
| 20 | |
/*
 * struct ioasid_allocator_data - Internal data structure to hold information
 * about an allocator. There are two types of allocators:
 *
 * - Default allocator always has its own XArray to track the IOASIDs allocated.
 * - Custom allocators may share allocation helpers with different private data.
 *   Custom allocators that share the same helper functions also share the same
 *   XArray.
 * Rules:
 * 1. Default allocator is always available, not dynamically registered. This is
 *    to prevent race conditions with early boot code that want to register
 *    custom allocators or allocate IOASIDs.
 * 2. Custom allocators take precedence over the default allocator.
 * 3. When all custom allocators sharing the same helper functions are
 *    unregistered (e.g. due to hotplug), all outstanding IOASIDs must be
 *    freed. Otherwise, outstanding IOASIDs will be lost and orphaned.
 * 4. When switching between custom allocators sharing the same helper
 *    functions, outstanding IOASIDs are preserved.
 * 5. When switching between custom allocator and default allocator, all IOASIDs
 *    must be freed to ensure unadulterated space for the new allocator.
 *
 * @ops:	allocator helper functions and its data
 * @list:	registered custom allocators
 * @slist:	allocators share the same ops but different data
 * @flags:	attributes of the allocator
 * @xa:		xarray holds the IOASID space
 * @rcu:	used for kfree_rcu when unregistering allocator
 *
 * Both @list membership and the @slist chains are manipulated only under
 * ioasid_allocator_lock (see ioasid_register_allocator() and
 * ioasid_unregister_allocator()).
 */
struct ioasid_allocator_data {
	struct ioasid_allocator_ops *ops;
	struct list_head list;
	struct list_head slist;
#define IOASID_ALLOCATOR_CUSTOM BIT(0) /* Needs framework to track results */
	unsigned long flags;
	struct xarray xa;
	struct rcu_head rcu;
};
| 58 | |
/*
 * Serializes allocator registration/unregistration, allocator switching,
 * and all IOASID alloc/free/set_data operations.
 */
static DEFINE_SPINLOCK(ioasid_allocator_lock);
/* All registered custom allocators, in registration order */
static LIST_HEAD(allocators_list);

static ioasid_t default_alloc(ioasid_t min, ioasid_t max, void *opaque);
static void default_free(ioasid_t ioasid, void *opaque);

static struct ioasid_allocator_ops default_ops = {
	.alloc = default_alloc,
	.free = default_free,
};

/* Built-in xarray-based allocator; always available, never unregistered */
static struct ioasid_allocator_data default_allocator = {
	.ops = &default_ops,
	.flags = 0,
	.xa = XARRAY_INIT(ioasid_xa, XA_FLAGS_ALLOC),
};

/*
 * The allocator currently in use. Updated with rcu_assign_pointer() under
 * ioasid_allocator_lock; read via rcu_dereference() in ioasid_find().
 */
static struct ioasid_allocator_data *active_allocator = &default_allocator;
| 77 | |
| 78 | static ioasid_t default_alloc(ioasid_t min, ioasid_t max, void *opaque) |
| 79 | { |
| 80 | ioasid_t id; |
| 81 | |
| 82 | if (xa_alloc(&default_allocator.xa, &id, opaque, XA_LIMIT(min, max), GFP_ATOMIC)) { |
| 83 | pr_err("Failed to alloc ioasid from %d to %d\n", min, max); |
| 84 | return INVALID_IOASID; |
| 85 | } |
| 86 | |
| 87 | return id; |
| 88 | } |
| 89 | |
/*
 * Default IOASID free helper: remove the entry from the default allocator's
 * XArray and free the ioasid_data only after an RCU grace period, so that
 * concurrent ioasid_find() readers holding rcu_read_lock() remain safe.
 *
 * NOTE(review): assumes @ioasid is currently allocated; the sole caller,
 * ioasid_put(), only reaches this after a successful xa_load(). For an
 * absent ID, xa_erase() would return NULL here — confirm kfree_rcu()
 * tolerance of NULL before adding other callers.
 */
static void default_free(ioasid_t ioasid, void *opaque)
{
	struct ioasid_data *ioasid_data;

	ioasid_data = xa_erase(&default_allocator.xa, ioasid);
	kfree_rcu(ioasid_data, rcu);
}
| 97 | |
| 98 | /* Allocate and initialize a new custom allocator with its helper functions */ |
| 99 | static struct ioasid_allocator_data *ioasid_alloc_allocator(struct ioasid_allocator_ops *ops) |
| 100 | { |
| 101 | struct ioasid_allocator_data *ia_data; |
| 102 | |
| 103 | ia_data = kzalloc(sizeof(*ia_data), GFP_ATOMIC); |
| 104 | if (!ia_data) |
| 105 | return NULL; |
| 106 | |
| 107 | xa_init_flags(&ia_data->xa, XA_FLAGS_ALLOC); |
| 108 | INIT_LIST_HEAD(&ia_data->slist); |
| 109 | ia_data->flags |= IOASID_ALLOCATOR_CUSTOM; |
| 110 | ia_data->ops = ops; |
| 111 | |
| 112 | /* For tracking custom allocators that share the same ops */ |
| 113 | list_add_tail(&ops->list, &ia_data->slist); |
| 114 | |
| 115 | return ia_data; |
| 116 | } |
| 117 | |
| 118 | static bool use_same_ops(struct ioasid_allocator_ops *a, struct ioasid_allocator_ops *b) |
| 119 | { |
| 120 | return (a->free == b->free) && (a->alloc == b->alloc); |
| 121 | } |
| 122 | |
/**
 * ioasid_register_allocator - register a custom allocator
 * @ops: the custom allocator ops to be registered
 *
 * Custom allocators take precedence over the default xarray based allocator.
 * Private data associated with the IOASID allocated by the custom allocators
 * are managed by IOASID framework similar to data stored in xa by default
 * allocator.
 *
 * There can be multiple allocators registered but only one is active. In case
 * of runtime removal of a custom allocator, the next one is activated based
 * on the registration ordering.
 *
 * Multiple allocators can share the same alloc() function, in this case the
 * IOASID space is shared.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EEXIST if @ops is
 * already registered, -EAGAIN if the default allocator still has outstanding
 * IOASIDs (rule 5 above).
 */
int ioasid_register_allocator(struct ioasid_allocator_ops *ops)
{
	struct ioasid_allocator_data *ia_data;
	struct ioasid_allocator_data *pallocator;
	int ret = 0;

	spin_lock(&ioasid_allocator_lock);

	ia_data = ioasid_alloc_allocator(ops);
	if (!ia_data) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	/*
	 * No particular preference, we activate the first one and keep
	 * the later registered allocators in a list in case the first one gets
	 * removed due to hotplug.
	 */
	if (list_empty(&allocators_list)) {
		WARN_ON(active_allocator != &default_allocator);
		/* Use this new allocator if default is not active */
		if (xa_empty(&active_allocator->xa)) {
			rcu_assign_pointer(active_allocator, ia_data);
			list_add_tail(&ia_data->list, &allocators_list);
			goto out_unlock;
		}
		/* Rule 5: cannot switch away from default with live IOASIDs */
		pr_warn("Default allocator active with outstanding IOASID\n");
		ret = -EAGAIN;
		goto out_free;
	}

	/* Check if the allocator is already registered */
	list_for_each_entry(pallocator, &allocators_list, list) {
		if (pallocator->ops == ops) {
			pr_err("IOASID allocator already registered\n");
			ret = -EEXIST;
			goto out_free;
		} else if (use_same_ops(pallocator->ops, ops)) {
			/*
			 * If the new allocator shares the same ops,
			 * then they will share the same IOASID space.
			 * We should put them under the same xarray.
			 *
			 * list_add_tail() re-links ops->list (which
			 * ioasid_alloc_allocator() put on ia_data->slist)
			 * onto pallocator->slist; the now-redundant
			 * ia_data is freed at out_free.
			 */
			list_add_tail(&ops->list, &pallocator->slist);
			goto out_free;
		}
	}
	list_add_tail(&ia_data->list, &allocators_list);

	spin_unlock(&ioasid_allocator_lock);
	return 0;
out_free:
	kfree(ia_data);
out_unlock:
	spin_unlock(&ioasid_allocator_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(ioasid_register_allocator);
| 198 | |
| 199 | /** |
| 200 | * ioasid_unregister_allocator - Remove a custom IOASID allocator ops |
| 201 | * @ops: the custom allocator to be removed |
| 202 | * |
| 203 | * Remove an allocator from the list, activate the next allocator in |
| 204 | * the order it was registered. Or revert to default allocator if all |
| 205 | * custom allocators are unregistered without outstanding IOASIDs. |
| 206 | */ |
| 207 | void ioasid_unregister_allocator(struct ioasid_allocator_ops *ops) |
| 208 | { |
| 209 | struct ioasid_allocator_data *pallocator; |
| 210 | struct ioasid_allocator_ops *sops; |
| 211 | |
| 212 | spin_lock(&ioasid_allocator_lock); |
| 213 | if (list_empty(&allocators_list)) { |
| 214 | pr_warn("No custom IOASID allocators active!\n"); |
| 215 | goto exit_unlock; |
| 216 | } |
| 217 | |
| 218 | list_for_each_entry(pallocator, &allocators_list, list) { |
| 219 | if (!use_same_ops(pallocator->ops, ops)) |
| 220 | continue; |
| 221 | |
| 222 | if (list_is_singular(&pallocator->slist)) { |
| 223 | /* No shared helper functions */ |
| 224 | list_del(&pallocator->list); |
| 225 | /* |
| 226 | * All IOASIDs should have been freed before |
| 227 | * the last allocator that shares the same ops |
| 228 | * is unregistered. |
| 229 | */ |
| 230 | WARN_ON(!xa_empty(&pallocator->xa)); |
| 231 | if (list_empty(&allocators_list)) { |
| 232 | pr_info("No custom IOASID allocators, switch to default.\n"); |
| 233 | rcu_assign_pointer(active_allocator, &default_allocator); |
| 234 | } else if (pallocator == active_allocator) { |
| 235 | rcu_assign_pointer(active_allocator, |
| 236 | list_first_entry(&allocators_list, |
| 237 | struct ioasid_allocator_data, list)); |
| 238 | pr_info("IOASID allocator changed"); |
| 239 | } |
| 240 | kfree_rcu(pallocator, rcu); |
| 241 | break; |
| 242 | } |
| 243 | /* |
| 244 | * Find the matching shared ops to delete, |
| 245 | * but keep outstanding IOASIDs |
| 246 | */ |
| 247 | list_for_each_entry(sops, &pallocator->slist, list) { |
| 248 | if (sops == ops) { |
| 249 | list_del(&ops->list); |
| 250 | break; |
| 251 | } |
| 252 | } |
| 253 | break; |
| 254 | } |
| 255 | |
| 256 | exit_unlock: |
| 257 | spin_unlock(&ioasid_allocator_lock); |
| 258 | } |
| 259 | EXPORT_SYMBOL_GPL(ioasid_unregister_allocator); |
Jean-Philippe Brucker | fa83433 | 2019-10-02 12:42:41 -0700 | [diff] [blame] | 260 | |
/**
 * ioasid_set_data - Set private data for an allocated ioasid
 * @ioasid: the ID to set data
 * @data: the private data
 *
 * For IOASID that is already allocated, private data can be set
 * via this API. Future lookup can be done via ioasid_find.
 *
 * Return: 0 on success, -ENOENT if @ioasid is not currently allocated.
 */
int ioasid_set_data(ioasid_t ioasid, void *data)
{
	struct ioasid_data *ioasid_data;
	int ret = 0;

	spin_lock(&ioasid_allocator_lock);
	ioasid_data = xa_load(&active_allocator->xa, ioasid);
	if (ioasid_data)
		/* Publish so ioasid_find()'s rcu_dereference() sees a full pointer */
		rcu_assign_pointer(ioasid_data->private, data);
	else
		ret = -ENOENT;
	spin_unlock(&ioasid_allocator_lock);

	/*
	 * Wait for readers to stop accessing the old private data, so the
	 * caller can free it.
	 */
	if (!ret)
		synchronize_rcu();

	return ret;
}
EXPORT_SYMBOL_GPL(ioasid_set_data);
| 292 | |
/**
 * ioasid_alloc - Allocate an IOASID
 * @set: the IOASID set
 * @min: the minimum ID (inclusive)
 * @max: the maximum ID (inclusive)
 * @private: data private to the caller
 *
 * Allocate an ID between @min and @max. The @private pointer is stored
 * internally and can be retrieved with ioasid_find(). The new IOASID
 * starts with a reference count of one (released via ioasid_put()).
 *
 * Return: the allocated ID on success, or %INVALID_IOASID on failure.
 */
ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max,
		      void *private)
{
	struct ioasid_data *data;
	void *adata;
	ioasid_t id;

	/* GFP_ATOMIC: allocation also happens under the spinlock path below */
	data = kzalloc(sizeof(*data), GFP_ATOMIC);
	if (!data)
		return INVALID_IOASID;

	data->set = set;
	data->private = private;
	refcount_set(&data->refs, 1);

	/*
	 * Custom allocator needs allocator data to perform platform specific
	 * operations.
	 */
	spin_lock(&ioasid_allocator_lock);
	adata = active_allocator->flags & IOASID_ALLOCATOR_CUSTOM ? active_allocator->ops->pdata : data;
	id = active_allocator->ops->alloc(min, max, adata);
	if (id == INVALID_IOASID) {
		pr_err("Failed ASID allocation %lu\n", active_allocator->flags);
		goto exit_free;
	}

	/*
	 * For a custom allocator the framework keeps its own xa entry at
	 * exactly @id (XA_LIMIT(id, id)) so ioasid_find()/ioasid_put() work
	 * uniformly; on failure the just-allocated ID is handed back.
	 */
	if ((active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) &&
	    xa_alloc(&active_allocator->xa, &id, data, XA_LIMIT(id, id), GFP_ATOMIC)) {
		/* Custom allocator needs framework to store and track allocation results */
		pr_err("Failed to alloc ioasid from %d\n", id);
		active_allocator->ops->free(id, active_allocator->ops->pdata);
		goto exit_free;
	}
	data->id = id;

	spin_unlock(&ioasid_allocator_lock);
	return id;
exit_free:
	spin_unlock(&ioasid_allocator_lock);
	kfree(data);
	return INVALID_IOASID;
}
EXPORT_SYMBOL_GPL(ioasid_alloc);
| 349 | |
| 350 | /** |
Jean-Philippe Brucker | cb4789b | 2020-11-06 16:50:47 +0100 | [diff] [blame] | 351 | * ioasid_get - obtain a reference to the IOASID |
John Garry | 30209b9 | 2022-01-28 18:44:33 +0800 | [diff] [blame] | 352 | * @ioasid: the ID to get |
Jean-Philippe Brucker | fa83433 | 2019-10-02 12:42:41 -0700 | [diff] [blame] | 353 | */ |
Jean-Philippe Brucker | cb4789b | 2020-11-06 16:50:47 +0100 | [diff] [blame] | 354 | void ioasid_get(ioasid_t ioasid) |
Jean-Philippe Brucker | fa83433 | 2019-10-02 12:42:41 -0700 | [diff] [blame] | 355 | { |
| 356 | struct ioasid_data *ioasid_data; |
| 357 | |
Jacob Pan | e5c0bd7 | 2019-10-02 12:42:42 -0700 | [diff] [blame] | 358 | spin_lock(&ioasid_allocator_lock); |
| 359 | ioasid_data = xa_load(&active_allocator->xa, ioasid); |
Jean-Philippe Brucker | cb4789b | 2020-11-06 16:50:47 +0100 | [diff] [blame] | 360 | if (ioasid_data) |
| 361 | refcount_inc(&ioasid_data->refs); |
| 362 | else |
| 363 | WARN_ON(1); |
| 364 | spin_unlock(&ioasid_allocator_lock); |
| 365 | } |
| 366 | EXPORT_SYMBOL_GPL(ioasid_get); |
| 367 | |
/**
 * ioasid_put - Release a reference to an ioasid
 * @ioasid: the ID to remove
 *
 * Put a reference to the IOASID, free it when the number of references drops to
 * zero.
 *
 * Return: %true if the IOASID was freed, %false otherwise.
 */
bool ioasid_put(ioasid_t ioasid)
{
	bool free = false;
	struct ioasid_data *ioasid_data;

	spin_lock(&ioasid_allocator_lock);
	ioasid_data = xa_load(&active_allocator->xa, ioasid);
	if (!ioasid_data) {
		pr_err("Trying to free unknown IOASID %u\n", ioasid);
		goto exit_unlock;
	}

	free = refcount_dec_and_test(&ioasid_data->refs);
	if (!free)
		goto exit_unlock;

	/*
	 * For the default allocator, ops->free (default_free) both erases
	 * the xa entry and kfree_rcu()s ioasid_data, so the branch below
	 * is skipped.
	 */
	active_allocator->ops->free(ioasid, active_allocator->ops->pdata);
	/* Custom allocator needs additional steps to free the xa element */
	if (active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) {
		ioasid_data = xa_erase(&active_allocator->xa, ioasid);
		kfree_rcu(ioasid_data, rcu);
	}

exit_unlock:
	spin_unlock(&ioasid_allocator_lock);
	return free;
}
EXPORT_SYMBOL_GPL(ioasid_put);
Jean-Philippe Brucker | fa83433 | 2019-10-02 12:42:41 -0700 | [diff] [blame] | 405 | |
/**
 * ioasid_find - Find IOASID data
 * @set: the IOASID set
 * @ioasid: the IOASID to find
 * @getter: function to call on the found object
 *
 * The optional getter function allows to take a reference to the found object
 * under the rcu lock. The function can also check if the object is still valid:
 * if @getter returns false, then the object is invalid and NULL is returned.
 *
 * If the IOASID exists, return the private pointer passed to ioasid_alloc.
 * Private data can be NULL if not set. Return an error if the IOASID is not
 * found, or if @set is not NULL and the IOASID does not belong to the set.
 */
void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid,
		  bool (*getter)(void *))
{
	void *priv;
	struct ioasid_data *ioasid_data;
	struct ioasid_allocator_data *idata;

	/*
	 * Lock-free read side: the active allocator and the private data
	 * are both published with rcu_assign_pointer() elsewhere, so an
	 * RCU read lock is all that is needed here.
	 */
	rcu_read_lock();
	idata = rcu_dereference(active_allocator);
	ioasid_data = xa_load(&idata->xa, ioasid);
	if (!ioasid_data) {
		priv = ERR_PTR(-ENOENT);
		goto unlock;
	}
	if (set && ioasid_data->set != set) {
		/* data found but does not belong to the set */
		priv = ERR_PTR(-EACCES);
		goto unlock;
	}
	/* Now IOASID and its set is verified, we can return the private data */
	priv = rcu_dereference(ioasid_data->private);
	if (getter && !getter(priv))
		priv = NULL;
unlock:
	rcu_read_unlock();

	return priv;
}
EXPORT_SYMBOL_GPL(ioasid_find);
| 449 | |
/* Module metadata */
MODULE_AUTHOR("Jean-Philippe Brucker <jean-philippe.brucker@arm.com>");
MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@linux.intel.com>");
MODULE_DESCRIPTION("IO Address Space ID (IOASID) allocator");
MODULE_LICENSE("GPL");