Alex Williamson | ec53500 | 2013-10-30 11:02:17 -0600 | [diff] [blame] | 1 | /* |
| 2 | * VFIO-KVM bridge pseudo device |
| 3 | * |
| 4 | * Copyright (C) 2013 Red Hat, Inc. All rights reserved. |
| 5 | * Author: Alex Williamson <alex.williamson@redhat.com> |
| 6 | * |
| 7 | * This program is free software; you can redistribute it and/or modify |
| 8 | * it under the terms of the GNU General Public License version 2 as |
| 9 | * published by the Free Software Foundation. |
| 10 | */ |
| 11 | |
| 12 | #include <linux/errno.h> |
| 13 | #include <linux/file.h> |
| 14 | #include <linux/kvm_host.h> |
| 15 | #include <linux/list.h> |
| 16 | #include <linux/module.h> |
| 17 | #include <linux/mutex.h> |
| 18 | #include <linux/slab.h> |
| 19 | #include <linux/uaccess.h> |
| 20 | #include <linux/vfio.h> |
Paolo Bonzini | 3c3c29f | 2014-09-24 13:02:46 +0200 | [diff] [blame] | 21 | #include "vfio.h" |
Alex Williamson | ec53500 | 2013-10-30 11:02:17 -0600 | [diff] [blame] | 22 | |
Alexey Kardashevskiy | 121f80b | 2017-03-22 15:21:56 +1100 | [diff] [blame] | 23 | #ifdef CONFIG_SPAPR_TCE_IOMMU |
| 24 | #include <asm/kvm_ppc.h> |
| 25 | #endif |
| 26 | |
/* One vfio group tracked by the pseudo device; linked into kvm_vfio::group_list. */
struct kvm_vfio_group {
	struct list_head node;
	/* reference obtained via vfio_group_get_external_user() */
	struct vfio_group *vfio_group;
};

/* Per-device state for the KVM-VFIO bridge device (stored in dev->private). */
struct kvm_vfio {
	struct list_head group_list;	/* list of kvm_vfio_group entries */
	struct mutex lock;		/* protects group_list and noncoherent */
	bool noncoherent;		/* true while noncoherent DMA is registered with KVM */
};
| 37 | |
| 38 | static struct vfio_group *kvm_vfio_group_get_external_user(struct file *filep) |
| 39 | { |
| 40 | struct vfio_group *vfio_group; |
| 41 | struct vfio_group *(*fn)(struct file *); |
| 42 | |
| 43 | fn = symbol_get(vfio_group_get_external_user); |
| 44 | if (!fn) |
| 45 | return ERR_PTR(-EINVAL); |
| 46 | |
| 47 | vfio_group = fn(filep); |
| 48 | |
| 49 | symbol_put(vfio_group_get_external_user); |
| 50 | |
| 51 | return vfio_group; |
| 52 | } |
| 53 | |
| 54 | static void kvm_vfio_group_put_external_user(struct vfio_group *vfio_group) |
| 55 | { |
| 56 | void (*fn)(struct vfio_group *); |
| 57 | |
| 58 | fn = symbol_get(vfio_group_put_external_user); |
| 59 | if (!fn) |
| 60 | return; |
| 61 | |
| 62 | fn(vfio_group); |
| 63 | |
| 64 | symbol_put(vfio_group_put_external_user); |
| 65 | } |
| 66 | |
Jike Song | 2fc1bec | 2016-12-01 13:20:07 +0800 | [diff] [blame] | 67 | static void kvm_vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm) |
| 68 | { |
| 69 | void (*fn)(struct vfio_group *, struct kvm *); |
| 70 | |
| 71 | fn = symbol_get(vfio_group_set_kvm); |
| 72 | if (!fn) |
| 73 | return; |
| 74 | |
| 75 | fn(group, kvm); |
| 76 | |
| 77 | symbol_put(vfio_group_set_kvm); |
| 78 | } |
| 79 | |
Alex Williamson | 9d830d4 | 2014-02-26 11:38:40 -0700 | [diff] [blame] | 80 | static bool kvm_vfio_group_is_coherent(struct vfio_group *vfio_group) |
| 81 | { |
| 82 | long (*fn)(struct vfio_group *, unsigned long); |
| 83 | long ret; |
| 84 | |
| 85 | fn = symbol_get(vfio_external_check_extension); |
| 86 | if (!fn) |
| 87 | return false; |
| 88 | |
| 89 | ret = fn(vfio_group, VFIO_DMA_CC_IOMMU); |
| 90 | |
| 91 | symbol_put(vfio_external_check_extension); |
| 92 | |
| 93 | return ret > 0; |
| 94 | } |
| 95 | |
Alexey Kardashevskiy | 121f80b | 2017-03-22 15:21:56 +1100 | [diff] [blame] | 96 | #ifdef CONFIG_SPAPR_TCE_IOMMU |
| 97 | static int kvm_vfio_external_user_iommu_id(struct vfio_group *vfio_group) |
| 98 | { |
| 99 | int (*fn)(struct vfio_group *); |
| 100 | int ret = -EINVAL; |
| 101 | |
| 102 | fn = symbol_get(vfio_external_user_iommu_id); |
| 103 | if (!fn) |
| 104 | return ret; |
| 105 | |
| 106 | ret = fn(vfio_group); |
| 107 | |
| 108 | symbol_put(vfio_external_user_iommu_id); |
| 109 | |
| 110 | return ret; |
| 111 | } |
| 112 | |
| 113 | static struct iommu_group *kvm_vfio_group_get_iommu_group( |
| 114 | struct vfio_group *group) |
| 115 | { |
| 116 | int group_id = kvm_vfio_external_user_iommu_id(group); |
| 117 | |
| 118 | if (group_id < 0) |
| 119 | return NULL; |
| 120 | |
| 121 | return iommu_group_get_by_id(group_id); |
| 122 | } |
| 123 | |
/* Detach @vfio_group's iommu group from any sPAPR TCE tables it holds. */
static void kvm_spapr_tce_release_vfio_group(struct kvm *kvm,
					     struct vfio_group *vfio_group)
{
	struct iommu_group *iommu_grp;

	iommu_grp = kvm_vfio_group_get_iommu_group(vfio_group);
	if (WARN_ON_ONCE(!iommu_grp))
		return;

	kvm_spapr_tce_release_iommu_group(kvm, iommu_grp);
	iommu_group_put(iommu_grp);
}
| 135 | #endif |
| 136 | |
Alex Williamson | e0f0bbc | 2013-10-30 11:02:30 -0600 | [diff] [blame] | 137 | /* |
| 138 | * Groups can use the same or different IOMMU domains. If the same then |
| 139 | * adding a new group may change the coherency of groups we've previously |
| 140 | * been told about. We don't want to care about any of that so we retest |
| 141 | * each group and bail as soon as we find one that's noncoherent. This |
| 142 | * means we only ever [un]register_noncoherent_dma once for the whole device. |
| 143 | */ |
| 144 | static void kvm_vfio_update_coherency(struct kvm_device *dev) |
| 145 | { |
| 146 | struct kvm_vfio *kv = dev->private; |
| 147 | bool noncoherent = false; |
| 148 | struct kvm_vfio_group *kvg; |
| 149 | |
| 150 | mutex_lock(&kv->lock); |
| 151 | |
| 152 | list_for_each_entry(kvg, &kv->group_list, node) { |
Alex Williamson | 9d830d4 | 2014-02-26 11:38:40 -0700 | [diff] [blame] | 153 | if (!kvm_vfio_group_is_coherent(kvg->vfio_group)) { |
| 154 | noncoherent = true; |
| 155 | break; |
| 156 | } |
Alex Williamson | e0f0bbc | 2013-10-30 11:02:30 -0600 | [diff] [blame] | 157 | } |
| 158 | |
| 159 | if (noncoherent != kv->noncoherent) { |
| 160 | kv->noncoherent = noncoherent; |
| 161 | |
| 162 | if (kv->noncoherent) |
| 163 | kvm_arch_register_noncoherent_dma(dev->kvm); |
| 164 | else |
| 165 | kvm_arch_unregister_noncoherent_dma(dev->kvm); |
| 166 | } |
| 167 | |
| 168 | mutex_unlock(&kv->lock); |
| 169 | } |
| 170 | |
Alex Williamson | ec53500 | 2013-10-30 11:02:17 -0600 | [diff] [blame] | 171 | static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg) |
| 172 | { |
| 173 | struct kvm_vfio *kv = dev->private; |
| 174 | struct vfio_group *vfio_group; |
| 175 | struct kvm_vfio_group *kvg; |
Paul Bolle | e81d1ad | 2014-01-10 01:28:46 +0100 | [diff] [blame] | 176 | int32_t __user *argp = (int32_t __user *)(unsigned long)arg; |
Alex Williamson | ec53500 | 2013-10-30 11:02:17 -0600 | [diff] [blame] | 177 | struct fd f; |
| 178 | int32_t fd; |
| 179 | int ret; |
| 180 | |
| 181 | switch (attr) { |
| 182 | case KVM_DEV_VFIO_GROUP_ADD: |
Paul Bolle | e81d1ad | 2014-01-10 01:28:46 +0100 | [diff] [blame] | 183 | if (get_user(fd, argp)) |
Alex Williamson | ec53500 | 2013-10-30 11:02:17 -0600 | [diff] [blame] | 184 | return -EFAULT; |
| 185 | |
| 186 | f = fdget(fd); |
| 187 | if (!f.file) |
| 188 | return -EBADF; |
| 189 | |
| 190 | vfio_group = kvm_vfio_group_get_external_user(f.file); |
| 191 | fdput(f); |
| 192 | |
| 193 | if (IS_ERR(vfio_group)) |
| 194 | return PTR_ERR(vfio_group); |
| 195 | |
| 196 | mutex_lock(&kv->lock); |
| 197 | |
| 198 | list_for_each_entry(kvg, &kv->group_list, node) { |
| 199 | if (kvg->vfio_group == vfio_group) { |
| 200 | mutex_unlock(&kv->lock); |
| 201 | kvm_vfio_group_put_external_user(vfio_group); |
| 202 | return -EEXIST; |
| 203 | } |
| 204 | } |
| 205 | |
| 206 | kvg = kzalloc(sizeof(*kvg), GFP_KERNEL); |
| 207 | if (!kvg) { |
| 208 | mutex_unlock(&kv->lock); |
| 209 | kvm_vfio_group_put_external_user(vfio_group); |
| 210 | return -ENOMEM; |
| 211 | } |
| 212 | |
| 213 | list_add_tail(&kvg->node, &kv->group_list); |
| 214 | kvg->vfio_group = vfio_group; |
| 215 | |
Paolo Bonzini | 5544eb9 | 2015-07-07 15:41:58 +0200 | [diff] [blame] | 216 | kvm_arch_start_assignment(dev->kvm); |
| 217 | |
Alex Williamson | ec53500 | 2013-10-30 11:02:17 -0600 | [diff] [blame] | 218 | mutex_unlock(&kv->lock); |
| 219 | |
Jike Song | 2fc1bec | 2016-12-01 13:20:07 +0800 | [diff] [blame] | 220 | kvm_vfio_group_set_kvm(vfio_group, dev->kvm); |
| 221 | |
Alex Williamson | e0f0bbc | 2013-10-30 11:02:30 -0600 | [diff] [blame] | 222 | kvm_vfio_update_coherency(dev); |
| 223 | |
Alex Williamson | ec53500 | 2013-10-30 11:02:17 -0600 | [diff] [blame] | 224 | return 0; |
| 225 | |
| 226 | case KVM_DEV_VFIO_GROUP_DEL: |
Paul Bolle | e81d1ad | 2014-01-10 01:28:46 +0100 | [diff] [blame] | 227 | if (get_user(fd, argp)) |
Alex Williamson | ec53500 | 2013-10-30 11:02:17 -0600 | [diff] [blame] | 228 | return -EFAULT; |
| 229 | |
| 230 | f = fdget(fd); |
| 231 | if (!f.file) |
| 232 | return -EBADF; |
| 233 | |
| 234 | vfio_group = kvm_vfio_group_get_external_user(f.file); |
| 235 | fdput(f); |
| 236 | |
| 237 | if (IS_ERR(vfio_group)) |
| 238 | return PTR_ERR(vfio_group); |
| 239 | |
| 240 | ret = -ENOENT; |
| 241 | |
| 242 | mutex_lock(&kv->lock); |
| 243 | |
| 244 | list_for_each_entry(kvg, &kv->group_list, node) { |
| 245 | if (kvg->vfio_group != vfio_group) |
| 246 | continue; |
| 247 | |
| 248 | list_del(&kvg->node); |
Alex Williamson | e323369 | 2017-06-28 13:49:52 -0600 | [diff] [blame^] | 249 | kvm_arch_end_assignment(dev->kvm); |
| 250 | #ifdef CONFIG_SPAPR_TCE_IOMMU |
| 251 | kvm_spapr_tce_release_vfio_group(dev->kvm, |
| 252 | kvg->vfio_group); |
| 253 | #endif |
| 254 | kvm_vfio_group_set_kvm(kvg->vfio_group, NULL); |
Alex Williamson | ec53500 | 2013-10-30 11:02:17 -0600 | [diff] [blame] | 255 | kvm_vfio_group_put_external_user(kvg->vfio_group); |
| 256 | kfree(kvg); |
| 257 | ret = 0; |
| 258 | break; |
| 259 | } |
| 260 | |
| 261 | mutex_unlock(&kv->lock); |
| 262 | |
| 263 | kvm_vfio_group_put_external_user(vfio_group); |
| 264 | |
Alex Williamson | e0f0bbc | 2013-10-30 11:02:30 -0600 | [diff] [blame] | 265 | kvm_vfio_update_coherency(dev); |
| 266 | |
Alex Williamson | ec53500 | 2013-10-30 11:02:17 -0600 | [diff] [blame] | 267 | return ret; |
Alexey Kardashevskiy | 121f80b | 2017-03-22 15:21:56 +1100 | [diff] [blame] | 268 | |
| 269 | #ifdef CONFIG_SPAPR_TCE_IOMMU |
| 270 | case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE: { |
| 271 | struct kvm_vfio_spapr_tce param; |
| 272 | struct kvm_vfio *kv = dev->private; |
| 273 | struct vfio_group *vfio_group; |
| 274 | struct kvm_vfio_group *kvg; |
| 275 | struct fd f; |
| 276 | struct iommu_group *grp; |
| 277 | |
| 278 | if (copy_from_user(¶m, (void __user *)arg, |
| 279 | sizeof(struct kvm_vfio_spapr_tce))) |
| 280 | return -EFAULT; |
| 281 | |
| 282 | f = fdget(param.groupfd); |
| 283 | if (!f.file) |
| 284 | return -EBADF; |
| 285 | |
| 286 | vfio_group = kvm_vfio_group_get_external_user(f.file); |
| 287 | fdput(f); |
| 288 | |
| 289 | if (IS_ERR(vfio_group)) |
| 290 | return PTR_ERR(vfio_group); |
| 291 | |
| 292 | grp = kvm_vfio_group_get_iommu_group(vfio_group); |
| 293 | if (WARN_ON_ONCE(!grp)) { |
| 294 | kvm_vfio_group_put_external_user(vfio_group); |
| 295 | return -EIO; |
| 296 | } |
| 297 | |
| 298 | ret = -ENOENT; |
| 299 | |
| 300 | mutex_lock(&kv->lock); |
| 301 | |
| 302 | list_for_each_entry(kvg, &kv->group_list, node) { |
| 303 | if (kvg->vfio_group != vfio_group) |
| 304 | continue; |
| 305 | |
| 306 | ret = kvm_spapr_tce_attach_iommu_group(dev->kvm, |
| 307 | param.tablefd, grp); |
| 308 | break; |
| 309 | } |
| 310 | |
| 311 | mutex_unlock(&kv->lock); |
| 312 | |
| 313 | iommu_group_put(grp); |
| 314 | kvm_vfio_group_put_external_user(vfio_group); |
| 315 | |
| 316 | return ret; |
| 317 | } |
| 318 | #endif /* CONFIG_SPAPR_TCE_IOMMU */ |
Alex Williamson | ec53500 | 2013-10-30 11:02:17 -0600 | [diff] [blame] | 319 | } |
| 320 | |
| 321 | return -ENXIO; |
| 322 | } |
| 323 | |
| 324 | static int kvm_vfio_set_attr(struct kvm_device *dev, |
| 325 | struct kvm_device_attr *attr) |
| 326 | { |
| 327 | switch (attr->group) { |
| 328 | case KVM_DEV_VFIO_GROUP: |
| 329 | return kvm_vfio_set_group(dev, attr->attr, attr->addr); |
| 330 | } |
| 331 | |
| 332 | return -ENXIO; |
| 333 | } |
| 334 | |
| 335 | static int kvm_vfio_has_attr(struct kvm_device *dev, |
| 336 | struct kvm_device_attr *attr) |
| 337 | { |
| 338 | switch (attr->group) { |
| 339 | case KVM_DEV_VFIO_GROUP: |
| 340 | switch (attr->attr) { |
| 341 | case KVM_DEV_VFIO_GROUP_ADD: |
| 342 | case KVM_DEV_VFIO_GROUP_DEL: |
Alexey Kardashevskiy | 121f80b | 2017-03-22 15:21:56 +1100 | [diff] [blame] | 343 | #ifdef CONFIG_SPAPR_TCE_IOMMU |
| 344 | case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE: |
| 345 | #endif |
Alex Williamson | ec53500 | 2013-10-30 11:02:17 -0600 | [diff] [blame] | 346 | return 0; |
| 347 | } |
| 348 | |
| 349 | break; |
| 350 | } |
| 351 | |
| 352 | return -ENXIO; |
| 353 | } |
| 354 | |
| 355 | static void kvm_vfio_destroy(struct kvm_device *dev) |
| 356 | { |
| 357 | struct kvm_vfio *kv = dev->private; |
| 358 | struct kvm_vfio_group *kvg, *tmp; |
| 359 | |
| 360 | list_for_each_entry_safe(kvg, tmp, &kv->group_list, node) { |
Alexey Kardashevskiy | 121f80b | 2017-03-22 15:21:56 +1100 | [diff] [blame] | 361 | #ifdef CONFIG_SPAPR_TCE_IOMMU |
| 362 | kvm_spapr_tce_release_vfio_group(dev->kvm, kvg->vfio_group); |
| 363 | #endif |
Jike Song | 2fc1bec | 2016-12-01 13:20:07 +0800 | [diff] [blame] | 364 | kvm_vfio_group_set_kvm(kvg->vfio_group, NULL); |
Alex Williamson | ec53500 | 2013-10-30 11:02:17 -0600 | [diff] [blame] | 365 | kvm_vfio_group_put_external_user(kvg->vfio_group); |
| 366 | list_del(&kvg->node); |
| 367 | kfree(kvg); |
Paolo Bonzini | 5544eb9 | 2015-07-07 15:41:58 +0200 | [diff] [blame] | 368 | kvm_arch_end_assignment(dev->kvm); |
Alex Williamson | ec53500 | 2013-10-30 11:02:17 -0600 | [diff] [blame] | 369 | } |
| 370 | |
Alex Williamson | e0f0bbc | 2013-10-30 11:02:30 -0600 | [diff] [blame] | 371 | kvm_vfio_update_coherency(dev); |
| 372 | |
Alex Williamson | ec53500 | 2013-10-30 11:02:17 -0600 | [diff] [blame] | 373 | kfree(kv); |
| 374 | kfree(dev); /* alloc by kvm_ioctl_create_device, free by .destroy */ |
| 375 | } |
| 376 | |
/* Forward declaration: kvm_vfio_create() is referenced by kvm_vfio_ops below. */
static int kvm_vfio_create(struct kvm_device *dev, u32 type);

/* Ops table for the KVM-VFIO pseudo device, registered in kvm_vfio_ops_init(). */
static struct kvm_device_ops kvm_vfio_ops = {
	.name = "kvm-vfio",
	.create = kvm_vfio_create,
	.destroy = kvm_vfio_destroy,
	.set_attr = kvm_vfio_set_attr,
	.has_attr = kvm_vfio_has_attr,
};
| 386 | |
Alex Williamson | ec53500 | 2013-10-30 11:02:17 -0600 | [diff] [blame] | 387 | static int kvm_vfio_create(struct kvm_device *dev, u32 type) |
| 388 | { |
| 389 | struct kvm_device *tmp; |
| 390 | struct kvm_vfio *kv; |
| 391 | |
| 392 | /* Only one VFIO "device" per VM */ |
| 393 | list_for_each_entry(tmp, &dev->kvm->devices, vm_node) |
| 394 | if (tmp->ops == &kvm_vfio_ops) |
| 395 | return -EBUSY; |
| 396 | |
| 397 | kv = kzalloc(sizeof(*kv), GFP_KERNEL); |
| 398 | if (!kv) |
| 399 | return -ENOMEM; |
| 400 | |
| 401 | INIT_LIST_HEAD(&kv->group_list); |
| 402 | mutex_init(&kv->lock); |
| 403 | |
| 404 | dev->private = kv; |
| 405 | |
| 406 | return 0; |
| 407 | } |
| 408 | |
/* Register the KVM-VFIO pseudo device type with KVM's device framework. */
int kvm_vfio_ops_init(void)
{
	return kvm_register_device_ops(&kvm_vfio_ops, KVM_DEV_TYPE_VFIO);
}
Wanpeng Li | 571ee1b | 2014-10-09 18:30:08 +0800 | [diff] [blame] | 413 | |
/* Unregister the KVM-VFIO pseudo device type on module/KVM teardown. */
void kvm_vfio_ops_exit(void)
{
	kvm_unregister_device_ops(KVM_DEV_TYPE_VFIO);
}