/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author: Allen M. Kay <allen.m.kay@intel.com>
 * Author: Weidong Han <weidong.han@intel.com>
 * Author: Ben-Ami Yassour <benami@il.ibm.com>
 */

#include <linux/list.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/stat.h>
#include <linux/dmar.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>

static bool allow_unsafe_assigned_interrupts;
module_param_named(allow_unsafe_assigned_interrupts,
                   allow_unsafe_assigned_interrupts, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(allow_unsafe_assigned_interrupts,
 "Enable device assignment on platforms without interrupt remapping support.");

static int kvm_iommu_unmap_memslots(struct kvm *kvm);
static void kvm_iommu_put_pages(struct kvm *kvm,
                                gfn_t base_gfn, unsigned long npages);

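/*
 * Pin every 4k page in [gfn, gfn + (size >> PAGE_SHIFT)) and return the
 * pfn of the first page.  Each page gets its own reference because the
 * teardown path unmaps and unpins in 4k steps (see kvm_unpin_pages()).
 * Note that only the first gfn_to_pfn_memslot() result is checked for
 * errors; the remaining lookups are assumed to succeed alongside it.
 */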
static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
                           unsigned long size)
{
        gfn_t end_gfn;
        pfn_t pfn;

        pfn     = gfn_to_pfn_memslot(slot, gfn);
        end_gfn = gfn + (size >> PAGE_SHIFT);
        gfn    += 1;

        if (is_error_pfn(pfn))
                return pfn;

        while (gfn < end_gfn)
                gfn_to_pfn_memslot(slot, gfn++);

        return pfn;
}

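/*
 * Map a memslot's guest memory into the IOMMU domain, using the largest
 * page size that the host backing and the gfn alignment permit.  Pages
 * are pinned first so they stay resident while the device can DMA to
 * them; on failure, everything mapped so far is torn down again.
 */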
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
        gfn_t gfn, end_gfn;
        pfn_t pfn;
        int r = 0;
        struct iommu_domain *domain = kvm->arch.iommu_domain;
        int flags;

        /* check if the iommu exists and is in use */
        if (!domain)
                return 0;

        gfn     = slot->base_gfn;
        end_gfn = gfn + slot->npages;

        flags = IOMMU_READ | IOMMU_WRITE;
        if (kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY)
                flags |= IOMMU_CACHE;

        while (gfn < end_gfn) {
                unsigned long page_size;

                /* Check if already mapped */
                if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
                        gfn += 1;
                        continue;
                }

                /* Get the page size we could use to map */
                page_size = kvm_host_page_size(kvm, gfn);

                /* Make sure the page_size does not exceed the memslot */
                while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
                        page_size >>= 1;

                /* Make sure gfn is aligned to the page size we want to map */
                while ((gfn << PAGE_SHIFT) & (page_size - 1))
                        page_size >>= 1;

                /*
                 * Pin all pages we are about to map in memory. This is
                 * important because we unmap and unpin in 4kb steps later.
                 */
                pfn = kvm_pin_pages(slot, gfn, page_size);
                if (is_error_pfn(pfn)) {
                        kvm_release_pfn_clean(pfn);
                        gfn += 1;
                        continue;
                }

                /* Map into IO address space */
                r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
                              page_size, flags);
                if (r) {
                        printk(KERN_ERR "kvm_iommu_map_address: "
                               "iommu failed to map pfn=%llx\n", pfn);
                        goto unmap_pages;
                }

                gfn += page_size >> PAGE_SHIFT;
        }

        return 0;

unmap_pages:
        kvm_iommu_put_pages(kvm, slot->base_gfn, gfn);
        return r;
}

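/*
 * Map every memslot into the IOMMU domain.  The memslot array is walked
 * under srcu_read_lock() so it cannot be freed underneath us.
 */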
static int kvm_iommu_map_memslots(struct kvm *kvm)
{
        int idx, r = 0;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        idx = srcu_read_lock(&kvm->srcu);
        slots = kvm_memslots(kvm);

        kvm_for_each_memslot(memslot, slots) {
                r = kvm_iommu_map_pages(kvm, memslot);
                if (r)
                        break;
        }
        srcu_read_unlock(&kvm->srcu, idx);

        return r;
}

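/*
 * Attach an assigned PCI device to the VM's IOMMU domain.  If the
 * domain turns out to support cache coherency, remap all guest memory
 * so that the mappings pick up the IOMMU_CACHE flag.
 */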
int kvm_assign_device(struct kvm *kvm,
                      struct kvm_assigned_dev_kernel *assigned_dev)
{
        struct pci_dev *pdev = NULL;
        struct iommu_domain *domain = kvm->arch.iommu_domain;
        int r, last_flags;

        /* check if the iommu exists and is in use */
        if (!domain)
                return 0;

        pdev = assigned_dev->dev;
        if (pdev == NULL)
                return -ENODEV;

        r = iommu_attach_device(domain, &pdev->dev);
        if (r) {
                printk(KERN_ERR "assign device %x:%x:%x.%x failed\n",
                        pci_domain_nr(pdev->bus),
                        pdev->bus->number,
                        PCI_SLOT(pdev->devfn),
                        PCI_FUNC(pdev->devfn));
                return r;
        }

        last_flags = kvm->arch.iommu_flags;
        if (iommu_domain_has_cap(kvm->arch.iommu_domain,
                                 IOMMU_CAP_CACHE_COHERENCY))
                kvm->arch.iommu_flags |= KVM_IOMMU_CACHE_COHERENCY;

        /* Check if we need to update the IOMMU page table for guest memory */
        if ((last_flags ^ kvm->arch.iommu_flags) ==
                        KVM_IOMMU_CACHE_COHERENCY) {
                kvm_iommu_unmap_memslots(kvm);
                r = kvm_iommu_map_memslots(kvm);
                if (r)
                        goto out_unmap;
        }

        pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;

        printk(KERN_DEBUG "assign device %x:%x:%x.%x\n",
                assigned_dev->host_segnr,
                assigned_dev->host_busnr,
                PCI_SLOT(assigned_dev->host_devfn),
                PCI_FUNC(assigned_dev->host_devfn));

        return 0;
out_unmap:
        kvm_iommu_unmap_memslots(kvm);
        return r;
}

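/* Detach an assigned PCI device from the VM's IOMMU domain. */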
int kvm_deassign_device(struct kvm *kvm,
                        struct kvm_assigned_dev_kernel *assigned_dev)
{
        struct iommu_domain *domain = kvm->arch.iommu_domain;
        struct pci_dev *pdev = NULL;

        /* check if the iommu exists and is in use */
        if (!domain)
                return 0;

        pdev = assigned_dev->dev;
        if (pdev == NULL)
                return -ENODEV;

        iommu_detach_device(domain, &pdev->dev);

        pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;

        printk(KERN_DEBUG "deassign device %x:%x:%x.%x\n",
                assigned_dev->host_segnr,
                assigned_dev->host_busnr,
                PCI_SLOT(assigned_dev->host_devfn),
                PCI_FUNC(assigned_dev->host_devfn));

        return 0;
}

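/*
 * Allocate the per-VM IOMMU domain and map all guest memory into it.
 * Unless the allow_unsafe_assigned_interrupts module parameter is set,
 * the IOMMU must support interrupt remapping, presumably because an
 * assigned device could otherwise be used to inject arbitrary
 * interrupts into the host; hence the -EPERM below.
 */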
int kvm_iommu_map_guest(struct kvm *kvm)
{
        int r;

        if (!iommu_present(&pci_bus_type)) {
                printk(KERN_ERR "%s: iommu not found\n", __func__);
                return -ENODEV;
        }

        mutex_lock(&kvm->slots_lock);

        kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type);
        if (!kvm->arch.iommu_domain) {
                r = -ENOMEM;
                goto out_unlock;
        }

        if (!allow_unsafe_assigned_interrupts &&
            !iommu_domain_has_cap(kvm->arch.iommu_domain,
                                  IOMMU_CAP_INTR_REMAP)) {
                printk(KERN_WARNING "%s: No interrupt remapping support,"
                       " disallowing device assignment."
                       " Re-enable with \"allow_unsafe_assigned_interrupts=1\""
                       " module option.\n", __func__);
                iommu_domain_free(kvm->arch.iommu_domain);
                kvm->arch.iommu_domain = NULL;
                r = -EPERM;
                goto out_unlock;
        }

        r = kvm_iommu_map_memslots(kvm);
        if (r)
                kvm_iommu_unmap_memslots(kvm);

out_unlock:
        mutex_unlock(&kvm->slots_lock);
        return r;
}

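/* Drop one page reference for each of the npages pages starting at pfn. */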
static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
{
        unsigned long i;

        for (i = 0; i < npages; ++i)
                kvm_release_pfn_clean(pfn + i);
}

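/*
 * Walk [base_gfn, base_gfn + npages), unmapping and unpinning whatever
 * is currently mapped.  iommu_unmap() is asked for 4k at a time but may
 * tear down a larger page; its return value tells us how many pfns to
 * release.
 */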
static void kvm_iommu_put_pages(struct kvm *kvm,
                                gfn_t base_gfn, unsigned long npages)
{
        struct iommu_domain *domain;
        gfn_t end_gfn, gfn;
        pfn_t pfn;
        u64 phys;

        domain  = kvm->arch.iommu_domain;
        end_gfn = base_gfn + npages;
        gfn     = base_gfn;

        /* check if the iommu exists and is in use */
        if (!domain)
                return;

        while (gfn < end_gfn) {
                unsigned long unmap_pages;
                size_t size;

                /* Get the physical address */
                phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));

                /* Nothing mapped here, move on */
                if (!phys) {
                        gfn++;
                        continue;
                }

                pfn = phys >> PAGE_SHIFT;

                /* Unmap the address from the IO address space */
                size        = iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE);
                unmap_pages = 1ULL << get_order(size);

                /* Unpin all pages we just unmapped so we do not leak memory */
                kvm_unpin_pages(kvm, pfn, unmap_pages);

                gfn += unmap_pages;
        }
}

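/* Undo kvm_iommu_map_pages() for a single memslot. */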
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
        kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages);
}

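/* Unmap every memslot, again walking the slots under srcu_read_lock(). */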
static int kvm_iommu_unmap_memslots(struct kvm *kvm)
{
        int idx;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        idx = srcu_read_lock(&kvm->srcu);
        slots = kvm_memslots(kvm);

        kvm_for_each_memslot(memslot, slots)
                kvm_iommu_unmap_pages(kvm, memslot);

        srcu_read_unlock(&kvm->srcu, idx);

        return 0;
}

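/*
 * Tear down the VM's IOMMU domain: unmap and unpin all guest memory and
 * free the domain.  slots_lock serializes this against a concurrent
 * kvm_iommu_map_guest().
 */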
int kvm_iommu_unmap_guest(struct kvm *kvm)
{
        struct iommu_domain *domain = kvm->arch.iommu_domain;

        /* check if the iommu exists and is in use */
        if (!domain)
                return 0;

        mutex_lock(&kvm->slots_lock);
        kvm_iommu_unmap_memslots(kvm);
        kvm->arch.iommu_domain = NULL;
        mutex_unlock(&kvm->slots_lock);

        iommu_domain_free(domain);
        return 0;
}