// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2015 Intel Corporation.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/intel-iommu.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/intel-svm.h>
#include <linux/rculist.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/dmar.h>
#include <linux/interrupt.h>
#include <linux/mm_types.h>
#include <linux/ioasid.h>
#include <asm/page.h>

#include "intel-pasid.h"

static irqreturn_t prq_event_thread(int irq, void *d);

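/*
 * Allocation order of the page request queue: order 0 gives a single
 * 4KiB page.  The same value is OR'd into DMAR_PQA_REG below; per the
 * VT-d spec (an assumption, not stated here), the low bits of that
 * register encode the queue size as a power-of-two number of pages.
 */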
#define PRQ_ORDER 0

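/*
 * Set up the page request queue (PRQ) for this IOMMU: allocate the queue
 * pages, wire up a threaded IRQ for the page request event, and program
 * the head/tail/address registers so that recoverable faults from
 * SVM-capable devices can be reported and serviced by prq_event_thread().
 */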
int intel_svm_enable_prq(struct intel_iommu *iommu)
{
	struct page *pages;
	int irq, ret;

	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
	if (!pages) {
		pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
			iommu->name);
		return -ENOMEM;
	}
	iommu->prq = page_address(pages);

	irq = dmar_alloc_hwirq(DMAR_UNITS_SUPPORTED + iommu->seq_id, iommu->node, iommu);
	if (irq <= 0) {
		pr_err("IOMMU: %s: Failed to create IRQ vector for page request queue\n",
		       iommu->name);
		ret = -EINVAL;
	err:
		free_pages((unsigned long)iommu->prq, PRQ_ORDER);
		iommu->prq = NULL;
		return ret;
	}
	iommu->pr_irq = irq;

	snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id);

	ret = request_threaded_irq(irq, NULL, prq_event_thread, IRQF_ONESHOT,
				   iommu->prq_name, iommu);
	if (ret) {
		pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n",
		       iommu->name);
		dmar_free_hwirq(irq);
		iommu->pr_irq = 0;
		goto err;
	}
	dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER);

	return 0;
}

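/*
 * Tear down the page request queue: disable it in hardware, free the IRQ
 * if one was requested, and release the queue pages.
 */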
int intel_svm_finish_prq(struct intel_iommu *iommu)
{
	dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL);

	if (iommu->pr_irq) {
		free_irq(iommu->pr_irq, iommu);
		dmar_free_hwirq(iommu->pr_irq);
		iommu->pr_irq = 0;
	}

	free_pages((unsigned long)iommu->prq, PRQ_ORDER);
	iommu->prq = NULL;

	return 0;
}

static inline bool intel_svm_capable(struct intel_iommu *iommu)
{
	return iommu->flags & VTD_FLAG_SVM_CAPABLE;
}

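/*
 * Mark the IOMMU as SVM-capable only if its first-level translation can
 * mirror what the CPU MMU does: 1GB pages if the CPU uses them, and
 * 5-level paging if the CPU runs with LA57 enabled.  Otherwise shared
 * virtual memory stays disabled on this unit.
 */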
void intel_svm_check(struct intel_iommu *iommu)
{
	if (!pasid_supported(iommu))
		return;

	if (cpu_feature_enabled(X86_FEATURE_GBPAGES) &&
	    !cap_fl1gp_support(iommu->cap)) {
		pr_err("%s SVM disabled, incompatible 1GB page capability\n",
		       iommu->name);
		return;
	}

	if (cpu_feature_enabled(X86_FEATURE_LA57) &&
	    !cap_5lp_support(iommu->cap)) {
		pr_err("%s SVM disabled, incompatible paging mode\n",
		       iommu->name);
		return;
	}

	iommu->flags |= VTD_FLAG_SVM_CAPABLE;
}

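/*
 * Invalidate the IOTLB for one device in an SVM binding: issue a
 * PASID-based IOTLB invalidation (for the whole PASID when pages == -1,
 * or a page-selective one otherwise), followed by a device-TLB
 * invalidation if the device has ATS enabled.
 */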
static void intel_flush_svm_range_dev(struct intel_svm *svm, struct intel_svm_dev *sdev,
				      unsigned long address, unsigned long pages, int ih)
{
	struct qi_desc desc;

	if (pages == -1) {
		desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
			QI_EIOTLB_DID(sdev->did) |
			QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
			QI_EIOTLB_TYPE;
		desc.qw1 = 0;
	} else {
		int mask = ilog2(__roundup_pow_of_two(pages));

		desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
			QI_EIOTLB_DID(sdev->did) |
			QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
			QI_EIOTLB_TYPE;
		desc.qw1 = QI_EIOTLB_ADDR(address) |
			QI_EIOTLB_IH(ih) |
			QI_EIOTLB_AM(mask);
	}
	desc.qw2 = 0;
	desc.qw3 = 0;
	qi_submit_sync(&desc, svm->iommu);

	if (sdev->dev_iotlb) {
		desc.qw0 = QI_DEV_EIOTLB_PASID(svm->pasid) |
			QI_DEV_EIOTLB_SID(sdev->sid) |
			QI_DEV_EIOTLB_QDEP(sdev->qdep) |
			QI_DEIOTLB_TYPE;
		if (pages == -1) {
			desc.qw1 = QI_DEV_EIOTLB_ADDR(-1ULL >> 1) |
				QI_DEV_EIOTLB_SIZE;
		} else if (pages > 1) {
			/* The least significant zero bit indicates the size. So,
			 * for example, an "address" value of 0x12345f000 will
			 * flush from 0x123440000 to 0x12347ffff (256KiB). */
			unsigned long last = address + ((unsigned long)(pages - 1) << VTD_PAGE_SHIFT);
			unsigned long mask = __rounddown_pow_of_two(address ^ last);

			desc.qw1 = QI_DEV_EIOTLB_ADDR((address & ~mask) |
					(mask - 1)) | QI_DEV_EIOTLB_SIZE;
		} else {
			desc.qw1 = QI_DEV_EIOTLB_ADDR(address);
		}
		desc.qw2 = 0;
		desc.qw3 = 0;
		qi_submit_sync(&desc, svm->iommu);
	}
}

static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
				  unsigned long pages, int ih)
{
	struct intel_svm_dev *sdev;

	rcu_read_lock();
	list_for_each_entry_rcu(sdev, &svm->devs, list)
		intel_flush_svm_range_dev(svm, sdev, address, pages, ih);
	rcu_read_unlock();
}

/* Pages have been freed at this point */
static void intel_invalidate_range(struct mmu_notifier *mn,
				   struct mm_struct *mm,
				   unsigned long start, unsigned long end)
{
	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);

	intel_flush_svm_range(svm, start,
			      (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
}

static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
	struct intel_svm_dev *sdev;

	/* This might end up being called from exit_mmap(), *before* the page
	 * tables are cleared. And __mmu_notifier_release() will delete us from
	 * the list of notifiers so that our invalidate_range() callback doesn't
	 * get called when the page tables are cleared. So we need to protect
	 * against hardware accessing those page tables.
	 *
	 * We do it by clearing the entry in the PASID table and then flushing
	 * the IOTLB and the PASID table caches. This might upset hardware;
	 * perhaps we'll want to point the PASID to a dummy PGD (like the zero
	 * page) so that we end up taking a fault that the hardware really
	 * *has* to handle gracefully without affecting other processes.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(sdev, &svm->devs, list) {
		intel_pasid_tear_down_entry(svm->iommu, sdev->dev, svm->pasid);
		intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
	}
	rcu_read_unlock();

}

static const struct mmu_notifier_ops intel_mmuops = {
	.release = intel_mm_release,
	.invalidate_range = intel_invalidate_range,
};

static DEFINE_MUTEX(pasid_mutex);
static LIST_HEAD(global_svm_list);

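/*
 * Iterate over the devices bound to @svm, skipping every entry whose
 * struct device does not match @d.  Used to look up the intel_svm_dev
 * for a specific device while holding pasid_mutex.
 */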
#define for_each_svm_dev(sdev, svm, d)			\
	list_for_each_entry((sdev), &(svm)->devs, list)	\
		if ((d) != (sdev)->dev) {} else

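/*
 * Bind the current process's address space (or init_mm when
 * SVM_FLAG_SUPERVISOR_MODE is requested) to a PASID on @dev.  An existing
 * bond for the same mm is reused unless SVM_FLAG_PRIVATE_PASID asks for a
 * private one; otherwise a new PASID is allocated, the first-level PASID
 * table entry is set up, and an mmu_notifier is registered so the IOTLB is
 * kept in sync with the CPU page tables.  Takes pasid_mutex internally.
 */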
int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ops *ops)
{
	struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
	struct device_domain_info *info;
	struct intel_svm_dev *sdev;
	struct intel_svm *svm = NULL;
	struct mm_struct *mm = NULL;
	int pasid_max;
	int ret;

	if (!iommu || dmar_disabled)
		return -EINVAL;

	if (!intel_svm_capable(iommu))
		return -ENOTSUPP;

	if (dev_is_pci(dev)) {
		pasid_max = pci_max_pasids(to_pci_dev(dev));
		if (pasid_max < 0)
			return -EINVAL;
	} else
		pasid_max = 1 << 20;

	if (flags & SVM_FLAG_SUPERVISOR_MODE) {
		if (!ecap_srs(iommu->ecap))
			return -EINVAL;
	} else if (pasid) {
		mm = get_task_mm(current);
		BUG_ON(!mm);
	}

	mutex_lock(&pasid_mutex);
	if (pasid && !(flags & SVM_FLAG_PRIVATE_PASID)) {
		struct intel_svm *t;

		list_for_each_entry(t, &global_svm_list, list) {
			if (t->mm != mm || (t->flags & SVM_FLAG_PRIVATE_PASID))
				continue;

			svm = t;
			if (svm->pasid >= pasid_max) {
				dev_warn(dev,
					 "Limited PASID width. Cannot use existing PASID %d\n",
					 svm->pasid);
				ret = -ENOSPC;
				goto out;
			}

			/* Find the matching device in svm list */
			for_each_svm_dev(sdev, svm, dev) {
				if (sdev->ops != ops) {
					ret = -EBUSY;
					goto out;
				}
				sdev->users++;
				goto success;
			}

			break;
		}
	}

	sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
	if (!sdev) {
		ret = -ENOMEM;
		goto out;
	}
	sdev->dev = dev;

	ret = intel_iommu_enable_pasid(iommu, dev);
	if (ret || !pasid) {
		/* If they don't actually want to assign a PASID, this is
		 * just an enabling check/preparation. */
		kfree(sdev);
		goto out;
	}

	info = dev->archdata.iommu;
	if (!info || !info->pasid_supported) {
		kfree(sdev);
		goto out;
	}

	sdev->did = FLPT_DEFAULT_DID;
	sdev->sid = PCI_DEVID(info->bus, info->devfn);
	if (info->ats_enabled) {
		sdev->dev_iotlb = 1;
		sdev->qdep = info->ats_qdep;
		if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
			sdev->qdep = 0;
	}

	/* Finish the setup now we know we're keeping it */
	sdev->users = 1;
	sdev->ops = ops;
	init_rcu_head(&sdev->rcu);

	if (!svm) {
		svm = kzalloc(sizeof(*svm), GFP_KERNEL);
		if (!svm) {
			ret = -ENOMEM;
			kfree(sdev);
			goto out;
		}
		svm->iommu = iommu;

		if (pasid_max > intel_pasid_max_id)
			pasid_max = intel_pasid_max_id;

		/* Do not use PASID 0, reserved for RID to PASID */
		svm->pasid = ioasid_alloc(NULL, PASID_MIN,
					  pasid_max - 1, svm);
		if (svm->pasid == INVALID_IOASID) {
			kfree(svm);
			kfree(sdev);
			ret = -ENOSPC;
			goto out;
		}
		svm->notifier.ops = &intel_mmuops;
		svm->mm = mm;
		svm->flags = flags;
		INIT_LIST_HEAD_RCU(&svm->devs);
		INIT_LIST_HEAD(&svm->list);
		ret = -ENOMEM;
		if (mm) {
			ret = mmu_notifier_register(&svm->notifier, mm);
			if (ret) {
				ioasid_free(svm->pasid);
				kfree(svm);
				kfree(sdev);
				goto out;
			}
		}

		spin_lock(&iommu->lock);
		ret = intel_pasid_setup_first_level(iommu, dev,
				mm ? mm->pgd : init_mm.pgd,
				svm->pasid, FLPT_DEFAULT_DID,
				(mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
				(cpu_feature_enabled(X86_FEATURE_LA57) ?
				 PASID_FLAG_FL5LP : 0));
		spin_unlock(&iommu->lock);
		if (ret) {
			if (mm)
				mmu_notifier_unregister(&svm->notifier, mm);
			ioasid_free(svm->pasid);
			kfree(svm);
			kfree(sdev);
			goto out;
		}

		list_add_tail(&svm->list, &global_svm_list);
	} else {
		/*
		 * Binding a new device with existing PASID, need to setup
		 * the PASID entry.
		 */
		spin_lock(&iommu->lock);
		ret = intel_pasid_setup_first_level(iommu, dev,
				mm ? mm->pgd : init_mm.pgd,
				svm->pasid, FLPT_DEFAULT_DID,
				(mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
				(cpu_feature_enabled(X86_FEATURE_LA57) ?
				 PASID_FLAG_FL5LP : 0));
		spin_unlock(&iommu->lock);
		if (ret) {
			kfree(sdev);
			goto out;
		}
	}
	list_add_rcu(&sdev->list, &svm->devs);

 success:
	*pasid = svm->pasid;
	ret = 0;
 out:
	mutex_unlock(&pasid_mutex);
	if (mm)
		mmput(mm);
	return ret;
}
EXPORT_SYMBOL_GPL(intel_svm_bind_mm);
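/*
 * Illustrative only: a hypothetical device driver would bind the current
 * process, program the returned PASID into its hardware context, and
 * unbind once no further page faults can be outstanding.  (Sketch, not
 * taken from an in-tree caller; error handling trimmed.)
 *
 *	int pasid, ret;
 *
 *	ret = intel_svm_bind_mm(dev, &pasid, 0, NULL);
 *	if (ret)
 *		return ret;
 *	// ... program 'pasid' into the device, submit and drain work ...
 *	intel_svm_unbind_mm(dev, pasid);
 */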

int intel_svm_unbind_mm(struct device *dev, int pasid)
{
	struct intel_svm_dev *sdev;
	struct intel_iommu *iommu;
	struct intel_svm *svm;
	int ret = -EINVAL;

	mutex_lock(&pasid_mutex);
	iommu = intel_svm_device_to_iommu(dev);
	if (!iommu)
		goto out;

	svm = ioasid_find(NULL, pasid, NULL);
	if (!svm)
		goto out;

	if (IS_ERR(svm)) {
		ret = PTR_ERR(svm);
		goto out;
	}

	for_each_svm_dev(sdev, svm, dev) {
		ret = 0;
		sdev->users--;
		if (!sdev->users) {
			list_del_rcu(&sdev->list);
			/* Flush the PASID cache and IOTLB for this device.
			 * Note that we do depend on the hardware *not* using
			 * the PASID any more. Just as we depend on other
			 * devices never using PASIDs that they have no right
			 * to use. We have a *shared* PASID table, because it's
			 * large and has to be physically contiguous. So it's
			 * hard to be as defensive as we might like. */
			intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
			intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
			kfree_rcu(sdev, rcu);

			if (list_empty(&svm->devs)) {
				ioasid_free(svm->pasid);
				if (svm->mm)
					mmu_notifier_unregister(&svm->notifier, svm->mm);
				list_del(&svm->list);
				/* We mandate that no page faults may be outstanding
				 * for the PASID when intel_svm_unbind_mm() is called.
				 * If that is not obeyed, subtle errors will happen.
				 * Let's make them less subtle... */
				memset(svm, 0x6b, sizeof(*svm));
				kfree(svm);
			}
		}
		break;
	}
 out:
	mutex_unlock(&pasid_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(intel_svm_unbind_mm);

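/*
 * Report whether @pasid is still usable on @dev: returns 1 if the PASID is
 * bound to a live mm (or to init_mm for supervisor bindings), 0 if the mm
 * has already gone away, and a negative errno if the PASID is not bound.
 */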
int intel_svm_is_pasid_valid(struct device *dev, int pasid)
{
	struct intel_iommu *iommu;
	struct intel_svm *svm;
	int ret = -EINVAL;

	mutex_lock(&pasid_mutex);
	iommu = intel_svm_device_to_iommu(dev);
	if (!iommu)
		goto out;

	svm = ioasid_find(NULL, pasid, NULL);
	if (!svm)
		goto out;

	if (IS_ERR(svm)) {
		ret = PTR_ERR(svm);
		goto out;
	}
	/* init_mm is used in this case */
	if (!svm->mm)
		ret = 1;
	else if (atomic_read(&svm->mm->mm_users) > 0)
		ret = 1;
	else
		ret = 0;

 out:
	mutex_unlock(&pasid_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(intel_svm_is_pasid_valid);

/* Page request queue descriptor */
struct page_req_dsc {
	union {
		struct {
			u64 type:8;
			u64 pasid_present:1;
			u64 priv_data_present:1;
			u64 rsvd:6;
			u64 rid:16;
			u64 pasid:20;
			u64 exe_req:1;
			u64 pm_req:1;
			u64 rsvd2:10;
		};
		u64 qw_0;
	};
	union {
		struct {
			u64 rd_req:1;
			u64 wr_req:1;
			u64 lpig:1;
			u64 prg_index:9;
			u64 addr:52;
		};
		u64 qw_1;
	};
	u64 priv_data[2];
};

#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x10)

static bool access_error(struct vm_area_struct *vma, struct page_req_dsc *req)
{
	unsigned long requested = 0;

	if (req->exe_req)
		requested |= VM_EXEC;

	if (req->rd_req)
		requested |= VM_READ;

	if (req->wr_req)
		requested |= VM_WRITE;

	return (requested & ~vma->vm_flags) != 0;
}

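/*
 * Canonical x86-64 addresses have bits 63:(__VIRTUAL_MASK_SHIFT + 1) equal
 * to bit __VIRTUAL_MASK_SHIFT.  Shifting the value up so that bit lands in
 * the sign position and arithmetic-shifting it back reproduces exactly
 * that sign extension, so the address is canonical iff the round trip
 * leaves it unchanged.
 */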
static bool is_canonical_address(u64 addr)
{
	int shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
	long saddr = (long) addr;

	return (((saddr << shift) >> shift) == saddr);
}

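/*
 * Threaded handler for the page request queue interrupt.  Walk the ring
 * from head to tail, look up the faulting PASID, resolve the fault with
 * handle_mm_fault() against the bound mm, invoke any per-device fault
 * callback, and send a page group response when the request requires one.
 */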
static irqreturn_t prq_event_thread(int irq, void *d)
{
	struct intel_iommu *iommu = d;
	struct intel_svm *svm = NULL;
	int head, tail, handled = 0;

	/* Clear PPR bit before reading head/tail registers, to
	 * ensure that we get a new interrupt if needed. */
	writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);

	tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
	head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
	while (head != tail) {
		struct intel_svm_dev *sdev;
		struct vm_area_struct *vma;
		struct page_req_dsc *req;
		struct qi_desc resp;
		int result;
		vm_fault_t ret;
		u64 address;

		handled = 1;

		req = &iommu->prq[head / sizeof(*req)];

		result = QI_RESP_FAILURE;
		address = (u64)req->addr << VTD_PAGE_SHIFT;
		if (!req->pasid_present) {
			pr_err("%s: Page request without PASID: %08llx %08llx\n",
			       iommu->name, ((unsigned long long *)req)[0],
			       ((unsigned long long *)req)[1]);
			goto no_pasid;
		}

		if (!svm || svm->pasid != req->pasid) {
			rcu_read_lock();
			svm = ioasid_find(NULL, req->pasid, NULL);
			/* It *can't* go away, because the driver is not permitted
			 * to unbind the mm while any page faults are outstanding.
			 * So we only need RCU to protect the internal idr code. */
			rcu_read_unlock();
			if (IS_ERR_OR_NULL(svm)) {
				pr_err("%s: Page request for invalid PASID %d: %08llx %08llx\n",
				       iommu->name, req->pasid, ((unsigned long long *)req)[0],
				       ((unsigned long long *)req)[1]);
				goto no_pasid;
			}
		}

		result = QI_RESP_INVALID;
		/* Since we're using init_mm.pgd directly, we should never take
		 * any faults on kernel addresses. */
		if (!svm->mm)
			goto bad_req;

		/* If address is not canonical, return invalid response */
		if (!is_canonical_address(address))
			goto bad_req;

		/* If the mm is already defunct, don't handle faults. */
		if (!mmget_not_zero(svm->mm))
			goto bad_req;

		down_read(&svm->mm->mmap_sem);
		vma = find_extend_vma(svm->mm, address);
		if (!vma || address < vma->vm_start)
			goto invalid;

		if (access_error(vma, req))
			goto invalid;

		ret = handle_mm_fault(vma, address,
				      req->wr_req ? FAULT_FLAG_WRITE : 0);
		if (ret & VM_FAULT_ERROR)
			goto invalid;

		result = QI_RESP_SUCCESS;
	invalid:
		up_read(&svm->mm->mmap_sem);
		mmput(svm->mm);
	bad_req:
		/* Accounting for major/minor faults? */
		rcu_read_lock();
		list_for_each_entry_rcu(sdev, &svm->devs, list) {
			if (sdev->sid == req->rid)
				break;
		}
		/* Other devices can go away, but the drivers are not permitted
		 * to unbind while any page faults might be in flight. So it's
		 * OK to drop the 'lock' here now we have it. */
		rcu_read_unlock();

		if (WARN_ON(&sdev->list == &svm->devs))
			sdev = NULL;

		if (sdev && sdev->ops && sdev->ops->fault_cb) {
			int rwxp = (req->rd_req << 3) | (req->wr_req << 2) |
				(req->exe_req << 1) | (req->pm_req);
			sdev->ops->fault_cb(sdev->dev, req->pasid, req->addr,
					    req->priv_data, rwxp, result);
		}
		/* We get here in the error case where the PASID lookup failed,
		   and these can be NULL. Do not use them below this point! */
		sdev = NULL;
		svm = NULL;
	no_pasid:
		if (req->lpig || req->priv_data_present) {
			/*
			 * Per VT-d spec. v3.0 ch7.7, system software must
			 * respond with page group response if private data
			 * is present (PDP) or last page in group (LPIG) bit
			 * is set. This is an additional VT-d feature beyond
			 * PCI ATS spec.
			 */
			resp.qw0 = QI_PGRP_PASID(req->pasid) |
				QI_PGRP_DID(req->rid) |
				QI_PGRP_PASID_P(req->pasid_present) |
				QI_PGRP_PDP(req->pasid_present) |
				QI_PGRP_RESP_CODE(result) |
				QI_PGRP_RESP_TYPE;
			resp.qw1 = QI_PGRP_IDX(req->prg_index) |
				QI_PGRP_LPIG(req->lpig);

			resp.qw2 = 0;
			resp.qw3 = 0;
			if (req->priv_data_present)
				memcpy(&resp.qw2, req->priv_data,
				       sizeof(req->priv_data));
			qi_submit_sync(&resp, iommu);
		}
		head = (head + sizeof(*req)) & PRQ_RING_MASK;
	}

	dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);

	return IRQ_RETVAL(handled);
}