// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2015 Intel Corporation.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/intel-iommu.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/intel-svm.h>
#include <linux/rculist.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/dmar.h>
#include <linux/interrupt.h>
#include <linux/mm_types.h>
#include <linux/xarray.h>
#include <linux/ioasid.h>
#include <asm/page.h>
#include <asm/fpu/api.h>

#include "pasid.h"
#include "../iommu-sva-lib.h"

static irqreturn_t prq_event_thread(int irq, void *d);
static void intel_svm_drain_prq(struct device *dev, u32 pasid);
#define to_intel_svm_dev(handle) container_of(handle, struct intel_svm_dev, sva)

#define PRQ_ORDER 0

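/*
 * Per-PASID private data (the struct intel_svm for that PASID) is kept in
 * this xarray so that it can be looked up from the bind/unbind paths and
 * from the page request handling thread.
 */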
static DEFINE_XARRAY_ALLOC(pasid_private_array);
static int pasid_private_add(ioasid_t pasid, void *priv)
{
	return xa_alloc(&pasid_private_array, &pasid, priv,
			XA_LIMIT(pasid, pasid), GFP_ATOMIC);
}

static void pasid_private_remove(ioasid_t pasid)
{
	xa_erase(&pasid_private_array, pasid);
}

static void *pasid_private_find(ioasid_t pasid)
{
	return xa_load(&pasid_private_array, pasid);
}

static struct intel_svm_dev *
svm_lookup_device_by_sid(struct intel_svm *svm, u16 sid)
{
	struct intel_svm_dev *sdev = NULL, *t;

	rcu_read_lock();
	list_for_each_entry_rcu(t, &svm->devs, list) {
		if (t->sid == sid) {
			sdev = t;
			break;
		}
	}
	rcu_read_unlock();

	return sdev;
}

static struct intel_svm_dev *
svm_lookup_device_by_dev(struct intel_svm *svm, struct device *dev)
{
	struct intel_svm_dev *sdev = NULL, *t;

	rcu_read_lock();
	list_for_each_entry_rcu(t, &svm->devs, list) {
		if (t->dev == dev) {
			sdev = t;
			break;
		}
	}
	rcu_read_unlock();

	return sdev;
}

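/*
 * Allocate the page request queue for @iommu, wire up its interrupt and
 * the I/O page fault queue, then program the PQH/PQT/PQA registers.
 */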
int intel_svm_enable_prq(struct intel_iommu *iommu)
{
	struct iopf_queue *iopfq;
	struct page *pages;
	int irq, ret;

	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
	if (!pages) {
		pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
			iommu->name);
		return -ENOMEM;
	}
	iommu->prq = page_address(pages);

	irq = dmar_alloc_hwirq(DMAR_UNITS_SUPPORTED + iommu->seq_id, iommu->node, iommu);
	if (irq <= 0) {
		pr_err("IOMMU: %s: Failed to create IRQ vector for page request queue\n",
		       iommu->name);
		ret = -EINVAL;
		goto free_prq;
	}
	iommu->pr_irq = irq;

	snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name),
		 "dmar%d-iopfq", iommu->seq_id);
	iopfq = iopf_queue_alloc(iommu->iopfq_name);
	if (!iopfq) {
		pr_err("IOMMU: %s: Failed to allocate iopf queue\n", iommu->name);
		ret = -ENOMEM;
		goto free_hwirq;
	}
	iommu->iopf_queue = iopfq;

	snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id);

	ret = request_threaded_irq(irq, NULL, prq_event_thread, IRQF_ONESHOT,
				   iommu->prq_name, iommu);
	if (ret) {
		pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n",
		       iommu->name);
		goto free_iopfq;
	}
	dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER);

	init_completion(&iommu->prq_complete);

	return 0;

free_iopfq:
	iopf_queue_free(iommu->iopf_queue);
	iommu->iopf_queue = NULL;
free_hwirq:
	dmar_free_hwirq(irq);
	iommu->pr_irq = 0;
free_prq:
	free_pages((unsigned long)iommu->prq, PRQ_ORDER);
	iommu->prq = NULL;

	return ret;
}

int intel_svm_finish_prq(struct intel_iommu *iommu)
{
	dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL);

	if (iommu->pr_irq) {
		free_irq(iommu->pr_irq, iommu);
		dmar_free_hwirq(iommu->pr_irq);
		iommu->pr_irq = 0;
	}

	if (iommu->iopf_queue) {
		iopf_queue_free(iommu->iopf_queue);
		iommu->iopf_queue = NULL;
	}

	free_pages((unsigned long)iommu->prq, PRQ_ORDER);
	iommu->prq = NULL;

	return 0;
}

static inline bool intel_svm_capable(struct intel_iommu *iommu)
{
	return iommu->flags & VTD_FLAG_SVM_CAPABLE;
}

void intel_svm_check(struct intel_iommu *iommu)
{
	if (!pasid_supported(iommu))
		return;

	if (cpu_feature_enabled(X86_FEATURE_GBPAGES) &&
	    !cap_fl1gp_support(iommu->cap)) {
		pr_err("%s SVM disabled, incompatible 1GB page capability\n",
		       iommu->name);
		return;
	}

	if (cpu_feature_enabled(X86_FEATURE_LA57) &&
	    !cap_5lp_support(iommu->cap)) {
		pr_err("%s SVM disabled, incompatible paging mode\n",
		       iommu->name);
		return;
	}

	iommu->flags |= VTD_FLAG_SVM_CAPABLE;
}

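/*
 * Invalidate the PASID-based IOTLB entries for an address range on one
 * device, plus the device TLB when ATS is enabled on it.
 */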
static void __flush_svm_range_dev(struct intel_svm *svm,
				  struct intel_svm_dev *sdev,
				  unsigned long address,
				  unsigned long pages, int ih)
{
	struct device_domain_info *info = get_domain_info(sdev->dev);

	if (WARN_ON(!pages))
		return;

	qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih);
	if (info->ats_enabled)
		qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
					 svm->pasid, sdev->qdep, address,
					 order_base_2(pages));
}

static void intel_flush_svm_range_dev(struct intel_svm *svm,
				      struct intel_svm_dev *sdev,
				      unsigned long address,
				      unsigned long pages, int ih)
{
	unsigned long shift = ilog2(__roundup_pow_of_two(pages));
	unsigned long align = (1ULL << (VTD_PAGE_SHIFT + shift));
	unsigned long start = ALIGN_DOWN(address, align);
	unsigned long end = ALIGN(address + (pages << VTD_PAGE_SHIFT), align);

	while (start < end) {
		__flush_svm_range_dev(svm, sdev, start, align >> VTD_PAGE_SHIFT, ih);
		start += align;
	}
}

static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
				  unsigned long pages, int ih)
{
	struct intel_svm_dev *sdev;

	rcu_read_lock();
	list_for_each_entry_rcu(sdev, &svm->devs, list)
		intel_flush_svm_range_dev(svm, sdev, address, pages, ih);
	rcu_read_unlock();
}

/* Pages have been freed at this point */
static void intel_invalidate_range(struct mmu_notifier *mn,
				   struct mm_struct *mm,
				   unsigned long start, unsigned long end)
{
	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);

	intel_flush_svm_range(svm, start,
			      (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
}

static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
	struct intel_svm_dev *sdev;

	/* This might end up being called from exit_mmap(), *before* the page
	 * tables are cleared. And __mmu_notifier_release() will delete us from
	 * the list of notifiers so that our invalidate_range() callback doesn't
	 * get called when the page tables are cleared. So we need to protect
	 * against hardware accessing those page tables.
	 *
	 * We do it by clearing the entry in the PASID table and then flushing
	 * the IOTLB and the PASID table caches. This might upset hardware;
	 * perhaps we'll want to point the PASID to a dummy PGD (like the zero
	 * page) so that we end up taking a fault that the hardware really
	 * *has* to handle gracefully without affecting other processes.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(sdev, &svm->devs, list)
		intel_pasid_tear_down_entry(sdev->iommu, sdev->dev,
					    svm->pasid, true);
	rcu_read_unlock();

}

static const struct mmu_notifier_ops intel_mmuops = {
	.release = intel_mm_release,
	.invalidate_range = intel_invalidate_range,
};

static DEFINE_MUTEX(pasid_mutex);

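/*
 * Look up the intel_svm for @pasid and the intel_svm_dev bound to @dev.
 * Either may legitimately be absent; callers check *rsvm and *rsdev for
 * NULL. Must be called with pasid_mutex held.
 */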
static int pasid_to_svm_sdev(struct device *dev, unsigned int pasid,
			     struct intel_svm **rsvm,
			     struct intel_svm_dev **rsdev)
{
	struct intel_svm_dev *sdev = NULL;
	struct intel_svm *svm;

	/* The caller should hold the pasid_mutex lock */
	if (WARN_ON(!mutex_is_locked(&pasid_mutex)))
		return -EINVAL;

	if (pasid == INVALID_IOASID || pasid >= PASID_MAX)
		return -EINVAL;

	svm = pasid_private_find(pasid);
	if (IS_ERR(svm))
		return PTR_ERR(svm);

	if (!svm)
		goto out;

	/*
	 * If we found an svm for the PASID, there must be at least one device
	 * bound to it.
	 */
	if (WARN_ON(list_empty(&svm->devs)))
		return -EINVAL;
	sdev = svm_lookup_device_by_dev(svm, dev);

out:
	*rsvm = svm;
	*rsdev = sdev;

	return 0;
}

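/*
 * Bind a guest PASID to @dev: set up nested translation for data->hpasid,
 * with the first level under guest control and the second level owned by
 * the host domain.
 */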
int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
			  struct iommu_gpasid_bind_data *data)
{
	struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
	struct intel_svm_dev *sdev = NULL;
	struct dmar_domain *dmar_domain;
	struct device_domain_info *info;
	struct intel_svm *svm = NULL;
	unsigned long iflags;
	int ret = 0;

	if (WARN_ON(!iommu) || !data)
		return -EINVAL;

	if (data->format != IOMMU_PASID_FORMAT_INTEL_VTD)
		return -EINVAL;

	/* IOMMU core ensures argsz is more than the start of the union */
	if (data->argsz < offsetofend(struct iommu_gpasid_bind_data, vendor.vtd))
		return -EINVAL;

	/* Make sure no undefined flags are used in vendor data */
	if (data->vendor.vtd.flags & ~(IOMMU_SVA_VTD_GPASID_LAST - 1))
		return -EINVAL;

	if (!dev_is_pci(dev))
		return -ENOTSUPP;

	/* VT-d supports devices with full 20 bit PASIDs only */
	if (pci_max_pasids(to_pci_dev(dev)) != PASID_MAX)
		return -EINVAL;

	/*
	 * We only check the host PASID range; we have no knowledge to check
	 * the guest PASID range.
	 */
	if (data->hpasid <= 0 || data->hpasid >= PASID_MAX)
		return -EINVAL;

	info = get_domain_info(dev);
	if (!info)
		return -EINVAL;

	dmar_domain = to_dmar_domain(domain);

	mutex_lock(&pasid_mutex);
	ret = pasid_to_svm_sdev(dev, data->hpasid, &svm, &sdev);
	if (ret)
		goto out;

	if (sdev) {
		/*
		 * Do not allow multiple bindings of the same device-PASID since
		 * there is only one SL page table per PASID. We may revisit
		 * this once sharing a PGD across domains is supported.
		 */
		dev_warn_ratelimited(dev, "Already bound with PASID %u\n",
				     svm->pasid);
		ret = -EBUSY;
		goto out;
	}

	if (!svm) {
		/* We come here when the PASID has never been bound to a device. */
		svm = kzalloc(sizeof(*svm), GFP_KERNEL);
		if (!svm) {
			ret = -ENOMEM;
			goto out;
		}
		/* REVISIT: upper layer/VFIO can track the host process that binds
		 * the PASID. ioasid_set = mm might be sufficient for vfio to
		 * check pasid VMM ownership. We can drop the following line
		 * once VFIO and IOASID set check is in place.
		 */
		svm->mm = get_task_mm(current);
		svm->pasid = data->hpasid;
		if (data->flags & IOMMU_SVA_GPASID_VAL) {
			svm->gpasid = data->gpasid;
			svm->flags |= SVM_FLAG_GUEST_PASID;
		}
		pasid_private_add(data->hpasid, svm);
		INIT_LIST_HEAD_RCU(&svm->devs);
		mmput(svm->mm);
	}
	sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
	if (!sdev) {
		ret = -ENOMEM;
		goto out;
	}
	sdev->dev = dev;
	sdev->sid = PCI_DEVID(info->bus, info->devfn);
	sdev->iommu = iommu;

	/* Only count users if device has aux domains */
	if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
		sdev->users = 1;

	/* Set up device context entry for PASID if not enabled already */
	ret = intel_iommu_enable_pasid(iommu, sdev->dev);
	if (ret) {
		dev_err_ratelimited(dev, "Failed to enable PASID capability\n");
		kfree(sdev);
		goto out;
	}

	/*
	 * The PASID table is per device for better security. Therefore, for
	 * each bind of a new device even with an existing PASID, we need to
	 * call the nested mode setup function here.
	 */
	spin_lock_irqsave(&iommu->lock, iflags);
	ret = intel_pasid_setup_nested(iommu, dev,
				       (pgd_t *)(uintptr_t)data->gpgd,
				       data->hpasid, &data->vendor.vtd, dmar_domain,
				       data->addr_width);
	spin_unlock_irqrestore(&iommu->lock, iflags);
	if (ret) {
		dev_err_ratelimited(dev, "Failed to set up PASID %llu in nested mode, Err %d\n",
				    data->hpasid, ret);
		/*
		 * The PASID entry should be in cleared state if nested mode
		 * setup failed. So we only need to clear the IOASID tracking
		 * data such that the free call will succeed.
		 */
		kfree(sdev);
		goto out;
	}

	svm->flags |= SVM_FLAG_GUEST_MODE;

	init_rcu_head(&sdev->rcu);
	list_add_rcu(&sdev->list, &svm->devs);
 out:
	if (!IS_ERR_OR_NULL(svm) && list_empty(&svm->devs)) {
		pasid_private_remove(data->hpasid);
		kfree(svm);
	}

	mutex_unlock(&pasid_mutex);
	return ret;
}

int intel_svm_unbind_gpasid(struct device *dev, u32 pasid)
{
	struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
	struct intel_svm_dev *sdev;
	struct intel_svm *svm;
	int ret;

	if (WARN_ON(!iommu))
		return -EINVAL;

	mutex_lock(&pasid_mutex);
	ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev);
	if (ret)
		goto out;

	if (sdev) {
		if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
			sdev->users--;
		if (!sdev->users) {
			list_del_rcu(&sdev->list);
			intel_pasid_tear_down_entry(iommu, dev,
						    svm->pasid, false);
			intel_svm_drain_prq(dev, svm->pasid);
			kfree_rcu(sdev, rcu);

			if (list_empty(&svm->devs)) {
				/*
				 * We do not free the IOASID here because the
				 * IOMMU driver did not allocate it.
				 * Unlike native SVM, the IOASID for guest use
				 * was allocated prior to the bind call.
				 * In any case, if the free call comes before
				 * the unbind, the IOMMU driver will get
				 * notified and perform cleanup.
				 */
				pasid_private_remove(pasid);
				kfree(svm);
			}
		}
	}
out:
	mutex_unlock(&pasid_mutex);
	return ret;
}

static void _load_pasid(void *unused)
{
	update_pasid();
}

static void load_pasid(struct mm_struct *mm, u32 pasid)
{
	mutex_lock(&mm->context.lock);

	/* Synchronize with READ_ONCE in update_pasid(). */
	smp_store_release(&mm->pasid, pasid);

	/* Update PASID MSR on all CPUs running the mm's tasks. */
	on_each_cpu_mask(mm_cpumask(mm), _load_pasid, NULL, true);

	mutex_unlock(&mm->context.lock);
}

static int intel_svm_alloc_pasid(struct device *dev, struct mm_struct *mm,
				 unsigned int flags)
{
	ioasid_t max_pasid = dev_is_pci(dev) ?
			pci_max_pasids(to_pci_dev(dev)) : intel_pasid_max_id;

	return iommu_sva_alloc_pasid(mm, PASID_MIN, max_pasid - 1);
}

static void intel_svm_free_pasid(struct mm_struct *mm)
{
	iommu_sva_free_pasid(mm);
}

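/*
 * Bind @mm to @dev for native SVA: find or create the intel_svm for
 * mm->pasid, register the mmu notifier (unless in supervisor mode), set
 * up the first-level PASID table entry and add the device to svm->devs.
 */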
static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu,
					   struct device *dev,
					   struct mm_struct *mm,
					   unsigned int flags)
{
	struct device_domain_info *info = get_domain_info(dev);
	unsigned long iflags, sflags;
	struct intel_svm_dev *sdev;
	struct intel_svm *svm;
	int ret = 0;

	svm = pasid_private_find(mm->pasid);
	if (!svm) {
		svm = kzalloc(sizeof(*svm), GFP_KERNEL);
		if (!svm)
			return ERR_PTR(-ENOMEM);

		svm->pasid = mm->pasid;
		svm->mm = mm;
		svm->flags = flags;
		INIT_LIST_HEAD_RCU(&svm->devs);

		if (!(flags & SVM_FLAG_SUPERVISOR_MODE)) {
			svm->notifier.ops = &intel_mmuops;
			ret = mmu_notifier_register(&svm->notifier, mm);
			if (ret) {
				kfree(svm);
				return ERR_PTR(ret);
			}
		}

		ret = pasid_private_add(svm->pasid, svm);
		if (ret) {
			if (svm->notifier.ops)
				mmu_notifier_unregister(&svm->notifier, mm);
			kfree(svm);
			return ERR_PTR(ret);
		}
	}

	/* Find the matching device in svm list */
	sdev = svm_lookup_device_by_dev(svm, dev);
	if (sdev) {
		sdev->users++;
		goto success;
	}

	sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
	if (!sdev) {
		ret = -ENOMEM;
		goto free_svm;
	}

	sdev->dev = dev;
	sdev->iommu = iommu;
	sdev->did = FLPT_DEFAULT_DID;
	sdev->sid = PCI_DEVID(info->bus, info->devfn);
	sdev->users = 1;
	sdev->pasid = svm->pasid;
	sdev->sva.dev = dev;
	init_rcu_head(&sdev->rcu);
	if (info->ats_enabled) {
		sdev->dev_iotlb = 1;
		sdev->qdep = info->ats_qdep;
		if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
			sdev->qdep = 0;
	}

	/* Setup the pasid table: */
	sflags = (flags & SVM_FLAG_SUPERVISOR_MODE) ?
			PASID_FLAG_SUPERVISOR_MODE : 0;
	sflags |= cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0;
	spin_lock_irqsave(&iommu->lock, iflags);
	ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, mm->pasid,
					    FLPT_DEFAULT_DID, sflags);
	spin_unlock_irqrestore(&iommu->lock, iflags);

	if (ret)
		goto free_sdev;

	/* The newly allocated pasid is loaded to the mm. */
	if (!(flags & SVM_FLAG_SUPERVISOR_MODE) && list_empty(&svm->devs))
		load_pasid(mm, svm->pasid);

	list_add_rcu(&sdev->list, &svm->devs);
success:
	return &sdev->sva;

free_sdev:
	kfree(sdev);
free_svm:
	if (list_empty(&svm->devs)) {
		if (svm->notifier.ops)
			mmu_notifier_unregister(&svm->notifier, mm);
		pasid_private_remove(mm->pasid);
		kfree(svm);
	}

	return ERR_PTR(ret);
}

/* Caller must hold pasid_mutex */
static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
{
	struct intel_svm_dev *sdev;
	struct intel_iommu *iommu;
	struct intel_svm *svm;
	struct mm_struct *mm;
	int ret = -EINVAL;

	iommu = device_to_iommu(dev, NULL, NULL);
	if (!iommu)
		goto out;

	ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev);
	if (ret)
		goto out;
	mm = svm->mm;

	if (sdev) {
		sdev->users--;
		if (!sdev->users) {
			list_del_rcu(&sdev->list);
			/* Flush the PASID cache and IOTLB for this device.
			 * Note that we do depend on the hardware *not* using
			 * the PASID any more. Just as we depend on other
			 * devices never using PASIDs that they have no right
			 * to use. We have a *shared* PASID table, because it's
			 * large and has to be physically contiguous. So it's
			 * hard to be as defensive as we might like. */
			intel_pasid_tear_down_entry(iommu, dev,
						    svm->pasid, false);
			intel_svm_drain_prq(dev, svm->pasid);
			kfree_rcu(sdev, rcu);

			if (list_empty(&svm->devs)) {
				intel_svm_free_pasid(mm);
				if (svm->notifier.ops) {
					mmu_notifier_unregister(&svm->notifier, mm);
					/* Clear mm's pasid. */
					load_pasid(mm, PASID_DISABLED);
				}
				pasid_private_remove(svm->pasid);
				/* We mandate that no page faults may be outstanding
				 * for the PASID when intel_svm_unbind_mm() is called.
				 * If that is not obeyed, subtle errors will happen.
				 * Let's make them less subtle... */
				memset(svm, 0x6b, sizeof(*svm));
				kfree(svm);
			}
		}
	}
out:
	return ret;
}

/* Page request queue descriptor */
struct page_req_dsc {
	union {
		struct {
			u64 type:8;
			u64 pasid_present:1;
			u64 priv_data_present:1;
			u64 rsvd:6;
			u64 rid:16;
			u64 pasid:20;
			u64 exe_req:1;
			u64 pm_req:1;
			u64 rsvd2:10;
		};
		u64 qw_0;
	};
	union {
		struct {
			u64 rd_req:1;
			u64 wr_req:1;
			u64 lpig:1;
			u64 prg_index:9;
			u64 addr:52;
		};
		u64 qw_1;
	};
	u64 priv_data[2];
};

#define PRQ_RING_MASK	((0x1000 << PRQ_ORDER) - 0x20)

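/* Check whether the faulting access described by @req is allowed by @vma. */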
static bool access_error(struct vm_area_struct *vma, struct page_req_dsc *req)
{
	unsigned long requested = 0;

	if (req->exe_req)
		requested |= VM_EXEC;

	if (req->rd_req)
		requested |= VM_READ;

	if (req->wr_req)
		requested |= VM_WRITE;

	return (requested & ~vma->vm_flags) != 0;
}

static bool is_canonical_address(u64 addr)
{
	int shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
	long saddr = (long) addr;

	return (((saddr << shift) >> shift) == saddr);
}

/**
 * intel_svm_drain_prq - Drain page requests and responses for a pasid
 * @dev: target device
 * @pasid: pasid for draining
 *
 * Drain all pending page requests and responses related to @pasid in both
 * software and hardware. This is supposed to be called after the device
 * driver has stopped DMA, the pasid entry has been cleared, and both IOTLB
 * and DevTLB have been invalidated.
 *
 * It waits until all pending page requests for @pasid in the page fault
 * queue are completed by the prq handling thread. Then it follows the steps
 * described in VT-d spec CH7.10 to drain all page requests and page
 * responses pending in the hardware.
 */
static void intel_svm_drain_prq(struct device *dev, u32 pasid)
{
	struct device_domain_info *info;
	struct dmar_domain *domain;
	struct intel_iommu *iommu;
	struct qi_desc desc[3];
	struct pci_dev *pdev;
	int head, tail;
	u16 sid, did;
	int qdep;

	info = get_domain_info(dev);
	if (WARN_ON(!info || !dev_is_pci(dev)))
		return;

	if (!info->pri_enabled)
		return;

	iommu = info->iommu;
	domain = info->domain;
	pdev = to_pci_dev(dev);
	sid = PCI_DEVID(info->bus, info->devfn);
	did = domain->iommu_did[iommu->seq_id];
	qdep = pci_ats_queue_depth(pdev);

	/*
	 * Check and wait until all pending page requests in the queue are
	 * handled by the prq handling thread.
	 */
prq_retry:
	reinit_completion(&iommu->prq_complete);
	tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
	head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
	while (head != tail) {
		struct page_req_dsc *req;

		req = &iommu->prq[head / sizeof(*req)];
		if (!req->pasid_present || req->pasid != pasid) {
			head = (head + sizeof(*req)) & PRQ_RING_MASK;
			continue;
		}

		wait_for_completion(&iommu->prq_complete);
		goto prq_retry;
	}

	/*
	 * Perform steps described in VT-d spec CH7.10 to drain page
	 * requests and responses in hardware.
	 */
	memset(desc, 0, sizeof(desc));
	desc[0].qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_FENCE |
			QI_IWD_TYPE;
	desc[1].qw0 = QI_EIOTLB_PASID(pasid) |
			QI_EIOTLB_DID(did) |
			QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
			QI_EIOTLB_TYPE;
	desc[2].qw0 = QI_DEV_EIOTLB_PASID(pasid) |
			QI_DEV_EIOTLB_SID(sid) |
			QI_DEV_EIOTLB_QDEP(qdep) |
			QI_DEIOTLB_TYPE |
			QI_DEV_IOTLB_PFSID(info->pfsid);
qi_retry:
	reinit_completion(&iommu->prq_complete);
	qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN);
	if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
		wait_for_completion(&iommu->prq_complete);
		goto qi_retry;
	}
}

static int prq_to_iommu_prot(struct page_req_dsc *req)
{
	int prot = 0;

	if (req->rd_req)
		prot |= IOMMU_FAULT_PERM_READ;
	if (req->wr_req)
		prot |= IOMMU_FAULT_PERM_WRITE;
	if (req->exe_req)
		prot |= IOMMU_FAULT_PERM_EXEC;
	if (req->pm_req)
		prot |= IOMMU_FAULT_PERM_PRIV;

	return prot;
}

static int
intel_svm_prq_report(struct device *dev, struct page_req_dsc *desc)
{
	struct iommu_fault_event event;

	if (!dev || !dev_is_pci(dev))
		return -ENODEV;

	/* Fill in event data for device specific processing */
	memset(&event, 0, sizeof(struct iommu_fault_event));
	event.fault.type = IOMMU_FAULT_PAGE_REQ;
	event.fault.prm.addr = (u64)desc->addr << VTD_PAGE_SHIFT;
	event.fault.prm.pasid = desc->pasid;
	event.fault.prm.grpid = desc->prg_index;
	event.fault.prm.perm = prq_to_iommu_prot(desc);

	if (desc->lpig)
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
	if (desc->pasid_present) {
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
	}
	if (desc->priv_data_present) {
		/*
		 * Set the last page in group bit if private data is present:
		 * a page response is then required, just as it is for LPIG.
		 * iommu_report_device_fault() doesn't understand this vendor
		 * specific requirement, thus we set last_page as a workaround.
		 */
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA;
		memcpy(event.fault.prm.private_data, desc->priv_data,
		       sizeof(desc->priv_data));
	}

	return iommu_report_device_fault(dev, &event);
}

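/*
 * Reject a malformed page request: log it and, when the request needs a
 * response (LPIG or private data present), send a page group response
 * carrying the given @result code.
 */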
static void handle_bad_prq_event(struct intel_iommu *iommu,
				 struct page_req_dsc *req, int result)
{
	struct qi_desc desc;

	pr_err("%s: Invalid page request: %08llx %08llx\n",
	       iommu->name, ((unsigned long long *)req)[0],
	       ((unsigned long long *)req)[1]);

	/*
	 * Per VT-d spec. v3.0 ch7.7, system software must
	 * respond with page group response if private data
	 * is present (PDP) or last page in group (LPIG) bit
	 * is set. This is an additional VT-d feature beyond
	 * PCI ATS spec.
	 */
	if (!req->lpig && !req->priv_data_present)
		return;

	desc.qw0 = QI_PGRP_PASID(req->pasid) |
			QI_PGRP_DID(req->rid) |
			QI_PGRP_PASID_P(req->pasid_present) |
			QI_PGRP_PDP(req->priv_data_present) |
			QI_PGRP_RESP_CODE(result) |
			QI_PGRP_RESP_TYPE;
	desc.qw1 = QI_PGRP_IDX(req->prg_index) |
			QI_PGRP_LPIG(req->lpig);
	desc.qw2 = 0;
	desc.qw3 = 0;

	if (req->priv_data_present)
		memcpy(&desc.qw2, req->priv_data, sizeof(req->priv_data));
	qi_submit_sync(iommu, &desc, 1, 0);
}

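/*
 * Service one page request: fault the page in through handle_mm_fault()
 * and, if the request requires it, send the page group response.
 */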
static void handle_single_prq_event(struct intel_iommu *iommu,
				    struct mm_struct *mm,
				    struct page_req_dsc *req)
{
	u64 address = (u64)req->addr << VTD_PAGE_SHIFT;
	int result = QI_RESP_INVALID;
	struct vm_area_struct *vma;
	struct qi_desc desc;
	unsigned int flags;
	vm_fault_t ret;

	/* If the mm is already defunct, don't handle faults. */
	if (!mmget_not_zero(mm))
		goto response;

	mmap_read_lock(mm);
	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		goto invalid;

	if (access_error(vma, req))
		goto invalid;

	flags = FAULT_FLAG_USER | FAULT_FLAG_REMOTE;
	if (req->wr_req)
		flags |= FAULT_FLAG_WRITE;

	ret = handle_mm_fault(vma, address, flags, NULL);
	if (!(ret & VM_FAULT_ERROR))
		result = QI_RESP_SUCCESS;
invalid:
	mmap_read_unlock(mm);
	mmput(mm);

response:
	if (!(req->lpig || req->priv_data_present))
		return;

	desc.qw0 = QI_PGRP_PASID(req->pasid) |
			QI_PGRP_DID(req->rid) |
			QI_PGRP_PASID_P(req->pasid_present) |
			QI_PGRP_PDP(req->priv_data_present) |
			QI_PGRP_RESP_CODE(result) |
			QI_PGRP_RESP_TYPE;
	desc.qw1 = QI_PGRP_IDX(req->prg_index) |
			QI_PGRP_LPIG(req->lpig);
	desc.qw2 = 0;
	desc.qw3 = 0;

	if (req->priv_data_present)
		memcpy(&desc.qw2, req->priv_data, sizeof(req->priv_data));

	qi_submit_sync(iommu, &desc, 1, 0);
}

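/*
 * Threaded interrupt handler for the page request queue: walk the ring
 * from head to tail, validate each descriptor, and either report it to
 * the fault consumer (guest mode) or handle it directly.
 */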
static irqreturn_t prq_event_thread(int irq, void *d)
{
	struct intel_svm_dev *sdev = NULL;
	struct intel_iommu *iommu = d;
	struct intel_svm *svm = NULL;
	struct page_req_dsc *req;
	int head, tail, handled;
	u64 address;

	/*
	 * Clear PPR bit before reading head/tail registers, to ensure that
	 * we get a new interrupt if needed.
	 */
	writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);

	tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
	head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
	handled = (head != tail);
	while (head != tail) {
		req = &iommu->prq[head / sizeof(*req)];
		address = (u64)req->addr << VTD_PAGE_SHIFT;

		if (unlikely(!req->pasid_present)) {
			pr_err("IOMMU: %s: Page request without PASID\n",
			       iommu->name);
bad_req:
			svm = NULL;
			sdev = NULL;
			handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
			goto prq_advance;
		}

		if (unlikely(!is_canonical_address(address))) {
			pr_err("IOMMU: %s: Address is not canonical\n",
			       iommu->name);
			goto bad_req;
		}

		if (unlikely(req->pm_req && (req->rd_req | req->wr_req))) {
			pr_err("IOMMU: %s: Page request in Privilege Mode\n",
			       iommu->name);
			goto bad_req;
		}

		if (unlikely(req->exe_req && req->rd_req)) {
			pr_err("IOMMU: %s: Execution request not supported\n",
			       iommu->name);
			goto bad_req;
		}

		if (!svm || svm->pasid != req->pasid) {
			/*
			 * It can't go away, because the driver is not permitted
			 * to unbind the mm while any page faults are outstanding.
			 */
			svm = pasid_private_find(req->pasid);
			if (IS_ERR_OR_NULL(svm) || (svm->flags & SVM_FLAG_SUPERVISOR_MODE))
				goto bad_req;
		}

		if (!sdev || sdev->sid != req->rid) {
			sdev = svm_lookup_device_by_sid(svm, req->rid);
			if (!sdev)
				goto bad_req;
		}

		/*
		 * If prq is to be handled outside iommu driver via receiver of
		 * the fault notifiers, we skip the page response here.
		 */
		if (svm->flags & SVM_FLAG_GUEST_MODE) {
			if (!intel_svm_prq_report(sdev->dev, req))
				goto prq_advance;
			else
				goto bad_req;
		}

		handle_single_prq_event(iommu, svm->mm, req);
prq_advance:
		head = (head + sizeof(*req)) & PRQ_RING_MASK;
	}

	dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);

	/*
	 * Clear the page request overflow bit and wake up all threads that
	 * are waiting for the completion of this handling.
	 */
	if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
		pr_info_ratelimited("IOMMU: %s: PRQ overflow detected\n",
				    iommu->name);
		head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
		tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
		if (head == tail) {
			writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);
			pr_info_ratelimited("IOMMU: %s: PRQ overflow cleared",
					    iommu->name);
		}
	}

	if (!completion_done(&iommu->prq_complete))
		complete(&iommu->prq_complete);

	return IRQ_RETVAL(handled);
}

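/*
 * Entry point for native SVA binding, typically reached via the
 * iommu_sva_bind_device() API: allocate a PASID for @mm and bind it
 * to @dev.
 */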
struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm, void *drvdata)
{
	struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
	unsigned int flags = 0;
	struct iommu_sva *sva;
	int ret;

	if (drvdata)
		flags = *(unsigned int *)drvdata;

	if (flags & SVM_FLAG_SUPERVISOR_MODE) {
		if (!ecap_srs(iommu->ecap)) {
			dev_err(dev, "%s: Supervisor PASID not supported\n",
				iommu->name);
			return ERR_PTR(-EOPNOTSUPP);
		}

		if (mm) {
			dev_err(dev, "%s: Supervisor PASID with user provided mm\n",
				iommu->name);
			return ERR_PTR(-EINVAL);
		}

		mm = &init_mm;
	}

	mutex_lock(&pasid_mutex);
	ret = intel_svm_alloc_pasid(dev, mm, flags);
	if (ret) {
		mutex_unlock(&pasid_mutex);
		return ERR_PTR(ret);
	}

	sva = intel_svm_bind_mm(iommu, dev, mm, flags);
	if (IS_ERR_OR_NULL(sva))
		intel_svm_free_pasid(mm);
	mutex_unlock(&pasid_mutex);

	return sva;
}

void intel_svm_unbind(struct iommu_sva *sva)
{
	struct intel_svm_dev *sdev = to_intel_svm_dev(sva);

	mutex_lock(&pasid_mutex);
	intel_svm_unbind_mm(sdev->dev, sdev->pasid);
	mutex_unlock(&pasid_mutex);
}

u32 intel_svm_get_pasid(struct iommu_sva *sva)
{
	struct intel_svm_dev *sdev;
	u32 pasid;

	mutex_lock(&pasid_mutex);
	sdev = to_intel_svm_dev(sva);
	pasid = sdev->pasid;
	mutex_unlock(&pasid_mutex);

	return pasid;
}

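/*
 * Complete a page request group on behalf of a fault consumer (such as
 * a guest through userspace) by submitting a page group response
 * descriptor for the reported fault.
 */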
int intel_svm_page_response(struct device *dev,
			    struct iommu_fault_event *evt,
			    struct iommu_page_response *msg)
{
	struct iommu_fault_page_request *prm;
	struct intel_svm_dev *sdev = NULL;
	struct intel_svm *svm = NULL;
	struct intel_iommu *iommu;
	bool private_present;
	bool pasid_present;
	bool last_page;
	u8 bus, devfn;
	int ret = 0;
	u16 sid;

	if (!dev || !dev_is_pci(dev))
		return -ENODEV;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	if (!msg || !evt)
		return -EINVAL;

	mutex_lock(&pasid_mutex);

	prm = &evt->fault.prm;
	sid = PCI_DEVID(bus, devfn);
	pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
	private_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA;
	last_page = prm->flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;

	if (!pasid_present) {
		ret = -EINVAL;
		goto out;
	}

	if (prm->pasid == 0 || prm->pasid >= PASID_MAX) {
		ret = -EINVAL;
		goto out;
	}

	ret = pasid_to_svm_sdev(dev, prm->pasid, &svm, &sdev);
	if (ret || !sdev) {
		ret = -ENODEV;
		goto out;
	}

	/*
	 * For responses from userspace, need to make sure that the
	 * pasid has been bound to its mm.
	 */
	if (svm->flags & SVM_FLAG_GUEST_MODE) {
		struct mm_struct *mm;

		mm = get_task_mm(current);
		if (!mm) {
			ret = -EINVAL;
			goto out;
		}

		if (mm != svm->mm) {
			ret = -ENODEV;
			mmput(mm);
			goto out;
		}

		mmput(mm);
	}

	/*
	 * Per VT-d spec. v3.0 ch7.7, system software must respond
	 * with page group response if private data is present (PDP)
	 * or last page in group (LPIG) bit is set. This is an
	 * additional VT-d requirement beyond PCI ATS spec.
	 */
	if (last_page || private_present) {
		struct qi_desc desc;

		desc.qw0 = QI_PGRP_PASID(prm->pasid) | QI_PGRP_DID(sid) |
				QI_PGRP_PASID_P(pasid_present) |
				QI_PGRP_PDP(private_present) |
				QI_PGRP_RESP_CODE(msg->code) |
				QI_PGRP_RESP_TYPE;
		desc.qw1 = QI_PGRP_IDX(prm->grpid) | QI_PGRP_LPIG(last_page);
		desc.qw2 = 0;
		desc.qw3 = 0;
		if (private_present)
			memcpy(&desc.qw2, prm->private_data,
			       sizeof(prm->private_data));

		qi_submit_sync(iommu, &desc, 1, 0);
	}
out:
	mutex_unlock(&pasid_mutex);
	return ret;
}