// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)     "AMD-Vi: " fmt

#include <linux/refcount.h>
#include <linux/mmu_notifier.h>
#include <linux/amd-iommu.h>
#include <linux/mm_types.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/cc_platform.h>

#include "amd_iommu.h"

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Joerg Roedel <jroedel@suse.de>");

#define MAX_DEVICES		0x10000
#define PRI_QUEUE_SIZE		512

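/*
 * Per-tag state for outstanding PRI (Page Request Interface) requests.
 * The PPR tag field is nine bits wide, hence PRI_QUEUE_SIZE of 512:
 * one slot per possible tag value.
 */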
struct pri_queue {
	atomic_t inflight;
	bool finish;
	int status;
};

struct pasid_state {
	struct list_head list;			/* For global state-list */
	refcount_t count;			/* Reference count */
	unsigned mmu_notifier_count;		/* Counting nested mmu_notifier
						   calls */
	struct mm_struct *mm;			/* mm_struct for the faults */
	struct mmu_notifier mn;			/* mmu_notifier handle */
	struct pri_queue pri[PRI_QUEUE_SIZE];	/* PRI tag states */
	struct device_state *device_state;	/* Link to our device_state */
	u32 pasid;				/* PASID index */
	bool invalid;				/* Used during setup and
						   teardown of the pasid */
	spinlock_t lock;			/* Protect pri_queues and
						   mmu_notifier_count */
	wait_queue_head_t wq;			/* To wait for count == 0 */
};

struct device_state {
	struct list_head list;
	u16 devid;
	atomic_t count;
	struct pci_dev *pdev;
	struct pasid_state **states;
	struct iommu_domain *domain;
	int pasid_levels;
	int max_pasids;
	amd_iommu_invalid_ppr_cb inv_ppr_cb;
	amd_iommu_invalidate_ctx inv_ctx_cb;
	spinlock_t lock;
	wait_queue_head_t wq;
};

struct fault {
	struct work_struct work;
	struct device_state *dev_state;
	struct pasid_state *state;
	struct mm_struct *mm;
	u64 address;
	u16 devid;
	u32 pasid;
	u16 tag;
	u16 finish;
	u16 flags;
};

static LIST_HEAD(state_list);
static DEFINE_SPINLOCK(state_lock);

static struct workqueue_struct *iommu_wq;

static void free_pasid_states(struct device_state *dev_state);

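/*
 * Pack the PCI bus number and devfn into the 16-bit requester ID
 * (bus << 8 | devfn) used to look up per-device state.
 */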
static u16 device_id(struct pci_dev *pdev)
{
	u16 devid;

	devid = pdev->bus->number;
	devid = (devid << 8) | pdev->devfn;

	return devid;
}

static struct device_state *__get_device_state(u16 devid)
{
	struct device_state *dev_state;

	list_for_each_entry(dev_state, &state_list, list) {
		if (dev_state->devid == devid)
			return dev_state;
	}

	return NULL;
}

static struct device_state *get_device_state(u16 devid)
{
	struct device_state *dev_state;
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	dev_state = __get_device_state(devid);
	if (dev_state != NULL)
		atomic_inc(&dev_state->count);
	spin_unlock_irqrestore(&state_lock, flags);

	return dev_state;
}

static void free_device_state(struct device_state *dev_state)
{
	struct iommu_group *group;

	/*
	 * First detach device from domain - No more PRI requests will arrive
	 * from that device after it is unbound from the IOMMUv2 domain.
	 */
	group = iommu_group_get(&dev_state->pdev->dev);
	if (WARN_ON(!group))
		return;

	iommu_detach_group(dev_state->domain, group);

	iommu_group_put(group);

	/* Everything is down now, free the IOMMUv2 domain */
	iommu_domain_free(dev_state->domain);

	/* Finally get rid of the device-state */
	kfree(dev_state);
}

static void put_device_state(struct device_state *dev_state)
{
	if (atomic_dec_and_test(&dev_state->count))
		wake_up(&dev_state->wq);
}

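/*
 * The per-device PASID-state table is a radix tree with 512 (2^9)
 * entries per page, mirroring the x86 page-table layout: each level
 * resolves nine bits of the PASID, most-significant bits first.  For
 * example, with pasid_levels == 1 a PASID of 0x345 is resolved as
 * root[0x345 >> 9] -> leaf[0x345 & 0x1ff], i.e. root[1] -> leaf[0x145].
 */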
/* Must be called under dev_state->lock */
static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
						  u32 pasid, bool alloc)
{
	struct pasid_state **root, **ptr;
	int level, index;

	level = dev_state->pasid_levels;
	root  = dev_state->states;

	while (true) {

		index = (pasid >> (9 * level)) & 0x1ff;
		ptr   = &root[index];

		if (level == 0)
			break;

		if (*ptr == NULL) {
			if (!alloc)
				return NULL;

			*ptr = (void *)get_zeroed_page(GFP_ATOMIC);
			if (*ptr == NULL)
				return NULL;
		}

		root   = (struct pasid_state **)*ptr;
		level -= 1;
	}

	return ptr;
}

static int set_pasid_state(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   u32 pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	ret = -ENOMEM;
	if (ptr == NULL)
		goto out_unlock;

	ret = -ENOMEM;
	if (*ptr != NULL)
		goto out_unlock;

	*ptr = pasid_state;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}

static void clear_pasid_state(struct device_state *dev_state, u32 pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	if (ptr == NULL)
		goto out_unlock;

	*ptr = NULL;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);
}

static struct pasid_state *get_pasid_state(struct device_state *dev_state,
					   u32 pasid)
{
	struct pasid_state **ptr, *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, false);

	if (ptr == NULL)
		goto out_unlock;

	ret = *ptr;
	if (ret)
		refcount_inc(&ret->count);

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}

static void free_pasid_state(struct pasid_state *pasid_state)
{
	kfree(pasid_state);
}

static void put_pasid_state(struct pasid_state *pasid_state)
{
	if (refcount_dec_and_test(&pasid_state->count))
		wake_up(&pasid_state->wq);
}

static void put_pasid_state_wait(struct pasid_state *pasid_state)
{
	refcount_dec(&pasid_state->count);
	wait_event(pasid_state->wq, !refcount_read(&pasid_state->count));
	free_pasid_state(pasid_state);
}

static void unbind_pasid(struct pasid_state *pasid_state)
{
	struct iommu_domain *domain;

	domain = pasid_state->device_state->domain;

	/*
	 * Mark pasid_state as invalid; no more faults will be added to the
	 * work queue after this change is visible everywhere.
	 */
	pasid_state->invalid = true;

	/* Make sure this is visible */
	smp_wmb();

	/* After this the device/pasid can't access the mm anymore */
	amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid);

	/* Make sure no more pending faults are in the queue */
	flush_workqueue(iommu_wq);
}

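/*
 * Teardown helpers for the PASID-state radix table built by
 * __get_pasid_state_ptr(): level1 frees the pages referenced from one
 * table page, level2 first descends through one more level of
 * indirection.
 */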
static void free_pasid_states_level1(struct pasid_state **tbl)
{
	int i;

	for (i = 0; i < 512; ++i) {
		if (tbl[i] == NULL)
			continue;

		free_page((unsigned long)tbl[i]);
	}
}

static void free_pasid_states_level2(struct pasid_state **tbl)
{
	struct pasid_state **ptr;
	int i;

	for (i = 0; i < 512; ++i) {
		if (tbl[i] == NULL)
			continue;

		ptr = (struct pasid_state **)tbl[i];
		free_pasid_states_level1(ptr);
	}
}

static void free_pasid_states(struct device_state *dev_state)
{
	struct pasid_state *pasid_state;
	int i;

	for (i = 0; i < dev_state->max_pasids; ++i) {
		pasid_state = get_pasid_state(dev_state, i);
		if (pasid_state == NULL)
			continue;

		put_pasid_state(pasid_state);

		/*
		 * This will call the mn_release function and
		 * unbind the PASID
		 */
		mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

		put_pasid_state_wait(pasid_state); /* Reference taken in
						      amd_iommu_bind_pasid */

		/* Drop reference taken in amd_iommu_bind_pasid */
		put_device_state(dev_state);
	}

	if (dev_state->pasid_levels == 2)
		free_pasid_states_level2(dev_state->states);
	else if (dev_state->pasid_levels == 1)
		free_pasid_states_level1(dev_state->states);
	else
		BUG_ON(dev_state->pasid_levels != 0);

	free_page((unsigned long)dev_state->states);
}

static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
{
	return container_of(mn, struct pasid_state, mn);
}

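/*
 * (start ^ (end - 1)) < PAGE_SIZE means start and end - 1 differ only
 * in their page-offset bits, i.e. the invalidated range lies within a
 * single page; flush just that page, otherwise flush the whole TLB for
 * the PASID.
 */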
static void mn_invalidate_range(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;

	pasid_state = mn_to_state(mn);
	dev_state   = pasid_state->device_state;

	if ((start ^ (end - 1)) < PAGE_SIZE)
		amd_iommu_flush_page(dev_state->domain, pasid_state->pasid,
				     start);
	else
		amd_iommu_flush_tlb(dev_state->domain, pasid_state->pasid);
}

static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	bool run_inv_ctx_cb;

	might_sleep();

	pasid_state    = mn_to_state(mn);
	dev_state      = pasid_state->device_state;
	run_inv_ctx_cb = !pasid_state->invalid;

	if (run_inv_ctx_cb && dev_state->inv_ctx_cb)
		dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);

	unbind_pasid(pasid_state);
}

static const struct mmu_notifier_ops iommu_mn = {
	.release		= mn_release,
	.invalidate_range	= mn_invalidate_range,
};

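/*
 * PPR completion protocol: several faults may be queued under the same
 * PRI tag.  Each queued fault increments pri[tag].inflight; a response
 * is sent to the device only when the last in-flight fault for the tag
 * finishes and the device asked for a response (pri[tag].finish).
 */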
static void set_pri_tag_status(struct pasid_state *pasid_state,
			       u16 tag, int status)
{
	unsigned long flags;

	spin_lock_irqsave(&pasid_state->lock, flags);
	pasid_state->pri[tag].status = status;
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}

static void finish_pri_tag(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   u16 tag)
{
	unsigned long flags;

	spin_lock_irqsave(&pasid_state->lock, flags);
	if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) &&
	    pasid_state->pri[tag].finish) {
		amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
				       pasid_state->pri[tag].status, tag);
		pasid_state->pri[tag].finish = false;
		pasid_state->pri[tag].status = PPR_SUCCESS;
	}
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}

static void handle_fault_error(struct fault *fault)
{
	int status;

	if (!fault->dev_state->inv_ppr_cb) {
		set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
		return;
	}

	status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
					      fault->pasid,
					      fault->address,
					      fault->flags);
	switch (status) {
	case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
		set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
		break;
	case AMD_IOMMU_INV_PRI_RSP_INVALID:
		set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
		break;
	case AMD_IOMMU_INV_PRI_RSP_FAIL:
		set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
		break;
	default:
		BUG();
	}
}

static bool access_error(struct vm_area_struct *vma, struct fault *fault)
{
	unsigned long requested = 0;

	if (fault->flags & PPR_FAULT_EXEC)
		requested |= VM_EXEC;

	if (fault->flags & PPR_FAULT_READ)
		requested |= VM_READ;

	if (fault->flags & PPR_FAULT_WRITE)
		requested |= VM_WRITE;

	return (requested & ~vma->vm_flags) != 0;
}

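/*
 * Work-queue handler for a single queued PPR fault: look up the VMA in
 * the bound mm, check permissions, resolve the fault with
 * handle_mm_fault() and report the result back through the PRI tag.
 */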
static void do_fault(struct work_struct *work)
{
	struct fault *fault = container_of(work, struct fault, work);
	struct vm_area_struct *vma;
	vm_fault_t ret = VM_FAULT_ERROR;
	unsigned int flags = 0;
	struct mm_struct *mm;
	u64 address;

	mm = fault->state->mm;
	address = fault->address;

	if (fault->flags & PPR_FAULT_USER)
		flags |= FAULT_FLAG_USER;
	if (fault->flags & PPR_FAULT_WRITE)
		flags |= FAULT_FLAG_WRITE;
	flags |= FAULT_FLAG_REMOTE;

	mmap_read_lock(mm);
	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		/* failed to get a vma in the right range */
		goto out;

	/* Check if we have the right permissions on the vma */
	if (access_error(vma, fault))
		goto out;

	ret = handle_mm_fault(vma, address, flags, NULL);
out:
	mmap_read_unlock(mm);

	if (ret & VM_FAULT_ERROR)
		/* failed to service fault */
		handle_fault_error(fault);

	finish_pri_tag(fault->dev_state, fault->state, fault->tag);

	put_pasid_state(fault->state);

	kfree(fault);
}

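/*
 * Notifier called from the IOMMU driver for each PPR log entry.  The
 * low nine bits of the tag field identify the PRI tag; bit 9 is the
 * "finish" flag telling us a response must be sent once all faults
 * queued under this tag have completed.
 */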
static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
{
	struct amd_iommu_fault *iommu_fault;
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	struct pci_dev *pdev = NULL;
	unsigned long flags;
	struct fault *fault;
	bool finish;
	u16 tag, devid;
	int ret;

	iommu_fault = data;
	tag         = iommu_fault->tag & 0x1ff;
	finish      = (iommu_fault->tag >> 9) & 1;

	devid = iommu_fault->device_id;
	pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
					   devid & 0xff);
	if (!pdev)
		return -ENODEV;

	ret = NOTIFY_DONE;

	/* In kdump kernel pci dev is not initialized yet -> send INVALID */
	if (amd_iommu_is_attach_deferred(NULL, &pdev->dev)) {
		amd_iommu_complete_ppr(pdev, iommu_fault->pasid,
				       PPR_INVALID, tag);
		goto out;
	}

	dev_state = get_device_state(iommu_fault->device_id);
	if (dev_state == NULL)
		goto out;

	pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
	if (pasid_state == NULL || pasid_state->invalid) {
		/* We know the device but not the PASID -> send INVALID */
		amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
				       PPR_INVALID, tag);
		goto out_drop_state;
	}

	spin_lock_irqsave(&pasid_state->lock, flags);
	atomic_inc(&pasid_state->pri[tag].inflight);
	if (finish)
		pasid_state->pri[tag].finish = true;
	spin_unlock_irqrestore(&pasid_state->lock, flags);

	fault = kzalloc(sizeof(*fault), GFP_ATOMIC);
	if (fault == NULL) {
		/* We are OOM - send success and let the device re-fault */
		finish_pri_tag(dev_state, pasid_state, tag);
		goto out_drop_state;
	}

	fault->dev_state = dev_state;
	fault->address   = iommu_fault->address;
	fault->state     = pasid_state;
	fault->tag       = tag;
	fault->finish    = finish;
	fault->pasid     = iommu_fault->pasid;
	fault->flags     = iommu_fault->flags;
	INIT_WORK(&fault->work, do_fault);

	queue_work(iommu_wq, &fault->work);

	ret = NOTIFY_OK;

out_drop_state:

	if (ret != NOTIFY_OK && pasid_state)
		put_pasid_state(pasid_state);

	put_device_state(dev_state);

out:
	return ret;
}

static struct notifier_block ppr_nb = {
	.notifier_call = ppr_notifier,
};

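/*
 * Bind a task's address space to a device PASID: take references on
 * the device state and the task's mm, register an mmu_notifier on the
 * mm and program the GCR3 table with the mm's page-table root
 * (__pa(mm->pgd)) so the IOMMU can walk the process page tables.
 */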
int amd_iommu_bind_pasid(struct pci_dev *pdev, u32 pasid,
			 struct task_struct *task)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	struct mm_struct *mm;
	u16 devid;
	int ret;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid     = device_id(pdev);
	dev_state = get_device_state(devid);

	if (dev_state == NULL)
		return -EINVAL;

	ret = -EINVAL;
	if (pasid >= dev_state->max_pasids)
		goto out;

	ret = -ENOMEM;
	pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL);
	if (pasid_state == NULL)
		goto out;

	refcount_set(&pasid_state->count, 1);
	init_waitqueue_head(&pasid_state->wq);
	spin_lock_init(&pasid_state->lock);

	mm                        = get_task_mm(task);
	pasid_state->mm           = mm;
	pasid_state->device_state = dev_state;
	pasid_state->pasid        = pasid;
	pasid_state->invalid      = true;	/* Mark as valid only if we
						   are done with setting up
						   the pasid */
	pasid_state->mn.ops       = &iommu_mn;

	if (pasid_state->mm == NULL)
		goto out_free;

	mmu_notifier_register(&pasid_state->mn, mm);

	ret = set_pasid_state(dev_state, pasid_state, pasid);
	if (ret)
		goto out_unregister;

	ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
					__pa(pasid_state->mm->pgd));
	if (ret)
		goto out_clear_state;

	/* Now we are ready to handle faults */
	pasid_state->invalid = false;

	/*
	 * Drop the reference to the mm_struct here. We rely on the
	 * mmu_notifier release call-back to inform us when the mm
	 * is going away.
	 */
	mmput(mm);

	return 0;

out_clear_state:
	clear_pasid_state(dev_state, pasid);

out_unregister:
	mmu_notifier_unregister(&pasid_state->mn, mm);
	mmput(mm);

out_free:
	free_pasid_state(pasid_state);

out:
	put_device_state(dev_state);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_bind_pasid);

void amd_iommu_unbind_pasid(struct pci_dev *pdev, u32 pasid)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	u16 devid;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return;

	devid = device_id(pdev);
	dev_state = get_device_state(devid);
	if (dev_state == NULL)
		return;

	if (pasid >= dev_state->max_pasids)
		goto out;

	pasid_state = get_pasid_state(dev_state, pasid);
	if (pasid_state == NULL)
		goto out;
	/*
	 * Drop reference taken here. We are safe because we still hold
	 * the reference taken in the amd_iommu_bind_pasid function.
	 */
	put_pasid_state(pasid_state);

	/* Clear the pasid state so that the pasid can be re-used */
	clear_pasid_state(dev_state, pasid_state->pasid);

	/*
	 * Call mmu_notifier_unregister to drop our reference
	 * to pasid_state->mm
	 */
	mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

	put_pasid_state_wait(pasid_state); /* Reference taken in
					      amd_iommu_bind_pasid */
out:
	/* Drop reference taken in this function */
	put_device_state(dev_state);

	/* Drop reference taken in amd_iommu_bind_pasid */
	put_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_unbind_pasid);

int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
{
	struct device_state *dev_state;
	struct iommu_group *group;
	unsigned long flags;
	int ret, tmp;
	u16 devid;

	might_sleep();

	/*
	 * When memory encryption is active the device is likely not in a
	 * direct-mapped domain. Forbid using IOMMUv2 functionality for now.
	 */
	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return -ENODEV;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	if (pasids <= 0 || pasids > (PASID_MASK + 1))
		return -EINVAL;

	devid = device_id(pdev);

	dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
	if (dev_state == NULL)
		return -ENOMEM;

	spin_lock_init(&dev_state->lock);
	init_waitqueue_head(&dev_state->wq);
	dev_state->pdev  = pdev;
	dev_state->devid = devid;

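	/*
	 * Compute how many radix-tree levels are needed: each level
	 * resolves nine bits (512 entries per page).  E.g. up to 512
	 * PASIDs need no intermediate level, 65536 PASIDs need one
	 * level of indirection above the leaf pages.
	 */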
	tmp = pasids;
	for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
		dev_state->pasid_levels += 1;

	atomic_set(&dev_state->count, 1);
	dev_state->max_pasids = pasids;

	ret = -ENOMEM;
	dev_state->states = (void *)get_zeroed_page(GFP_KERNEL);
	if (dev_state->states == NULL)
		goto out_free_dev_state;

	dev_state->domain = iommu_domain_alloc(&pci_bus_type);
	if (dev_state->domain == NULL)
		goto out_free_states;

	amd_iommu_domain_direct_map(dev_state->domain);

	ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
	if (ret)
		goto out_free_domain;

	group = iommu_group_get(&pdev->dev);
	if (!group) {
		ret = -EINVAL;
		goto out_free_domain;
	}

	ret = iommu_attach_group(dev_state->domain, group);
	if (ret != 0)
		goto out_drop_group;

	iommu_group_put(group);

	spin_lock_irqsave(&state_lock, flags);

	if (__get_device_state(devid) != NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		ret = -EBUSY;
		goto out_free_domain;
	}

	list_add_tail(&dev_state->list, &state_list);

	spin_unlock_irqrestore(&state_lock, flags);

	return 0;

out_drop_group:
	iommu_group_put(group);

out_free_domain:
	iommu_domain_free(dev_state->domain);

out_free_states:
	free_page((unsigned long)dev_state->states);

out_free_dev_state:
	kfree(dev_state);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_init_device);

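/*
 * A minimal usage sketch (illustrative only, not taken from an
 * in-tree caller): a device driver initializes PASID support once,
 * binds a process address space, and later tears everything down:
 *
 *	ret = amd_iommu_init_device(pdev, 512);	// up to 512 PASIDs
 *	if (!ret)
 *		ret = amd_iommu_bind_pasid(pdev, pasid, current);
 *	...
 *	amd_iommu_unbind_pasid(pdev, pasid);
 *	amd_iommu_free_device(pdev);
 */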
void amd_iommu_free_device(struct pci_dev *pdev)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;

	if (!amd_iommu_v2_supported())
		return;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	dev_state = __get_device_state(devid);
	if (dev_state == NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		return;
	}

	list_del(&dev_state->list);

	spin_unlock_irqrestore(&state_lock, flags);

	/* Get rid of any remaining pasid states */
	free_pasid_states(dev_state);

	put_device_state(dev_state);
	/*
	 * Wait until the last reference is dropped before freeing
	 * the device state.
	 */
	wait_event(dev_state->wq, !atomic_read(&dev_state->count));
	free_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_free_device);

int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
				 amd_iommu_invalid_ppr_cb cb)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;
	int ret;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	ret = -EINVAL;
	dev_state = __get_device_state(devid);
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ppr_cb = cb;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalid_ppr_cb);

int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
				    amd_iommu_invalidate_ctx cb)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;
	int ret;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	ret = -EINVAL;
	dev_state = __get_device_state(devid);
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ctx_cb = cb;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);

static int __init amd_iommu_v2_init(void)
{
	int ret;

	pr_info("AMD IOMMUv2 driver by Joerg Roedel <jroedel@suse.de>\n");

	if (!amd_iommu_v2_supported()) {
		pr_info("AMD IOMMUv2 functionality not available on this system\n");
		/*
		 * Load anyway to provide the symbols to other modules
		 * which may use AMD IOMMUv2 optionally.
		 */
		return 0;
	}

	ret = -ENOMEM;
	iommu_wq = alloc_workqueue("amd_iommu_v2", WQ_MEM_RECLAIM, 0);
	if (iommu_wq == NULL)
		goto out;

	amd_iommu_register_ppr_notifier(&ppr_nb);

	return 0;

out:
	return ret;
}

static void __exit amd_iommu_v2_exit(void)
{
	struct device_state *dev_state;
	int i;

	if (!amd_iommu_v2_supported())
		return;

	amd_iommu_unregister_ppr_notifier(&ppr_nb);

	flush_workqueue(iommu_wq);

	/*
	 * The loop below might call flush_workqueue(), so call
	 * destroy_workqueue() after it
	 */
	for (i = 0; i < MAX_DEVICES; ++i) {
		dev_state = get_device_state(i);

		if (dev_state == NULL)
			continue;

		WARN_ON_ONCE(1);

		put_device_state(dev_state);
		amd_iommu_free_device(dev_state->pdev);
	}

	destroy_workqueue(iommu_wq);
}

module_init(amd_iommu_v2_init);
module_exit(amd_iommu_v2_exit);