// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 */

#define pr_fmt(fmt)     "AMD-Vi: " fmt
#define dev_fmt(fmt)    pr_fmt(fmt)

#include <linux/ratelimit.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/amba/bus.h>
#include <linux/platform_device.h>
#include <linux/pci-ats.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dma-direct.h>
#include <linux/dma-iommu.h>
#include <linux/iommu-helper.h>
#include <linux/iommu.h>
#include <linux/delay.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/export.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/dma-contiguous.h>
#include <linux/irqdomain.h>
#include <linux/percpu.h>
#include <linux/iova.h>
#include <asm/irq_remapping.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
#include <asm/hw_irq.h>
#include <asm/msidef.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/dma.h>

#include "amd_iommu_proto.h"
#include "amd_iommu_types.h"
#include "irq_remapping.h"

#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))

#define LOOP_TIMEOUT    100000

/* IO virtual address start page frame number */
#define IOVA_START_PFN          (1)
#define IOVA_PFN(addr)          ((addr) >> PAGE_SHIFT)

/* Reserved IOVA ranges */
#define MSI_RANGE_START         (0xfee00000)
#define MSI_RANGE_END           (0xfeefffff)
#define HT_RANGE_START          (0xfd00000000ULL)
#define HT_RANGE_END            (0xffffffffffULL)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * 512GB Pages are not supported due to a hardware bug
 */
#define AMD_IOMMU_PGSIZES       ((~0xFFFUL) & ~(2ULL << 38))

static DEFINE_SPINLOCK(pd_bitmap_lock);

/* List of all available dev_data structures */
static LLIST_HEAD(dev_data_list);

LIST_HEAD(ioapic_map);
LIST_HEAD(hpet_map);
LIST_HEAD(acpihid_map);

/*
 * Domain for untranslated devices - only allocated
 * if iommu=pt passed on kernel cmd line.
 */
const struct iommu_ops amd_iommu_ops;

static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
int amd_iommu_max_glx_val = -1;

/*
93 * general struct to manage commands send to an IOMMU
 */
struct iommu_cmd {
        u32 data[4];
};

struct kmem_cache *amd_iommu_irq_cache;

static void update_domain(struct protection_domain *domain);
static int protection_domain_init(struct protection_domain *domain);
static void detach_device(struct device *dev);

/****************************************************************************
 *
 * Helper functions
 *
 ****************************************************************************/

static inline u16 get_pci_device_id(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);

        return pci_dev_id(pdev);
}

static inline int get_acpihid_device_id(struct device *dev,
                                        struct acpihid_map_entry **entry)
{
        struct acpi_device *adev = ACPI_COMPANION(dev);
        struct acpihid_map_entry *p;

        if (!adev)
                return -ENODEV;

        list_for_each_entry(p, &acpihid_map, list) {
                if (acpi_dev_hid_uid_match(adev, p->hid, p->uid)) {
                        if (entry)
                                *entry = p;
                        return p->devid;
                }
        }
        return -EINVAL;
}

static inline int get_device_id(struct device *dev)
{
        int devid;

        if (dev_is_pci(dev))
                devid = get_pci_device_id(dev);
        else
                devid = get_acpihid_device_id(dev, NULL);

        return devid;
}

static struct protection_domain *to_pdomain(struct iommu_domain *dom)
{
        return container_of(dom, struct protection_domain, domain);
}

static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
        struct iommu_dev_data *dev_data;

        dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
        if (!dev_data)
                return NULL;

        spin_lock_init(&dev_data->lock);
        dev_data->devid = devid;
        ratelimit_default_init(&dev_data->rs);

        llist_add(&dev_data->dev_data_list, &dev_data_list);
        return dev_data;
}

static struct iommu_dev_data *search_dev_data(u16 devid)
{
        struct iommu_dev_data *dev_data;
        struct llist_node *node;

        if (llist_empty(&dev_data_list))
                return NULL;

        node = dev_data_list.first;
        llist_for_each_entry(dev_data, node, dev_data_list) {
                if (dev_data->devid == devid)
                        return dev_data;
        }

        return NULL;
}

static int clone_alias(struct pci_dev *pdev, u16 alias, void *data)
{
        u16 devid = pci_dev_id(pdev);

        if (devid == alias)
                return 0;

        amd_iommu_rlookup_table[alias] =
                amd_iommu_rlookup_table[devid];
        memcpy(amd_iommu_dev_table[alias].data,
               amd_iommu_dev_table[devid].data,
               sizeof(amd_iommu_dev_table[alias].data));

        return 0;
}

static void clone_aliases(struct pci_dev *pdev)
{
        if (!pdev)
                return;

        /*
         * The IVRS alias stored in the alias table may not be
         * part of the PCI DMA aliases if its bus differs
         * from the original device.
         */
        clone_alias(pdev, amd_iommu_alias_table[pci_dev_id(pdev)], NULL);

        pci_for_each_dma_alias(pdev, clone_alias, NULL);
}

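/*
 * Register the IVRS-reported alias as a PCI DMA alias where needed and
 * clone the device table entry to all DMA aliases of the device.
 */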
static struct pci_dev *setup_aliases(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        u16 ivrs_alias;

        /* For ACPI HID devices, there are no aliases */
        if (!dev_is_pci(dev))
                return NULL;

        /*
         * Add the IVRS alias to the pci aliases if it is on the same
         * bus. The IVRS table may know about a quirk that we don't.
         */
        ivrs_alias = amd_iommu_alias_table[pci_dev_id(pdev)];
        if (ivrs_alias != pci_dev_id(pdev) &&
            PCI_BUS_NUM(ivrs_alias) == pdev->bus->number)
                pci_add_dma_alias(pdev, ivrs_alias & 0xff, 1);

        clone_aliases(pdev);

        return pdev;
}

static struct iommu_dev_data *find_dev_data(u16 devid)
{
        struct iommu_dev_data *dev_data;
        struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

        dev_data = search_dev_data(devid);

        if (dev_data == NULL) {
                dev_data = alloc_dev_data(devid);
                if (!dev_data)
                        return NULL;

                if (translation_pre_enabled(iommu))
                        dev_data->defer_attach = true;
        }

        return dev_data;
}

struct iommu_dev_data *get_dev_data(struct device *dev)
{
        return dev->archdata.iommu;
}
EXPORT_SYMBOL(get_dev_data);

/*
 * Find or create an IOMMU group for an acpihid device.
 */
static struct iommu_group *acpihid_device_group(struct device *dev)
{
        struct acpihid_map_entry *p, *entry = NULL;
        int devid;

        devid = get_acpihid_device_id(dev, &entry);
        if (devid < 0)
                return ERR_PTR(devid);

        list_for_each_entry(p, &acpihid_map, list) {
                if ((devid == p->devid) && p->group)
                        entry->group = p->group;
        }

        if (!entry->group)
                entry->group = generic_device_group(dev);
        else
                iommu_group_ref_get(entry->group);

        return entry->group;
}

static bool pci_iommuv2_capable(struct pci_dev *pdev)
{
        static const int caps[] = {
                PCI_EXT_CAP_ID_ATS,
                PCI_EXT_CAP_ID_PRI,
                PCI_EXT_CAP_ID_PASID,
        };
        int i, pos;

        if (pci_ats_disabled())
                return false;

        for (i = 0; i < 3; ++i) {
                pos = pci_find_ext_capability(pdev, caps[i]);
                if (pos == 0)
                        return false;
        }

        return true;
}

static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum)
{
        struct iommu_dev_data *dev_data;

        dev_data = get_dev_data(&pdev->dev);

        return dev_data->errata & (1 << erratum) ? true : false;
}

/*
 * This function checks if the driver got a valid device from the caller to
 * avoid dereferencing invalid pointers.
 */
static bool check_device(struct device *dev)
{
        int devid;

        if (!dev || !dev->dma_mask)
                return false;

        devid = get_device_id(dev);
        if (devid < 0)
                return false;

        /* Out of our scope? */
        if (devid > amd_iommu_last_bdf)
                return false;

        if (amd_iommu_rlookup_table[devid] == NULL)
                return false;

        return true;
}

static void init_iommu_group(struct device *dev)
{
        struct iommu_group *group;

        group = iommu_group_get_for_dev(dev);
        if (IS_ERR(group))
                return;

        iommu_group_put(group);
}

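/*
 * Allocate the per-device IOMMU data for a device, set up its DMA
 * aliases and link the device to its IOMMU.
 */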
static int iommu_init_device(struct device *dev)
{
        struct iommu_dev_data *dev_data;
        struct amd_iommu *iommu;
        int devid;

        if (dev->archdata.iommu)
                return 0;

        devid = get_device_id(dev);
        if (devid < 0)
                return devid;

        iommu = amd_iommu_rlookup_table[devid];

        dev_data = find_dev_data(devid);
        if (!dev_data)
                return -ENOMEM;

        dev_data->pdev = setup_aliases(dev);

        /*
         * By default we use passthrough mode for IOMMUv2 capable device.
         * But if amd_iommu=force_isolation is set (e.g. to debug DMA to
         * invalid address), we ignore the capability for the device so
         * it'll be forced to go into translation mode.
         */
        if ((iommu_default_passthrough() || !amd_iommu_force_isolation) &&
            dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
                struct amd_iommu *iommu;

                iommu = amd_iommu_rlookup_table[dev_data->devid];
                dev_data->iommu_v2 = iommu->is_iommu_v2;
        }

        dev->archdata.iommu = dev_data;

        iommu_device_link(&iommu->iommu, dev);

        return 0;
}

static void iommu_ignore_device(struct device *dev)
{
        int devid;

        devid = get_device_id(dev);
        if (devid < 0)
                return;

        amd_iommu_rlookup_table[devid] = NULL;
        memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));

        setup_aliases(dev);
}

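/*
 * Tear down the per-device IOMMU state: detach the device from its
 * domain, unlink it from the IOMMU and remove it from its IOMMU group.
 */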
static void iommu_uninit_device(struct device *dev)
{
        struct iommu_dev_data *dev_data;
        struct amd_iommu *iommu;
        int devid;

        devid = get_device_id(dev);
        if (devid < 0)
                return;

        iommu = amd_iommu_rlookup_table[devid];

        dev_data = search_dev_data(devid);
        if (!dev_data)
                return;

        if (dev_data->domain)
                detach_device(dev);

        iommu_device_unlink(&iommu->iommu, dev);

        iommu_group_remove_device(dev);

        /* Remove dma-ops */
        dev->dma_ops = NULL;

        /*
         * We keep dev_data around for unplugged devices and reuse it when the
         * device is re-plugged - not doing so would introduce a ton of races.
         */
}

/*
 * Helper function to get the first pte of a large mapping
 */
static u64 *first_pte_l7(u64 *pte, unsigned long *page_size,
                         unsigned long *count)
{
        unsigned long pte_mask, pg_size, cnt;
        u64 *fpte;

        pg_size  = PTE_PAGE_SIZE(*pte);
        cnt      = PAGE_SIZE_PTE_COUNT(pg_size);
        pte_mask = ~((cnt << 3) - 1);
        fpte     = (u64 *)(((unsigned long)pte) & pte_mask);

        if (page_size)
                *page_size = pg_size;

        if (count)
                *count = cnt;

        return fpte;
}

/****************************************************************************
 *
 * Interrupt handling functions
 *
 ****************************************************************************/

static void dump_dte_entry(u16 devid)
{
        int i;

        for (i = 0; i < 4; ++i)
                pr_err("DTE[%d]: %016llx\n", i,
                       amd_iommu_dev_table[devid].data[i]);
}

static void dump_command(unsigned long phys_addr)
{
        struct iommu_cmd *cmd = iommu_phys_to_virt(phys_addr);
        int i;

        for (i = 0; i < 4; ++i)
                pr_err("CMD[%d]: %08x\n", i, cmd->data[i]);
}

static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
                                        u64 address, int flags)
{
        struct iommu_dev_data *dev_data = NULL;
        struct pci_dev *pdev;

        pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
                                           devid & 0xff);
        if (pdev)
                dev_data = get_dev_data(&pdev->dev);

        if (dev_data && __ratelimit(&dev_data->rs)) {
                pci_err(pdev, "Event logged [IO_PAGE_FAULT domain=0x%04x address=0x%llx flags=0x%04x]\n",
                        domain_id, address, flags);
        } else if (printk_ratelimit()) {
                pr_err("Event logged [IO_PAGE_FAULT device=%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n",
                       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                       domain_id, address, flags);
        }

        if (pdev)
                pci_dev_put(pdev);
}

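/* Decode and report a single entry from the IOMMU hardware event log */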
static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
{
        struct device *dev = iommu->iommu.dev;
        int type, devid, pasid, flags, tag;
        volatile u32 *event = __evt;
        int count = 0;
        u64 address;

retry:
        type    = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
        devid   = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
        pasid   = (event[0] & EVENT_DOMID_MASK_HI) |
                  (event[1] & EVENT_DOMID_MASK_LO);
        flags   = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
        address = (u64)(((u64)event[3]) << 32) | event[2];

        if (type == 0) {
                /* Did we hit the erratum? */
                if (++count == LOOP_TIMEOUT) {
                        pr_err("No event written to event log\n");
                        return;
                }
                udelay(1);
                goto retry;
        }

        if (type == EVENT_TYPE_IO_FAULT) {
                amd_iommu_report_page_fault(devid, pasid, address, flags);
                return;
        }

        switch (type) {
        case EVENT_TYPE_ILL_DEV:
                dev_err(dev, "Event logged [ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
                        PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                        pasid, address, flags);
                dump_dte_entry(devid);
                break;
        case EVENT_TYPE_DEV_TAB_ERR:
                dev_err(dev, "Event logged [DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
                        "address=0x%llx flags=0x%04x]\n",
                        PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                        address, flags);
                break;
        case EVENT_TYPE_PAGE_TAB_ERR:
                dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x pasid=0x%04x address=0x%llx flags=0x%04x]\n",
                        PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                        pasid, address, flags);
                break;
        case EVENT_TYPE_ILL_CMD:
                dev_err(dev, "Event logged [ILLEGAL_COMMAND_ERROR address=0x%llx]\n", address);
                dump_command(address);
                break;
        case EVENT_TYPE_CMD_HARD_ERR:
                dev_err(dev, "Event logged [COMMAND_HARDWARE_ERROR address=0x%llx flags=0x%04x]\n",
                        address, flags);
                break;
        case EVENT_TYPE_IOTLB_INV_TO:
                dev_err(dev, "Event logged [IOTLB_INV_TIMEOUT device=%02x:%02x.%x address=0x%llx]\n",
                        PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                        address);
                break;
        case EVENT_TYPE_INV_DEV_REQ:
                dev_err(dev, "Event logged [INVALID_DEVICE_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
                        PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                        pasid, address, flags);
                break;
        case EVENT_TYPE_INV_PPR_REQ:
                pasid = PPR_PASID(*((u64 *)__evt));
                tag = event[1] & 0x03FF;
                dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n",
                        PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                        pasid, address, flags, tag);
                break;
        default:
                dev_err(dev, "Event logged [UNKNOWN event[0]=0x%08x event[1]=0x%08x event[2]=0x%08x event[3]=0x%08x\n",
                        event[0], event[1], event[2], event[3]);
        }

        memset(__evt, 0, 4 * sizeof(u32));
}

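/* Process all pending event log entries and advance the head pointer */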
static void iommu_poll_events(struct amd_iommu *iommu)
{
        u32 head, tail;

        head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
        tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

        while (head != tail) {
                iommu_print_event(iommu, iommu->evt_buf + head);
                head = (head + EVENT_ENTRY_SIZE) % EVT_BUFFER_SIZE;
        }

        writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
}

static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
{
        struct amd_iommu_fault fault;

        if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
                pr_err_ratelimited("Unknown PPR request received\n");
                return;
        }

        fault.address   = raw[1];
        fault.pasid     = PPR_PASID(raw[0]);
        fault.device_id = PPR_DEVID(raw[0]);
        fault.tag       = PPR_TAG(raw[0]);
        fault.flags     = PPR_FLAGS(raw[0]);

        atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
}

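/*
 * Drain the Peripheral Page Request (PPR) log and forward each fault
 * entry to the registered notifier chain.
 */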
static void iommu_poll_ppr_log(struct amd_iommu *iommu)
{
        u32 head, tail;

        if (iommu->ppr_log == NULL)
                return;

        head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
        tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

        while (head != tail) {
                volatile u64 *raw;
                u64 entry[2];
                int i;

                raw = (u64 *)(iommu->ppr_log + head);

                /*
                 * Hardware bug: Interrupt may arrive before the entry is
                 * written to memory. If this happens we need to wait for the
                 * entry to arrive.
                 */
                for (i = 0; i < LOOP_TIMEOUT; ++i) {
                        if (PPR_REQ_TYPE(raw[0]) != 0)
                                break;
                        udelay(1);
                }

                /* Avoid memcpy function-call overhead */
                entry[0] = raw[0];
                entry[1] = raw[1];

                /*
                 * To detect the hardware bug we need to clear the entry
                 * back to zero.
                 */
                raw[0] = raw[1] = 0UL;

                /* Update head pointer of hardware ring-buffer */
                head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
                writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);

                /* Handle PPR entry */
                iommu_handle_ppr_entry(iommu, entry);

                /* Refresh ring-buffer information */
                head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
                tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
        }
}

#ifdef CONFIG_IRQ_REMAP
static int (*iommu_ga_log_notifier)(u32);

int amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
{
        iommu_ga_log_notifier = notifier;

        return 0;
}
EXPORT_SYMBOL(amd_iommu_register_ga_log_notifier);

static void iommu_poll_ga_log(struct amd_iommu *iommu)
{
        u32 head, tail, cnt = 0;

        if (iommu->ga_log == NULL)
                return;

        head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
        tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET);

        while (head != tail) {
                volatile u64 *raw;
                u64 log_entry;

                raw = (u64 *)(iommu->ga_log + head);
                cnt++;

                /* Avoid memcpy function-call overhead */
                log_entry = *raw;

                /* Update head pointer of hardware ring-buffer */
                head = (head + GA_ENTRY_SIZE) % GA_LOG_SIZE;
                writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);

                /* Handle GA entry */
                switch (GA_REQ_TYPE(log_entry)) {
                case GA_GUEST_NR:
                        if (!iommu_ga_log_notifier)
                                break;

                        pr_debug("%s: devid=%#x, ga_tag=%#x\n",
                                 __func__, GA_DEVID(log_entry),
                                 GA_TAG(log_entry));

                        if (iommu_ga_log_notifier(GA_TAG(log_entry)) != 0)
                                pr_err("GA log notifier failed.\n");
                        break;
                default:
                        break;
                }
        }
}
#endif /* CONFIG_IRQ_REMAP */

#define AMD_IOMMU_INT_MASK \
        (MMIO_STATUS_EVT_INT_MASK | \
         MMIO_STATUS_PPR_INT_MASK | \
         MMIO_STATUS_GALOG_INT_MASK)

irqreturn_t amd_iommu_int_thread(int irq, void *data)
{
        struct amd_iommu *iommu = (struct amd_iommu *) data;
        u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);

        while (status & AMD_IOMMU_INT_MASK) {
                /* Enable EVT and PPR and GA interrupts again */
                writel(AMD_IOMMU_INT_MASK,
                        iommu->mmio_base + MMIO_STATUS_OFFSET);

                if (status & MMIO_STATUS_EVT_INT_MASK) {
                        pr_devel("Processing IOMMU Event Log\n");
                        iommu_poll_events(iommu);
                }

                if (status & MMIO_STATUS_PPR_INT_MASK) {
                        pr_devel("Processing IOMMU PPR Log\n");
                        iommu_poll_ppr_log(iommu);
                }

#ifdef CONFIG_IRQ_REMAP
                if (status & MMIO_STATUS_GALOG_INT_MASK) {
                        pr_devel("Processing IOMMU GA Log\n");
                        iommu_poll_ga_log(iommu);
                }
#endif

                /*
                 * Hardware bug: ERBT1312
                 * When re-enabling interrupt (by writing 1
                 * to clear the bit), the hardware might also try to set
                 * the interrupt bit in the event status register.
                 * In this scenario, the bit will be set, and disable
                 * subsequent interrupts.
                 *
                 * Workaround: The IOMMU driver should read back the
                 * status register and check if the interrupt bits are cleared.
                 * If not, driver will need to go through the interrupt handler
                 * again and re-clear the bits
                 */
                status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
        }
        return IRQ_HANDLED;
}

irqreturn_t amd_iommu_int_handler(int irq, void *data)
{
        return IRQ_WAKE_THREAD;
}

/****************************************************************************
 *
 * IOMMU command queuing functions
 *
 ****************************************************************************/

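/*
 * Spin until the IOMMU writes the completion-wait semaphore or the
 * polling loop times out.
 */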
static int wait_on_sem(volatile u64 *sem)
{
        int i = 0;

        while (*sem == 0 && i < LOOP_TIMEOUT) {
                udelay(1);
                i += 1;
        }

        if (i == LOOP_TIMEOUT) {
                pr_alert("Completion-Wait loop timed out\n");
                return -EIO;
        }

        return 0;
}

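/*
 * Copy a command into the command ring buffer and inform the hardware
 * by writing the new tail pointer to the MMIO tail register.
 */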
static void copy_cmd_to_buffer(struct amd_iommu *iommu,
                               struct iommu_cmd *cmd)
{
        u8 *target;
        u32 tail;

        /* Copy command to buffer */
        tail = iommu->cmd_buf_tail;
        target = iommu->cmd_buf + tail;
        memcpy(target, cmd, sizeof(*cmd));

        tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
        iommu->cmd_buf_tail = tail;

        /* Tell the IOMMU about it */
        writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
}

static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
{
        u64 paddr = iommu_virt_to_phys((void *)address);

        WARN_ON(address & 0x7ULL);

        memset(cmd, 0, sizeof(*cmd));
        cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
        cmd->data[1] = upper_32_bits(paddr);
        cmd->data[2] = 1;
        CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
}

static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
{
        memset(cmd, 0, sizeof(*cmd));
        cmd->data[0] = devid;
        CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);
}

Joerg Roedel11b64022011-04-06 11:49:28 +0200853static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
854 size_t size, u16 domid, int pde)
855{
856 u64 pages;
Quentin Lambertae0cbbb2015-02-04 11:40:07 +0100857 bool s;
Joerg Roedel11b64022011-04-06 11:49:28 +0200858
859 pages = iommu_num_pages(address, size, PAGE_SIZE);
Quentin Lambertae0cbbb2015-02-04 11:40:07 +0100860 s = false;
Joerg Roedel11b64022011-04-06 11:49:28 +0200861
862 if (pages > 1) {
863 /*
864 * If we have to flush more than one page, flush all
865 * TLB entries for this domain
866 */
867 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
Quentin Lambertae0cbbb2015-02-04 11:40:07 +0100868 s = true;
Joerg Roedel11b64022011-04-06 11:49:28 +0200869 }
870
871 address &= PAGE_MASK;
872
873 memset(cmd, 0, sizeof(*cmd));
874 cmd->data[1] |= domid;
875 cmd->data[2] = lower_32_bits(address);
876 cmd->data[3] = upper_32_bits(address);
877 CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
878 if (s) /* size bit - we flush more than one 4kb page */
879 cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
Frank Arnolddf805ab2012-08-27 19:21:04 +0200880 if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
Joerg Roedel11b64022011-04-06 11:49:28 +0200881 cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
882}
883
Joerg Roedelcb41ed82011-04-05 11:00:53 +0200884static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
885 u64 address, size_t size)
886{
887 u64 pages;
Quentin Lambertae0cbbb2015-02-04 11:40:07 +0100888 bool s;
Joerg Roedelcb41ed82011-04-05 11:00:53 +0200889
890 pages = iommu_num_pages(address, size, PAGE_SIZE);
Quentin Lambertae0cbbb2015-02-04 11:40:07 +0100891 s = false;
Joerg Roedelcb41ed82011-04-05 11:00:53 +0200892
893 if (pages > 1) {
894 /*
895 * If we have to flush more than one page, flush all
896 * TLB entries for this domain
897 */
898 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
Quentin Lambertae0cbbb2015-02-04 11:40:07 +0100899 s = true;
Joerg Roedelcb41ed82011-04-05 11:00:53 +0200900 }
901
902 address &= PAGE_MASK;
903
904 memset(cmd, 0, sizeof(*cmd));
905 cmd->data[0] = devid;
906 cmd->data[0] |= (qdep & 0xff) << 24;
907 cmd->data[1] = devid;
908 cmd->data[2] = lower_32_bits(address);
909 cmd->data[3] = upper_32_bits(address);
910 CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
911 if (s)
912 cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
913}
914
Joerg Roedel22e266c2011-11-21 15:59:08 +0100915static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, int pasid,
916 u64 address, bool size)
917{
918 memset(cmd, 0, sizeof(*cmd));
919
920 address &= ~(0xfffULL);
921
Suravee Suthikulpanita919a012014-03-05 18:54:18 -0600922 cmd->data[0] = pasid;
Joerg Roedel22e266c2011-11-21 15:59:08 +0100923 cmd->data[1] = domid;
924 cmd->data[2] = lower_32_bits(address);
925 cmd->data[3] = upper_32_bits(address);
926 cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
927 cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
928 if (size)
929 cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
930 CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
931}
932
933static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, int pasid,
934 int qdep, u64 address, bool size)
935{
936 memset(cmd, 0, sizeof(*cmd));
937
938 address &= ~(0xfffULL);
939
940 cmd->data[0] = devid;
Jay Cornwalle8d2d822014-02-26 15:49:31 -0600941 cmd->data[0] |= ((pasid >> 8) & 0xff) << 16;
Joerg Roedel22e266c2011-11-21 15:59:08 +0100942 cmd->data[0] |= (qdep & 0xff) << 24;
943 cmd->data[1] = devid;
Jay Cornwalle8d2d822014-02-26 15:49:31 -0600944 cmd->data[1] |= (pasid & 0xff) << 16;
Joerg Roedel22e266c2011-11-21 15:59:08 +0100945 cmd->data[2] = lower_32_bits(address);
946 cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
947 cmd->data[3] = upper_32_bits(address);
948 if (size)
949 cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
950 CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
951}
952
Joerg Roedelc99afa22011-11-21 18:19:25 +0100953static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, int pasid,
954 int status, int tag, bool gn)
955{
956 memset(cmd, 0, sizeof(*cmd));
957
958 cmd->data[0] = devid;
959 if (gn) {
Suravee Suthikulpanita919a012014-03-05 18:54:18 -0600960 cmd->data[1] = pasid;
Joerg Roedelc99afa22011-11-21 18:19:25 +0100961 cmd->data[2] = CMD_INV_IOMMU_PAGES_GN_MASK;
962 }
963 cmd->data[3] = tag & 0x1ff;
964 cmd->data[3] |= (status & PPR_STATUS_MASK) << PPR_STATUS_SHIFT;
965
966 CMD_SET_TYPE(cmd, CMD_COMPLETE_PPR);
967}
968
Joerg Roedel58fc7f12011-04-11 11:13:24 +0200969static void build_inv_all(struct iommu_cmd *cmd)
970{
971 memset(cmd, 0, sizeof(*cmd));
972 CMD_SET_TYPE(cmd, CMD_INV_ALL);
Joerg Roedela19ae1e2008-06-26 21:27:55 +0200973}
974
Joerg Roedel7ef27982012-06-21 16:46:04 +0200975static void build_inv_irt(struct iommu_cmd *cmd, u16 devid)
976{
977 memset(cmd, 0, sizeof(*cmd));
978 cmd->data[0] = devid;
979 CMD_SET_TYPE(cmd, CMD_INV_IRT);
980}
981
Joerg Roedel431b2a22008-07-11 17:14:22 +0200982/*
Joerg Roedelb6c02712008-06-26 21:27:53 +0200983 * Writes the command to the IOMMUs command buffer and informs the
Joerg Roedelac0ea6e2011-04-06 18:38:20 +0200984 * hardware about the new command.
Joerg Roedel431b2a22008-07-11 17:14:22 +0200985 */
Joerg Roedel4bf5bee2016-09-14 11:41:59 +0200986static int __iommu_queue_command_sync(struct amd_iommu *iommu,
987 struct iommu_cmd *cmd,
988 bool sync)
Joerg Roedela19ae1e2008-06-26 21:27:55 +0200989{
Tom Lendacky23e967e2017-06-05 14:52:26 -0500990 unsigned int count = 0;
Tom Lendackyd334a562017-06-05 14:52:12 -0500991 u32 left, next_tail;
Joerg Roedela19ae1e2008-06-26 21:27:55 +0200992
Tom Lendackyd334a562017-06-05 14:52:12 -0500993 next_tail = (iommu->cmd_buf_tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
Joerg Roedelac0ea6e2011-04-06 18:38:20 +0200994again:
Tom Lendackyd334a562017-06-05 14:52:12 -0500995 left = (iommu->cmd_buf_head - next_tail) % CMD_BUFFER_SIZE;
Joerg Roedelac0ea6e2011-04-06 18:38:20 +0200996
Huang Rui432abf62016-12-12 07:28:26 -0500997 if (left <= 0x20) {
Tom Lendacky23e967e2017-06-05 14:52:26 -0500998 /* Skip udelay() the first time around */
999 if (count++) {
1000 if (count == LOOP_TIMEOUT) {
Joerg Roedel101fa032018-11-27 16:22:31 +01001001 pr_err("Command buffer timeout\n");
Tom Lendacky23e967e2017-06-05 14:52:26 -05001002 return -EIO;
1003 }
Joerg Roedelac0ea6e2011-04-06 18:38:20 +02001004
Tom Lendacky23e967e2017-06-05 14:52:26 -05001005 udelay(1);
Tom Lendackyd334a562017-06-05 14:52:12 -05001006 }
Joerg Roedel4bf5bee2016-09-14 11:41:59 +02001007
Tom Lendacky23e967e2017-06-05 14:52:26 -05001008 /* Update head and recheck remaining space */
1009 iommu->cmd_buf_head = readl(iommu->mmio_base +
1010 MMIO_CMD_HEAD_OFFSET);
Joerg Roedelac0ea6e2011-04-06 18:38:20 +02001011
1012 goto again;
Joerg Roedel136f78a2008-07-11 17:14:27 +02001013 }
1014
Tom Lendackyd334a562017-06-05 14:52:12 -05001015 copy_cmd_to_buffer(iommu, cmd);
Joerg Roedel519c31b2008-08-14 19:55:15 +02001016
Tom Lendacky23e967e2017-06-05 14:52:26 -05001017 /* Do we need to make sure all commands are processed? */
Joerg Roedelf1ca1512011-09-02 14:10:32 +02001018 iommu->need_sync = sync;
Joerg Roedelac0ea6e2011-04-06 18:38:20 +02001019
Joerg Roedel4bf5bee2016-09-14 11:41:59 +02001020 return 0;
1021}
1022
1023static int iommu_queue_command_sync(struct amd_iommu *iommu,
1024 struct iommu_cmd *cmd,
1025 bool sync)
1026{
1027 unsigned long flags;
1028 int ret;
1029
Scott Wood27790392018-01-21 03:28:54 -06001030 raw_spin_lock_irqsave(&iommu->lock, flags);
Joerg Roedel4bf5bee2016-09-14 11:41:59 +02001031 ret = __iommu_queue_command_sync(iommu, cmd, sync);
Scott Wood27790392018-01-21 03:28:54 -06001032 raw_spin_unlock_irqrestore(&iommu->lock, flags);
Joerg Roedela19ae1e2008-06-26 21:27:55 +02001033
Joerg Roedel4bf5bee2016-09-14 11:41:59 +02001034 return ret;
Joerg Roedel8d201962008-12-02 20:34:41 +01001035}
1036
Joerg Roedelf1ca1512011-09-02 14:10:32 +02001037static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
1038{
1039 return iommu_queue_command_sync(iommu, cmd, true);
1040}
1041
Joerg Roedel8d201962008-12-02 20:34:41 +01001042/*
1043 * This function queues a completion wait command into the command
1044 * buffer of an IOMMU
1045 */
Joerg Roedel8d201962008-12-02 20:34:41 +01001046static int iommu_completion_wait(struct amd_iommu *iommu)
1047{
Joerg Roedel815b33f2011-04-06 17:26:49 +02001048 struct iommu_cmd cmd;
Joerg Roedel4bf5bee2016-09-14 11:41:59 +02001049 unsigned long flags;
Joerg Roedelac0ea6e2011-04-06 18:38:20 +02001050 int ret;
Joerg Roedel8d201962008-12-02 20:34:41 +01001051
1052 if (!iommu->need_sync)
Joerg Roedel815b33f2011-04-06 17:26:49 +02001053 return 0;
Joerg Roedel8d201962008-12-02 20:34:41 +01001054
Joerg Roedel8d201962008-12-02 20:34:41 +01001055
Joerg Roedel4bf5bee2016-09-14 11:41:59 +02001056 build_completion_wait(&cmd, (u64)&iommu->cmd_sem);
1057
Scott Wood27790392018-01-21 03:28:54 -06001058 raw_spin_lock_irqsave(&iommu->lock, flags);
Joerg Roedel4bf5bee2016-09-14 11:41:59 +02001059
1060 iommu->cmd_sem = 0;
1061
1062 ret = __iommu_queue_command_sync(iommu, &cmd, false);
Joerg Roedel8d201962008-12-02 20:34:41 +01001063 if (ret)
Joerg Roedel4bf5bee2016-09-14 11:41:59 +02001064 goto out_unlock;
Joerg Roedel8d201962008-12-02 20:34:41 +01001065
Joerg Roedel4bf5bee2016-09-14 11:41:59 +02001066 ret = wait_on_sem(&iommu->cmd_sem);
1067
1068out_unlock:
Scott Wood27790392018-01-21 03:28:54 -06001069 raw_spin_unlock_irqrestore(&iommu->lock, flags);
Joerg Roedel4bf5bee2016-09-14 11:41:59 +02001070
1071 return ret;
Joerg Roedela19ae1e2008-06-26 21:27:55 +02001072}
1073
Joerg Roedeld8c13082011-04-06 18:51:26 +02001074static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
Joerg Roedela19ae1e2008-06-26 21:27:55 +02001075{
1076 struct iommu_cmd cmd;
1077
Joerg Roedeld8c13082011-04-06 18:51:26 +02001078 build_inv_dte(&cmd, devid);
Joerg Roedela19ae1e2008-06-26 21:27:55 +02001079
Joerg Roedeld8c13082011-04-06 18:51:26 +02001080 return iommu_queue_command(iommu, &cmd);
1081}
1082
Joerg Roedel0688a092017-08-23 15:50:03 +02001083static void amd_iommu_flush_dte_all(struct amd_iommu *iommu)
Joerg Roedel7d0c5cc2011-04-07 08:16:10 +02001084{
1085 u32 devid;
1086
1087 for (devid = 0; devid <= 0xffff; ++devid)
1088 iommu_flush_dte(iommu, devid);
1089
1090 iommu_completion_wait(iommu);
Joerg Roedela19ae1e2008-06-26 21:27:55 +02001091}
1092
1093/*
Joerg Roedel7d0c5cc2011-04-07 08:16:10 +02001094 * This function uses heavy locking and may disable irqs for some time. But
1095 * this is no issue because it is only called during resume.
Joerg Roedela19ae1e2008-06-26 21:27:55 +02001096 */
Joerg Roedel0688a092017-08-23 15:50:03 +02001097static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
Joerg Roedela19ae1e2008-06-26 21:27:55 +02001098{
Joerg Roedel7d0c5cc2011-04-07 08:16:10 +02001099 u32 dom_id;
Joerg Roedela19ae1e2008-06-26 21:27:55 +02001100
Joerg Roedel7d0c5cc2011-04-07 08:16:10 +02001101 for (dom_id = 0; dom_id <= 0xffff; ++dom_id) {
1102 struct iommu_cmd cmd;
1103 build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
1104 dom_id, 1);
1105 iommu_queue_command(iommu, &cmd);
1106 }
Joerg Roedel431b2a22008-07-11 17:14:22 +02001107
Joerg Roedel7d0c5cc2011-04-07 08:16:10 +02001108 iommu_completion_wait(iommu);
Joerg Roedela19ae1e2008-06-26 21:27:55 +02001109}
1110
Stuart Hayes36b72002019-09-05 12:09:48 -05001111static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
1112{
1113 struct iommu_cmd cmd;
1114
1115 build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
1116 dom_id, 1);
1117 iommu_queue_command(iommu, &cmd);
1118
1119 iommu_completion_wait(iommu);
1120}
1121
Joerg Roedel0688a092017-08-23 15:50:03 +02001122static void amd_iommu_flush_all(struct amd_iommu *iommu)
Joerg Roedel58fc7f12011-04-11 11:13:24 +02001123{
1124 struct iommu_cmd cmd;
1125
1126 build_inv_all(&cmd);
1127
1128 iommu_queue_command(iommu, &cmd);
1129 iommu_completion_wait(iommu);
1130}
1131
Joerg Roedel7ef27982012-06-21 16:46:04 +02001132static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid)
1133{
1134 struct iommu_cmd cmd;
1135
1136 build_inv_irt(&cmd, devid);
1137
1138 iommu_queue_command(iommu, &cmd);
1139}
1140
Joerg Roedel0688a092017-08-23 15:50:03 +02001141static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)
Joerg Roedel7ef27982012-06-21 16:46:04 +02001142{
1143 u32 devid;
1144
1145 for (devid = 0; devid <= MAX_DEV_TABLE_ENTRIES; devid++)
1146 iommu_flush_irt(iommu, devid);
1147
1148 iommu_completion_wait(iommu);
1149}
1150
Joerg Roedel7d0c5cc2011-04-07 08:16:10 +02001151void iommu_flush_all_caches(struct amd_iommu *iommu)
1152{
Joerg Roedel58fc7f12011-04-11 11:13:24 +02001153 if (iommu_feature(iommu, FEATURE_IA)) {
Joerg Roedel0688a092017-08-23 15:50:03 +02001154 amd_iommu_flush_all(iommu);
Joerg Roedel58fc7f12011-04-11 11:13:24 +02001155 } else {
Joerg Roedel0688a092017-08-23 15:50:03 +02001156 amd_iommu_flush_dte_all(iommu);
1157 amd_iommu_flush_irt_all(iommu);
1158 amd_iommu_flush_tlb_all(iommu);
Joerg Roedel58fc7f12011-04-11 11:13:24 +02001159 }
Joerg Roedel7d0c5cc2011-04-07 08:16:10 +02001160}
1161
Joerg Roedel431b2a22008-07-11 17:14:22 +02001162/*
Joerg Roedelcb41ed82011-04-05 11:00:53 +02001163 * Command send function for flushing on-device TLB
1164 */
Joerg Roedel6c542042011-06-09 17:07:31 +02001165static int device_flush_iotlb(struct iommu_dev_data *dev_data,
1166 u64 address, size_t size)
Joerg Roedelcb41ed82011-04-05 11:00:53 +02001167{
Joerg Roedelcb41ed82011-04-05 11:00:53 +02001168 struct amd_iommu *iommu;
1169 struct iommu_cmd cmd;
Joerg Roedelcb41ed82011-04-05 11:00:53 +02001170 int qdep;
1171
Joerg Roedelea61cdd2011-06-09 12:56:30 +02001172 qdep = dev_data->ats.qdep;
1173 iommu = amd_iommu_rlookup_table[dev_data->devid];
Joerg Roedelcb41ed82011-04-05 11:00:53 +02001174
Joerg Roedelea61cdd2011-06-09 12:56:30 +02001175 build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);
Joerg Roedelcb41ed82011-04-05 11:00:53 +02001176
1177 return iommu_queue_command(iommu, &cmd);
1178}
1179
Logan Gunthorpe33323642019-10-22 16:01:20 -06001180static int device_flush_dte_alias(struct pci_dev *pdev, u16 alias, void *data)
1181{
1182 struct amd_iommu *iommu = data;
1183
1184 return iommu_flush_dte(iommu, alias);
1185}
1186
Joerg Roedelcb41ed82011-04-05 11:00:53 +02001187/*
Joerg Roedel431b2a22008-07-11 17:14:22 +02001188 * Command send function for invalidating a device table entry
1189 */
Joerg Roedel6c542042011-06-09 17:07:31 +02001190static int device_flush_dte(struct iommu_dev_data *dev_data)
Joerg Roedel3fa43652009-11-26 15:04:38 +01001191{
1192 struct amd_iommu *iommu;
Joerg Roedele25bfb52015-10-20 17:33:38 +02001193 u16 alias;
Joerg Roedelcb41ed82011-04-05 11:00:53 +02001194 int ret;
Joerg Roedel3fa43652009-11-26 15:04:38 +01001195
Joerg Roedel6c542042011-06-09 17:07:31 +02001196 iommu = amd_iommu_rlookup_table[dev_data->devid];
Joerg Roedel3fa43652009-11-26 15:04:38 +01001197
Logan Gunthorpe33323642019-10-22 16:01:20 -06001198 if (dev_data->pdev)
1199 ret = pci_for_each_dma_alias(dev_data->pdev,
1200 device_flush_dte_alias, iommu);
1201 else
1202 ret = iommu_flush_dte(iommu, dev_data->devid);
Joerg Roedelcb41ed82011-04-05 11:00:53 +02001203 if (ret)
1204 return ret;
1205
Logan Gunthorpe33323642019-10-22 16:01:20 -06001206 alias = amd_iommu_alias_table[dev_data->devid];
1207 if (alias != dev_data->devid) {
1208 ret = iommu_flush_dte(iommu, alias);
1209 if (ret)
1210 return ret;
1211 }
1212
Joerg Roedelea61cdd2011-06-09 12:56:30 +02001213 if (dev_data->ats.enabled)
Joerg Roedel6c542042011-06-09 17:07:31 +02001214 ret = device_flush_iotlb(dev_data, 0, ~0UL);
Joerg Roedelcb41ed82011-04-05 11:00:53 +02001215
1216 return ret;
Joerg Roedel3fa43652009-11-26 15:04:38 +01001217}
1218
Joerg Roedel431b2a22008-07-11 17:14:22 +02001219/*
1220 * TLB invalidation function which is called from the mapping functions.
1221 * It invalidates a single PTE if the range to flush is within a single
1222 * page. Otherwise it flushes the whole TLB of the IOMMU.
1223 */
Joerg Roedel17b124b2011-04-06 18:01:35 +02001224static void __domain_flush_pages(struct protection_domain *domain,
1225 u64 address, size_t size, int pde)
Joerg Roedela19ae1e2008-06-26 21:27:55 +02001226{
Joerg Roedelcb41ed82011-04-05 11:00:53 +02001227 struct iommu_dev_data *dev_data;
Joerg Roedel11b64022011-04-06 11:49:28 +02001228 struct iommu_cmd cmd;
1229 int ret = 0, i;
Joerg Roedela19ae1e2008-06-26 21:27:55 +02001230
Joerg Roedel11b64022011-04-06 11:49:28 +02001231 build_inv_iommu_pages(&cmd, address, size, domain->id, pde);
Joerg Roedel999ba412008-07-03 19:35:08 +02001232
Suravee Suthikulpanit6b9376e2017-02-24 02:48:17 -06001233 for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
Joerg Roedel6de8ad92009-11-23 18:30:32 +01001234 if (!domain->dev_iommu[i])
1235 continue;
1236
1237 /*
1238 * Devices of this domain are behind this IOMMU
1239 * We need a TLB flush
1240 */
Joerg Roedel11b64022011-04-06 11:49:28 +02001241 ret |= iommu_queue_command(amd_iommus[i], &cmd);
Joerg Roedel6de8ad92009-11-23 18:30:32 +01001242 }
1243
Joerg Roedelcb41ed82011-04-05 11:00:53 +02001244 list_for_each_entry(dev_data, &domain->dev_list, list) {
Joerg Roedelcb41ed82011-04-05 11:00:53 +02001245
Joerg Roedelea61cdd2011-06-09 12:56:30 +02001246 if (!dev_data->ats.enabled)
Joerg Roedelcb41ed82011-04-05 11:00:53 +02001247 continue;
1248
Joerg Roedel6c542042011-06-09 17:07:31 +02001249 ret |= device_flush_iotlb(dev_data, address, size);
Joerg Roedelcb41ed82011-04-05 11:00:53 +02001250 }
1251
Joerg Roedel11b64022011-04-06 11:49:28 +02001252 WARN_ON(ret);
Joerg Roedel6de8ad92009-11-23 18:30:32 +01001253}
1254
Joerg Roedel17b124b2011-04-06 18:01:35 +02001255static void domain_flush_pages(struct protection_domain *domain,
1256 u64 address, size_t size)
Joerg Roedel6de8ad92009-11-23 18:30:32 +01001257{
Joerg Roedel17b124b2011-04-06 18:01:35 +02001258 __domain_flush_pages(domain, address, size, 0);
Joerg Roedela19ae1e2008-06-26 21:27:55 +02001259}
Joerg Roedelb6c02712008-06-26 21:27:53 +02001260
Chris Wright42a49f92009-06-15 15:42:00 +02001261/* Flush the whole IO/TLB for a given protection domain - including PDE */
Joerg Roedel17b124b2011-04-06 18:01:35 +02001262static void domain_flush_tlb_pde(struct protection_domain *domain)
Chris Wright42a49f92009-06-15 15:42:00 +02001263{
Joerg Roedel17b124b2011-04-06 18:01:35 +02001264 __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
1265}
1266
1267static void domain_flush_complete(struct protection_domain *domain)
Joerg Roedelb6c02712008-06-26 21:27:53 +02001268{
1269 int i;
1270
Suravee Suthikulpanit6b9376e2017-02-24 02:48:17 -06001271 for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
Joerg Roedelf1eae7c2016-07-06 12:50:35 +02001272 if (domain && !domain->dev_iommu[i])
Joerg Roedelb6c02712008-06-26 21:27:53 +02001273 continue;
1274
1275 /*
1276 * Devices of this domain are behind this IOMMU
1277 * We need to wait for completion of all commands.
1278 */
1279 iommu_completion_wait(amd_iommus[i]);
1280 }
1281}
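/*
 * Queueing invalidation commands and waiting for them to finish are two
 * separate steps; a page or TLB flush only becomes globally visible after
 * the completion wait.  A minimal sketch of the pattern used by
 * do_detach() further below:
 *
 *	domain_flush_tlb_pde(domain);
 *	domain_flush_complete(domain);
 */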
1282
Tom Murphy5cd3f2e2019-06-13 23:04:55 +01001283/* Flush the not present cache if it exists */
1284static void domain_flush_np_cache(struct protection_domain *domain,
1285 dma_addr_t iova, size_t size)
1286{
1287 if (unlikely(amd_iommu_np_cache)) {
Joerg Roedel2a78f992019-09-25 15:23:00 +02001288 unsigned long flags;
1289
1290 spin_lock_irqsave(&domain->lock, flags);
Tom Murphy5cd3f2e2019-06-13 23:04:55 +01001291 domain_flush_pages(domain, iova, size);
1292 domain_flush_complete(domain);
Joerg Roedel2a78f992019-09-25 15:23:00 +02001293 spin_unlock_irqrestore(&domain->lock, flags);
Tom Murphy5cd3f2e2019-06-13 23:04:55 +01001294 }
1295}
1296
Joerg Roedelb00d3bc2009-11-26 15:35:33 +01001297
Joerg Roedel43f49602008-12-02 21:01:12 +01001298/*
Joerg Roedelb00d3bc2009-11-26 15:35:33 +01001299 * This function flushes the DTEs for all devices in the domain
Joerg Roedel43f49602008-12-02 21:01:12 +01001300 */
Joerg Roedel17b124b2011-04-06 18:01:35 +02001301static void domain_flush_devices(struct protection_domain *domain)
Joerg Roedelbfd1be12009-05-05 15:33:57 +02001302{
Joerg Roedelb00d3bc2009-11-26 15:35:33 +01001303 struct iommu_dev_data *dev_data;
Joerg Roedelb00d3bc2009-11-26 15:35:33 +01001304
1305 list_for_each_entry(dev_data, &domain->dev_list, list)
Joerg Roedel6c542042011-06-09 17:07:31 +02001306 device_flush_dte(dev_data);
Joerg Roedelb00d3bc2009-11-26 15:35:33 +01001307}
1308
Joerg Roedel431b2a22008-07-11 17:14:22 +02001309/****************************************************************************
1310 *
1311 * The functions below are used to create the page table mappings for
1312 * unity mapped regions.
1313 *
1314 ****************************************************************************/
1315
Joerg Roedelac3a7092018-11-09 12:07:06 +01001316static void free_page_list(struct page *freelist)
1317{
1318 while (freelist != NULL) {
1319 unsigned long p = (unsigned long)page_address(freelist);
1320 freelist = freelist->freelist;
1321 free_page(p);
1322 }
1323}
1324
1325static struct page *free_pt_page(unsigned long pt, struct page *freelist)
1326{
1327 struct page *p = virt_to_page((void *)pt);
1328
1329 p->freelist = freelist;
1330
1331 return p;
1332}
1333
1334#define DEFINE_FREE_PT_FN(LVL, FN) \
1335static struct page *free_pt_##LVL (unsigned long __pt, struct page *freelist) \
1336{ \
1337 unsigned long p; \
1338 u64 *pt; \
1339 int i; \
1340 \
1341 pt = (u64 *)__pt; \
1342 \
1343 for (i = 0; i < 512; ++i) { \
1344 /* PTE present? */ \
1345 if (!IOMMU_PTE_PRESENT(pt[i])) \
1346 continue; \
1347 \
1348 /* Large PTE? */ \
1349 if (PM_PTE_LEVEL(pt[i]) == 0 || \
1350 PM_PTE_LEVEL(pt[i]) == 7) \
1351 continue; \
1352 \
1353 p = (unsigned long)IOMMU_PTE_PAGE(pt[i]); \
1354 freelist = FN(p, freelist); \
1355 } \
1356 \
1357 return free_pt_page((unsigned long)pt, freelist); \
1358}
1359
1360DEFINE_FREE_PT_FN(l2, free_pt_page)
1361DEFINE_FREE_PT_FN(l3, free_pt_l2)
1362DEFINE_FREE_PT_FN(l4, free_pt_l3)
1363DEFINE_FREE_PT_FN(l5, free_pt_l4)
1364DEFINE_FREE_PT_FN(l6, free_pt_l5)
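/*
 * The macro above instantiates free_pt_l2() through free_pt_l6().  Each
 * generated helper walks the 512 entries of one table level, descends into
 * every present, non-large lower-level table via the function it was
 * instantiated with, and finally releases its own table page through
 * free_pt_page().
 */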
1365
Joerg Roedel409afa42018-11-09 12:07:07 +01001366static struct page *free_sub_pt(unsigned long root, int mode,
1367 struct page *freelist)
Joerg Roedelac3a7092018-11-09 12:07:06 +01001368{
Joerg Roedel409afa42018-11-09 12:07:07 +01001369 switch (mode) {
Joerg Roedelac3a7092018-11-09 12:07:06 +01001370 case PAGE_MODE_NONE:
Joerg Roedel69be8852018-11-09 12:07:08 +01001371 case PAGE_MODE_7_LEVEL:
Joerg Roedelac3a7092018-11-09 12:07:06 +01001372 break;
1373 case PAGE_MODE_1_LEVEL:
1374 freelist = free_pt_page(root, freelist);
1375 break;
1376 case PAGE_MODE_2_LEVEL:
1377 freelist = free_pt_l2(root, freelist);
1378 break;
1379 case PAGE_MODE_3_LEVEL:
1380 freelist = free_pt_l3(root, freelist);
1381 break;
1382 case PAGE_MODE_4_LEVEL:
1383 freelist = free_pt_l4(root, freelist);
1384 break;
1385 case PAGE_MODE_5_LEVEL:
1386 freelist = free_pt_l5(root, freelist);
1387 break;
1388 case PAGE_MODE_6_LEVEL:
1389 freelist = free_pt_l6(root, freelist);
1390 break;
1391 default:
1392 BUG();
1393 }
1394
Joerg Roedel409afa42018-11-09 12:07:07 +01001395 return freelist;
1396}
1397
1398static void free_pagetable(struct protection_domain *domain)
1399{
1400 unsigned long root = (unsigned long)domain->pt_root;
1401 struct page *freelist = NULL;
1402
Joerg Roedel69be8852018-11-09 12:07:08 +01001403 BUG_ON(domain->mode < PAGE_MODE_NONE ||
1404 domain->mode > PAGE_MODE_6_LEVEL);
1405
Andrei Dulea34c09892019-09-13 16:42:28 +02001406 freelist = free_sub_pt(root, domain->mode, freelist);
Joerg Roedel409afa42018-11-09 12:07:07 +01001407
Joerg Roedelac3a7092018-11-09 12:07:06 +01001408 free_page_list(freelist);
1409}
1410
Joerg Roedel431b2a22008-07-11 17:14:22 +02001411/*
Joerg Roedel308973d2009-11-24 17:43:32 +01001412 * This function is used to add another level to an IO page table. Adding
1413 * another level increases the size of the address space by 9 bits, up to a
1414 * maximum of 64 bits.
1415 */
Joerg Roedelf15d9a92019-09-25 15:22:55 +02001416static bool increase_address_space(struct protection_domain *domain,
Joerg Roedel46ac18c2019-10-18 11:34:22 +02001417 unsigned long address,
Joerg Roedel308973d2009-11-24 17:43:32 +01001418 gfp_t gfp)
1419{
Joerg Roedel754265b2019-09-06 10:39:54 +02001420 unsigned long flags;
Joerg Roedelf15d9a92019-09-25 15:22:55 +02001421 bool ret = false;
Joerg Roedel308973d2009-11-24 17:43:32 +01001422 u64 *pte;
1423
Joerg Roedel754265b2019-09-06 10:39:54 +02001424 spin_lock_irqsave(&domain->lock, flags);
1425
Joerg Roedel46ac18c2019-10-18 11:34:22 +02001426 if (address <= PM_LEVEL_SIZE(domain->mode) ||
1427 WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
Joerg Roedel754265b2019-09-06 10:39:54 +02001428 goto out;
Joerg Roedel308973d2009-11-24 17:43:32 +01001429
1430 pte = (void *)get_zeroed_page(gfp);
1431 if (!pte)
Joerg Roedel754265b2019-09-06 10:39:54 +02001432 goto out;
Joerg Roedel308973d2009-11-24 17:43:32 +01001433
1434 *pte = PM_LEVEL_PDE(domain->mode,
Tom Lendacky2543a782017-07-17 16:10:24 -05001435 iommu_virt_to_phys(domain->pt_root));
Joerg Roedel308973d2009-11-24 17:43:32 +01001436 domain->pt_root = pte;
1437 domain->mode += 1;
Joerg Roedelf15d9a92019-09-25 15:22:55 +02001438
1439 ret = true;
Joerg Roedel308973d2009-11-24 17:43:32 +01001440
Joerg Roedel754265b2019-09-06 10:39:54 +02001441out:
1442 spin_unlock_irqrestore(&domain->lock, flags);
1443
Joerg Roedelf15d9a92019-09-25 15:22:55 +02001444 return ret;
Joerg Roedel308973d2009-11-24 17:43:32 +01001445}
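/*
 * Rough worked example: every level resolves 9 IOVA bits on top of the
 * 12-bit page offset, so a domain in PAGE_MODE_3_LEVEL covers a 39-bit
 * (512 GiB) address space.  The first mapping above that limit grows the
 * table to PAGE_MODE_4_LEVEL (48 bits), one level per call, up to the
 * PAGE_MODE_6_LEVEL bound checked above.
 */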
1446
1447static u64 *alloc_pte(struct protection_domain *domain,
1448 unsigned long address,
Joerg Roedelcbb9d722010-01-15 14:41:15 +01001449 unsigned long page_size,
Joerg Roedel308973d2009-11-24 17:43:32 +01001450 u64 **pte_page,
Joerg Roedelf15d9a92019-09-25 15:22:55 +02001451 gfp_t gfp,
1452 bool *updated)
Joerg Roedel308973d2009-11-24 17:43:32 +01001453{
Joerg Roedelcbb9d722010-01-15 14:41:15 +01001454 int level, end_lvl;
Joerg Roedel308973d2009-11-24 17:43:32 +01001455 u64 *pte, *page;
Joerg Roedelcbb9d722010-01-15 14:41:15 +01001456
1457 BUG_ON(!is_power_of_2(page_size));
Joerg Roedel308973d2009-11-24 17:43:32 +01001458
1459 while (address > PM_LEVEL_SIZE(domain->mode))
Joerg Roedel46ac18c2019-10-18 11:34:22 +02001460 *updated = increase_address_space(domain, address, gfp) || *updated;
Joerg Roedel308973d2009-11-24 17:43:32 +01001461
Joerg Roedelcbb9d722010-01-15 14:41:15 +01001462 level = domain->mode - 1;
1463 pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
1464 address = PAGE_SIZE_ALIGN(address, page_size);
1465 end_lvl = PAGE_SIZE_LEVEL(page_size);
Joerg Roedel308973d2009-11-24 17:43:32 +01001466
1467 while (level > end_lvl) {
Joerg Roedel7bfa5bd2015-12-21 19:07:50 +01001468 u64 __pte, __npte;
Joerg Roedel6d568ef2018-11-09 12:07:09 +01001469 int pte_level;
Joerg Roedel7bfa5bd2015-12-21 19:07:50 +01001470
Joerg Roedel6d568ef2018-11-09 12:07:09 +01001471 __pte = *pte;
1472 pte_level = PM_PTE_LEVEL(__pte);
Joerg Roedel7bfa5bd2015-12-21 19:07:50 +01001473
Andrei Duleacc449542019-09-13 16:42:31 +02001474 /*
1475 * If we replace a series of large PTEs, we need
1476 * to tear down all of them.
1477 */
1478 if (IOMMU_PTE_PRESENT(__pte) &&
Joerg Roedel6d568ef2018-11-09 12:07:09 +01001479 pte_level == PAGE_MODE_7_LEVEL) {
Andrei Duleacc449542019-09-13 16:42:31 +02001480 unsigned long count, i;
1481 u64 *lpte;
1482
1483 lpte = first_pte_l7(pte, NULL, &count);
1484
1485 /*
1486 * Unmap the replicated PTEs that still match the
1487 * original large mapping
1488 */
1489 for (i = 0; i < count; ++i)
1490 cmpxchg64(&lpte[i], __pte, 0ULL);
1491
Joerg Roedelf15d9a92019-09-25 15:22:55 +02001492 *updated = true;
Andrei Duleacc449542019-09-13 16:42:31 +02001493 continue;
1494 }
1495
1496 if (!IOMMU_PTE_PRESENT(__pte) ||
1497 pte_level == PAGE_MODE_NONE) {
Joerg Roedel308973d2009-11-24 17:43:32 +01001498 page = (u64 *)get_zeroed_page(gfp);
Andrei Duleacc449542019-09-13 16:42:31 +02001499
Joerg Roedel308973d2009-11-24 17:43:32 +01001500 if (!page)
1501 return NULL;
Joerg Roedel7bfa5bd2015-12-21 19:07:50 +01001502
Tom Lendacky2543a782017-07-17 16:10:24 -05001503 __npte = PM_LEVEL_PDE(level, iommu_virt_to_phys(page));
Joerg Roedel7bfa5bd2015-12-21 19:07:50 +01001504
Baoquan He134414f2016-09-15 16:50:50 +08001505 /* pte could have been changed somewhere. */
Joerg Roedel9db034d2018-11-09 12:07:10 +01001506 if (cmpxchg64(pte, __pte, __npte) != __pte)
Joerg Roedel7bfa5bd2015-12-21 19:07:50 +01001507 free_page((unsigned long)page);
Andrei Dulea6ccb72f2019-09-13 16:42:29 +02001508 else if (IOMMU_PTE_PRESENT(__pte))
Joerg Roedelf15d9a92019-09-25 15:22:55 +02001509 *updated = true;
Joerg Roedel9db034d2018-11-09 12:07:10 +01001510
1511 continue;
Joerg Roedel308973d2009-11-24 17:43:32 +01001512 }
1513
Joerg Roedelcbb9d722010-01-15 14:41:15 +01001514 /* No level skipping support yet */
Joerg Roedel6d568ef2018-11-09 12:07:09 +01001515 if (pte_level != level)
Joerg Roedelcbb9d722010-01-15 14:41:15 +01001516 return NULL;
1517
Joerg Roedel308973d2009-11-24 17:43:32 +01001518 level -= 1;
1519
Joerg Roedel9db034d2018-11-09 12:07:10 +01001520 pte = IOMMU_PTE_PAGE(__pte);
Joerg Roedel308973d2009-11-24 17:43:32 +01001521
1522 if (pte_page && level == end_lvl)
1523 *pte_page = pte;
1524
1525 pte = &pte[PM_LEVEL_INDEX(level, address)];
1526 }
1527
1528 return pte;
1529}
1530
1531/*
1532 * This function checks if there is a PTE for a given DMA address. If
1533 * there is one, it returns a pointer to it.
1534 */
Joerg Roedel3039ca12015-04-01 14:58:48 +02001535static u64 *fetch_pte(struct protection_domain *domain,
1536 unsigned long address,
1537 unsigned long *page_size)
Joerg Roedel308973d2009-11-24 17:43:32 +01001538{
1539 int level;
1540 u64 *pte;
1541
yzhai003@ucr.edu46746862018-06-01 11:30:14 -07001542 *page_size = 0;
1543
Joerg Roedel24cd7722010-01-19 17:27:39 +01001544 if (address > PM_LEVEL_SIZE(domain->mode))
1545 return NULL;
Joerg Roedel308973d2009-11-24 17:43:32 +01001546
Joerg Roedel3039ca12015-04-01 14:58:48 +02001547 level = domain->mode - 1;
1548 pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
1549 *page_size = PTE_LEVEL_PAGE_SIZE(level);
Joerg Roedel24cd7722010-01-19 17:27:39 +01001550
1551 while (level > 0) {
1552
1553 /* Not Present */
Joerg Roedel308973d2009-11-24 17:43:32 +01001554 if (!IOMMU_PTE_PRESENT(*pte))
1555 return NULL;
1556
Joerg Roedel24cd7722010-01-19 17:27:39 +01001557 /* Large PTE */
Joerg Roedel3039ca12015-04-01 14:58:48 +02001558 if (PM_PTE_LEVEL(*pte) == 7 ||
1559 PM_PTE_LEVEL(*pte) == 0)
1560 break;
Joerg Roedel24cd7722010-01-19 17:27:39 +01001561
1562 /* No level skipping support yet */
1563 if (PM_PTE_LEVEL(*pte) != level)
1564 return NULL;
1565
Joerg Roedel308973d2009-11-24 17:43:32 +01001566 level -= 1;
1567
Joerg Roedel24cd7722010-01-19 17:27:39 +01001568 /* Walk to the next level */
Joerg Roedel3039ca12015-04-01 14:58:48 +02001569 pte = IOMMU_PTE_PAGE(*pte);
1570 pte = &pte[PM_LEVEL_INDEX(level, address)];
1571 *page_size = PTE_LEVEL_PAGE_SIZE(level);
1572 }
1573
Andrei Dulea7f1f1682019-09-13 16:42:30 +02001574 /*
1575 * If we have a series of large PTEs, make
1576 * sure to return a pointer to the first one.
1577 */
1578 if (PM_PTE_LEVEL(*pte) == PAGE_MODE_7_LEVEL)
1579 pte = first_pte_l7(pte, page_size, NULL);
Joerg Roedel308973d2009-11-24 17:43:32 +01001580
1581 return pte;
1582}
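/*
 * Sketch of the two lookup outcomes, assuming the mapping exists: a plain
 * 4 KiB mapping makes the walk reach level 0 and *page_size ends up as
 * PAGE_SIZE, while a range mapped with replicated level-7 PTEs is rewound
 * by first_pte_l7() to the first PTE of the series, with *page_size set to
 * the size of the whole series.
 */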
1583
Joerg Roedel6f820bb2018-11-09 12:07:11 +01001584static struct page *free_clear_pte(u64 *pte, u64 pteval, struct page *freelist)
1585{
1586 unsigned long pt;
1587 int mode;
1588
1589 while (cmpxchg64(pte, pteval, 0) != pteval) {
1590 pr_warn("IOMMU pte changed since we read it\n");
1591 pteval = *pte;
1592 }
1593
1594 if (!IOMMU_PTE_PRESENT(pteval))
1595 return freelist;
1596
1597 pt = (unsigned long)IOMMU_PTE_PAGE(pteval);
1598 mode = IOMMU_PTE_MODE(pteval);
1599
1600 return free_sub_pt(pt, mode, freelist);
1601}
1602
Joerg Roedel308973d2009-11-24 17:43:32 +01001603/*
Joerg Roedel431b2a22008-07-11 17:14:22 +02001604 * Generic mapping function. It maps a physical address into a DMA
1605 * address space and allocates the page table pages if necessary.
1606 * In the future it can be extended to a generic mapping function
1607 * supporting all features of AMD IOMMU page tables like level skipping
1608 * and full 64 bit address spaces.
1609 */
Joerg Roedel38e817f2008-12-02 17:27:52 +01001610static int iommu_map_page(struct protection_domain *dom,
1611 unsigned long bus_addr,
1612 unsigned long phys_addr,
Joerg Roedelb911b892016-07-05 14:29:11 +02001613 unsigned long page_size,
Joerg Roedelabdc5eb2009-09-03 11:33:51 +02001614 int prot,
Joerg Roedelb911b892016-07-05 14:29:11 +02001615 gfp_t gfp)
Joerg Roedelbd0e5212008-06-26 21:27:56 +02001616{
Joerg Roedel6f820bb2018-11-09 12:07:11 +01001617 struct page *freelist = NULL;
Joerg Roedelf15d9a92019-09-25 15:22:55 +02001618 bool updated = false;
Joerg Roedel8bda3092009-05-12 12:02:46 +02001619 u64 __pte, *pte;
Joerg Roedelf15d9a92019-09-25 15:22:55 +02001620 int ret, i, count;
Joerg Roedelabdc5eb2009-09-03 11:33:51 +02001621
Joerg Roedeld4b03662015-04-01 14:58:52 +02001622 BUG_ON(!IS_ALIGNED(bus_addr, page_size));
1623 BUG_ON(!IS_ALIGNED(phys_addr, page_size));
1624
Joerg Roedelf15d9a92019-09-25 15:22:55 +02001625 ret = -EINVAL;
Joerg Roedelbad1cac2009-09-02 16:52:23 +02001626 if (!(prot & IOMMU_PROT_MASK))
Joerg Roedelf15d9a92019-09-25 15:22:55 +02001627 goto out;
Joerg Roedelbd0e5212008-06-26 21:27:56 +02001628
Joerg Roedeld4b03662015-04-01 14:58:52 +02001629 count = PAGE_SIZE_PTE_COUNT(page_size);
Joerg Roedelf15d9a92019-09-25 15:22:55 +02001630 pte = alloc_pte(dom, bus_addr, page_size, NULL, gfp, &updated);
Joerg Roedelbd0e5212008-06-26 21:27:56 +02001631
Joerg Roedelf15d9a92019-09-25 15:22:55 +02001632 ret = -ENOMEM;
Maurizio Lombardi63eaa752014-09-11 12:28:03 +02001633 if (!pte)
Joerg Roedelf15d9a92019-09-25 15:22:55 +02001634 goto out;
Maurizio Lombardi63eaa752014-09-11 12:28:03 +02001635
Joerg Roedelcbb9d722010-01-15 14:41:15 +01001636 for (i = 0; i < count; ++i)
Joerg Roedel6f820bb2018-11-09 12:07:11 +01001637 freelist = free_clear_pte(&pte[i], pte[i], freelist);
1638
1639 if (freelist != NULL)
Joerg Roedelf15d9a92019-09-25 15:22:55 +02001640 updated = true;
Joerg Roedelbd0e5212008-06-26 21:27:56 +02001641
Joerg Roedeld4b03662015-04-01 14:58:52 +02001642 if (count > 1) {
Tom Lendacky2543a782017-07-17 16:10:24 -05001643 __pte = PAGE_SIZE_PTE(__sme_set(phys_addr), page_size);
Baoquan He07a80a62017-08-09 16:33:36 +08001644 __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
Joerg Roedelcbb9d722010-01-15 14:41:15 +01001645 } else
Linus Torvalds4dfc2782017-09-09 15:03:24 -07001646 __pte = __sme_set(phys_addr) | IOMMU_PTE_PR | IOMMU_PTE_FC;
Joerg Roedelcbb9d722010-01-15 14:41:15 +01001647
Joerg Roedelbd0e5212008-06-26 21:27:56 +02001648 if (prot & IOMMU_PROT_IR)
1649 __pte |= IOMMU_PTE_IR;
1650 if (prot & IOMMU_PROT_IW)
1651 __pte |= IOMMU_PTE_IW;
1652
Joerg Roedelcbb9d722010-01-15 14:41:15 +01001653 for (i = 0; i < count; ++i)
1654 pte[i] = __pte;
Joerg Roedelbd0e5212008-06-26 21:27:56 +02001655
Joerg Roedelf15d9a92019-09-25 15:22:55 +02001656 ret = 0;
1657
1658out:
Joerg Roedel2a78f992019-09-25 15:23:00 +02001659 if (updated) {
1660 unsigned long flags;
1661
1662 spin_lock_irqsave(&dom->lock, flags);
Joerg Roedelf15d9a92019-09-25 15:22:55 +02001663 update_domain(dom);
Joerg Roedel2a78f992019-09-25 15:23:00 +02001664 spin_unlock_irqrestore(&dom->lock, flags);
1665 }
Joerg Roedel04bfdd82009-09-02 16:00:23 +02001666
Joerg Roedel6f820bb2018-11-09 12:07:11 +01001667 /* Everything flushed out, free pages now */
1668 free_page_list(freelist);
1669
Joerg Roedelf15d9a92019-09-25 15:22:55 +02001670 return ret;
Joerg Roedelbd0e5212008-06-26 21:27:56 +02001671}
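/*
 * Illustrative calls (values are only examples and must be aligned by the
 * caller): a natural page size such as 4 KiB or 2 MiB ends up as a single
 * PTE at the matching level (count == 1), while an intermediate size such
 * as 32 KiB is written as eight replicated PTEs carrying PM_LEVEL_ENC(7)
 * so the IOMMU can derive the real page size:
 *
 *	iommu_map_page(dom, iova, paddr, PAGE_SIZE, IOMMU_PROT_IR | IOMMU_PROT_IW, GFP_KERNEL);
 *	iommu_map_page(dom, iova, paddr, 8 * PAGE_SIZE, IOMMU_PROT_IR, GFP_KERNEL);
 */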
1672
Joerg Roedel24cd7722010-01-19 17:27:39 +01001673static unsigned long iommu_unmap_page(struct protection_domain *dom,
1674 unsigned long bus_addr,
1675 unsigned long page_size)
Joerg Roedeleb74ff62008-12-02 19:59:10 +01001676{
Joerg Roedel71b390e2015-04-01 14:58:49 +02001677 unsigned long long unmapped;
1678 unsigned long unmap_size;
Joerg Roedel24cd7722010-01-19 17:27:39 +01001679 u64 *pte;
Joerg Roedeleb74ff62008-12-02 19:59:10 +01001680
Joerg Roedel24cd7722010-01-19 17:27:39 +01001681 BUG_ON(!is_power_of_2(page_size));
1682
1683 unmapped = 0;
1684
1685 while (unmapped < page_size) {
1686
Joerg Roedel71b390e2015-04-01 14:58:49 +02001687 pte = fetch_pte(dom, bus_addr, &unmap_size);
Joerg Roedel24cd7722010-01-19 17:27:39 +01001688
Joerg Roedel71b390e2015-04-01 14:58:49 +02001689 if (pte) {
1690 int i, count;
Joerg Roedel24cd7722010-01-19 17:27:39 +01001691
Joerg Roedel71b390e2015-04-01 14:58:49 +02001692 count = PAGE_SIZE_PTE_COUNT(unmap_size);
Joerg Roedel24cd7722010-01-19 17:27:39 +01001693 for (i = 0; i < count; i++)
1694 pte[i] = 0ULL;
1695 }
1696
1697 bus_addr = (bus_addr & ~(unmap_size - 1)) + unmap_size;
1698 unmapped += unmap_size;
1699 }
1700
Alex Williamson60d0ca32013-06-21 14:33:19 -06001701 BUG_ON(unmapped && !is_power_of_2(unmapped));
Joerg Roedel24cd7722010-01-19 17:27:39 +01001702
1703 return unmapped;
Joerg Roedeleb74ff62008-12-02 19:59:10 +01001704}
Joerg Roedeleb74ff62008-12-02 19:59:10 +01001705
Joerg Roedel431b2a22008-07-11 17:14:22 +02001706/****************************************************************************
1707 *
Joerg Roedel431b2a22008-07-11 17:14:22 +02001708 * The next functions belong to the domain allocation. A domain is
1709 * allocated for every IOMMU as the default domain. If device isolation
1710 * is enabled, every device get its own domain. The most important thing
1711 * about domains is the page table mapping the DMA address space they
1712 * contain.
1713 *
1714 ****************************************************************************/
1715
Joerg Roedelec487d12008-06-26 21:27:58 +02001716static u16 domain_id_alloc(void)
1717{
Joerg Roedelec487d12008-06-26 21:27:58 +02001718 int id;
1719
Sebastian Andrzej Siewior2bc00182018-03-22 16:22:35 +01001720 spin_lock(&pd_bitmap_lock);
Joerg Roedelec487d12008-06-26 21:27:58 +02001721 id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
1722 BUG_ON(id == 0);
1723 if (id > 0 && id < MAX_DOMAIN_ID)
1724 __set_bit(id, amd_iommu_pd_alloc_bitmap);
1725 else
1726 id = 0;
Sebastian Andrzej Siewior2bc00182018-03-22 16:22:35 +01001727 spin_unlock(&pd_bitmap_lock);
Joerg Roedelec487d12008-06-26 21:27:58 +02001728
1729 return id;
1730}
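/*
 * Domain ID 0 is reserved as the "no domain" marker (its bit is expected
 * to be set before the first allocation, hence the BUG_ON above), so a
 * return value of 0 here means the MAX_DOMAIN_ID space is exhausted and
 * the caller has to fail the allocation.
 */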
1731
Joerg Roedela2acfb72008-12-02 18:28:53 +01001732static void domain_id_free(int id)
1733{
Sebastian Andrzej Siewior2bc00182018-03-22 16:22:35 +01001734 spin_lock(&pd_bitmap_lock);
Joerg Roedela2acfb72008-12-02 18:28:53 +01001735 if (id > 0 && id < MAX_DOMAIN_ID)
1736 __clear_bit(id, amd_iommu_pd_alloc_bitmap);
Sebastian Andrzej Siewior2bc00182018-03-22 16:22:35 +01001737 spin_unlock(&pd_bitmap_lock);
Joerg Roedela2acfb72008-12-02 18:28:53 +01001738}
Joerg Roedela2acfb72008-12-02 18:28:53 +01001739
Joerg Roedelb16137b2011-11-21 16:50:23 +01001740static void free_gcr3_tbl_level1(u64 *tbl)
1741{
1742 u64 *ptr;
1743 int i;
1744
1745 for (i = 0; i < 512; ++i) {
1746 if (!(tbl[i] & GCR3_VALID))
1747 continue;
1748
Tom Lendacky2543a782017-07-17 16:10:24 -05001749 ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
Joerg Roedelb16137b2011-11-21 16:50:23 +01001750
1751 free_page((unsigned long)ptr);
1752 }
1753}
1754
1755static void free_gcr3_tbl_level2(u64 *tbl)
1756{
1757 u64 *ptr;
1758 int i;
1759
1760 for (i = 0; i < 512; ++i) {
1761 if (!(tbl[i] & GCR3_VALID))
1762 continue;
1763
Tom Lendacky2543a782017-07-17 16:10:24 -05001764 ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
Joerg Roedelb16137b2011-11-21 16:50:23 +01001765
1766 free_gcr3_tbl_level1(ptr);
1767 }
1768}
1769
Joerg Roedel52815b72011-11-17 17:24:28 +01001770static void free_gcr3_table(struct protection_domain *domain)
1771{
Joerg Roedelb16137b2011-11-21 16:50:23 +01001772 if (domain->glx == 2)
1773 free_gcr3_tbl_level2(domain->gcr3_tbl);
1774 else if (domain->glx == 1)
1775 free_gcr3_tbl_level1(domain->gcr3_tbl);
Joerg Roedel23d3a982015-08-13 11:15:13 +02001776 else
1777 BUG_ON(domain->glx != 0);
Joerg Roedelb16137b2011-11-21 16:50:23 +01001778
Joerg Roedel52815b72011-11-17 17:24:28 +01001779 free_page((unsigned long)domain->gcr3_tbl);
1780}
1781
Joerg Roedel431b2a22008-07-11 17:14:22 +02001782/*
1783 * Free a domain, only used if something went wrong in the
1784 * allocation path and we need to free an already allocated page table
1785 */
Tom Murphybe62dbf2019-09-08 09:56:41 -07001786static void dma_ops_domain_free(struct protection_domain *domain)
Joerg Roedelec487d12008-06-26 21:27:58 +02001787{
Tom Murphybe62dbf2019-09-08 09:56:41 -07001788 if (!domain)
Joerg Roedelec487d12008-06-26 21:27:58 +02001789 return;
1790
Tom Murphybe62dbf2019-09-08 09:56:41 -07001791 iommu_put_dma_cookie(&domain->domain);
Joerg Roedel2d4c5152016-07-05 16:21:32 +02001792
Tom Murphybe62dbf2019-09-08 09:56:41 -07001793 free_pagetable(domain);
Joerg Roedelec487d12008-06-26 21:27:58 +02001794
Tom Murphybe62dbf2019-09-08 09:56:41 -07001795 if (domain->id)
1796 domain_id_free(domain->id);
Baoquan Hec3db9012016-09-15 16:50:52 +08001797
Tom Murphybe62dbf2019-09-08 09:56:41 -07001798 kfree(domain);
Joerg Roedelec487d12008-06-26 21:27:58 +02001799}
1800
Joerg Roedel431b2a22008-07-11 17:14:22 +02001801/*
1802 * Allocates a new protection domain usable for the dma_ops functions.
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04001803 * It also initializes the page table and the dma-iommu cookie
Joerg Roedel431b2a22008-07-11 17:14:22 +02001804 * required for the dma_ops interface.
1805 */
Tom Murphybe62dbf2019-09-08 09:56:41 -07001806static struct protection_domain *dma_ops_domain_alloc(void)
Joerg Roedelec487d12008-06-26 21:27:58 +02001807{
Tom Murphybe62dbf2019-09-08 09:56:41 -07001808 struct protection_domain *domain;
Joerg Roedelec487d12008-06-26 21:27:58 +02001809
Tom Murphybe62dbf2019-09-08 09:56:41 -07001810 domain = kzalloc(sizeof(struct protection_domain), GFP_KERNEL);
1811 if (!domain)
Joerg Roedelec487d12008-06-26 21:27:58 +02001812 return NULL;
1813
Tom Murphybe62dbf2019-09-08 09:56:41 -07001814 if (protection_domain_init(domain))
1815 goto free_domain;
Joerg Roedel7a5a5662015-06-30 08:56:11 +02001816
Tom Murphybe62dbf2019-09-08 09:56:41 -07001817 domain->mode = PAGE_MODE_3_LEVEL;
1818 domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
1819 domain->flags = PD_DMA_OPS_MASK;
1820 if (!domain->pt_root)
1821 goto free_domain;
Joerg Roedelec487d12008-06-26 21:27:58 +02001822
Tom Murphybe62dbf2019-09-08 09:56:41 -07001823 if (iommu_get_dma_cookie(&domain->domain) == -ENOMEM)
1824 goto free_domain;
Joerg Roedel307d5852016-07-05 11:54:04 +02001825
Tom Murphybe62dbf2019-09-08 09:56:41 -07001826 return domain;
Joerg Roedeld4241a22017-06-02 14:55:56 +02001827
Tom Murphybe62dbf2019-09-08 09:56:41 -07001828free_domain:
1829 dma_ops_domain_free(domain);
Joerg Roedelec487d12008-06-26 21:27:58 +02001830
1831 return NULL;
1832}
1833
Joerg Roedel431b2a22008-07-11 17:14:22 +02001834/*
Joerg Roedel5b28df62008-12-02 17:49:42 +01001835 * little helper function to check whether a given protection domain is a
1836 * dma_ops domain
1837 */
1838static bool dma_ops_domain(struct protection_domain *domain)
1839{
1840 return domain->flags & PD_DMA_OPS_MASK;
1841}
1842
Gary R Hookff18c4e2017-12-20 09:47:08 -07001843static void set_dte_entry(u16 devid, struct protection_domain *domain,
1844 bool ats, bool ppr)
Joerg Roedelb20ac0d2008-06-26 21:27:59 +02001845{
Joerg Roedel132bd682011-11-17 14:18:46 +01001846 u64 pte_root = 0;
Joerg Roedelee6c2862011-11-09 12:06:03 +01001847 u64 flags = 0;
Stuart Hayes36b72002019-09-05 12:09:48 -05001848 u32 old_domid;
Joerg Roedel863c74e2008-12-02 17:56:36 +01001849
Joerg Roedel132bd682011-11-17 14:18:46 +01001850 if (domain->mode != PAGE_MODE_NONE)
Tom Lendacky2543a782017-07-17 16:10:24 -05001851 pte_root = iommu_virt_to_phys(domain->pt_root);
Joerg Roedel132bd682011-11-17 14:18:46 +01001852
Joerg Roedel38ddf412008-09-11 10:38:32 +02001853 pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
1854 << DEV_ENTRY_MODE_SHIFT;
Baoquan He07a80a62017-08-09 16:33:36 +08001855 pte_root |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V | DTE_FLAG_TV;
Joerg Roedelb20ac0d2008-06-26 21:27:59 +02001856
Joerg Roedelee6c2862011-11-09 12:06:03 +01001857 flags = amd_iommu_dev_table[devid].data[1];
1858
Joerg Roedelfd7b5532011-04-05 15:31:08 +02001859 if (ats)
1860 flags |= DTE_FLAG_IOTLB;
1861
Gary R Hookff18c4e2017-12-20 09:47:08 -07001862 if (ppr) {
1863 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
1864
1865 if (iommu_feature(iommu, FEATURE_EPHSUP))
1866 pte_root |= 1ULL << DEV_ENTRY_PPR;
1867 }
1868
Joerg Roedel52815b72011-11-17 17:24:28 +01001869 if (domain->flags & PD_IOMMUV2_MASK) {
Tom Lendacky2543a782017-07-17 16:10:24 -05001870 u64 gcr3 = iommu_virt_to_phys(domain->gcr3_tbl);
Joerg Roedel52815b72011-11-17 17:24:28 +01001871 u64 glx = domain->glx;
1872 u64 tmp;
1873
1874 pte_root |= DTE_FLAG_GV;
1875 pte_root |= (glx & DTE_GLX_MASK) << DTE_GLX_SHIFT;
1876
1877 /* First mask out possible old values for GCR3 table */
1878 tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
1879 flags &= ~tmp;
1880
1881 tmp = DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
1882 flags &= ~tmp;
1883
1884 /* Encode GCR3 table into DTE */
1885 tmp = DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
1886 pte_root |= tmp;
1887
1888 tmp = DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
1889 flags |= tmp;
1890
1891 tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
1892 flags |= tmp;
1893 }
1894
Baoquan He45a01c42017-08-09 16:33:37 +08001895 flags &= ~DEV_DOMID_MASK;
Joerg Roedelee6c2862011-11-09 12:06:03 +01001896 flags |= domain->id;
1897
Stuart Hayes36b72002019-09-05 12:09:48 -05001898 old_domid = amd_iommu_dev_table[devid].data[1] & DEV_DOMID_MASK;
Joerg Roedelee6c2862011-11-09 12:06:03 +01001899 amd_iommu_dev_table[devid].data[1] = flags;
1900 amd_iommu_dev_table[devid].data[0] = pte_root;
Stuart Hayes36b72002019-09-05 12:09:48 -05001901
1902 /*
1903 * A kdump kernel might be replacing a domain ID that was copied from
1904 * the previous kernel--if so, it needs to flush the translation cache
1905 * entries for the old domain ID that is being overwritten
1906 */
1907 if (old_domid) {
1908 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
1909
1910 amd_iommu_flush_tlb_domid(iommu, old_domid);
1911 }
Joerg Roedelb20ac0d2008-06-26 21:27:59 +02001912}
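/*
 * Note that set_dte_entry() only updates the in-memory device table; the
 * IOMMU caches DTEs, so callers follow it up with device_flush_dte() (and
 * clone_aliases() for PCI alias devices), as do_attach() below does.
 */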
1913
Joerg Roedel15898bb2009-11-24 15:39:42 +01001914static void clear_dte_entry(u16 devid)
Joerg Roedel355bf552008-12-08 12:02:41 +01001915{
Joerg Roedel355bf552008-12-08 12:02:41 +01001916 /* remove entry from the device table seen by the hardware */
Baoquan He07a80a62017-08-09 16:33:36 +08001917 amd_iommu_dev_table[devid].data[0] = DTE_FLAG_V | DTE_FLAG_TV;
Joerg Roedelcbf3ccd2015-10-20 14:59:36 +02001918 amd_iommu_dev_table[devid].data[1] &= DTE_FLAG_MASK;
Joerg Roedel355bf552008-12-08 12:02:41 +01001919
Joerg Roedelc5cca142009-10-09 18:31:20 +02001920 amd_iommu_apply_erratum_63(devid);
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001921}
1922
Joerg Roedelec9e79e2011-06-09 17:25:50 +02001923static void do_attach(struct iommu_dev_data *dev_data,
1924 struct protection_domain *domain)
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001925{
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001926 struct amd_iommu *iommu;
Joerg Roedelec9e79e2011-06-09 17:25:50 +02001927 bool ats;
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001928
Joerg Roedelec9e79e2011-06-09 17:25:50 +02001929 iommu = amd_iommu_rlookup_table[dev_data->devid];
1930 ats = dev_data->ats.enabled;
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001931
1932 /* Update data structures */
1933 dev_data->domain = domain;
1934 list_add(&dev_data->list, &domain->dev_list);
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001935
1936 /* Do reference counting */
1937 domain->dev_iommu[iommu->index] += 1;
1938 domain->dev_cnt += 1;
1939
Joerg Roedele25bfb52015-10-20 17:33:38 +02001940 /* Update device table */
Gary R Hookff18c4e2017-12-20 09:47:08 -07001941 set_dte_entry(dev_data->devid, domain, ats, dev_data->iommu_v2);
Logan Gunthorpe33323642019-10-22 16:01:20 -06001942 clone_aliases(dev_data->pdev);
Joerg Roedele25bfb52015-10-20 17:33:38 +02001943
Joerg Roedel6c542042011-06-09 17:07:31 +02001944 device_flush_dte(dev_data);
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001945}
1946
Joerg Roedelec9e79e2011-06-09 17:25:50 +02001947static void do_detach(struct iommu_dev_data *dev_data)
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001948{
Suravee Suthikulpanit9825bd92019-01-24 04:16:45 +00001949 struct protection_domain *domain = dev_data->domain;
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001950 struct amd_iommu *iommu;
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001951
Joerg Roedelec9e79e2011-06-09 17:25:50 +02001952 iommu = amd_iommu_rlookup_table[dev_data->devid];
Joerg Roedelc5cca142009-10-09 18:31:20 +02001953
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001954 /* Update data structures */
1955 dev_data->domain = NULL;
1956 list_del(&dev_data->list);
Joerg Roedelf62dda62011-06-09 12:55:35 +02001957 clear_dte_entry(dev_data->devid);
Logan Gunthorpe33323642019-10-22 16:01:20 -06001958 clone_aliases(dev_data->pdev);
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001959
1960 /* Flush the DTE entry */
Joerg Roedel6c542042011-06-09 17:07:31 +02001961 device_flush_dte(dev_data);
Suravee Suthikulpanit9825bd92019-01-24 04:16:45 +00001962
1963 /* Flush IOTLB */
1964 domain_flush_tlb_pde(domain);
1965
1966 /* Wait for the flushes to finish */
1967 domain_flush_complete(domain);
1968
1969 /* decrease reference counters - needs to happen after the flushes */
1970 domain->dev_iommu[iommu->index] -= 1;
1971 domain->dev_cnt -= 1;
Joerg Roedel15898bb2009-11-24 15:39:42 +01001972}
1973
Joerg Roedel52815b72011-11-17 17:24:28 +01001974static void pdev_iommuv2_disable(struct pci_dev *pdev)
1975{
1976 pci_disable_ats(pdev);
1977 pci_disable_pri(pdev);
1978 pci_disable_pasid(pdev);
1979}
1980
Joerg Roedel6a113dd2011-12-01 12:04:58 +01001981/* FIXME: Change generic reset-function to do the same */
1982static int pri_reset_while_enabled(struct pci_dev *pdev)
1983{
1984 u16 control;
1985 int pos;
1986
Joerg Roedel46277b72011-12-07 14:34:02 +01001987 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
Joerg Roedel6a113dd2011-12-01 12:04:58 +01001988 if (!pos)
1989 return -EINVAL;
1990
Joerg Roedel46277b72011-12-07 14:34:02 +01001991 pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
1992 control |= PCI_PRI_CTRL_RESET;
1993 pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
Joerg Roedel6a113dd2011-12-01 12:04:58 +01001994
1995 return 0;
1996}
1997
Joerg Roedel52815b72011-11-17 17:24:28 +01001998static int pdev_iommuv2_enable(struct pci_dev *pdev)
1999{
Joerg Roedel6a113dd2011-12-01 12:04:58 +01002000 bool reset_enable;
2001 int reqs, ret;
2002
2003 /* FIXME: Hardcode number of outstanding requests for now */
2004 reqs = 32;
2005 if (pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE))
2006 reqs = 1;
2007 reset_enable = pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_ENABLE_RESET);
Joerg Roedel52815b72011-11-17 17:24:28 +01002008
2009 /* Only allow access to user-accessible pages */
2010 ret = pci_enable_pasid(pdev, 0);
2011 if (ret)
2012 goto out_err;
2013
2014 /* First reset the PRI state of the device */
2015 ret = pci_reset_pri(pdev);
2016 if (ret)
2017 goto out_err;
2018
Joerg Roedel6a113dd2011-12-01 12:04:58 +01002019 /* Enable PRI */
2020 ret = pci_enable_pri(pdev, reqs);
Joerg Roedel52815b72011-11-17 17:24:28 +01002021 if (ret)
2022 goto out_err;
2023
Joerg Roedel6a113dd2011-12-01 12:04:58 +01002024 if (reset_enable) {
2025 ret = pri_reset_while_enabled(pdev);
2026 if (ret)
2027 goto out_err;
2028 }
2029
Joerg Roedel52815b72011-11-17 17:24:28 +01002030 ret = pci_enable_ats(pdev, PAGE_SHIFT);
2031 if (ret)
2032 goto out_err;
2033
2034 return 0;
2035
2036out_err:
2037 pci_disable_pri(pdev);
2038 pci_disable_pasid(pdev);
2039
2040 return ret;
2041}
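/*
 * The enable order above matters: PASID first, then a PRI reset and PRI
 * enable with the erratum-dependent number of outstanding requests, and
 * ATS last.  The error path disables PRI and PASID again so a failed
 * attach does not leave the device half-initialized.
 */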
2042
Joerg Roedel15898bb2009-11-24 15:39:42 +01002043/*
Anna-Maria Gleixner29a0c412018-05-07 14:53:26 +02002044 * If a device is not yet associated with a domain, this function makes the
2045 * device visible in the domain
Joerg Roedel15898bb2009-11-24 15:39:42 +01002046 */
2047static int attach_device(struct device *dev,
2048 struct protection_domain *domain)
2049{
Wan Zongshun2bf9a0a2016-04-01 09:06:03 -04002050 struct pci_dev *pdev;
Joerg Roedelea61cdd2011-06-09 12:56:30 +02002051 struct iommu_dev_data *dev_data;
Joerg Roedel15898bb2009-11-24 15:39:42 +01002052 unsigned long flags;
2053 int ret;
2054
Joerg Roedelf6c0bfc2019-09-25 15:22:57 +02002055 spin_lock_irqsave(&domain->lock, flags);
2056
Joerg Roedelea61cdd2011-06-09 12:56:30 +02002057 dev_data = get_dev_data(dev);
2058
Joerg Roedelab7b2572019-09-25 15:22:59 +02002059 spin_lock(&dev_data->lock);
2060
Joerg Roedel45e528d2019-09-25 15:22:58 +02002061 ret = -EBUSY;
2062 if (dev_data->domain != NULL)
2063 goto out;
2064
Wan Zongshun2bf9a0a2016-04-01 09:06:03 -04002065 if (!dev_is_pci(dev))
2066 goto skip_ats_check;
2067
2068 pdev = to_pci_dev(dev);
Joerg Roedel52815b72011-11-17 17:24:28 +01002069 if (domain->flags & PD_IOMMUV2_MASK) {
Joerg Roedelf6c0bfc2019-09-25 15:22:57 +02002070 ret = -EINVAL;
Joerg Roedel02ca2022015-07-28 16:58:49 +02002071 if (!dev_data->passthrough)
Joerg Roedelf6c0bfc2019-09-25 15:22:57 +02002072 goto out;
Joerg Roedel52815b72011-11-17 17:24:28 +01002073
Joerg Roedel02ca2022015-07-28 16:58:49 +02002074 if (dev_data->iommu_v2) {
2075 if (pdev_iommuv2_enable(pdev) != 0)
Joerg Roedelf6c0bfc2019-09-25 15:22:57 +02002076 goto out;
Joerg Roedel52815b72011-11-17 17:24:28 +01002077
Joerg Roedel02ca2022015-07-28 16:58:49 +02002078 dev_data->ats.enabled = true;
2079 dev_data->ats.qdep = pci_ats_queue_depth(pdev);
Jean-Philippe Brucker83d18bd2019-04-10 16:21:08 +01002080 dev_data->pri_tlp = pci_prg_resp_pasid_required(pdev);
Joerg Roedel02ca2022015-07-28 16:58:49 +02002081 }
Joerg Roedel52815b72011-11-17 17:24:28 +01002082 } else if (amd_iommu_iotlb_sup &&
2083 pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
Joerg Roedelea61cdd2011-06-09 12:56:30 +02002084 dev_data->ats.enabled = true;
2085 dev_data->ats.qdep = pci_ats_queue_depth(pdev);
2086 }
Joerg Roedelfd7b5532011-04-05 15:31:08 +02002087
Wan Zongshun2bf9a0a2016-04-01 09:06:03 -04002088skip_ats_check:
Joerg Roedel45e528d2019-09-25 15:22:58 +02002089 ret = 0;
2090
2091 do_attach(dev_data, domain);
Joerg Roedel15898bb2009-11-24 15:39:42 +01002092
2093 /*
2094 * We might boot into a crash-kernel here. The crashed kernel
2095 * left the caches in the IOMMU dirty. So we have to flush
2096 * here to evict all dirty stuff.
2097 */
Joerg Roedel17b124b2011-04-06 18:01:35 +02002098 domain_flush_tlb_pde(domain);
Joerg Roedel15898bb2009-11-24 15:39:42 +01002099
Filippo Sironi0b15e022019-09-10 19:49:21 +02002100 domain_flush_complete(domain);
2101
Joerg Roedelf6c0bfc2019-09-25 15:22:57 +02002102out:
Joerg Roedelab7b2572019-09-25 15:22:59 +02002103 spin_unlock(&dev_data->lock);
2104
Joerg Roedel3a119052019-09-25 15:22:56 +02002105 spin_unlock_irqrestore(&domain->lock, flags);
Joerg Roedelf6c0bfc2019-09-25 15:22:57 +02002106
Joerg Roedel15898bb2009-11-24 15:39:42 +01002107 return ret;
2108}
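/*
 * Lock nesting in attach_device()/detach_device(): the domain lock is
 * taken first with interrupts disabled, then the per-device
 * dev_data->lock, so concurrent attach/detach on the same domain
 * serialize on the domain while per-device state stays consistent.
 */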
2109
2110/*
Joerg Roedel355bf552008-12-08 12:02:41 +01002111 * Removes a device from a protection domain - the domain lock is taken inside
2112 */
Joerg Roedel15898bb2009-11-24 15:39:42 +01002113static void detach_device(struct device *dev)
Joerg Roedel355bf552008-12-08 12:02:41 +01002114{
Joerg Roedel52815b72011-11-17 17:24:28 +01002115 struct protection_domain *domain;
Joerg Roedelea61cdd2011-06-09 12:56:30 +02002116 struct iommu_dev_data *dev_data;
Joerg Roedel355bf552008-12-08 12:02:41 +01002117 unsigned long flags;
2118
Joerg Roedelec9e79e2011-06-09 17:25:50 +02002119 dev_data = get_dev_data(dev);
Joerg Roedel52815b72011-11-17 17:24:28 +01002120 domain = dev_data->domain;
Joerg Roedelec9e79e2011-06-09 17:25:50 +02002121
Joerg Roedelf6c0bfc2019-09-25 15:22:57 +02002122 spin_lock_irqsave(&domain->lock, flags);
2123
Joerg Roedelab7b2572019-09-25 15:22:59 +02002124 spin_lock(&dev_data->lock);
2125
Anna-Maria Gleixnerea3fd042018-05-07 14:53:27 +02002126 /*
2127 * First check if the device is still attached. It might already
2128 * be detached from its domain because the generic
2129 * iommu_detach_group code detached it and we try again here in
2130 * our alias handling.
2131 */
2132 if (WARN_ON(!dev_data->domain))
Joerg Roedelf6c0bfc2019-09-25 15:22:57 +02002133 goto out;
Anna-Maria Gleixnerea3fd042018-05-07 14:53:27 +02002134
Joerg Roedelf6c0bfc2019-09-25 15:22:57 +02002135 do_detach(dev_data);
Joerg Roedelfd7b5532011-04-05 15:31:08 +02002136
Wan Zongshun2bf9a0a2016-04-01 09:06:03 -04002137 if (!dev_is_pci(dev))
Joerg Roedelf6c0bfc2019-09-25 15:22:57 +02002138 goto out;
Wan Zongshun2bf9a0a2016-04-01 09:06:03 -04002139
Joerg Roedel02ca2022015-07-28 16:58:49 +02002140 if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
Joerg Roedel52815b72011-11-17 17:24:28 +01002141 pdev_iommuv2_disable(to_pci_dev(dev));
2142 else if (dev_data->ats.enabled)
Joerg Roedelea61cdd2011-06-09 12:56:30 +02002143 pci_disable_ats(to_pci_dev(dev));
Joerg Roedel52815b72011-11-17 17:24:28 +01002144
2145 dev_data->ats.enabled = false;
Joerg Roedelf6c0bfc2019-09-25 15:22:57 +02002146
2147out:
Joerg Roedelab7b2572019-09-25 15:22:59 +02002148 spin_unlock(&dev_data->lock);
2149
Joerg Roedelf6c0bfc2019-09-25 15:22:57 +02002150 spin_unlock_irqrestore(&domain->lock, flags);
Joerg Roedel355bf552008-12-08 12:02:41 +01002151}
Joerg Roedele275a2a2008-12-10 18:27:25 +01002152
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02002153static int amd_iommu_add_device(struct device *dev)
Joerg Roedel15898bb2009-11-24 15:39:42 +01002154{
Joerg Roedel71f77582011-06-09 19:03:15 +02002155 struct iommu_dev_data *dev_data;
Joerg Roedel07ee8692015-05-28 18:41:42 +02002156 struct iommu_domain *domain;
Joerg Roedele275a2a2008-12-10 18:27:25 +01002157 struct amd_iommu *iommu;
Wan Zongshun7aba6cb2016-04-01 09:06:02 -04002158 int ret, devid;
Joerg Roedele275a2a2008-12-10 18:27:25 +01002159
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02002160 if (!check_device(dev) || get_dev_data(dev))
Joerg Roedel98fc5a62009-11-24 17:19:23 +01002161 return 0;
Joerg Roedele275a2a2008-12-10 18:27:25 +01002162
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02002163 devid = get_device_id(dev);
Joerg Roedel9ee35e42016-04-21 18:21:31 +02002164 if (devid < 0)
Wan Zongshun7aba6cb2016-04-01 09:06:02 -04002165 return devid;
2166
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02002167 iommu = amd_iommu_rlookup_table[devid];
Joerg Roedele275a2a2008-12-10 18:27:25 +01002168
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02002169 ret = iommu_init_device(dev);
Joerg Roedel4d58b8a2015-06-11 09:21:39 +02002170 if (ret) {
2171 if (ret != -ENOTSUPP)
Bjorn Helgaas5f226da2019-02-08 16:05:53 -06002172 dev_err(dev, "Failed to initialize - trying to proceed anyway\n");
Joerg Roedel657cbb62009-11-23 15:26:46 +01002173
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02002174 iommu_ignore_device(dev);
Christoph Hellwig356da6d2018-12-06 13:39:32 -08002175 dev->dma_ops = NULL;
Joerg Roedele275a2a2008-12-10 18:27:25 +01002176 goto out;
2177 }
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02002178 init_iommu_group(dev);
Joerg Roedele275a2a2008-12-10 18:27:25 +01002179
Joerg Roedel07ee8692015-05-28 18:41:42 +02002180 dev_data = get_dev_data(dev);
Joerg Roedel4d58b8a2015-06-11 09:21:39 +02002181
2182 BUG_ON(!dev_data);
2183
Joerg Roedelcc7c8ad2019-08-19 15:22:49 +02002184 if (dev_data->iommu_v2)
Joerg Roedel07ee8692015-05-28 18:41:42 +02002185 iommu_request_dm_for_dev(dev);
2186
2187 /* Domains are initialized for this device - have a look at what we ended up with */
2188 domain = iommu_get_domain_for_dev(dev);
Joerg Roedel32302322015-07-28 16:58:50 +02002189 if (domain->type == IOMMU_DOMAIN_IDENTITY)
Joerg Roedel07ee8692015-05-28 18:41:42 +02002190 dev_data->passthrough = true;
Tom Murphybe62dbf2019-09-08 09:56:41 -07002191 else if (domain->type == IOMMU_DOMAIN_DMA)
2192 iommu_setup_dma_ops(dev, IOVA_START_PFN << PAGE_SHIFT, 0);
Joerg Roedele275a2a2008-12-10 18:27:25 +01002193
2194out:
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02002195 iommu_completion_wait(iommu);
2196
Joerg Roedele275a2a2008-12-10 18:27:25 +01002197 return 0;
2198}
2199
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02002200static void amd_iommu_remove_device(struct device *dev)
Joerg Roedel8638c492009-12-10 11:12:25 +01002201{
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02002202 struct amd_iommu *iommu;
Wan Zongshun7aba6cb2016-04-01 09:06:02 -04002203 int devid;
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02002204
2205 if (!check_device(dev))
2206 return;
2207
2208 devid = get_device_id(dev);
Joerg Roedel9ee35e42016-04-21 18:21:31 +02002209 if (devid < 0)
Wan Zongshun7aba6cb2016-04-01 09:06:02 -04002210 return;
2211
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02002212 iommu = amd_iommu_rlookup_table[devid];
2213
2214 iommu_uninit_device(dev);
2215 iommu_completion_wait(iommu);
Joerg Roedel8638c492009-12-10 11:12:25 +01002216}
2217
Wan Zongshunb097d112016-04-01 09:06:04 -04002218static struct iommu_group *amd_iommu_device_group(struct device *dev)
2219{
2220 if (dev_is_pci(dev))
2221 return pci_device_group(dev);
2222
2223 return acpihid_device_group(dev);
2224}
2225
Tom Murphybe62dbf2019-09-08 09:56:41 -07002226static int amd_iommu_domain_get_attr(struct iommu_domain *domain,
2227 enum iommu_attr attr, void *data)
2228{
2229 switch (domain->type) {
2230 case IOMMU_DOMAIN_UNMANAGED:
2231 return -ENODEV;
2232 case IOMMU_DOMAIN_DMA:
2233 switch (attr) {
2234 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
2235 *(int *)data = !amd_iommu_unmap_flush;
2236 return 0;
2237 default:
2238 return -ENODEV;
2239 }
2240 break;
2241 default:
2242 return -EINVAL;
2243 }
2244}
2245
Joerg Roedel431b2a22008-07-11 17:14:22 +02002246/*****************************************************************************
2247 *
2248 * The next functions belong to the dma_ops mapping/unmapping code.
2249 *
2250 *****************************************************************************/
2251
Joerg Roedel04bfdd82009-09-02 16:00:23 +02002252static void update_device_table(struct protection_domain *domain)
2253{
Joerg Roedel492667d2009-11-27 13:25:47 +01002254 struct iommu_dev_data *dev_data;
Joerg Roedel04bfdd82009-09-02 16:00:23 +02002255
Joerg Roedel3254de62016-07-26 15:18:54 +02002256 list_for_each_entry(dev_data, &domain->dev_list, list) {
Gary R Hookff18c4e2017-12-20 09:47:08 -07002257 set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled,
2258 dev_data->iommu_v2);
Logan Gunthorpe33323642019-10-22 16:01:20 -06002259 clone_aliases(dev_data->pdev);
Joerg Roedel3254de62016-07-26 15:18:54 +02002260 }
Joerg Roedel04bfdd82009-09-02 16:00:23 +02002261}
2262
2263static void update_domain(struct protection_domain *domain)
2264{
Joerg Roedel04bfdd82009-09-02 16:00:23 +02002265 update_device_table(domain);
Joerg Roedel17b124b2011-04-06 18:01:35 +02002266
2267 domain_flush_devices(domain);
2268 domain_flush_tlb_pde(domain);
Joerg Roedel04bfdd82009-09-02 16:00:23 +02002269}
2270
Joerg Roedel3a18404c2015-05-28 18:41:45 +02002271int __init amd_iommu_init_api(void)
Joerg Roedel27c21272011-05-30 15:56:24 +02002272{
Joerg Roedel460c26d2017-06-02 14:28:01 +02002273 int ret, err = 0;
Joerg Roedel307d5852016-07-05 11:54:04 +02002274
2275 ret = iova_cache_get();
2276 if (ret)
2277 return ret;
Wan Zongshun9a4d3bf52016-04-01 09:06:05 -04002278
2279 err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
2280 if (err)
2281 return err;
2282#ifdef CONFIG_ARM_AMBA
2283 err = bus_set_iommu(&amba_bustype, &amd_iommu_ops);
2284 if (err)
2285 return err;
2286#endif
Wan Zongshun0076cd32016-05-10 09:21:01 -04002287 err = bus_set_iommu(&platform_bus_type, &amd_iommu_ops);
2288 if (err)
2289 return err;
Joerg Roedel460c26d2017-06-02 14:28:01 +02002290
Wan Zongshun9a4d3bf52016-04-01 09:06:05 -04002291 return 0;
Joerg Roedelf5325092010-01-22 17:44:35 +01002292}
2293
Joerg Roedel6631ee92008-06-26 21:28:05 +02002294int __init amd_iommu_init_dma_ops(void)
2295{
Joerg Roedelcc7c8ad2019-08-19 15:22:49 +02002296 swiotlb = (iommu_default_passthrough() || sme_me_mask) ? 1 : 0;
Joerg Roedel6631ee92008-06-26 21:28:05 +02002297
Joerg Roedel62410ee2012-06-12 16:42:43 +02002298 if (amd_iommu_unmap_flush)
Joerg Roedel101fa032018-11-27 16:22:31 +01002299 pr_info("IO/TLB flush on unmap enabled\n");
Joerg Roedel62410ee2012-06-12 16:42:43 +02002300 else
Joerg Roedel101fa032018-11-27 16:22:31 +01002301 pr_info("Lazy IO/TLB flushing enabled\n");
Joerg Roedel62410ee2012-06-12 16:42:43 +02002302
Joerg Roedel6631ee92008-06-26 21:28:05 +02002303 return 0;
Joerg Roedelc5b5da92016-07-06 11:55:37 +02002304
Joerg Roedel6631ee92008-06-26 21:28:05 +02002305}
Joerg Roedel6d98cd82008-12-08 12:05:55 +01002306
2307/*****************************************************************************
2308 *
2309 * The following functions belong to the exported interface of AMD IOMMU
2310 *
2311 * This interface allows access to lower level functions of the IOMMU
2312 * like protection domain handling and assignment of devices to domains
2313 * which is not possible with the dma_ops interface.
2314 *
2315 *****************************************************************************/
2316
Joerg Roedel6d98cd82008-12-08 12:05:55 +01002317static void cleanup_domain(struct protection_domain *domain)
2318{
Joerg Roedel9b29d3c2014-08-05 17:50:15 +02002319 struct iommu_dev_data *entry;
Joerg Roedel6d98cd82008-12-08 12:05:55 +01002320 unsigned long flags;
Joerg Roedel6d98cd82008-12-08 12:05:55 +01002321
Joerg Roedelf6c0bfc2019-09-25 15:22:57 +02002322 spin_lock_irqsave(&domain->lock, flags);
Joerg Roedel6d98cd82008-12-08 12:05:55 +01002323
Joerg Roedel9b29d3c2014-08-05 17:50:15 +02002324 while (!list_empty(&domain->dev_list)) {
2325 entry = list_first_entry(&domain->dev_list,
2326 struct iommu_dev_data, list);
Anna-Maria Gleixnerea3fd042018-05-07 14:53:27 +02002327 BUG_ON(!entry->domain);
Joerg Roedelf6c0bfc2019-09-25 15:22:57 +02002328 do_detach(entry);
Joerg Roedel492667d2009-11-27 13:25:47 +01002329 }
Joerg Roedel6d98cd82008-12-08 12:05:55 +01002330
Joerg Roedelf6c0bfc2019-09-25 15:22:57 +02002331 spin_unlock_irqrestore(&domain->lock, flags);
Joerg Roedel6d98cd82008-12-08 12:05:55 +01002332}
2333
Joerg Roedel26508152009-08-26 16:52:40 +02002334static void protection_domain_free(struct protection_domain *domain)
2335{
2336 if (!domain)
2337 return;
2338
2339 if (domain->id)
2340 domain_id_free(domain->id);
2341
2342 kfree(domain);
2343}
2344
Joerg Roedel7a5a5662015-06-30 08:56:11 +02002345static int protection_domain_init(struct protection_domain *domain)
2346{
2347 spin_lock_init(&domain->lock);
Joerg Roedel7a5a5662015-06-30 08:56:11 +02002348 domain->id = domain_id_alloc();
2349 if (!domain->id)
2350 return -ENOMEM;
2351 INIT_LIST_HEAD(&domain->dev_list);
2352
2353 return 0;
2354}
2355
Joerg Roedel26508152009-08-26 16:52:40 +02002356static struct protection_domain *protection_domain_alloc(void)
Joerg Roedelc156e342008-12-02 18:13:27 +01002357{
2358 struct protection_domain *domain;
2359
2360 domain = kzalloc(sizeof(*domain), GFP_KERNEL);
2361 if (!domain)
Joerg Roedel26508152009-08-26 16:52:40 +02002362 return NULL;
Joerg Roedelc156e342008-12-02 18:13:27 +01002363
Joerg Roedel7a5a5662015-06-30 08:56:11 +02002364 if (protection_domain_init(domain))
Joerg Roedel26508152009-08-26 16:52:40 +02002365 goto out_err;
2366
2367 return domain;
2368
2369out_err:
2370 kfree(domain);
2371
2372 return NULL;
2373}
2374
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01002375static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
2376{
2377 struct protection_domain *pdomain;
2378
Joerg Roedel0bb6e242015-05-28 18:41:40 +02002379 switch (type) {
2380 case IOMMU_DOMAIN_UNMANAGED:
2381 pdomain = protection_domain_alloc();
2382 if (!pdomain)
2383 return NULL;
2384
2385 pdomain->mode = PAGE_MODE_3_LEVEL;
2386 pdomain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
2387 if (!pdomain->pt_root) {
2388 protection_domain_free(pdomain);
2389 return NULL;
2390 }
2391
2392 pdomain->domain.geometry.aperture_start = 0;
2393 pdomain->domain.geometry.aperture_end = ~0ULL;
2394 pdomain->domain.geometry.force_aperture = true;
2395
2396 break;
2397 case IOMMU_DOMAIN_DMA:
Tom Murphybe62dbf2019-09-08 09:56:41 -07002398 pdomain = dma_ops_domain_alloc();
2399 if (!pdomain) {
Joerg Roedel101fa032018-11-27 16:22:31 +01002400 pr_err("Failed to allocate dma_ops domain\n");
Joerg Roedel0bb6e242015-05-28 18:41:40 +02002401 return NULL;
2402 }
Joerg Roedel0bb6e242015-05-28 18:41:40 +02002403 break;
Joerg Roedel07f643a2015-05-28 18:41:41 +02002404 case IOMMU_DOMAIN_IDENTITY:
2405 pdomain = protection_domain_alloc();
2406 if (!pdomain)
2407 return NULL;
2408
2409 pdomain->mode = PAGE_MODE_NONE;
2410 break;
Joerg Roedel0bb6e242015-05-28 18:41:40 +02002411 default:
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01002412 return NULL;
Joerg Roedel0bb6e242015-05-28 18:41:40 +02002413 }
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01002414
2415 return &pdomain->domain;
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01002416}
2417
2418static void amd_iommu_domain_free(struct iommu_domain *dom)
Joerg Roedel26508152009-08-26 16:52:40 +02002419{
2420 struct protection_domain *domain;
Joerg Roedel98383fc2008-12-02 18:34:12 +01002421
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01002422 domain = to_pdomain(dom);
2423
Joerg Roedel98383fc2008-12-02 18:34:12 +01002424 if (domain->dev_cnt > 0)
2425 cleanup_domain(domain);
2426
2427 BUG_ON(domain->dev_cnt != 0);
2428
Joerg Roedelcda70052016-07-07 15:57:04 +02002429 if (!dom)
2430 return;
Joerg Roedel98383fc2008-12-02 18:34:12 +01002431
Joerg Roedelcda70052016-07-07 15:57:04 +02002432 switch (dom->type) {
2433 case IOMMU_DOMAIN_DMA:
Joerg Roedel281e8cc2016-07-07 16:12:02 +02002434 /* Now release the domain */
Tom Murphybe62dbf2019-09-08 09:56:41 -07002435 dma_ops_domain_free(domain);
Joerg Roedelcda70052016-07-07 15:57:04 +02002436 break;
2437 default:
2438 if (domain->mode != PAGE_MODE_NONE)
2439 free_pagetable(domain);
Joerg Roedel52815b72011-11-17 17:24:28 +01002440
Joerg Roedelcda70052016-07-07 15:57:04 +02002441 if (domain->flags & PD_IOMMUV2_MASK)
2442 free_gcr3_table(domain);
2443
2444 protection_domain_free(domain);
2445 break;
2446 }
Joerg Roedel98383fc2008-12-02 18:34:12 +01002447}
2448
Joerg Roedel684f2882008-12-08 12:07:44 +01002449static void amd_iommu_detach_device(struct iommu_domain *dom,
2450 struct device *dev)
2451{
Joerg Roedel657cbb62009-11-23 15:26:46 +01002452 struct iommu_dev_data *dev_data = dev->archdata.iommu;
Joerg Roedel684f2882008-12-08 12:07:44 +01002453 struct amd_iommu *iommu;
Wan Zongshun7aba6cb2016-04-01 09:06:02 -04002454 int devid;
Joerg Roedel684f2882008-12-08 12:07:44 +01002455
Joerg Roedel98fc5a62009-11-24 17:19:23 +01002456 if (!check_device(dev))
Joerg Roedel684f2882008-12-08 12:07:44 +01002457 return;
2458
Joerg Roedel98fc5a62009-11-24 17:19:23 +01002459 devid = get_device_id(dev);
Joerg Roedel9ee35e42016-04-21 18:21:31 +02002460 if (devid < 0)
Wan Zongshun7aba6cb2016-04-01 09:06:02 -04002461 return;
Joerg Roedel684f2882008-12-08 12:07:44 +01002462
Joerg Roedel657cbb62009-11-23 15:26:46 +01002463 if (dev_data->domain != NULL)
Joerg Roedel15898bb2009-11-24 15:39:42 +01002464 detach_device(dev);
Joerg Roedel684f2882008-12-08 12:07:44 +01002465
2466 iommu = amd_iommu_rlookup_table[devid];
2467 if (!iommu)
2468 return;
2469
Suravee Suthikulpanitd98de492016-08-23 13:52:40 -05002470#ifdef CONFIG_IRQ_REMAP
2471 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
2472 (dom->type == IOMMU_DOMAIN_UNMANAGED))
2473 dev_data->use_vapic = 0;
2474#endif
2475
Joerg Roedel684f2882008-12-08 12:07:44 +01002476 iommu_completion_wait(iommu);
2477}
2478
Joerg Roedel01106062008-12-02 19:34:11 +01002479static int amd_iommu_attach_device(struct iommu_domain *dom,
2480 struct device *dev)
2481{
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01002482 struct protection_domain *domain = to_pdomain(dom);
Joerg Roedel657cbb62009-11-23 15:26:46 +01002483 struct iommu_dev_data *dev_data;
Joerg Roedel01106062008-12-02 19:34:11 +01002484 struct amd_iommu *iommu;
Joerg Roedel15898bb2009-11-24 15:39:42 +01002485 int ret;
Joerg Roedel01106062008-12-02 19:34:11 +01002486
Joerg Roedel98fc5a62009-11-24 17:19:23 +01002487 if (!check_device(dev))
Joerg Roedel01106062008-12-02 19:34:11 +01002488 return -EINVAL;
2489
Joerg Roedel657cbb62009-11-23 15:26:46 +01002490 dev_data = dev->archdata.iommu;
Tom Murphybe62dbf2019-09-08 09:56:41 -07002491 dev_data->defer_attach = false;
Joerg Roedel657cbb62009-11-23 15:26:46 +01002492
Joerg Roedelf62dda62011-06-09 12:55:35 +02002493 iommu = amd_iommu_rlookup_table[dev_data->devid];
Joerg Roedel01106062008-12-02 19:34:11 +01002494 if (!iommu)
2495 return -EINVAL;
2496
Joerg Roedel657cbb62009-11-23 15:26:46 +01002497 if (dev_data->domain)
Joerg Roedel15898bb2009-11-24 15:39:42 +01002498 detach_device(dev);
Joerg Roedel01106062008-12-02 19:34:11 +01002499
Joerg Roedel15898bb2009-11-24 15:39:42 +01002500 ret = attach_device(dev, domain);
Joerg Roedel01106062008-12-02 19:34:11 +01002501
Suravee Suthikulpanitd98de492016-08-23 13:52:40 -05002502#ifdef CONFIG_IRQ_REMAP
2503 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
2504 if (dom->type == IOMMU_DOMAIN_UNMANAGED)
2505 dev_data->use_vapic = 1;
2506 else
2507 dev_data->use_vapic = 0;
2508 }
2509#endif
2510
Joerg Roedel01106062008-12-02 19:34:11 +01002511 iommu_completion_wait(iommu);
2512
Joerg Roedel15898bb2009-11-24 15:39:42 +01002513 return ret;
Joerg Roedel01106062008-12-02 19:34:11 +01002514}
2515
Joerg Roedel468e2362010-01-21 16:37:36 +01002516static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
Tom Murphy781ca2d2019-09-08 09:56:38 -07002517 phys_addr_t paddr, size_t page_size, int iommu_prot,
2518 gfp_t gfp)
Joerg Roedelc6229ca2008-12-02 19:48:43 +01002519{
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01002520 struct protection_domain *domain = to_pdomain(dom);
Joerg Roedelc6229ca2008-12-02 19:48:43 +01002521 int prot = 0;
2522 int ret;
2523
Joerg Roedel132bd682011-11-17 14:18:46 +01002524 if (domain->mode == PAGE_MODE_NONE)
2525 return -EINVAL;
2526
Joerg Roedelc6229ca2008-12-02 19:48:43 +01002527 if (iommu_prot & IOMMU_READ)
2528 prot |= IOMMU_PROT_IR;
2529 if (iommu_prot & IOMMU_WRITE)
2530 prot |= IOMMU_PROT_IW;
2531
Joerg Roedel3057fb92019-10-18 11:00:33 +02002532 ret = iommu_map_page(domain, iova, paddr, page_size, prot, gfp);
Joerg Roedel5d214fe2010-02-08 14:44:49 +01002533
Tom Murphy5cd3f2e2019-06-13 23:04:55 +01002534 domain_flush_np_cache(domain, iova, page_size);
2535
Joerg Roedel795e74f72010-05-11 17:40:57 +02002536 return ret;
Joerg Roedelc6229ca2008-12-02 19:48:43 +01002537}
2538
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02002539static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
Will Deacon56f8af52019-07-02 16:44:06 +01002540 size_t page_size,
2541 struct iommu_iotlb_gather *gather)
Joerg Roedeleb74ff62008-12-02 19:59:10 +01002542{
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01002543 struct protection_domain *domain = to_pdomain(dom);
Joerg Roedeleb74ff62008-12-02 19:59:10 +01002544
Joerg Roedel132bd682011-11-17 14:18:46 +01002545 if (domain->mode == PAGE_MODE_NONE)
Suravee Suthikulpanitc5611a82018-02-05 05:45:53 -05002546 return 0;
Joerg Roedel132bd682011-11-17 14:18:46 +01002547
Tom Murphy37ec8eb2019-09-08 09:56:37 -07002548 return iommu_unmap_page(domain, iova, page_size);
Joerg Roedeleb74ff62008-12-02 19:59:10 +01002549}
2550
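/*
 * Walk the domain's page table and translate @iova back to a physical
 * address, honouring the page size of the PTE that maps it. Domains in
 * PAGE_MODE_NONE have no host page table, so the IOVA is returned
 * unchanged.
 */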
Joerg Roedel645c4c82008-12-02 20:05:50 +01002551static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
Varun Sethibb5547a2013-03-29 01:23:58 +05302552 dma_addr_t iova)
Joerg Roedel645c4c82008-12-02 20:05:50 +01002553{
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01002554 struct protection_domain *domain = to_pdomain(dom);
Joerg Roedel3039ca12015-04-01 14:58:48 +02002555 unsigned long offset_mask, pte_pgsize;
Joerg Roedelf03152b2010-01-21 16:15:24 +01002556 u64 *pte, __pte;
Joerg Roedel645c4c82008-12-02 20:05:50 +01002557
Joerg Roedel132bd682011-11-17 14:18:46 +01002558 if (domain->mode == PAGE_MODE_NONE)
2559 return iova;
2560
Joerg Roedel3039ca12015-04-01 14:58:48 +02002561 pte = fetch_pte(domain, iova, &pte_pgsize);
Joerg Roedel645c4c82008-12-02 20:05:50 +01002562
Joerg Roedela6d41a42009-09-02 17:08:55 +02002563 if (!pte || !IOMMU_PTE_PRESENT(*pte))
Joerg Roedel645c4c82008-12-02 20:05:50 +01002564 return 0;
2565
Joerg Roedelb24b1b62015-04-01 14:58:51 +02002566 offset_mask = pte_pgsize - 1;
Singh, Brijeshb3e9b512018-10-04 21:40:23 +00002567 __pte = __sme_clr(*pte & PM_ADDR_MASK);
Joerg Roedelf03152b2010-01-21 16:15:24 +01002568
Joerg Roedelb24b1b62015-04-01 14:58:51 +02002569 return (__pte & ~offset_mask) | (iova & offset_mask);
Joerg Roedel645c4c82008-12-02 20:05:50 +01002570}
2571
Joerg Roedelab636482014-09-05 10:48:21 +02002572static bool amd_iommu_capable(enum iommu_cap cap)
Sheng Yangdbb9fd82009-03-18 15:33:06 +08002573{
Joerg Roedel80a506b2010-07-27 17:14:24 +02002574 switch (cap) {
2575 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedelab636482014-09-05 10:48:21 +02002576 return true;
Joerg Roedelbdddadc2012-07-02 18:38:13 +02002577 case IOMMU_CAP_INTR_REMAP:
Joerg Roedelab636482014-09-05 10:48:21 +02002578 return (irq_remapping_enabled == 1);
Will Deaconcfdeec22014-10-27 11:24:48 +00002579 case IOMMU_CAP_NOEXEC:
2580 return false;
Lu Baolue84b7cc2018-10-08 10:24:19 +08002581 default:
2582 break;
Joerg Roedel80a506b2010-07-27 17:14:24 +02002583 }
2584
Joerg Roedelab636482014-09-05 10:48:21 +02002585 return false;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08002586}
2587
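/*
 * Report reserved regions for @dev: unity-mapped and exclusion ranges
 * that cover its device ID, plus the fixed MSI and HyperTransport
 * address windows.
 */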
Eric Augere5b52342017-01-19 20:57:47 +00002588static void amd_iommu_get_resv_regions(struct device *dev,
2589 struct list_head *head)
Joerg Roedel35cf2482015-05-28 18:41:37 +02002590{
Eric Auger4397f322017-01-19 20:57:54 +00002591 struct iommu_resv_region *region;
Joerg Roedel35cf2482015-05-28 18:41:37 +02002592 struct unity_map_entry *entry;
Wan Zongshun7aba6cb2016-04-01 09:06:02 -04002593 int devid;
Joerg Roedel35cf2482015-05-28 18:41:37 +02002594
2595 devid = get_device_id(dev);
Joerg Roedel9ee35e42016-04-21 18:21:31 +02002596 if (devid < 0)
Wan Zongshun7aba6cb2016-04-01 09:06:02 -04002597 return;
Joerg Roedel35cf2482015-05-28 18:41:37 +02002598
2599 list_for_each_entry(entry, &amd_iommu_unity_map, list) {
Joerg Roedel8aafaaf2019-03-28 11:44:59 +01002600 int type, prot = 0;
Eric Auger4397f322017-01-19 20:57:54 +00002601 size_t length;
Joerg Roedel35cf2482015-05-28 18:41:37 +02002602
2603 if (devid < entry->devid_start || devid > entry->devid_end)
2604 continue;
2605
Joerg Roedel8aafaaf2019-03-28 11:44:59 +01002606 type = IOMMU_RESV_DIRECT;
Eric Auger4397f322017-01-19 20:57:54 +00002607 length = entry->address_end - entry->address_start;
2608 if (entry->prot & IOMMU_PROT_IR)
2609 prot |= IOMMU_READ;
2610 if (entry->prot & IOMMU_PROT_IW)
2611 prot |= IOMMU_WRITE;
Joerg Roedel8aafaaf2019-03-28 11:44:59 +01002612 if (entry->prot & IOMMU_UNITY_MAP_FLAG_EXCL_RANGE)
2613 /* Exclusion range */
2614 type = IOMMU_RESV_RESERVED;
Eric Auger4397f322017-01-19 20:57:54 +00002615
2616 region = iommu_alloc_resv_region(entry->address_start,
Joerg Roedel8aafaaf2019-03-28 11:44:59 +01002617 length, prot, type);
Joerg Roedel35cf2482015-05-28 18:41:37 +02002618 if (!region) {
Bjorn Helgaas5f226da2019-02-08 16:05:53 -06002619 dev_err(dev, "Out of memory allocating dm-regions\n");
Joerg Roedel35cf2482015-05-28 18:41:37 +02002620 return;
2621 }
Joerg Roedel35cf2482015-05-28 18:41:37 +02002622 list_add_tail(&region->list, head);
2623 }
Eric Auger4397f322017-01-19 20:57:54 +00002624
2625 region = iommu_alloc_resv_region(MSI_RANGE_START,
2626 MSI_RANGE_END - MSI_RANGE_START + 1,
Robin Murphy9d3a4de2017-03-16 17:00:16 +00002627 0, IOMMU_RESV_MSI);
Eric Auger4397f322017-01-19 20:57:54 +00002628 if (!region)
2629 return;
2630 list_add_tail(&region->list, head);
2631
2632 region = iommu_alloc_resv_region(HT_RANGE_START,
2633 HT_RANGE_END - HT_RANGE_START + 1,
2634 0, IOMMU_RESV_RESERVED);
2635 if (!region)
2636 return;
2637 list_add_tail(&region->list, head);
Joerg Roedel35cf2482015-05-28 18:41:37 +02002638}
2639
Baoquan Hedf3f7a62017-08-09 16:33:41 +08002640static bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
2641 struct device *dev)
2642{
2643 struct iommu_dev_data *dev_data = dev->archdata.iommu;
2644 return dev_data->defer_attach;
2645}
2646
Suravee Suthikulpaniteb5ecd12018-02-21 14:19:45 +07002647static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
2648{
2649 struct protection_domain *dom = to_pdomain(domain);
Joerg Roedel2a78f992019-09-25 15:23:00 +02002650 unsigned long flags;
Suravee Suthikulpaniteb5ecd12018-02-21 14:19:45 +07002651
Joerg Roedel2a78f992019-09-25 15:23:00 +02002652 spin_lock_irqsave(&dom->lock, flags);
Suravee Suthikulpaniteb5ecd12018-02-21 14:19:45 +07002653 domain_flush_tlb_pde(dom);
2654 domain_flush_complete(dom);
Joerg Roedel2a78f992019-09-25 15:23:00 +02002655 spin_unlock_irqrestore(&dom->lock, flags);
Suravee Suthikulpaniteb5ecd12018-02-21 14:19:45 +07002656}
2657
Will Deacon56f8af52019-07-02 16:44:06 +01002658static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
2659 struct iommu_iotlb_gather *gather)
Suravee Suthikulpaniteb5ecd12018-02-21 14:19:45 +07002660{
Will Deacon56f8af52019-07-02 16:44:06 +01002661 amd_iommu_flush_iotlb_all(domain);
Suravee Suthikulpaniteb5ecd12018-02-21 14:19:45 +07002662}
2663
Joerg Roedelb0119e82017-02-01 13:23:08 +01002664const struct iommu_ops amd_iommu_ops = {
Joerg Roedelab636482014-09-05 10:48:21 +02002665 .capable = amd_iommu_capable,
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01002666 .domain_alloc = amd_iommu_domain_alloc,
2667 .domain_free = amd_iommu_domain_free,
Joerg Roedel26961ef2008-12-03 17:00:17 +01002668 .attach_dev = amd_iommu_attach_device,
2669 .detach_dev = amd_iommu_detach_device,
Joerg Roedel468e2362010-01-21 16:37:36 +01002670 .map = amd_iommu_map,
2671 .unmap = amd_iommu_unmap,
Joerg Roedel26961ef2008-12-03 17:00:17 +01002672 .iova_to_phys = amd_iommu_iova_to_phys,
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02002673 .add_device = amd_iommu_add_device,
2674 .remove_device = amd_iommu_remove_device,
Wan Zongshunb097d112016-04-01 09:06:04 -04002675 .device_group = amd_iommu_device_group,
Tom Murphybe62dbf2019-09-08 09:56:41 -07002676 .domain_get_attr = amd_iommu_domain_get_attr,
Eric Augere5b52342017-01-19 20:57:47 +00002677 .get_resv_regions = amd_iommu_get_resv_regions,
Thierry Reding55c25642019-12-18 14:42:03 +01002678 .put_resv_regions = generic_iommu_put_resv_regions,
Baoquan Hedf3f7a62017-08-09 16:33:41 +08002679 .is_attach_deferred = amd_iommu_is_attach_deferred,
Ohad Ben-Cohenaa3de9c2011-11-10 11:32:29 +02002680 .pgsize_bitmap = AMD_IOMMU_PGSIZES,
Suravee Suthikulpaniteb5ecd12018-02-21 14:19:45 +07002681 .flush_iotlb_all = amd_iommu_flush_iotlb_all,
Will Deacon56f8af52019-07-02 16:44:06 +01002682 .iotlb_sync = amd_iommu_iotlb_sync,
Joerg Roedel26961ef2008-12-03 17:00:17 +01002683};
2684
Joerg Roedel0feae532009-08-26 15:26:30 +02002685/*****************************************************************************
2686 *
2687 * The next functions implement the exported IOMMUv2 (PASID) API used by
2688 * the AMD IOMMUv2 driver: PPR notifier registration, direct-mapped
2689 * domains, GCR3 table management and PASID-based TLB flushing.
2690 *
2691 *****************************************************************************/
2694
Joerg Roedel72e1dcc2011-11-10 19:13:51 +01002695/* IOMMUv2 specific functions */
2696int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
2697{
2698 return atomic_notifier_chain_register(&ppr_notifier, nb);
2699}
2700EXPORT_SYMBOL(amd_iommu_register_ppr_notifier);
2701
2702int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb)
2703{
2704 return atomic_notifier_chain_unregister(&ppr_notifier, nb);
2705}
2706EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
Joerg Roedel132bd682011-11-17 14:18:46 +01002707
2708void amd_iommu_domain_direct_map(struct iommu_domain *dom)
2709{
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01002710 struct protection_domain *domain = to_pdomain(dom);
Joerg Roedel132bd682011-11-17 14:18:46 +01002711 unsigned long flags;
2712
2713 spin_lock_irqsave(&domain->lock, flags);
2714
2715 /* Update data structure */
2716 domain->mode = PAGE_MODE_NONE;
Joerg Roedel132bd682011-11-17 14:18:46 +01002717
2718 /* Make changes visible to IOMMUs */
2719 update_domain(domain);
2720
2721 /* Page-table is not visible to IOMMU anymore, so free it */
2722 free_pagetable(domain);
2723
2724 spin_unlock_irqrestore(&domain->lock, flags);
2725}
2726EXPORT_SYMBOL(amd_iommu_domain_direct_map);
Joerg Roedel52815b72011-11-17 17:24:28 +01002727
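/*
 * Enable IOMMUv2 (PASID) support for a domain: allocate a GCR3 table
 * with enough levels for @pasids and flag the domain accordingly. The
 * domain must not have any devices attached when it is converted.
 */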
2728int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
2729{
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01002730 struct protection_domain *domain = to_pdomain(dom);
Joerg Roedel52815b72011-11-17 17:24:28 +01002731 unsigned long flags;
2732 int levels, ret;
2733
2734 if (pasids <= 0 || pasids > (PASID_MASK + 1))
2735 return -EINVAL;
2736
2737 /* Number of GCR3 table levels required */
2738 for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
2739 levels += 1;
2740
2741 if (levels > amd_iommu_max_glx_val)
2742 return -EINVAL;
2743
2744 spin_lock_irqsave(&domain->lock, flags);
2745
2746	/*
2747	 * Spare ourselves sanity checks on whether devices already in the
2748	 * domain support IOMMUv2: simply require that the domain has no
2749	 * devices attached when it is switched into IOMMUv2 mode.
2750	 */
2751 ret = -EBUSY;
2752 if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK)
2753 goto out;
2754
2755 ret = -ENOMEM;
2756 domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
2757 if (domain->gcr3_tbl == NULL)
2758 goto out;
2759
2760 domain->glx = levels;
2761 domain->flags |= PD_IOMMUV2_MASK;
Joerg Roedel52815b72011-11-17 17:24:28 +01002762
2763 update_domain(domain);
2764
2765 ret = 0;
2766
2767out:
2768 spin_unlock_irqrestore(&domain->lock, flags);
2769
2770 return ret;
2771}
2772EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
Joerg Roedel22e266c2011-11-21 15:59:08 +01002773
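/*
 * Flush the IOMMU TLBs and the ATS TLBs of all attached devices for a
 * given PASID, either for a single address or for all pages.
 */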
2774static int __flush_pasid(struct protection_domain *domain, int pasid,
2775 u64 address, bool size)
2776{
2777 struct iommu_dev_data *dev_data;
2778 struct iommu_cmd cmd;
2779 int i, ret;
2780
2781 if (!(domain->flags & PD_IOMMUV2_MASK))
2782 return -EINVAL;
2783
2784 build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size);
2785
2786 /*
2787 * IOMMU TLB needs to be flushed before Device TLB to
2788 * prevent device TLB refill from IOMMU TLB
2789 */
Suravee Suthikulpanit6b9376e2017-02-24 02:48:17 -06002790 for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
Joerg Roedel22e266c2011-11-21 15:59:08 +01002791 if (domain->dev_iommu[i] == 0)
2792 continue;
2793
2794 ret = iommu_queue_command(amd_iommus[i], &cmd);
2795 if (ret != 0)
2796 goto out;
2797 }
2798
2799 /* Wait until IOMMU TLB flushes are complete */
2800 domain_flush_complete(domain);
2801
2802 /* Now flush device TLBs */
2803 list_for_each_entry(dev_data, &domain->dev_list, list) {
2804 struct amd_iommu *iommu;
2805 int qdep;
2806
Joerg Roedel1c1cc452015-07-30 11:24:45 +02002807		/*
2808		 * There might be non-IOMMUv2 capable devices in an IOMMUv2
2809		 * domain.
2810		 */
2811 if (!dev_data->ats.enabled)
2812 continue;
Joerg Roedel22e266c2011-11-21 15:59:08 +01002813
2814 qdep = dev_data->ats.qdep;
2815 iommu = amd_iommu_rlookup_table[dev_data->devid];
2816
2817 build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
2818 qdep, address, size);
2819
2820 ret = iommu_queue_command(iommu, &cmd);
2821 if (ret != 0)
2822 goto out;
2823 }
2824
2825 /* Wait until all device TLBs are flushed */
2826 domain_flush_complete(domain);
2827
2828 ret = 0;
2829
2830out:
2831
2832 return ret;
2833}
2834
2835static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid,
2836 u64 address)
2837{
2838 return __flush_pasid(domain, pasid, address, false);
2839}
2840
2841int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
2842 u64 address)
2843{
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01002844 struct protection_domain *domain = to_pdomain(dom);
Joerg Roedel22e266c2011-11-21 15:59:08 +01002845 unsigned long flags;
2846 int ret;
2847
2848 spin_lock_irqsave(&domain->lock, flags);
2849 ret = __amd_iommu_flush_page(domain, pasid, address);
2850 spin_unlock_irqrestore(&domain->lock, flags);
2851
2852 return ret;
2853}
2854EXPORT_SYMBOL(amd_iommu_flush_page);
2855
2856static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid)
2857{
2858 return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
2859 true);
2860}
2861
2862int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid)
2863{
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01002864 struct protection_domain *domain = to_pdomain(dom);
Joerg Roedel22e266c2011-11-21 15:59:08 +01002865 unsigned long flags;
2866 int ret;
2867
2868 spin_lock_irqsave(&domain->lock, flags);
2869 ret = __amd_iommu_flush_tlb(domain, pasid);
2870 spin_unlock_irqrestore(&domain->lock, flags);
2871
2872 return ret;
2873}
2874EXPORT_SYMBOL(amd_iommu_flush_tlb);
2875
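/*
 * Walk the GCR3 table and return a pointer to the leaf entry for
 * @pasid, allocating missing table levels from zeroed pages when
 * @alloc is true.
 */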
Joerg Roedelb16137b2011-11-21 16:50:23 +01002876static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc)
2877{
2878 int index;
2879 u64 *pte;
2880
2881 while (true) {
2882
2883 index = (pasid >> (9 * level)) & 0x1ff;
2884 pte = &root[index];
2885
2886 if (level == 0)
2887 break;
2888
2889 if (!(*pte & GCR3_VALID)) {
2890 if (!alloc)
2891 return NULL;
2892
2893 root = (void *)get_zeroed_page(GFP_ATOMIC);
2894 if (root == NULL)
2895 return NULL;
2896
Tom Lendacky2543a782017-07-17 16:10:24 -05002897 *pte = iommu_virt_to_phys(root) | GCR3_VALID;
Joerg Roedelb16137b2011-11-21 16:50:23 +01002898 }
2899
Tom Lendacky2543a782017-07-17 16:10:24 -05002900 root = iommu_phys_to_virt(*pte & PAGE_MASK);
Joerg Roedelb16137b2011-11-21 16:50:23 +01002901
2902 level -= 1;
2903 }
2904
2905 return pte;
2906}
2907
2908static int __set_gcr3(struct protection_domain *domain, int pasid,
2909 unsigned long cr3)
2910{
2911 u64 *pte;
2912
2913 if (domain->mode != PAGE_MODE_NONE)
2914 return -EINVAL;
2915
2916 pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
2917 if (pte == NULL)
2918 return -ENOMEM;
2919
2920 *pte = (cr3 & PAGE_MASK) | GCR3_VALID;
2921
2922 return __amd_iommu_flush_tlb(domain, pasid);
2923}
2924
2925static int __clear_gcr3(struct protection_domain *domain, int pasid)
2926{
2927 u64 *pte;
2928
2929 if (domain->mode != PAGE_MODE_NONE)
2930 return -EINVAL;
2931
2932 pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
2933 if (pte == NULL)
2934 return 0;
2935
2936 *pte = 0;
2937
2938 return __amd_iommu_flush_tlb(domain, pasid);
2939}
2940
2941int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
2942 unsigned long cr3)
2943{
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01002944 struct protection_domain *domain = to_pdomain(dom);
Joerg Roedelb16137b2011-11-21 16:50:23 +01002945 unsigned long flags;
2946 int ret;
2947
2948 spin_lock_irqsave(&domain->lock, flags);
2949 ret = __set_gcr3(domain, pasid, cr3);
2950 spin_unlock_irqrestore(&domain->lock, flags);
2951
2952 return ret;
2953}
2954EXPORT_SYMBOL(amd_iommu_domain_set_gcr3);
2955
2956int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid)
2957{
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01002958 struct protection_domain *domain = to_pdomain(dom);
Joerg Roedelb16137b2011-11-21 16:50:23 +01002959 unsigned long flags;
2960 int ret;
2961
2962 spin_lock_irqsave(&domain->lock, flags);
2963 ret = __clear_gcr3(domain, pasid);
2964 spin_unlock_irqrestore(&domain->lock, flags);
2965
2966 return ret;
2967}
2968EXPORT_SYMBOL(amd_iommu_domain_clear_gcr3);
Joerg Roedelc99afa22011-11-21 18:19:25 +01002969
2970int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
2971 int status, int tag)
2972{
2973 struct iommu_dev_data *dev_data;
2974 struct amd_iommu *iommu;
2975 struct iommu_cmd cmd;
2976
2977 dev_data = get_dev_data(&pdev->dev);
2978 iommu = amd_iommu_rlookup_table[dev_data->devid];
2979
2980 build_complete_ppr(&cmd, dev_data->devid, pasid, status,
2981 tag, dev_data->pri_tlp);
2982
2983 return iommu_queue_command(iommu, &cmd);
2984}
2985EXPORT_SYMBOL(amd_iommu_complete_ppr);
Joerg Roedelf3572db2011-11-23 12:36:25 +01002986
2987struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev)
2988{
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01002989 struct protection_domain *pdomain;
Tom Murphybe62dbf2019-09-08 09:56:41 -07002990 struct iommu_domain *io_domain;
2991 struct device *dev = &pdev->dev;
Joerg Roedelf3572db2011-11-23 12:36:25 +01002992
Tom Murphybe62dbf2019-09-08 09:56:41 -07002993 if (!check_device(dev))
2994 return NULL;
2995
2996 pdomain = get_dev_data(dev)->domain;
2997 if (pdomain == NULL && get_dev_data(dev)->defer_attach) {
2998 get_dev_data(dev)->defer_attach = false;
2999 io_domain = iommu_get_domain_for_dev(dev);
3000 pdomain = to_pdomain(io_domain);
3001 attach_device(dev, pdomain);
3002 }
3003 if (pdomain == NULL)
3004 return NULL;
3005
3006 if (!dma_ops_domain(pdomain))
Joerg Roedelf3572db2011-11-23 12:36:25 +01003007 return NULL;
3008
3009 /* Only return IOMMUv2 domains */
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01003010 if (!(pdomain->flags & PD_IOMMUV2_MASK))
Joerg Roedelf3572db2011-11-23 12:36:25 +01003011 return NULL;
3012
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01003013 return &pdomain->domain;
Joerg Roedelf3572db2011-11-23 12:36:25 +01003014}
3015EXPORT_SYMBOL(amd_iommu_get_v2_domain);
Joerg Roedel6a113dd2011-12-01 12:04:58 +01003016
3017void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum)
3018{
3019 struct iommu_dev_data *dev_data;
3020
3021 if (!amd_iommu_v2_supported())
3022 return;
3023
3024 dev_data = get_dev_data(&pdev->dev);
3025 dev_data->errata |= (1 << erratum);
3026}
3027EXPORT_SYMBOL(amd_iommu_enable_device_erratum);
Joerg Roedel52efdb82011-12-07 12:01:36 +01003028
3029int amd_iommu_device_info(struct pci_dev *pdev,
3030 struct amd_iommu_device_info *info)
3031{
3032 int max_pasids;
3033 int pos;
3034
3035 if (pdev == NULL || info == NULL)
3036 return -EINVAL;
3037
3038 if (!amd_iommu_v2_supported())
3039 return -EINVAL;
3040
3041 memset(info, 0, sizeof(*info));
3042
Gil Kupfercef74402018-05-10 17:56:02 -05003043 if (!pci_ats_disabled()) {
3044 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS);
3045 if (pos)
3046 info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
3047 }
Joerg Roedel52efdb82011-12-07 12:01:36 +01003048
3049 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
3050 if (pos)
3051 info->flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;
3052
3053 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
3054 if (pos) {
3055 int features;
3056
3057 max_pasids = 1 << (9 * (amd_iommu_max_glx_val + 1));
3058 max_pasids = min(max_pasids, (1 << 20));
3059
3060 info->flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
3061 info->max_pasids = min(pci_max_pasids(pdev), max_pasids);
3062
3063 features = pci_pasid_features(pdev);
3064 if (features & PCI_PASID_CAP_EXEC)
3065 info->flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;
3066 if (features & PCI_PASID_CAP_PRIV)
3067 info->flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
3068 }
3069
3070 return 0;
3071}
3072EXPORT_SYMBOL(amd_iommu_device_info);
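/*
 * Illustrative sketch of a hypothetical caller (not taken from an
 * in-tree driver): a PCI driver probing for PASID support could do
 *
 *	struct amd_iommu_device_info info;
 *
 *	if (amd_iommu_device_info(pdev, &info) == 0 &&
 *	    (info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP))
 *		nr_pasids = info.max_pasids;
 *
 * where nr_pasids is a hypothetical local variable in the caller.
 */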
Joerg Roedel2b324502012-06-21 16:29:10 +02003073
3074#ifdef CONFIG_IRQ_REMAP
3075
3076/*****************************************************************************
3077 *
3078 * Interrupt Remapping Implementation
3079 *
3080 *****************************************************************************/
3081
Jiang Liu7c71d302015-04-13 14:11:33 +08003082static struct irq_chip amd_ir_chip;
Arnd Bergmann94c793a2018-04-04 12:56:59 +02003083static DEFINE_SPINLOCK(iommu_table_lock);
Jiang Liu7c71d302015-04-13 14:11:33 +08003084
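/*
 * Point the device table entry of @devid at @table and enable
 * interrupt remapping for that device.
 */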
Joerg Roedel2b324502012-06-21 16:29:10 +02003085static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
3086{
3087 u64 dte;
3088
3089 dte = amd_iommu_dev_table[devid].data[2];
3090 dte &= ~DTE_IRQ_PHYS_ADDR_MASK;
Tom Lendacky2543a782017-07-17 16:10:24 -05003091 dte |= iommu_virt_to_phys(table->table);
Joerg Roedel2b324502012-06-21 16:29:10 +02003092 dte |= DTE_IRQ_REMAP_INTCTL;
3093 dte |= DTE_IRQ_TABLE_LEN;
3094 dte |= DTE_IRQ_REMAP_ENABLE;
3095
3096 amd_iommu_dev_table[devid].data[2] = dte;
3097}
3098
Scott Wooddf42a042018-02-14 17:36:28 -06003099static struct irq_remap_table *get_irq_table(u16 devid)
3100{
3101 struct irq_remap_table *table;
3102
3103 if (WARN_ONCE(!amd_iommu_rlookup_table[devid],
3104 "%s: no iommu for devid %x\n", __func__, devid))
3105 return NULL;
3106
3107 table = irq_lookup_table[devid];
3108 if (WARN_ONCE(!table, "%s: no table for devid %x\n", __func__, devid))
3109 return NULL;
3110
3111 return table;
3112}
3113
Sebastian Andrzej Siewior993ca6e2018-03-22 16:22:40 +01003114static struct irq_remap_table *__alloc_irq_table(void)
3115{
3116 struct irq_remap_table *table;
3117
3118 table = kzalloc(sizeof(*table), GFP_KERNEL);
3119 if (!table)
3120 return NULL;
3121
3122 table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_KERNEL);
3123 if (!table->table) {
3124 kfree(table);
3125 return NULL;
3126 }
3127 raw_spin_lock_init(&table->lock);
3128
3129 if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
3130 memset(table->table, 0,
3131 MAX_IRQS_PER_TABLE * sizeof(u32));
3132 else
3133 memset(table->table, 0,
3134 (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
3135 return table;
3136}
3137
Sebastian Andrzej Siewior2fcc1e82018-03-22 16:22:39 +01003138static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
3139 struct irq_remap_table *table)
3140{
3141 irq_lookup_table[devid] = table;
3142 set_dte_irq_entry(devid, table);
3143 iommu_flush_dte(iommu, devid);
3144}
3145
Logan Gunthorpe3c124432019-10-22 16:01:21 -06003146static int set_remap_table_entry_alias(struct pci_dev *pdev, u16 alias,
3147 void *data)
3148{
3149 struct irq_remap_table *table = data;
3150
3151 irq_lookup_table[alias] = table;
3152 set_dte_irq_entry(alias, table);
3153
3154 iommu_flush_dte(amd_iommu_rlookup_table[alias], alias);
3155
3156 return 0;
3157}
3158
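/*
 * Return the interrupt remapping table for @devid, allocating one if
 * neither the device nor its alias has a table yet. A newly allocated
 * table is installed in the DTE of the device, its alias and (when
 * @pdev is given) all of its DMA aliases.
 */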
3159static struct irq_remap_table *alloc_irq_table(u16 devid, struct pci_dev *pdev)
Joerg Roedel2b324502012-06-21 16:29:10 +02003160{
3161 struct irq_remap_table *table = NULL;
Sebastian Andrzej Siewior993ca6e2018-03-22 16:22:40 +01003162 struct irq_remap_table *new_table = NULL;
Joerg Roedel2b324502012-06-21 16:29:10 +02003163 struct amd_iommu *iommu;
3164 unsigned long flags;
3165 u16 alias;
3166
Sebastian Andrzej Siewiorea6166f2018-03-22 16:22:36 +01003167 spin_lock_irqsave(&iommu_table_lock, flags);
Joerg Roedel2b324502012-06-21 16:29:10 +02003168
3169 iommu = amd_iommu_rlookup_table[devid];
3170 if (!iommu)
3171 goto out_unlock;
3172
3173 table = irq_lookup_table[devid];
3174 if (table)
Baoquan He09284b92016-09-20 09:05:34 +08003175 goto out_unlock;
Joerg Roedel2b324502012-06-21 16:29:10 +02003176
3177 alias = amd_iommu_alias_table[devid];
3178 table = irq_lookup_table[alias];
3179 if (table) {
Sebastian Andrzej Siewior2fcc1e82018-03-22 16:22:39 +01003180 set_remap_table_entry(iommu, devid, table);
Sebastian Andrzej Siewior993ca6e2018-03-22 16:22:40 +01003181 goto out_wait;
Joerg Roedel2b324502012-06-21 16:29:10 +02003182 }
Sebastian Andrzej Siewior993ca6e2018-03-22 16:22:40 +01003183 spin_unlock_irqrestore(&iommu_table_lock, flags);
Joerg Roedel2b324502012-06-21 16:29:10 +02003184
3185 /* Nothing there yet, allocate new irq remapping table */
Sebastian Andrzej Siewior993ca6e2018-03-22 16:22:40 +01003186 new_table = __alloc_irq_table();
3187 if (!new_table)
3188 return NULL;
3189
3190 spin_lock_irqsave(&iommu_table_lock, flags);
3191
3192 table = irq_lookup_table[devid];
3193 if (table)
Baoquan He09284b92016-09-20 09:05:34 +08003194 goto out_unlock;
Joerg Roedel2b324502012-06-21 16:29:10 +02003195
Sebastian Andrzej Siewior993ca6e2018-03-22 16:22:40 +01003196 table = irq_lookup_table[alias];
3197 if (table) {
3198 set_remap_table_entry(iommu, devid, table);
3199 goto out_wait;
Joerg Roedel2b324502012-06-21 16:29:10 +02003200 }
3201
Sebastian Andrzej Siewior993ca6e2018-03-22 16:22:40 +01003202 table = new_table;
3203 new_table = NULL;
Joerg Roedel2b324502012-06-21 16:29:10 +02003204
Logan Gunthorpe3c124432019-10-22 16:01:21 -06003205 if (pdev)
3206 pci_for_each_dma_alias(pdev, set_remap_table_entry_alias,
3207 table);
3208 else
3209 set_remap_table_entry(iommu, devid, table);
3210
Sebastian Andrzej Siewior2fcc1e82018-03-22 16:22:39 +01003211 if (devid != alias)
3212 set_remap_table_entry(iommu, alias, table);
Joerg Roedel2b324502012-06-21 16:29:10 +02003213
Sebastian Andrzej Siewior993ca6e2018-03-22 16:22:40 +01003214out_wait:
Joerg Roedel2b324502012-06-21 16:29:10 +02003215 iommu_completion_wait(iommu);
3216
3217out_unlock:
Sebastian Andrzej Siewiorea6166f2018-03-22 16:22:36 +01003218 spin_unlock_irqrestore(&iommu_table_lock, flags);
Joerg Roedel2b324502012-06-21 16:29:10 +02003219
Sebastian Andrzej Siewior993ca6e2018-03-22 16:22:40 +01003220 if (new_table) {
3221 kmem_cache_free(amd_iommu_irq_cache, new_table->table);
3222 kfree(new_table);
3223 }
Joerg Roedel2b324502012-06-21 16:29:10 +02003224 return table;
3225}
3226
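/*
 * Find @count consecutive free entries in the device's remapping table
 * (allocating the table first if needed), optionally aligned to a
 * power of two, mark them allocated and return the first index, or
 * -ENOSPC when no large enough run exists.
 */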
Logan Gunthorpe3c124432019-10-22 16:01:21 -06003227static int alloc_irq_index(u16 devid, int count, bool align,
3228 struct pci_dev *pdev)
Joerg Roedel2b324502012-06-21 16:29:10 +02003229{
3230 struct irq_remap_table *table;
Joerg Roedel37946d92017-10-06 12:16:39 +02003231 int index, c, alignment = 1;
Joerg Roedel2b324502012-06-21 16:29:10 +02003232 unsigned long flags;
Suravee Suthikulpanit77bdab42016-08-23 13:52:35 -05003233 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
3234
3235 if (!iommu)
3236 return -ENODEV;
Joerg Roedel2b324502012-06-21 16:29:10 +02003237
Logan Gunthorpe3c124432019-10-22 16:01:21 -06003238 table = alloc_irq_table(devid, pdev);
Joerg Roedel2b324502012-06-21 16:29:10 +02003239 if (!table)
3240 return -ENODEV;
3241
Joerg Roedel37946d92017-10-06 12:16:39 +02003242 if (align)
3243 alignment = roundup_pow_of_two(count);
3244
Scott Wood27790392018-01-21 03:28:54 -06003245 raw_spin_lock_irqsave(&table->lock, flags);
Joerg Roedel2b324502012-06-21 16:29:10 +02003246
3247 /* Scan table for free entries */
Joerg Roedel37946d92017-10-06 12:16:39 +02003248 for (index = ALIGN(table->min_index, alignment), c = 0;
Alex Williamson07d1c912017-11-03 10:50:31 -06003249 index < MAX_IRQS_PER_TABLE;) {
Joerg Roedel37946d92017-10-06 12:16:39 +02003250 if (!iommu->irte_ops->is_allocated(table, index)) {
Joerg Roedel2b324502012-06-21 16:29:10 +02003251 c += 1;
Joerg Roedel37946d92017-10-06 12:16:39 +02003252 } else {
3253 c = 0;
Alex Williamson07d1c912017-11-03 10:50:31 -06003254 index = ALIGN(index + 1, alignment);
Joerg Roedel37946d92017-10-06 12:16:39 +02003255 continue;
3256 }
Joerg Roedel2b324502012-06-21 16:29:10 +02003257
3258 if (c == count) {
Joerg Roedel2b324502012-06-21 16:29:10 +02003259 for (; c != 0; --c)
Suravee Suthikulpanit77bdab42016-08-23 13:52:35 -05003260 iommu->irte_ops->set_allocated(table, index - c + 1);
Joerg Roedel2b324502012-06-21 16:29:10 +02003261
3262 index -= count - 1;
Joerg Roedel2b324502012-06-21 16:29:10 +02003263 goto out;
3264 }
Alex Williamson07d1c912017-11-03 10:50:31 -06003265
3266 index++;
Joerg Roedel2b324502012-06-21 16:29:10 +02003267 }
3268
3269 index = -ENOSPC;
3270
3271out:
Scott Wood27790392018-01-21 03:28:54 -06003272 raw_spin_unlock_irqrestore(&table->lock, flags);
Joerg Roedel2b324502012-06-21 16:29:10 +02003273
3274 return index;
3275}
3276
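/*
 * Update the 128-bit IRTE at @index for @devid: the entry is marked
 * invalid while the new contents are written, then re-validated and
 * the interrupt remapping table cache is flushed. If @data is given,
 * a pointer to the live entry is saved in it for later in-place
 * updates (see amd_iommu_update_ga()).
 */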
Suravee Suthikulpanitb9fc6b52016-08-23 13:52:39 -05003277static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
3278 struct amd_ir_data *data)
Suravee Suthikulpanit880ac602016-08-23 13:52:34 -05003279{
3280 struct irq_remap_table *table;
3281 struct amd_iommu *iommu;
3282 unsigned long flags;
3283 struct irte_ga *entry;
3284
3285 iommu = amd_iommu_rlookup_table[devid];
3286 if (iommu == NULL)
3287 return -EINVAL;
3288
Scott Wooddf42a042018-02-14 17:36:28 -06003289 table = get_irq_table(devid);
Suravee Suthikulpanit880ac602016-08-23 13:52:34 -05003290 if (!table)
3291 return -ENOMEM;
3292
Scott Wood27790392018-01-21 03:28:54 -06003293 raw_spin_lock_irqsave(&table->lock, flags);
Suravee Suthikulpanit880ac602016-08-23 13:52:34 -05003294
3295 entry = (struct irte_ga *)table->table;
3296 entry = &entry[index];
3297 entry->lo.fields_remap.valid = 0;
3298 entry->hi.val = irte->hi.val;
3299 entry->lo.val = irte->lo.val;
3300 entry->lo.fields_remap.valid = 1;
Suravee Suthikulpanitb9fc6b52016-08-23 13:52:39 -05003301 if (data)
3302 data->ref = entry;
Suravee Suthikulpanit880ac602016-08-23 13:52:34 -05003303
Scott Wood27790392018-01-21 03:28:54 -06003304 raw_spin_unlock_irqrestore(&table->lock, flags);
Suravee Suthikulpanit880ac602016-08-23 13:52:34 -05003305
3306 iommu_flush_irt(iommu, devid);
3307 iommu_completion_wait(iommu);
3308
3309 return 0;
3310}
3311
3312static int modify_irte(u16 devid, int index, union irte *irte)
Joerg Roedel2b324502012-06-21 16:29:10 +02003313{
3314 struct irq_remap_table *table;
3315 struct amd_iommu *iommu;
3316 unsigned long flags;
3317
3318 iommu = amd_iommu_rlookup_table[devid];
3319 if (iommu == NULL)
3320 return -EINVAL;
3321
Scott Wooddf42a042018-02-14 17:36:28 -06003322 table = get_irq_table(devid);
Joerg Roedel2b324502012-06-21 16:29:10 +02003323 if (!table)
3324 return -ENOMEM;
3325
Scott Wood27790392018-01-21 03:28:54 -06003326 raw_spin_lock_irqsave(&table->lock, flags);
Suravee Suthikulpanit880ac602016-08-23 13:52:34 -05003327 table->table[index] = irte->val;
Scott Wood27790392018-01-21 03:28:54 -06003328 raw_spin_unlock_irqrestore(&table->lock, flags);
Joerg Roedel2b324502012-06-21 16:29:10 +02003329
3330 iommu_flush_irt(iommu, devid);
3331 iommu_completion_wait(iommu);
3332
3333 return 0;
3334}
3335
3336static void free_irte(u16 devid, int index)
3337{
3338 struct irq_remap_table *table;
3339 struct amd_iommu *iommu;
3340 unsigned long flags;
3341
3342 iommu = amd_iommu_rlookup_table[devid];
3343 if (iommu == NULL)
3344 return;
3345
Scott Wooddf42a042018-02-14 17:36:28 -06003346 table = get_irq_table(devid);
Joerg Roedel2b324502012-06-21 16:29:10 +02003347 if (!table)
3348 return;
3349
Scott Wood27790392018-01-21 03:28:54 -06003350 raw_spin_lock_irqsave(&table->lock, flags);
Suravee Suthikulpanit77bdab42016-08-23 13:52:35 -05003351 iommu->irte_ops->clear_allocated(table, index);
Scott Wood27790392018-01-21 03:28:54 -06003352 raw_spin_unlock_irqrestore(&table->lock, flags);
Joerg Roedel2b324502012-06-21 16:29:10 +02003353
3354 iommu_flush_irt(iommu, devid);
3355 iommu_completion_wait(iommu);
3356}
3357
Suravee Suthikulpanit880ac602016-08-23 13:52:34 -05003358static void irte_prepare(void *entry,
3359 u32 delivery_mode, u32 dest_mode,
Suravee Suthikulpanitd98de492016-08-23 13:52:40 -05003360 u8 vector, u32 dest_apicid, int devid)
Suravee Suthikulpanit880ac602016-08-23 13:52:34 -05003361{
3362 union irte *irte = (union irte *) entry;
3363
3364 irte->val = 0;
3365 irte->fields.vector = vector;
3366 irte->fields.int_type = delivery_mode;
3367 irte->fields.destination = dest_apicid;
3368 irte->fields.dm = dest_mode;
3369 irte->fields.valid = 1;
3370}
3371
3372static void irte_ga_prepare(void *entry,
3373 u32 delivery_mode, u32 dest_mode,
Suravee Suthikulpanitd98de492016-08-23 13:52:40 -05003374 u8 vector, u32 dest_apicid, int devid)
Suravee Suthikulpanit880ac602016-08-23 13:52:34 -05003375{
3376 struct irte_ga *irte = (struct irte_ga *) entry;
3377
3378 irte->lo.val = 0;
3379 irte->hi.val = 0;
Suravee Suthikulpanit880ac602016-08-23 13:52:34 -05003380 irte->lo.fields_remap.int_type = delivery_mode;
3381 irte->lo.fields_remap.dm = dest_mode;
3382 irte->hi.fields.vector = vector;
Suravee Suthikulpanit90fcffd2018-06-27 10:31:22 -05003383 irte->lo.fields_remap.destination = APICID_TO_IRTE_DEST_LO(dest_apicid);
3384 irte->hi.fields.destination = APICID_TO_IRTE_DEST_HI(dest_apicid);
Suravee Suthikulpanit880ac602016-08-23 13:52:34 -05003385 irte->lo.fields_remap.valid = 1;
3386}
3387
3388static void irte_activate(void *entry, u16 devid, u16 index)
3389{
3390 union irte *irte = (union irte *) entry;
3391
3392 irte->fields.valid = 1;
3393 modify_irte(devid, index, irte);
3394}
3395
3396static void irte_ga_activate(void *entry, u16 devid, u16 index)
3397{
3398 struct irte_ga *irte = (struct irte_ga *) entry;
3399
3400 irte->lo.fields_remap.valid = 1;
Suravee Suthikulpanitb9fc6b52016-08-23 13:52:39 -05003401 modify_irte_ga(devid, index, irte, NULL);
Suravee Suthikulpanit880ac602016-08-23 13:52:34 -05003402}
3403
3404static void irte_deactivate(void *entry, u16 devid, u16 index)
3405{
3406 union irte *irte = (union irte *) entry;
3407
3408 irte->fields.valid = 0;
3409 modify_irte(devid, index, irte);
3410}
3411
3412static void irte_ga_deactivate(void *entry, u16 devid, u16 index)
3413{
3414 struct irte_ga *irte = (struct irte_ga *) entry;
3415
3416 irte->lo.fields_remap.valid = 0;
Suravee Suthikulpanitb9fc6b52016-08-23 13:52:39 -05003417 modify_irte_ga(devid, index, irte, NULL);
Suravee Suthikulpanit880ac602016-08-23 13:52:34 -05003418}
3419
3420static void irte_set_affinity(void *entry, u16 devid, u16 index,
3421 u8 vector, u32 dest_apicid)
3422{
3423 union irte *irte = (union irte *) entry;
3424
3425 irte->fields.vector = vector;
3426 irte->fields.destination = dest_apicid;
3427 modify_irte(devid, index, irte);
3428}
3429
3430static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
3431 u8 vector, u32 dest_apicid)
3432{
3433 struct irte_ga *irte = (struct irte_ga *) entry;
3434
Scott Wood01ee04b2018-01-28 14:22:19 -06003435 if (!irte->lo.fields_remap.guest_mode) {
Suravee Suthikulpanitd98de492016-08-23 13:52:40 -05003436 irte->hi.fields.vector = vector;
Suravee Suthikulpanit90fcffd2018-06-27 10:31:22 -05003437 irte->lo.fields_remap.destination =
3438 APICID_TO_IRTE_DEST_LO(dest_apicid);
3439 irte->hi.fields.destination =
3440 APICID_TO_IRTE_DEST_HI(dest_apicid);
Suravee Suthikulpanitd98de492016-08-23 13:52:40 -05003441 modify_irte_ga(devid, index, irte, NULL);
3442 }
Suravee Suthikulpanit880ac602016-08-23 13:52:34 -05003443}
3444
Suravee Suthikulpanit77bdab42016-08-23 13:52:35 -05003445#define IRTE_ALLOCATED (~1U)
Suravee Suthikulpanit880ac602016-08-23 13:52:34 -05003446static void irte_set_allocated(struct irq_remap_table *table, int index)
3447{
3448 table->table[index] = IRTE_ALLOCATED;
3449}
3450
3451static void irte_ga_set_allocated(struct irq_remap_table *table, int index)
3452{
3453 struct irte_ga *ptr = (struct irte_ga *)table->table;
3454 struct irte_ga *irte = &ptr[index];
3455
3456 memset(&irte->lo.val, 0, sizeof(u64));
3457 memset(&irte->hi.val, 0, sizeof(u64));
3458 irte->hi.fields.vector = 0xff;
3459}
3460
3461static bool irte_is_allocated(struct irq_remap_table *table, int index)
3462{
3463 union irte *ptr = (union irte *)table->table;
3464 union irte *irte = &ptr[index];
3465
3466 return irte->val != 0;
3467}
3468
3469static bool irte_ga_is_allocated(struct irq_remap_table *table, int index)
3470{
3471 struct irte_ga *ptr = (struct irte_ga *)table->table;
3472 struct irte_ga *irte = &ptr[index];
3473
3474 return irte->hi.fields.vector != 0;
3475}
3476
3477static void irte_clear_allocated(struct irq_remap_table *table, int index)
3478{
3479 table->table[index] = 0;
3480}
3481
3482static void irte_ga_clear_allocated(struct irq_remap_table *table, int index)
3483{
3484 struct irte_ga *ptr = (struct irte_ga *)table->table;
3485 struct irte_ga *irte = &ptr[index];
3486
3487 memset(&irte->lo.val, 0, sizeof(u64));
3488 memset(&irte->hi.val, 0, sizeof(u64));
3489}
3490
Jiang Liu7c71d302015-04-13 14:11:33 +08003491static int get_devid(struct irq_alloc_info *info)
Joerg Roedel5527de72012-06-26 11:17:32 +02003492{
Jiang Liu7c71d302015-04-13 14:11:33 +08003493 int devid = -1;
Joerg Roedel5527de72012-06-26 11:17:32 +02003494
Jiang Liu7c71d302015-04-13 14:11:33 +08003495 switch (info->type) {
3496 case X86_IRQ_ALLOC_TYPE_IOAPIC:
3497 devid = get_ioapic_devid(info->ioapic_id);
3498 break;
3499 case X86_IRQ_ALLOC_TYPE_HPET:
3500 devid = get_hpet_devid(info->hpet_id);
3501 break;
3502 case X86_IRQ_ALLOC_TYPE_MSI:
3503 case X86_IRQ_ALLOC_TYPE_MSIX:
3504 devid = get_device_id(&info->msi_dev->dev);
3505 break;
3506 default:
3507 BUG_ON(1);
3508 break;
Joerg Roedel5527de72012-06-26 11:17:32 +02003509 }
3510
Jiang Liu7c71d302015-04-13 14:11:33 +08003511 return devid;
Joerg Roedel5527de72012-06-26 11:17:32 +02003512}
3513
Jiang Liu7c71d302015-04-13 14:11:33 +08003514static struct irq_domain *get_ir_irq_domain(struct irq_alloc_info *info)
Joerg Roedel5527de72012-06-26 11:17:32 +02003515{
Jiang Liu7c71d302015-04-13 14:11:33 +08003516 struct amd_iommu *iommu;
3517 int devid;
Joerg Roedel5527de72012-06-26 11:17:32 +02003518
Jiang Liu7c71d302015-04-13 14:11:33 +08003519 if (!info)
3520 return NULL;
Joerg Roedel5527de72012-06-26 11:17:32 +02003521
Jiang Liu7c71d302015-04-13 14:11:33 +08003522 devid = get_devid(info);
3523 if (devid >= 0) {
3524 iommu = amd_iommu_rlookup_table[devid];
3525 if (iommu)
3526 return iommu->ir_domain;
3527 }
Joerg Roedel5527de72012-06-26 11:17:32 +02003528
Jiang Liu7c71d302015-04-13 14:11:33 +08003529 return NULL;
Joerg Roedel5527de72012-06-26 11:17:32 +02003530}
3531
Jiang Liu7c71d302015-04-13 14:11:33 +08003532static struct irq_domain *get_irq_domain(struct irq_alloc_info *info)
Joerg Roedel0b4d48c2012-06-26 14:54:17 +02003533{
Jiang Liu7c71d302015-04-13 14:11:33 +08003534 struct amd_iommu *iommu;
3535 int devid;
Joerg Roedel0b4d48c2012-06-26 14:54:17 +02003536
Jiang Liu7c71d302015-04-13 14:11:33 +08003537 if (!info)
3538 return NULL;
Joerg Roedel0b4d48c2012-06-26 14:54:17 +02003539
Jiang Liu7c71d302015-04-13 14:11:33 +08003540 switch (info->type) {
3541 case X86_IRQ_ALLOC_TYPE_MSI:
3542 case X86_IRQ_ALLOC_TYPE_MSIX:
3543 devid = get_device_id(&info->msi_dev->dev);
Joerg Roedel9ee35e42016-04-21 18:21:31 +02003544 if (devid < 0)
Wan Zongshun7aba6cb2016-04-01 09:06:02 -04003545 return NULL;
3546
Dan Carpenter1fb260b2016-01-07 12:36:06 +03003547 iommu = amd_iommu_rlookup_table[devid];
3548 if (iommu)
3549 return iommu->msi_domain;
Jiang Liu7c71d302015-04-13 14:11:33 +08003550 break;
3551 default:
3552 break;
3553 }
Joerg Roedel0b4d48c2012-06-26 14:54:17 +02003554
Jiang Liu7c71d302015-04-13 14:11:33 +08003555 return NULL;
Joerg Roedeld9761952012-06-26 16:00:08 +02003556}
3557
Joerg Roedel6b474b82012-06-26 16:46:04 +02003558struct irq_remap_ops amd_iommu_irq_ops = {
Joerg Roedel6b474b82012-06-26 16:46:04 +02003559 .prepare = amd_iommu_prepare,
3560 .enable = amd_iommu_enable,
3561 .disable = amd_iommu_disable,
3562 .reenable = amd_iommu_reenable,
3563 .enable_faulting = amd_iommu_enable_faulting,
Jiang Liu7c71d302015-04-13 14:11:33 +08003564 .get_ir_irq_domain = get_ir_irq_domain,
3565 .get_irq_domain = get_irq_domain,
Joerg Roedel6b474b82012-06-26 16:46:04 +02003566};
Jiang Liu7c71d302015-04-13 14:11:33 +08003567
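/*
 * Program the IRTE for a freshly allocated index and encode that index
 * into the IOAPIC routing entry or MSI message which will reference it.
 */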
3568static void irq_remapping_prepare_irte(struct amd_ir_data *data,
3569 struct irq_cfg *irq_cfg,
3570 struct irq_alloc_info *info,
3571 int devid, int index, int sub_handle)
3572{
3573 struct irq_2_irte *irte_info = &data->irq_2_irte;
3574 struct msi_msg *msg = &data->msi_entry;
Jiang Liu7c71d302015-04-13 14:11:33 +08003575 struct IO_APIC_route_entry *entry;
Suravee Suthikulpanit77bdab42016-08-23 13:52:35 -05003576 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
3577
3578 if (!iommu)
3579 return;
Jiang Liu7c71d302015-04-13 14:11:33 +08003580
Jiang Liu7c71d302015-04-13 14:11:33 +08003581 data->irq_2_irte.devid = devid;
3582 data->irq_2_irte.index = index + sub_handle;
Suravee Suthikulpanit77bdab42016-08-23 13:52:35 -05003583 iommu->irte_ops->prepare(data->entry, apic->irq_delivery_mode,
3584 apic->irq_dest_mode, irq_cfg->vector,
Suravee Suthikulpanitd98de492016-08-23 13:52:40 -05003585 irq_cfg->dest_apicid, devid);
Jiang Liu7c71d302015-04-13 14:11:33 +08003586
3587 switch (info->type) {
3588 case X86_IRQ_ALLOC_TYPE_IOAPIC:
3589 /* Setup IOAPIC entry */
3590 entry = info->ioapic_entry;
3591 info->ioapic_entry = NULL;
3592 memset(entry, 0, sizeof(*entry));
3593 entry->vector = index;
3594 entry->mask = 0;
3595 entry->trigger = info->ioapic_trigger;
3596 entry->polarity = info->ioapic_polarity;
3597 /* Mask level triggered irqs. */
3598 if (info->ioapic_trigger)
3599 entry->mask = 1;
3600 break;
3601
3602 case X86_IRQ_ALLOC_TYPE_HPET:
3603 case X86_IRQ_ALLOC_TYPE_MSI:
3604 case X86_IRQ_ALLOC_TYPE_MSIX:
3605 msg->address_hi = MSI_ADDR_BASE_HI;
3606 msg->address_lo = MSI_ADDR_BASE_LO;
3607 msg->data = irte_info->index;
3608 break;
3609
3610 default:
3611 BUG_ON(1);
3612 break;
3613 }
3614}
3615
Suravee Suthikulpanit880ac602016-08-23 13:52:34 -05003616struct amd_irte_ops irte_32_ops = {
3617 .prepare = irte_prepare,
3618 .activate = irte_activate,
3619 .deactivate = irte_deactivate,
3620 .set_affinity = irte_set_affinity,
3621 .set_allocated = irte_set_allocated,
3622 .is_allocated = irte_is_allocated,
3623 .clear_allocated = irte_clear_allocated,
3624};
3625
3626struct amd_irte_ops irte_128_ops = {
3627 .prepare = irte_ga_prepare,
3628 .activate = irte_ga_activate,
3629 .deactivate = irte_ga_deactivate,
3630 .set_affinity = irte_ga_set_affinity,
3631 .set_allocated = irte_ga_set_allocated,
3632 .is_allocated = irte_ga_is_allocated,
3633 .clear_allocated = irte_ga_clear_allocated,
3634};
3635
Jiang Liu7c71d302015-04-13 14:11:33 +08003636static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
3637 unsigned int nr_irqs, void *arg)
3638{
3639 struct irq_alloc_info *info = arg;
3640 struct irq_data *irq_data;
Suravee Suthikulpanit77bdab42016-08-23 13:52:35 -05003641 struct amd_ir_data *data = NULL;
Jiang Liu7c71d302015-04-13 14:11:33 +08003642 struct irq_cfg *cfg;
3643 int i, ret, devid;
Sebastian Andrzej Siewior29d049b2018-03-22 16:22:42 +01003644 int index;
Jiang Liu7c71d302015-04-13 14:11:33 +08003645
3646 if (!info)
3647 return -EINVAL;
3648 if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_MSI &&
3649 info->type != X86_IRQ_ALLOC_TYPE_MSIX)
3650 return -EINVAL;
3651
3652 /*
3653 * With IRQ remapping enabled, don't need contiguous CPU vectors
3654 * to support multiple MSI interrupts.
3655 */
3656 if (info->type == X86_IRQ_ALLOC_TYPE_MSI)
3657 info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
3658
3659 devid = get_devid(info);
3660 if (devid < 0)
3661 return -EINVAL;
3662
3663 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
3664 if (ret < 0)
3665 return ret;
3666
Jiang Liu7c71d302015-04-13 14:11:33 +08003667 if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
Sebastian Andrzej Siewiorfde65dd2018-03-22 16:22:37 +01003668 struct irq_remap_table *table;
3669 struct amd_iommu *iommu;
3670
Logan Gunthorpe3c124432019-10-22 16:01:21 -06003671 table = alloc_irq_table(devid, NULL);
Sebastian Andrzej Siewiorfde65dd2018-03-22 16:22:37 +01003672 if (table) {
3673 if (!table->min_index) {
3674 /*
3675 * Keep the first 32 indexes free for IOAPIC
3676 * interrupts.
3677 */
3678 table->min_index = 32;
3679 iommu = amd_iommu_rlookup_table[devid];
3680 for (i = 0; i < 32; ++i)
3681 iommu->irte_ops->set_allocated(table, i);
3682 }
3683 WARN_ON(table->min_index != 32);
Jiang Liu7c71d302015-04-13 14:11:33 +08003684 index = info->ioapic_pin;
Sebastian Andrzej Siewiorfde65dd2018-03-22 16:22:37 +01003685 } else {
Sebastian Andrzej Siewior29d049b2018-03-22 16:22:42 +01003686 index = -ENOMEM;
Sebastian Andrzej Siewiorfde65dd2018-03-22 16:22:37 +01003687 }
Logan Gunthorpe3c124432019-10-22 16:01:21 -06003688 } else if (info->type == X86_IRQ_ALLOC_TYPE_MSI ||
3689 info->type == X86_IRQ_ALLOC_TYPE_MSIX) {
Joerg Roedel53b9ec32017-10-06 12:22:06 +02003690 bool align = (info->type == X86_IRQ_ALLOC_TYPE_MSI);
3691
Logan Gunthorpe3c124432019-10-22 16:01:21 -06003692 index = alloc_irq_index(devid, nr_irqs, align, info->msi_dev);
3693 } else {
3694 index = alloc_irq_index(devid, nr_irqs, false, NULL);
Jiang Liu7c71d302015-04-13 14:11:33 +08003695 }
Logan Gunthorpe3c124432019-10-22 16:01:21 -06003696
Jiang Liu7c71d302015-04-13 14:11:33 +08003697 if (index < 0) {
3698 pr_warn("Failed to allocate IRTE\n");
Wei Yongjun517abe42016-07-28 02:10:26 +00003699 ret = index;
Jiang Liu7c71d302015-04-13 14:11:33 +08003700 goto out_free_parent;
3701 }
3702
3703 for (i = 0; i < nr_irqs; i++) {
3704 irq_data = irq_domain_get_irq_data(domain, virq + i);
3705 cfg = irqd_cfg(irq_data);
3706 if (!irq_data || !cfg) {
3707 ret = -EINVAL;
3708 goto out_free_data;
3709 }
3710
Joerg Roedela130e692015-08-13 11:07:25 +02003711 ret = -ENOMEM;
3712 data = kzalloc(sizeof(*data), GFP_KERNEL);
3713 if (!data)
3714 goto out_free_data;
3715
Suravee Suthikulpanit77bdab42016-08-23 13:52:35 -05003716 if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
3717 data->entry = kzalloc(sizeof(union irte), GFP_KERNEL);
3718 else
3719 data->entry = kzalloc(sizeof(struct irte_ga),
3720 GFP_KERNEL);
3721 if (!data->entry) {
3722 kfree(data);
3723 goto out_free_data;
3724 }
3725
Jiang Liu7c71d302015-04-13 14:11:33 +08003726 irq_data->hwirq = (devid << 16) + i;
3727 irq_data->chip_data = data;
3728 irq_data->chip = &amd_ir_chip;
3729 irq_remapping_prepare_irte(data, cfg, info, devid, index, i);
3730 irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
3731 }
Joerg Roedela130e692015-08-13 11:07:25 +02003732
Jiang Liu7c71d302015-04-13 14:11:33 +08003733 return 0;
3734
3735out_free_data:
3736 for (i--; i >= 0; i--) {
3737 irq_data = irq_domain_get_irq_data(domain, virq + i);
3738 if (irq_data)
3739 kfree(irq_data->chip_data);
3740 }
3741 for (i = 0; i < nr_irqs; i++)
3742 free_irte(devid, index + i);
3743out_free_parent:
3744 irq_domain_free_irqs_common(domain, virq, nr_irqs);
3745 return ret;
3746}
3747
3748static void irq_remapping_free(struct irq_domain *domain, unsigned int virq,
3749 unsigned int nr_irqs)
3750{
3751 struct irq_2_irte *irte_info;
3752 struct irq_data *irq_data;
3753 struct amd_ir_data *data;
3754 int i;
3755
3756 for (i = 0; i < nr_irqs; i++) {
3757 irq_data = irq_domain_get_irq_data(domain, virq + i);
3758 if (irq_data && irq_data->chip_data) {
3759 data = irq_data->chip_data;
3760 irte_info = &data->irq_2_irte;
3761 free_irte(irte_info->devid, irte_info->index);
Suravee Suthikulpanit77bdab42016-08-23 13:52:35 -05003762 kfree(data->entry);
Jiang Liu7c71d302015-04-13 14:11:33 +08003763 kfree(data);
3764 }
3765 }
3766 irq_domain_free_irqs_common(domain, virq, nr_irqs);
3767}
3768
Thomas Gleixner5ba204a2017-09-13 23:29:48 +02003769static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
3770 struct amd_ir_data *ir_data,
3771 struct irq_2_irte *irte_info,
3772 struct irq_cfg *cfg);
3773
Thomas Gleixner72491642017-09-13 23:29:10 +02003774static int irq_remapping_activate(struct irq_domain *domain,
Thomas Gleixner702cb0a2017-12-29 16:59:06 +01003775 struct irq_data *irq_data, bool reserve)
Jiang Liu7c71d302015-04-13 14:11:33 +08003776{
3777 struct amd_ir_data *data = irq_data->chip_data;
3778 struct irq_2_irte *irte_info = &data->irq_2_irte;
Suravee Suthikulpanit77bdab42016-08-23 13:52:35 -05003779 struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
Thomas Gleixner5ba204a2017-09-13 23:29:48 +02003780 struct irq_cfg *cfg = irqd_cfg(irq_data);
Jiang Liu7c71d302015-04-13 14:11:33 +08003781
Thomas Gleixner5ba204a2017-09-13 23:29:48 +02003782 if (!iommu)
3783 return 0;
3784
3785 iommu->irte_ops->activate(data->entry, irte_info->devid,
3786 irte_info->index);
3787 amd_ir_update_irte(irq_data, iommu, data, irte_info, cfg);
Thomas Gleixner72491642017-09-13 23:29:10 +02003788 return 0;
Jiang Liu7c71d302015-04-13 14:11:33 +08003789}
3790
3791static void irq_remapping_deactivate(struct irq_domain *domain,
3792 struct irq_data *irq_data)
3793{
3794 struct amd_ir_data *data = irq_data->chip_data;
3795 struct irq_2_irte *irte_info = &data->irq_2_irte;
Suravee Suthikulpanit77bdab42016-08-23 13:52:35 -05003796 struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
Jiang Liu7c71d302015-04-13 14:11:33 +08003797
Suravee Suthikulpanit77bdab42016-08-23 13:52:35 -05003798 if (iommu)
3799 iommu->irte_ops->deactivate(data->entry, irte_info->devid,
3800 irte_info->index);
Jiang Liu7c71d302015-04-13 14:11:33 +08003801}
3802
Tobias Klausere2f9d452017-05-24 16:31:16 +02003803static const struct irq_domain_ops amd_ir_domain_ops = {
Jiang Liu7c71d302015-04-13 14:11:33 +08003804 .alloc = irq_remapping_alloc,
3805 .free = irq_remapping_free,
3806 .activate = irq_remapping_activate,
3807 .deactivate = irq_remapping_deactivate,
3808};
3809
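/*
 * Switch a previously prepared IRTE into guest (vAPIC) mode using the
 * GA root pointer, vector and tag cached in @data; a no-op unless the
 * IOMMU runs in vAPIC mode and the entry is not already in guest mode.
 */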
Suthikulpanit, Suraveeb9c6ff92019-07-23 19:00:37 +00003810int amd_iommu_activate_guest_mode(void *data)
3811{
3812 struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
3813 struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
3814
3815 if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
3816 !entry || entry->lo.fields_vapic.guest_mode)
3817 return 0;
3818
3819 entry->lo.val = 0;
3820 entry->hi.val = 0;
3821
3822 entry->lo.fields_vapic.guest_mode = 1;
3823 entry->lo.fields_vapic.ga_log_intr = 1;
3824 entry->hi.fields.ga_root_ptr = ir_data->ga_root_ptr;
3825 entry->hi.fields.vector = ir_data->ga_vector;
3826 entry->lo.fields_vapic.ga_tag = ir_data->ga_tag;
3827
3828 return modify_irte_ga(ir_data->irq_2_irte.devid,
3829 ir_data->irq_2_irte.index, entry, NULL);
3830}
3831EXPORT_SYMBOL(amd_iommu_activate_guest_mode);
3832
3833int amd_iommu_deactivate_guest_mode(void *data)
3834{
3835 struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
3836 struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
3837 struct irq_cfg *cfg = ir_data->cfg;
3838
3839 if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
3840 !entry || !entry->lo.fields_vapic.guest_mode)
3841 return 0;
3842
3843 entry->lo.val = 0;
3844 entry->hi.val = 0;
3845
3846 entry->lo.fields_remap.dm = apic->irq_dest_mode;
3847 entry->lo.fields_remap.int_type = apic->irq_delivery_mode;
3848 entry->hi.fields.vector = cfg->vector;
3849 entry->lo.fields_remap.destination =
3850 APICID_TO_IRTE_DEST_LO(cfg->dest_apicid);
3851 entry->hi.fields.destination =
3852 APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);
3853
3854 return modify_irte_ga(ir_data->irq_2_irte.devid,
3855 ir_data->irq_2_irte.index, entry, NULL);
3856}
3857EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode);
3858
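/*
 * irq_set_vcpu_affinity() callback, typically invoked via KVM/SVM for
 * posted interrupts: switch the IRTE into guest (vAPIC) mode with the
 * root pointer, vector and GA tag supplied by the caller, or back to
 * legacy remapped mode when the interrupt is dissociated from a vCPU.
 */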
Suravee Suthikulpanitb9fc6b52016-08-23 13:52:39 -05003859static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
3860{
Suthikulpanit, Suraveeb9c6ff92019-07-23 19:00:37 +00003861 int ret;
Suravee Suthikulpanitb9fc6b52016-08-23 13:52:39 -05003862 struct amd_iommu *iommu;
3863 struct amd_iommu_pi_data *pi_data = vcpu_info;
3864 struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data;
3865 struct amd_ir_data *ir_data = data->chip_data;
Suravee Suthikulpanitb9fc6b52016-08-23 13:52:39 -05003866 struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
Suravee Suthikulpanitd98de492016-08-23 13:52:40 -05003867 struct iommu_dev_data *dev_data = search_dev_data(irte_info->devid);
3868
3869	/*
3870	 * If this device has never been set up for guest mode
3871	 * (use_vapic not set), do not modify the IRTE.
3872	 */
3873 if (!dev_data || !dev_data->use_vapic)
3874 return 0;
Suravee Suthikulpanitb9fc6b52016-08-23 13:52:39 -05003875
Suthikulpanit, Suraveeb9c6ff92019-07-23 19:00:37 +00003876 ir_data->cfg = irqd_cfg(data);
Suravee Suthikulpanitb9fc6b52016-08-23 13:52:39 -05003877 pi_data->ir_data = ir_data;
3878
3879 /* Note:
3880 * SVM tries to set up for VAPIC mode, but we are in
3881 * legacy mode. So, we force legacy mode instead.
3882 */
3883 if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
Joerg Roedel101fa032018-11-27 16:22:31 +01003884 pr_debug("%s: Fall back to using intr legacy remap\n",
Suravee Suthikulpanitb9fc6b52016-08-23 13:52:39 -05003885 __func__);
3886 pi_data->is_guest_mode = false;
3887 }
3888
3889 iommu = amd_iommu_rlookup_table[irte_info->devid];
3890 if (iommu == NULL)
3891 return -EINVAL;
3892
3893 pi_data->prev_ga_tag = ir_data->cached_ga_tag;
3894 if (pi_data->is_guest_mode) {
Suthikulpanit, Suraveeb9c6ff92019-07-23 19:00:37 +00003895 ir_data->ga_root_ptr = (pi_data->base >> 12);
3896 ir_data->ga_vector = vcpu_pi_info->vector;
3897 ir_data->ga_tag = pi_data->ga_tag;
3898 ret = amd_iommu_activate_guest_mode(ir_data);
3899 if (!ret)
3900 ir_data->cached_ga_tag = pi_data->ga_tag;
Suravee Suthikulpanitb9fc6b52016-08-23 13:52:39 -05003901 } else {
Suthikulpanit, Suraveeb9c6ff92019-07-23 19:00:37 +00003902 ret = amd_iommu_deactivate_guest_mode(ir_data);
Suravee Suthikulpanitb9fc6b52016-08-23 13:52:39 -05003903
3904 /*
3905 * This communicates the ga_tag back to the caller
3906 * so that it can do all the necessary clean up.
3907 */
Suthikulpanit, Suraveeb9c6ff92019-07-23 19:00:37 +00003908 if (!ret)
3909 ir_data->cached_ga_tag = 0;
Suravee Suthikulpanitb9fc6b52016-08-23 13:52:39 -05003910 }
3911
Suthikulpanit, Suraveeb9c6ff92019-07-23 19:00:37 +00003912 return ret;
Suravee Suthikulpanitb9fc6b52016-08-23 13:52:39 -05003913}
3914
Thomas Gleixner5ba204a2017-09-13 23:29:48 +02003915
3916static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
3917 struct amd_ir_data *ir_data,
3918 struct irq_2_irte *irte_info,
3919 struct irq_cfg *cfg)
3920{
3921
3922 /*
3923 * Atomically updates the IRTE with the new destination, vector
3924 * and flushes the interrupt entry cache.
3925 */
3926 iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid,
3927 irte_info->index, cfg->vector,
3928 cfg->dest_apicid);
3929}
3930
Jiang Liu7c71d302015-04-13 14:11:33 +08003931static int amd_ir_set_affinity(struct irq_data *data,
3932 const struct cpumask *mask, bool force)
3933{
3934 struct amd_ir_data *ir_data = data->chip_data;
3935 struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
3936 struct irq_cfg *cfg = irqd_cfg(data);
3937 struct irq_data *parent = data->parent_data;
Suravee Suthikulpanit77bdab42016-08-23 13:52:35 -05003938 struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
Jiang Liu7c71d302015-04-13 14:11:33 +08003939 int ret;
3940
Suravee Suthikulpanit77bdab42016-08-23 13:52:35 -05003941 if (!iommu)
3942 return -ENODEV;
3943
Jiang Liu7c71d302015-04-13 14:11:33 +08003944 ret = parent->chip->irq_set_affinity(parent, mask, force);
3945 if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
3946 return ret;
3947
Thomas Gleixner5ba204a2017-09-13 23:29:48 +02003948 amd_ir_update_irte(data, iommu, ir_data, irte_info, cfg);
Jiang Liu7c71d302015-04-13 14:11:33 +08003949 /*
3950 * After this point, all the interrupts will start arriving
3951 * at the new destination. So, time to cleanup the previous
3952 * vector allocation.
3953 */
Jiang Liuc6c20022015-04-14 10:30:02 +08003954 send_cleanup_vector(cfg);
Jiang Liu7c71d302015-04-13 14:11:33 +08003955
3956 return IRQ_SET_MASK_OK_DONE;
3957}
3958
3959static void ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
3960{
3961 struct amd_ir_data *ir_data = irq_data->chip_data;
3962
3963 *msg = ir_data->msi_entry;
3964}
3965
3966static struct irq_chip amd_ir_chip = {
Thomas Gleixner290be192017-06-20 01:37:02 +02003967 .name = "AMD-IR",
Thomas Gleixner8a2b7d12018-06-04 17:33:56 +02003968 .irq_ack = apic_ack_irq,
Thomas Gleixner290be192017-06-20 01:37:02 +02003969 .irq_set_affinity = amd_ir_set_affinity,
3970 .irq_set_vcpu_affinity = amd_ir_set_vcpu_affinity,
3971 .irq_compose_msi_msg = ir_compose_msi_msg,
Jiang Liu7c71d302015-04-13 14:11:33 +08003972};
3973
3974int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
3975{
Thomas Gleixner3e49a812017-06-20 01:37:12 +02003976 struct fwnode_handle *fn;
3977
3978 fn = irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index);
3979 if (!fn)
3980 return -ENOMEM;
3981 iommu->ir_domain = irq_domain_create_tree(fn, &amd_ir_domain_ops, iommu);
3982 irq_domain_free_fwnode(fn);
Jiang Liu7c71d302015-04-13 14:11:33 +08003983 if (!iommu->ir_domain)
3984 return -ENOMEM;
3985
3986 iommu->ir_domain->parent = arch_get_ir_parent_domain();
Thomas Gleixner3e49a812017-06-20 01:37:12 +02003987 iommu->msi_domain = arch_create_remap_msi_irq_domain(iommu->ir_domain,
3988 "AMD-IR-MSI",
3989 iommu->index);
Jiang Liu7c71d302015-04-13 14:11:33 +08003990 return 0;
3991}
Suravee Suthikulpanit8dbea3f2016-08-23 13:52:38 -05003992
3993int amd_iommu_update_ga(int cpu, bool is_run, void *data)
3994{
3995 unsigned long flags;
3996 struct amd_iommu *iommu;
Sebastian Andrzej Siewior4fde5412018-03-22 16:22:38 +01003997 struct irq_remap_table *table;
Suravee Suthikulpanit8dbea3f2016-08-23 13:52:38 -05003998 struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
3999 int devid = ir_data->irq_2_irte.devid;
4000 struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
4001 struct irte_ga *ref = (struct irte_ga *) ir_data->ref;
4002
4003 if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
4004 !ref || !entry || !entry->lo.fields_vapic.guest_mode)
4005 return 0;
4006
4007 iommu = amd_iommu_rlookup_table[devid];
4008 if (!iommu)
4009 return -ENODEV;
4010
Sebastian Andrzej Siewior4fde5412018-03-22 16:22:38 +01004011 table = get_irq_table(devid);
4012 if (!table)
Suravee Suthikulpanit8dbea3f2016-08-23 13:52:38 -05004013 return -ENODEV;
4014
Sebastian Andrzej Siewior4fde5412018-03-22 16:22:38 +01004015 raw_spin_lock_irqsave(&table->lock, flags);
Suravee Suthikulpanit8dbea3f2016-08-23 13:52:38 -05004016
4017 if (ref->lo.fields_vapic.guest_mode) {
Suravee Suthikulpanit90fcffd2018-06-27 10:31:22 -05004018 if (cpu >= 0) {
4019 ref->lo.fields_vapic.destination =
4020 APICID_TO_IRTE_DEST_LO(cpu);
4021 ref->hi.fields.destination =
4022 APICID_TO_IRTE_DEST_HI(cpu);
4023 }
Suravee Suthikulpanit8dbea3f2016-08-23 13:52:38 -05004024 ref->lo.fields_vapic.is_run = is_run;
4025 barrier();
4026 }
4027
Sebastian Andrzej Siewior4fde5412018-03-22 16:22:38 +01004028 raw_spin_unlock_irqrestore(&table->lock, flags);
Suravee Suthikulpanit8dbea3f2016-08-23 13:52:38 -05004029
4030 iommu_flush_irt(iommu, devid);
4031 iommu_completion_wait(iommu);
4032 return 0;
4033}
4034EXPORT_SYMBOL(amd_iommu_update_ga);
Joerg Roedel2b324502012-06-21 16:29:10 +02004035#endif