Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -07001/*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
mark gross98bcef52008-02-23 15:23:35 -080017 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -070021 *
Suresh Siddhae61d98d2008-07-10 11:16:35 -070022 * This file implements early detection/parsing of Remapping Devices
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -070023 * reported to the OS by the BIOS via the DMA remapping reporting (DMAR) ACPI
24 * tables.
Suresh Siddhae61d98d2008-07-10 11:16:35 -070025 *
26 * These routines are used by both DMA-remapping and Interrupt-remapping
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -070027 */
28
Joerg Roedel9f10e5b2015-06-12 09:57:06 +020029#define pr_fmt(fmt) "DMAR: " fmt
Donald Dutilee9071b02012-06-08 17:13:11 -040030
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -070031#include <linux/pci.h>
32#include <linux/dmar.h>
Kay, Allen M38717942008-09-09 18:37:29 +030033#include <linux/iova.h>
34#include <linux/intel-iommu.h>
Suresh Siddhafe962e92008-07-10 11:16:42 -070035#include <linux/timer.h>
Suresh Siddha0ac24912009-03-16 17:04:54 -070036#include <linux/irq.h>
37#include <linux/interrupt.h>
Shane Wang69575d32009-09-01 18:25:07 -070038#include <linux/tboot.h>
Len Browneb27cae2009-07-06 23:40:19 -040039#include <linux/dmi.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090040#include <linux/slab.h>
Alex Williamsona5459cf2014-06-12 16:12:31 -060041#include <linux/iommu.h>
Daniel Drake159ba992020-03-12 14:09:55 +080042#include <linux/limits.h>
Suresh Siddha8a8f4222012-03-30 11:47:08 -070043#include <asm/irq_remapping.h>
Konrad Rzeszutek Wilk4db77ff2010-08-26 13:58:04 -040044#include <asm/iommu_table.h>
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -070045
Joerg Roedel078e1ee2012-09-26 12:44:43 +020046#include "irq_remapping.h"
47
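/*
 * Callback table for walking the structures of an ACPI DMAR table:
 * dmar_walk_remapping_entries() hands each entry of type T to cb[T]
 * together with arg[T].
 */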
Jiang Liuc2a0b532014-11-09 22:47:56 +080048typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
49struct dmar_res_callback {
50 dmar_res_handler_t cb[ACPI_DMAR_TYPE_RESERVED];
51 void *arg[ACPI_DMAR_TYPE_RESERVED];
52 bool ignore_unhandled;
53 bool print_entry;
54};
55
Jiang Liu3a5670e2014-02-19 14:07:33 +080056/*
57 * Assumptions:
 58 * 1) The hotplug framework guarantees that a DMAR unit will be hot-added
59 * before IO devices managed by that unit.
 60 * 2) The hotplug framework guarantees that a DMAR unit will be hot-removed
61 * after IO devices managed by that unit.
62 * 3) Hotplug events are rare.
63 *
64 * Locking rules for DMA and interrupt remapping related global data structures:
65 * 1) Use dmar_global_lock in process context
66 * 2) Use RCU in interrupt context
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -070067 */
Jiang Liu3a5670e2014-02-19 14:07:33 +080068DECLARE_RWSEM(dmar_global_lock);
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -070069LIST_HEAD(dmar_drhd_units);
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -070070
Suresh Siddha41750d32011-08-23 17:05:18 -070071struct acpi_table_header * __initdata dmar_tbl;
Yinghai Lu8e1568f2009-02-11 01:06:59 -080072static acpi_size dmar_tbl_size;
Jiang Liu2e455282014-02-19 14:07:36 +080073static int dmar_dev_scope_status = 1;
Jiang Liu78d8e702014-11-09 22:47:57 +080074static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -070075
Jiang Liu694835d2014-01-06 14:18:16 +080076static int alloc_iommu(struct dmar_drhd_unit *drhd);
Jiang Liua868e6b2014-01-06 14:18:20 +080077static void free_iommu(struct intel_iommu *iommu);
Jiang Liu694835d2014-01-06 14:18:16 +080078
Jiang Liu6b197242014-11-09 22:47:58 +080079static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -070080{
81 /*
 82 * add INCLUDE_ALL at the tail, so that a scan of the list will
 83 * find it at the very end.
84 */
85 if (drhd->include_all)
Jiang Liu0e242612014-02-19 14:07:34 +080086 list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -070087 else
Jiang Liu0e242612014-02-19 14:07:34 +080088 list_add_rcu(&drhd->list, &dmar_drhd_units);
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -070089}
90
Jiang Liubb3a6b72014-02-19 14:07:24 +080091void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -070092{
93 struct acpi_dmar_device_scope *scope;
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -070094
95 *cnt = 0;
96 while (start < end) {
97 scope = start;
Bob Moore83118b02014-07-30 12:21:00 +080098 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
David Woodhouse07cb52f2014-03-07 14:39:27 +000099 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700100 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
101 (*cnt)++;
Linn Crosettoae3e7f32013-04-23 12:26:45 -0600102 else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
103 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
Donald Dutilee9071b02012-06-08 17:13:11 -0400104 pr_warn("Unsupported device scope\n");
Yinghai Lu5715f0f2010-04-08 19:58:22 +0100105 }
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700106 start += scope->length;
107 }
108 if (*cnt == 0)
Jiang Liubb3a6b72014-02-19 14:07:24 +0800109 return NULL;
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700110
David Woodhouse832bd852014-03-07 15:08:36 +0000111 return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
Jiang Liubb3a6b72014-02-19 14:07:24 +0800112}
113
David Woodhouse832bd852014-03-07 15:08:36 +0000114void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
Jiang Liuada4d4b2014-01-06 14:18:09 +0800115{
Jiang Liub683b232014-02-19 14:07:32 +0800116 int i;
David Woodhouse832bd852014-03-07 15:08:36 +0000117 struct device *tmp_dev;
Jiang Liub683b232014-02-19 14:07:32 +0800118
Jiang Liuada4d4b2014-01-06 14:18:09 +0800119 if (*devices && *cnt) {
Jiang Liub683b232014-02-19 14:07:32 +0800120 for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
David Woodhouse832bd852014-03-07 15:08:36 +0000121 put_device(tmp_dev);
Jiang Liuada4d4b2014-01-06 14:18:09 +0800122 kfree(*devices);
Jiang Liuada4d4b2014-01-06 14:18:09 +0800123 }
Jiang Liu0e242612014-02-19 14:07:34 +0800124
125 *devices = NULL;
126 *cnt = 0;
Jiang Liuada4d4b2014-01-06 14:18:09 +0800127}
128
Jiang Liu59ce0512014-02-19 14:07:35 +0800129/* Optimize out kzalloc()/kfree() for normal cases */
130static char dmar_pci_notify_info_buf[64];
131
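/*
 * Build a notification record for @dev: its segment and, for device
 * addition events, the bus/device/function path from the root bus
 * down to the device.
 */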
132static struct dmar_pci_notify_info *
133dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
134{
135 int level = 0;
136 size_t size;
137 struct pci_dev *tmp;
138 struct dmar_pci_notify_info *info;
139
140 BUG_ON(dev->is_virtfn);
141
Daniel Drake159ba992020-03-12 14:09:55 +0800142 /*
143 * Ignore devices that have a domain number higher than what can
144 * be looked up in DMAR, e.g. VMD subdevices with domain 0x10000
145 */
146 if (pci_domain_nr(dev->bus) > U16_MAX)
147 return NULL;
148
Jiang Liu59ce0512014-02-19 14:07:35 +0800149 /* Only generate path[] for device addition event */
150 if (event == BUS_NOTIFY_ADD_DEVICE)
151 for (tmp = dev; tmp; tmp = tmp->bus->self)
152 level++;
153
Julia Cartwright0afa6d82019-02-20 16:46:31 +0000154 size = sizeof(*info) + level * sizeof(info->path[0]);
Jiang Liu59ce0512014-02-19 14:07:35 +0800155 if (size <= sizeof(dmar_pci_notify_info_buf)) {
156 info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
157 } else {
158 info = kzalloc(size, GFP_KERNEL);
159 if (!info) {
160 pr_warn("Out of memory when allocating notify_info "
161 "for %s.\n", pci_name(dev));
Jiang Liu2e455282014-02-19 14:07:36 +0800162 if (dmar_dev_scope_status == 0)
163 dmar_dev_scope_status = -ENOMEM;
Jiang Liu59ce0512014-02-19 14:07:35 +0800164 return NULL;
165 }
166 }
167
168 info->event = event;
169 info->dev = dev;
170 info->seg = pci_domain_nr(dev->bus);
171 info->level = level;
172 if (event == BUS_NOTIFY_ADD_DEVICE) {
Jiang Liu5ae05662014-04-15 10:35:35 +0800173 for (tmp = dev; tmp; tmp = tmp->bus->self) {
174 level--;
Joerg Roedel57384592014-10-02 11:50:25 +0200175 info->path[level].bus = tmp->bus->number;
Jiang Liu59ce0512014-02-19 14:07:35 +0800176 info->path[level].device = PCI_SLOT(tmp->devfn);
177 info->path[level].function = PCI_FUNC(tmp->devfn);
178 if (pci_is_root_bus(tmp->bus))
179 info->bus = tmp->bus->number;
180 }
181 }
182
183 return info;
184}
185
186static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
187{
188 if ((void *)info != dmar_pci_notify_info_buf)
189 kfree(info);
190}
191
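/*
 * Match the PCI path recorded in @info against an ACPI device-scope
 * path.  As a fallback, a single-entry path is also matched against
 * the leaf device only, to cope with firmware that emits broken RMRR
 * entries.
 */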
192static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
193 struct acpi_dmar_pci_path *path, int count)
194{
195 int i;
196
197 if (info->bus != bus)
Joerg Roedel80f7b3d2014-09-22 16:30:22 +0200198 goto fallback;
Jiang Liu59ce0512014-02-19 14:07:35 +0800199 if (info->level != count)
Joerg Roedel80f7b3d2014-09-22 16:30:22 +0200200 goto fallback;
Jiang Liu59ce0512014-02-19 14:07:35 +0800201
202 for (i = 0; i < count; i++) {
203 if (path[i].device != info->path[i].device ||
204 path[i].function != info->path[i].function)
Joerg Roedel80f7b3d2014-09-22 16:30:22 +0200205 goto fallback;
Jiang Liu59ce0512014-02-19 14:07:35 +0800206 }
207
208 return true;
Joerg Roedel80f7b3d2014-09-22 16:30:22 +0200209
210fallback:
211
212 if (count != 1)
213 return false;
214
215 i = info->level - 1;
216 if (bus == info->path[i].bus &&
217 path[0].device == info->path[i].device &&
218 path[0].function == info->path[i].function) {
219 pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
220 bus, path[0].device, path[0].function);
221 return true;
222 }
223
224 return false;
Jiang Liu59ce0512014-02-19 14:07:35 +0800225}
226
227/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
228int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
229 void *start, void*end, u16 segment,
David Woodhouse832bd852014-03-07 15:08:36 +0000230 struct dmar_dev_scope *devices,
231 int devices_cnt)
Jiang Liu59ce0512014-02-19 14:07:35 +0800232{
233 int i, level;
David Woodhouse832bd852014-03-07 15:08:36 +0000234 struct device *tmp, *dev = &info->dev->dev;
Jiang Liu59ce0512014-02-19 14:07:35 +0800235 struct acpi_dmar_device_scope *scope;
236 struct acpi_dmar_pci_path *path;
237
238 if (segment != info->seg)
239 return 0;
240
241 for (; start < end; start += scope->length) {
242 scope = start;
243 if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
244 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
245 continue;
246
247 path = (struct acpi_dmar_pci_path *)(scope + 1);
248 level = (scope->length - sizeof(*scope)) / sizeof(*path);
249 if (!dmar_match_pci_path(info, scope->bus, path, level))
250 continue;
251
Roland Dreierffb2d1e2016-06-02 17:46:10 -0700252 /*
253 * We expect devices with endpoint scope to have normal PCI
254 * headers, and devices with bridge scope to have bridge PCI
255 * headers. However PCI NTB devices may be listed in the
256 * DMAR table with bridge scope, even though they have a
257 * normal PCI header. NTB devices are identified by class
 258 * "BRIDGE_OTHER" (0680h) - we don't declare a scope mismatch
259 * for this special case.
260 */
261 if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
262 info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) ||
263 (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
264 (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
265 info->dev->class >> 8 != PCI_CLASS_BRIDGE_OTHER))) {
Jiang Liu59ce0512014-02-19 14:07:35 +0800266 pr_warn("Device scope type does not match for %s\n",
David Woodhouse832bd852014-03-07 15:08:36 +0000267 pci_name(info->dev));
Jiang Liu59ce0512014-02-19 14:07:35 +0800268 return -EINVAL;
269 }
270
271 for_each_dev_scope(devices, devices_cnt, i, tmp)
272 if (tmp == NULL) {
David Woodhouse832bd852014-03-07 15:08:36 +0000273 devices[i].bus = info->dev->bus->number;
274 devices[i].devfn = info->dev->devfn;
275 rcu_assign_pointer(devices[i].dev,
276 get_device(dev));
Jiang Liu59ce0512014-02-19 14:07:35 +0800277 return 1;
278 }
279 BUG_ON(i >= devices_cnt);
280 }
281
282 return 0;
283}
284
285int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
David Woodhouse832bd852014-03-07 15:08:36 +0000286 struct dmar_dev_scope *devices, int count)
Jiang Liu59ce0512014-02-19 14:07:35 +0800287{
288 int index;
David Woodhouse832bd852014-03-07 15:08:36 +0000289 struct device *tmp;
Jiang Liu59ce0512014-02-19 14:07:35 +0800290
291 if (info->seg != segment)
292 return 0;
293
294 for_each_active_dev_scope(devices, count, index, tmp)
David Woodhouse832bd852014-03-07 15:08:36 +0000295 if (tmp == &info->dev->dev) {
Andreea-Cristina Bernateecbad72014-08-18 15:20:56 +0300296 RCU_INIT_POINTER(devices[index].dev, NULL);
Jiang Liu59ce0512014-02-19 14:07:35 +0800297 synchronize_rcu();
David Woodhouse832bd852014-03-07 15:08:36 +0000298 put_device(tmp);
Jiang Liu59ce0512014-02-19 14:07:35 +0800299 return 1;
300 }
301
302 return 0;
303}
304
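/*
 * Insert the newly added device into the device scope of the DRHD
 * unit that claims it, then pass the event on via
 * dmar_iommu_notify_scope_dev() so the remaining DMAR device scopes
 * can be updated as well.
 */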
305static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
306{
307 int ret = 0;
308 struct dmar_drhd_unit *dmaru;
309 struct acpi_dmar_hardware_unit *drhd;
310
311 for_each_drhd_unit(dmaru) {
312 if (dmaru->include_all)
313 continue;
314
315 drhd = container_of(dmaru->hdr,
316 struct acpi_dmar_hardware_unit, header);
317 ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
318 ((void *)drhd) + drhd->header.length,
319 dmaru->segment,
320 dmaru->devices, dmaru->devices_cnt);
321 if (ret != 0)
322 break;
323 }
324 if (ret >= 0)
325 ret = dmar_iommu_notify_scope_dev(info);
Jiang Liu2e455282014-02-19 14:07:36 +0800326 if (ret < 0 && dmar_dev_scope_status == 0)
327 dmar_dev_scope_status = ret;
Jiang Liu59ce0512014-02-19 14:07:35 +0800328
329 return ret;
330}
331
332static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
333{
334 struct dmar_drhd_unit *dmaru;
335
336 for_each_drhd_unit(dmaru)
337 if (dmar_remove_dev_scope(info, dmaru->segment,
338 dmaru->devices, dmaru->devices_cnt))
339 break;
340 dmar_iommu_notify_scope_dev(info);
341}
342
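/*
 * PCI bus notifier: keep the DMAR device scopes in sync as physical
 * PCI devices are added to or removed from the system.
 */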
343static int dmar_pci_bus_notifier(struct notifier_block *nb,
344 unsigned long action, void *data)
345{
346 struct pci_dev *pdev = to_pci_dev(data);
347 struct dmar_pci_notify_info *info;
348
Ashok Raj1c387182016-10-21 15:32:05 -0700349 /* Only care about add/remove events for physical functions.
350 * For VFs we actually do the lookup based on the corresponding
351 * PF in device_to_iommu() anyway. */
Jiang Liu59ce0512014-02-19 14:07:35 +0800352 if (pdev->is_virtfn)
353 return NOTIFY_DONE;
Joerg Roedele6a8c9b2016-02-29 23:49:47 +0100354 if (action != BUS_NOTIFY_ADD_DEVICE &&
355 action != BUS_NOTIFY_REMOVED_DEVICE)
Jiang Liu59ce0512014-02-19 14:07:35 +0800356 return NOTIFY_DONE;
357
358 info = dmar_alloc_pci_notify_info(pdev, action);
359 if (!info)
360 return NOTIFY_DONE;
361
362 down_write(&dmar_global_lock);
363 if (action == BUS_NOTIFY_ADD_DEVICE)
364 dmar_pci_bus_add_dev(info);
Joerg Roedele6a8c9b2016-02-29 23:49:47 +0100365 else if (action == BUS_NOTIFY_REMOVED_DEVICE)
Jiang Liu59ce0512014-02-19 14:07:35 +0800366 dmar_pci_bus_del_dev(info);
367 up_write(&dmar_global_lock);
368
369 dmar_free_pci_notify_info(info);
370
371 return NOTIFY_OK;
372}
373
374static struct notifier_block dmar_pci_bus_nb = {
375 .notifier_call = dmar_pci_bus_notifier,
376 .priority = INT_MIN,
377};
378
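/* Look up an already registered DRHD unit by segment and register base. */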
Jiang Liu6b197242014-11-09 22:47:58 +0800379static struct dmar_drhd_unit *
380dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
381{
382 struct dmar_drhd_unit *dmaru;
383
384 list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
385 if (dmaru->segment == drhd->segment &&
386 dmaru->reg_base_addr == drhd->address)
387 return dmaru;
388
389 return NULL;
390}
391
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700392/**
393 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
394 * structure which uniquely represent one DMA remapping hardware unit
395 * present in the platform
396 */
Jiang Liu6b197242014-11-09 22:47:58 +0800397static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700398{
399 struct acpi_dmar_hardware_unit *drhd;
400 struct dmar_drhd_unit *dmaru;
401 int ret = 0;
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700402
David Woodhousee523b382009-04-10 22:27:48 -0700403 drhd = (struct acpi_dmar_hardware_unit *)header;
Jiang Liu6b197242014-11-09 22:47:58 +0800404 dmaru = dmar_find_dmaru(drhd);
405 if (dmaru)
406 goto out;
407
408 dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700409 if (!dmaru)
410 return -ENOMEM;
411
Jiang Liu6b197242014-11-09 22:47:58 +0800412 /*
413 * If header is allocated from slab by ACPI _DSM method, we need to
414 * copy the content because the memory buffer will be freed on return.
415 */
416 dmaru->hdr = (void *)(dmaru + 1);
417 memcpy(dmaru->hdr, header, header->length);
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700418 dmaru->reg_base_addr = drhd->address;
David Woodhouse276dbf92009-04-04 01:45:37 +0100419 dmaru->segment = drhd->segment;
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700420 dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
David Woodhouse07cb52f2014-03-07 14:39:27 +0000421 dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
422 ((void *)drhd) + drhd->header.length,
423 &dmaru->devices_cnt);
424 if (dmaru->devices_cnt && dmaru->devices == NULL) {
425 kfree(dmaru);
426 return -ENOMEM;
Jiang Liu2e455282014-02-19 14:07:36 +0800427 }
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700428
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700429 ret = alloc_iommu(dmaru);
430 if (ret) {
David Woodhouse07cb52f2014-03-07 14:39:27 +0000431 dmar_free_dev_scope(&dmaru->devices,
432 &dmaru->devices_cnt);
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700433 kfree(dmaru);
434 return ret;
435 }
436 dmar_register_drhd_unit(dmaru);
Jiang Liuc2a0b532014-11-09 22:47:56 +0800437
Jiang Liu6b197242014-11-09 22:47:58 +0800438out:
Jiang Liuc2a0b532014-11-09 22:47:56 +0800439 if (arg)
440 (*(int *)arg)++;
441
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700442 return 0;
443}
444
Jiang Liua868e6b2014-01-06 14:18:20 +0800445static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
446{
447 if (dmaru->devices && dmaru->devices_cnt)
448 dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
449 if (dmaru->iommu)
450 free_iommu(dmaru->iommu);
451 kfree(dmaru);
452}
453
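/*
 * Validate and log an ANDD (ACPI name-space device declaration) entry.
 * The corresponding device scope is wired up later by
 * dmar_acpi_dev_scope_init().
 */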
Jiang Liuc2a0b532014-11-09 22:47:56 +0800454static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
455 void *arg)
David Woodhousee625b4a2014-03-07 14:34:38 +0000456{
457 struct acpi_dmar_andd *andd = (void *)header;
458
459 /* Check for NUL termination within the designated length */
Bob Moore83118b02014-07-30 12:21:00 +0800460 if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
Hans de Goede5145afc2020-03-09 15:01:37 +0100461 pr_warn(FW_BUG
David Woodhousee625b4a2014-03-07 14:34:38 +0000462 "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
463 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
464 dmi_get_system_info(DMI_BIOS_VENDOR),
465 dmi_get_system_info(DMI_BIOS_VERSION),
466 dmi_get_system_info(DMI_PRODUCT_VERSION));
Hans de Goede5145afc2020-03-09 15:01:37 +0100467 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
David Woodhousee625b4a2014-03-07 14:34:38 +0000468 return -EINVAL;
469 }
470 pr_info("ANDD device: %x name: %s\n", andd->device_number,
Bob Moore83118b02014-07-30 12:21:00 +0800471 andd->device_name);
David Woodhousee625b4a2014-03-07 14:34:38 +0000472
473 return 0;
474}
475
David Woodhouseaa697072009-10-07 12:18:00 +0100476#ifdef CONFIG_ACPI_NUMA
Jiang Liu6b197242014-11-09 22:47:58 +0800477static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
Suresh Siddhaee34b322009-10-02 11:01:21 -0700478{
479 struct acpi_dmar_rhsa *rhsa;
480 struct dmar_drhd_unit *drhd;
481
482 rhsa = (struct acpi_dmar_rhsa *)header;
David Woodhouseaa697072009-10-07 12:18:00 +0100483 for_each_drhd_unit(drhd) {
Suresh Siddhaee34b322009-10-02 11:01:21 -0700484 if (drhd->reg_base_addr == rhsa->base_address) {
485 int node = acpi_map_pxm_to_node(rhsa->proximity_domain);
486
487 if (!node_online(node))
488 node = -1;
489 drhd->iommu->node = node;
David Woodhouseaa697072009-10-07 12:18:00 +0100490 return 0;
491 }
Suresh Siddhaee34b322009-10-02 11:01:21 -0700492 }
Hans de Goede5145afc2020-03-09 15:01:37 +0100493 pr_warn(FW_BUG
Ben Hutchingsfd0c8892010-04-03 19:38:43 +0100494 "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
495 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
Zhenzhong Duan1d66a552020-03-12 14:09:54 +0800496 rhsa->base_address,
Ben Hutchingsfd0c8892010-04-03 19:38:43 +0100497 dmi_get_system_info(DMI_BIOS_VENDOR),
498 dmi_get_system_info(DMI_BIOS_VERSION),
499 dmi_get_system_info(DMI_PRODUCT_VERSION));
Hans de Goede5145afc2020-03-09 15:01:37 +0100500 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
Suresh Siddhaee34b322009-10-02 11:01:21 -0700501
David Woodhouseaa697072009-10-07 12:18:00 +0100502 return 0;
Suresh Siddhaee34b322009-10-02 11:01:21 -0700503}
Jiang Liuc2a0b532014-11-09 22:47:56 +0800504#else
505#define dmar_parse_one_rhsa dmar_res_noop
David Woodhouseaa697072009-10-07 12:18:00 +0100506#endif
Suresh Siddhaee34b322009-10-02 11:01:21 -0700507
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700508static void __init
509dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
510{
511 struct acpi_dmar_hardware_unit *drhd;
512 struct acpi_dmar_reserved_memory *rmrr;
Yu Zhaoaa5d2b52009-05-18 13:51:34 +0800513 struct acpi_dmar_atsr *atsr;
Roland Dreier17b60972009-09-24 12:14:00 -0700514 struct acpi_dmar_rhsa *rhsa;
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700515
516 switch (header->type) {
517 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
Yu Zhaoaa5d2b52009-05-18 13:51:34 +0800518 drhd = container_of(header, struct acpi_dmar_hardware_unit,
519 header);
Donald Dutilee9071b02012-06-08 17:13:11 -0400520 pr_info("DRHD base: %#016Lx flags: %#x\n",
Yu Zhaoaa5d2b52009-05-18 13:51:34 +0800521 (unsigned long long)drhd->address, drhd->flags);
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700522 break;
523 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
Yu Zhaoaa5d2b52009-05-18 13:51:34 +0800524 rmrr = container_of(header, struct acpi_dmar_reserved_memory,
525 header);
Donald Dutilee9071b02012-06-08 17:13:11 -0400526 pr_info("RMRR base: %#016Lx end: %#016Lx\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -0700527 (unsigned long long)rmrr->base_address,
528 (unsigned long long)rmrr->end_address);
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700529 break;
Bob Moore83118b02014-07-30 12:21:00 +0800530 case ACPI_DMAR_TYPE_ROOT_ATS:
Yu Zhaoaa5d2b52009-05-18 13:51:34 +0800531 atsr = container_of(header, struct acpi_dmar_atsr, header);
Donald Dutilee9071b02012-06-08 17:13:11 -0400532 pr_info("ATSR flags: %#x\n", atsr->flags);
Yu Zhaoaa5d2b52009-05-18 13:51:34 +0800533 break;
Bob Moore83118b02014-07-30 12:21:00 +0800534 case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
Roland Dreier17b60972009-09-24 12:14:00 -0700535 rhsa = container_of(header, struct acpi_dmar_rhsa, header);
Donald Dutilee9071b02012-06-08 17:13:11 -0400536 pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
Roland Dreier17b60972009-09-24 12:14:00 -0700537 (unsigned long long)rhsa->base_address,
538 rhsa->proximity_domain);
539 break;
Bob Moore83118b02014-07-30 12:21:00 +0800540 case ACPI_DMAR_TYPE_NAMESPACE:
David Woodhousee625b4a2014-03-07 14:34:38 +0000541 /* We don't print this here because we need to sanity-check
542 it first. So print it in dmar_parse_one_andd() instead. */
543 break;
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700544 }
545}
546
Yinghai Luf6dd5c32008-09-03 16:58:32 -0700547/**
548 * dmar_table_detect - checks to see if the platform supports DMAR devices
549 */
550static int __init dmar_table_detect(void)
551{
552 acpi_status status = AE_OK;
553
 554 /* if we can find the DMAR table, then there are DMAR devices */
Yinghai Lu8e1568f2009-02-11 01:06:59 -0800555 status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
556 (struct acpi_table_header **)&dmar_tbl,
557 &dmar_tbl_size);
Yinghai Luf6dd5c32008-09-03 16:58:32 -0700558
559 if (ACPI_SUCCESS(status) && !dmar_tbl) {
Donald Dutilee9071b02012-06-08 17:13:11 -0400560 pr_warn("Unable to map DMAR\n");
Yinghai Luf6dd5c32008-09-03 16:58:32 -0700561 status = AE_NOT_FOUND;
562 }
563
564 return (ACPI_SUCCESS(status) ? 1 : 0);
565}
Suresh Siddhaaaa9d1d2008-07-10 11:16:38 -0700566
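/*
 * Walk the remapping structures of a DMAR table and dispatch each one
 * to the handler registered for its type in @cb.  A zero-length entry
 * terminates the walk so a malformed table cannot loop forever.
 */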
Jiang Liuc2a0b532014-11-09 22:47:56 +0800567static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
568 size_t len, struct dmar_res_callback *cb)
569{
570 int ret = 0;
571 struct acpi_dmar_header *iter, *next;
572 struct acpi_dmar_header *end = ((void *)start) + len;
573
574 for (iter = start; iter < end && ret == 0; iter = next) {
575 next = (void *)iter + iter->length;
576 if (iter->length == 0) {
577 /* Avoid looping forever on bad ACPI tables */
578 pr_debug(FW_BUG "Invalid 0-length structure\n");
579 break;
580 } else if (next > end) {
581 /* Avoid passing table end */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +0200582 pr_warn(FW_BUG "Record passes table end\n");
Jiang Liuc2a0b532014-11-09 22:47:56 +0800583 ret = -EINVAL;
584 break;
585 }
586
587 if (cb->print_entry)
588 dmar_table_print_dmar_entry(iter);
589
590 if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
591 /* continue for forward compatibility */
592 pr_debug("Unknown DMAR structure type %d\n",
593 iter->type);
594 } else if (cb->cb[iter->type]) {
595 ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
596 } else if (!cb->ignore_unhandled) {
597 pr_warn("No handler for DMAR structure type %d\n",
598 iter->type);
599 ret = -EINVAL;
600 }
601 }
602
603 return ret;
604}
605
606static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
607 struct dmar_res_callback *cb)
608{
609 return dmar_walk_remapping_entries((void *)(dmar + 1),
610 dmar->header.length - sizeof(*dmar), cb);
611}
612
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700613/**
614 * parse_dmar_table - parses the DMA reporting table
615 */
616static int __init
617parse_dmar_table(void)
618{
619 struct acpi_table_dmar *dmar;
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700620 int ret = 0;
Li, Zhen-Hua7cef3342013-05-20 15:57:32 +0800621 int drhd_count = 0;
Jiang Liuc2a0b532014-11-09 22:47:56 +0800622 struct dmar_res_callback cb = {
623 .print_entry = true,
624 .ignore_unhandled = true,
625 .arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
626 .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
627 .cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
628 .cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
629 .cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
630 .cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
631 };
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700632
Yinghai Luf6dd5c32008-09-03 16:58:32 -0700633 /*
 634 * Do it again; the earlier dmar_tbl mapping may have been set up
 635 * with a fixed map.
636 */
637 dmar_table_detect();
638
Joseph Cihulaa59b50e2009-06-30 19:31:10 -0700639 /*
640 * ACPI tables may not be DMA protected by tboot, so use DMAR copy
641 * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
642 */
643 dmar_tbl = tboot_get_dmar_table(dmar_tbl);
644
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700645 dmar = (struct acpi_table_dmar *)dmar_tbl;
646 if (!dmar)
647 return -ENODEV;
648
Fenghua Yu5b6985c2008-10-16 18:02:32 -0700649 if (dmar->width < PAGE_SHIFT - 1) {
Donald Dutilee9071b02012-06-08 17:13:11 -0400650 pr_warn("Invalid DMAR haw\n");
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700651 return -EINVAL;
652 }
653
Donald Dutilee9071b02012-06-08 17:13:11 -0400654 pr_info("Host address width %d\n", dmar->width + 1);
Jiang Liuc2a0b532014-11-09 22:47:56 +0800655 ret = dmar_walk_dmar_table(dmar, &cb);
656 if (ret == 0 && drhd_count == 0)
Li, Zhen-Hua7cef3342013-05-20 15:57:32 +0800657 pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
Jiang Liuc2a0b532014-11-09 22:47:56 +0800658
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700659 return ret;
660}
661
David Woodhouse832bd852014-03-07 15:08:36 +0000662static int dmar_pci_device_match(struct dmar_dev_scope devices[],
663 int cnt, struct pci_dev *dev)
Suresh Siddhae61d98d2008-07-10 11:16:35 -0700664{
665 int index;
David Woodhouse832bd852014-03-07 15:08:36 +0000666 struct device *tmp;
Suresh Siddhae61d98d2008-07-10 11:16:35 -0700667
668 while (dev) {
Jiang Liub683b232014-02-19 14:07:32 +0800669 for_each_active_dev_scope(devices, cnt, index, tmp)
David Woodhouse832bd852014-03-07 15:08:36 +0000670 if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
Suresh Siddhae61d98d2008-07-10 11:16:35 -0700671 return 1;
672
673 /* Check our parent */
674 dev = dev->bus->self;
675 }
676
677 return 0;
678}
679
680struct dmar_drhd_unit *
681dmar_find_matched_drhd_unit(struct pci_dev *dev)
682{
Jiang Liu0e242612014-02-19 14:07:34 +0800683 struct dmar_drhd_unit *dmaru;
Yu Zhao2e824f72008-12-22 16:54:58 +0800684 struct acpi_dmar_hardware_unit *drhd;
Suresh Siddhae61d98d2008-07-10 11:16:35 -0700685
Yinghaidda56542010-04-09 01:07:55 +0100686 dev = pci_physfn(dev);
687
Jiang Liu0e242612014-02-19 14:07:34 +0800688 rcu_read_lock();
Yijing Wang8b161f02013-10-31 17:25:16 +0800689 for_each_drhd_unit(dmaru) {
Yu Zhao2e824f72008-12-22 16:54:58 +0800690 drhd = container_of(dmaru->hdr,
691 struct acpi_dmar_hardware_unit,
692 header);
693
694 if (dmaru->include_all &&
695 drhd->segment == pci_domain_nr(dev->bus))
Jiang Liu0e242612014-02-19 14:07:34 +0800696 goto out;
Yu Zhao2e824f72008-12-22 16:54:58 +0800697
698 if (dmar_pci_device_match(dmaru->devices,
699 dmaru->devices_cnt, dev))
Jiang Liu0e242612014-02-19 14:07:34 +0800700 goto out;
Suresh Siddhae61d98d2008-07-10 11:16:35 -0700701 }
Jiang Liu0e242612014-02-19 14:07:34 +0800702 dmaru = NULL;
703out:
704 rcu_read_unlock();
Suresh Siddhae61d98d2008-07-10 11:16:35 -0700705
Jiang Liu0e242612014-02-19 14:07:34 +0800706 return dmaru;
Suresh Siddhae61d98d2008-07-10 11:16:35 -0700707}
708
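/*
 * Record an ACPI name-space device (declared by an ANDD entry) in the
 * device scope of the DRHD unit that lists its enumeration ID.
 */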
David Woodhouseed403562014-03-07 23:15:42 +0000709static void __init dmar_acpi_insert_dev_scope(u8 device_number,
710 struct acpi_device *adev)
711{
712 struct dmar_drhd_unit *dmaru;
713 struct acpi_dmar_hardware_unit *drhd;
714 struct acpi_dmar_device_scope *scope;
715 struct device *tmp;
716 int i;
717 struct acpi_dmar_pci_path *path;
718
719 for_each_drhd_unit(dmaru) {
720 drhd = container_of(dmaru->hdr,
721 struct acpi_dmar_hardware_unit,
722 header);
723
724 for (scope = (void *)(drhd + 1);
725 (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
726 scope = ((void *)scope) + scope->length) {
Bob Moore83118b02014-07-30 12:21:00 +0800727 if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
David Woodhouseed403562014-03-07 23:15:42 +0000728 continue;
729 if (scope->enumeration_id != device_number)
730 continue;
731
732 path = (void *)(scope + 1);
733 pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
734 dev_name(&adev->dev), dmaru->reg_base_addr,
735 scope->bus, path->device, path->function);
736 for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
737 if (tmp == NULL) {
738 dmaru->devices[i].bus = scope->bus;
739 dmaru->devices[i].devfn = PCI_DEVFN(path->device,
740 path->function);
741 rcu_assign_pointer(dmaru->devices[i].dev,
742 get_device(&adev->dev));
743 return;
744 }
745 BUG_ON(i >= dmaru->devices_cnt);
746 }
747 }
748 pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
749 device_number, dev_name(&adev->dev));
750}
751
752static int __init dmar_acpi_dev_scope_init(void)
753{
Joerg Roedel11f1a772014-03-25 20:16:40 +0100754 struct acpi_dmar_andd *andd;
755
756 if (dmar_tbl == NULL)
757 return -ENODEV;
758
David Woodhouse7713ec02014-04-01 14:58:36 +0100759 for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
760 ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
761 andd = ((void *)andd) + andd->header.length) {
Bob Moore83118b02014-07-30 12:21:00 +0800762 if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
David Woodhouseed403562014-03-07 23:15:42 +0000763 acpi_handle h;
764 struct acpi_device *adev;
765
766 if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
Bob Moore83118b02014-07-30 12:21:00 +0800767 andd->device_name,
David Woodhouseed403562014-03-07 23:15:42 +0000768 &h))) {
769 pr_err("Failed to find handle for ACPI object %s\n",
Bob Moore83118b02014-07-30 12:21:00 +0800770 andd->device_name);
David Woodhouseed403562014-03-07 23:15:42 +0000771 continue;
772 }
Joerg Roedelc0df9752014-08-21 23:06:48 +0200773 if (acpi_bus_get_device(h, &adev)) {
David Woodhouseed403562014-03-07 23:15:42 +0000774 pr_err("Failed to get device for ACPI object %s\n",
Bob Moore83118b02014-07-30 12:21:00 +0800775 andd->device_name);
David Woodhouseed403562014-03-07 23:15:42 +0000776 continue;
777 }
778 dmar_acpi_insert_dev_scope(andd->device_number, adev);
779 }
David Woodhouseed403562014-03-07 23:15:42 +0000780 }
781 return 0;
782}
783
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700784int __init dmar_dev_scope_init(void)
785{
Jiang Liu2e455282014-02-19 14:07:36 +0800786 struct pci_dev *dev = NULL;
787 struct dmar_pci_notify_info *info;
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700788
Jiang Liu2e455282014-02-19 14:07:36 +0800789 if (dmar_dev_scope_status != 1)
790 return dmar_dev_scope_status;
Suresh Siddhac2c72862011-08-23 17:05:19 -0700791
Jiang Liu2e455282014-02-19 14:07:36 +0800792 if (list_empty(&dmar_drhd_units)) {
793 dmar_dev_scope_status = -ENODEV;
794 } else {
795 dmar_dev_scope_status = 0;
Suresh Siddha318fe7d2011-08-23 17:05:20 -0700796
David Woodhouse63b42622014-03-28 11:28:40 +0000797 dmar_acpi_dev_scope_init();
798
Jiang Liu2e455282014-02-19 14:07:36 +0800799 for_each_pci_dev(dev) {
800 if (dev->is_virtfn)
801 continue;
802
803 info = dmar_alloc_pci_notify_info(dev,
804 BUS_NOTIFY_ADD_DEVICE);
805 if (!info) {
806 return dmar_dev_scope_status;
807 } else {
808 dmar_pci_bus_add_dev(info);
809 dmar_free_pci_notify_info(info);
810 }
811 }
812
813 bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700814 }
815
Jiang Liu2e455282014-02-19 14:07:36 +0800816 return dmar_dev_scope_status;
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700817}
818
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700819
820int __init dmar_table_init(void)
821{
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700822 static int dmar_table_initialized;
Fenghua Yu093f87d2007-11-21 15:07:14 -0800823 int ret;
824
Jiang Liucc053012014-01-06 14:18:24 +0800825 if (dmar_table_initialized == 0) {
826 ret = parse_dmar_table();
827 if (ret < 0) {
828 if (ret != -ENODEV)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +0200829 pr_info("Parse DMAR table failure.\n");
Jiang Liucc053012014-01-06 14:18:24 +0800830 } else if (list_empty(&dmar_drhd_units)) {
831 pr_info("No DMAR devices found\n");
832 ret = -ENODEV;
833 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700834
Jiang Liucc053012014-01-06 14:18:24 +0800835 if (ret < 0)
836 dmar_table_initialized = ret;
837 else
838 dmar_table_initialized = 1;
Fenghua Yu093f87d2007-11-21 15:07:14 -0800839 }
840
Jiang Liucc053012014-01-06 14:18:24 +0800841 return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700842}
843
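/*
 * Warn (once) about firmware that reports a bogus DMAR unit and taint
 * the kernel with TAINT_FIRMWARE_WORKAROUND.
 */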
Ben Hutchings3a8663e2010-04-03 19:37:23 +0100844static void warn_invalid_dmar(u64 addr, const char *message)
845{
Hans de Goede5145afc2020-03-09 15:01:37 +0100846 pr_warn_once(FW_BUG
Ben Hutchingsfd0c8892010-04-03 19:38:43 +0100847 "Your BIOS is broken; DMAR reported at address %llx%s!\n"
848 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
849 addr, message,
850 dmi_get_system_info(DMI_BIOS_VENDOR),
851 dmi_get_system_info(DMI_BIOS_VERSION),
852 dmi_get_system_info(DMI_PRODUCT_VERSION));
Hans de Goede5145afc2020-03-09 15:01:37 +0100853 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
Ben Hutchings3a8663e2010-04-03 19:37:23 +0100854}
David Woodhouse6ecbf012009-12-02 09:20:27 +0000855
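/*
 * Sanity-check one DRHD entry: map its register page and make sure the
 * capability registers do not read back as all ones.
 */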
Jiang Liuc2a0b532014-11-09 22:47:56 +0800856static int __ref
857dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
David Woodhouse86cf8982009-11-09 22:15:15 +0000858{
David Woodhouse86cf8982009-11-09 22:15:15 +0000859 struct acpi_dmar_hardware_unit *drhd;
Jiang Liuc2a0b532014-11-09 22:47:56 +0800860 void __iomem *addr;
861 u64 cap, ecap;
David Woodhouse86cf8982009-11-09 22:15:15 +0000862
Jiang Liuc2a0b532014-11-09 22:47:56 +0800863 drhd = (void *)entry;
864 if (!drhd->address) {
865 warn_invalid_dmar(0, "");
866 return -EINVAL;
David Woodhouse86cf8982009-11-09 22:15:15 +0000867 }
Chris Wright2c992202009-12-02 09:17:13 +0000868
Jiang Liu6b197242014-11-09 22:47:58 +0800869 if (arg)
870 addr = ioremap(drhd->address, VTD_PAGE_SIZE);
871 else
872 addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
Jiang Liuc2a0b532014-11-09 22:47:56 +0800873 if (!addr) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +0200874 pr_warn("Can't validate DRHD address: %llx\n", drhd->address);
Jiang Liuc2a0b532014-11-09 22:47:56 +0800875 return -EINVAL;
876 }
Jiang Liu6b197242014-11-09 22:47:58 +0800877
Jiang Liuc2a0b532014-11-09 22:47:56 +0800878 cap = dmar_readq(addr + DMAR_CAP_REG);
879 ecap = dmar_readq(addr + DMAR_ECAP_REG);
Jiang Liu6b197242014-11-09 22:47:58 +0800880
881 if (arg)
882 iounmap(addr);
883 else
884 early_iounmap(addr, VTD_PAGE_SIZE);
Jiang Liuc2a0b532014-11-09 22:47:56 +0800885
886 if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
887 warn_invalid_dmar(drhd->address, " returns all ones");
888 return -EINVAL;
889 }
890
Chris Wright2c992202009-12-02 09:17:13 +0000891 return 0;
David Woodhouse86cf8982009-11-09 22:15:15 +0000892}
893
Konrad Rzeszutek Wilk480125b2010-08-26 13:57:57 -0400894int __init detect_intel_iommu(void)
Suresh Siddha2ae21012008-07-10 11:16:43 -0700895{
896 int ret;
Jiang Liuc2a0b532014-11-09 22:47:56 +0800897 struct dmar_res_callback validate_drhd_cb = {
898 .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
899 .ignore_unhandled = true,
900 };
Suresh Siddha2ae21012008-07-10 11:16:43 -0700901
Jiang Liu3a5670e2014-02-19 14:07:33 +0800902 down_write(&dmar_global_lock);
Yinghai Luf6dd5c32008-09-03 16:58:32 -0700903 ret = dmar_table_detect();
David Woodhouse86cf8982009-11-09 22:15:15 +0000904 if (ret)
Jiang Liuc2a0b532014-11-09 22:47:56 +0800905 ret = !dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
906 &validate_drhd_cb);
907 if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
908 iommu_detected = 1;
909 /* Make sure ACS will be enabled */
910 pci_request_acs();
911 }
Suresh Siddhaf5d1b972011-08-23 17:05:22 -0700912
FUJITA Tomonori9d5ce732009-11-10 19:46:16 +0900913#ifdef CONFIG_X86
Jiang Liuc2a0b532014-11-09 22:47:56 +0800914 if (ret)
915 x86_init.iommu.iommu_init = intel_iommu_init;
FUJITA Tomonori9d5ce732009-11-10 19:46:16 +0900916#endif
Jiang Liuc2a0b532014-11-09 22:47:56 +0800917
Jiang Liub707cb02014-01-06 14:18:26 +0800918 early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
Yinghai Luf6dd5c32008-09-03 16:58:32 -0700919 dmar_tbl = NULL;
Jiang Liu3a5670e2014-02-19 14:07:33 +0800920 up_write(&dmar_global_lock);
Konrad Rzeszutek Wilk480125b2010-08-26 13:57:57 -0400921
Konrad Rzeszutek Wilk4db77ff2010-08-26 13:58:04 -0400922 return ret ? 1 : -ENODEV;
Suresh Siddha2ae21012008-07-10 11:16:43 -0700923}
924
925
Donald Dutile6f5cf522012-06-04 17:29:02 -0400926static void unmap_iommu(struct intel_iommu *iommu)
927{
928 iounmap(iommu->reg);
929 release_mem_region(iommu->reg_phys, iommu->reg_size);
930}
931
932/**
933 * map_iommu: map the iommu's registers
934 * @iommu: the iommu to map
 935 * @phys_addr: the physical address of the base register
Donald Dutilee9071b02012-06-08 17:13:11 -0400936 *
Donald Dutile6f5cf522012-06-04 17:29:02 -0400937 * Memory map the iommu's registers. Start w/ a single page, and
Donald Dutilee9071b02012-06-08 17:13:11 -0400938 * possibly expand if that turns out to be insufficient.
Donald Dutile6f5cf522012-06-04 17:29:02 -0400939 */
940static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
941{
942 int map_size, err=0;
943
944 iommu->reg_phys = phys_addr;
945 iommu->reg_size = VTD_PAGE_SIZE;
946
947 if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +0200948 pr_err("Can't reserve memory\n");
Donald Dutile6f5cf522012-06-04 17:29:02 -0400949 err = -EBUSY;
950 goto out;
951 }
952
953 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
954 if (!iommu->reg) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +0200955 pr_err("Can't map the region\n");
Donald Dutile6f5cf522012-06-04 17:29:02 -0400956 err = -ENOMEM;
957 goto release;
958 }
959
960 iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
961 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
962
963 if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
964 err = -EINVAL;
965 warn_invalid_dmar(phys_addr, " returns all ones");
966 goto unmap;
967 }
968
969 /* the registers might be more than one page */
970 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
971 cap_max_fault_reg_offset(iommu->cap));
972 map_size = VTD_PAGE_ALIGN(map_size);
973 if (map_size > iommu->reg_size) {
974 iounmap(iommu->reg);
975 release_mem_region(iommu->reg_phys, iommu->reg_size);
976 iommu->reg_size = map_size;
977 if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
978 iommu->name)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +0200979 pr_err("Can't reserve memory\n");
Donald Dutile6f5cf522012-06-04 17:29:02 -0400980 err = -EBUSY;
981 goto out;
982 }
983 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
984 if (!iommu->reg) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +0200985 pr_err("Can't map the region\n");
Donald Dutile6f5cf522012-06-04 17:29:02 -0400986 err = -ENOMEM;
987 goto release;
988 }
989 }
990 err = 0;
991 goto out;
992
993unmap:
994 iounmap(iommu->reg);
995release:
996 release_mem_region(iommu->reg_phys, iommu->reg_size);
997out:
998 return err;
999}
1000
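/*
 * Allocate a unique sequence number for an IOMMU and derive its
 * "dmar%d" name from it.
 */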
Jiang Liu78d8e702014-11-09 22:47:57 +08001001static int dmar_alloc_seq_id(struct intel_iommu *iommu)
1002{
1003 iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
1004 DMAR_UNITS_SUPPORTED);
1005 if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
1006 iommu->seq_id = -1;
1007 } else {
1008 set_bit(iommu->seq_id, dmar_seq_ids);
1009 sprintf(iommu->name, "dmar%d", iommu->seq_id);
1010 }
1011
1012 return iommu->seq_id;
1013}
1014
1015static void dmar_free_seq_id(struct intel_iommu *iommu)
1016{
1017 if (iommu->seq_id >= 0) {
1018 clear_bit(iommu->seq_id, dmar_seq_ids);
1019 iommu->seq_id = -1;
1020 }
1021}
1022
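/*
 * alloc_iommu - set up the intel_iommu for @drhd: map its registers,
 * read the capabilities, work out the AGAW values and, when enabled,
 * register the unit with the IOMMU core.
 */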
Jiang Liu694835d2014-01-06 14:18:16 +08001023static int alloc_iommu(struct dmar_drhd_unit *drhd)
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001024{
Suresh Siddhac42d9f32008-07-10 11:16:36 -07001025 struct intel_iommu *iommu;
Takao Indoh3a93c842013-04-23 17:35:03 +09001026 u32 ver, sts;
David Woodhouse9bae48c2021-02-02 01:07:06 +01001027 int agaw = -1;
1028 int msagaw = -1;
Donald Dutile6f5cf522012-06-04 17:29:02 -04001029 int err;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07001030
David Woodhouse6ecbf012009-12-02 09:20:27 +00001031 if (!drhd->reg_base_addr) {
Ben Hutchings3a8663e2010-04-03 19:37:23 +01001032 warn_invalid_dmar(0, "");
David Woodhouse6ecbf012009-12-02 09:20:27 +00001033 return -EINVAL;
1034 }
1035
Suresh Siddhac42d9f32008-07-10 11:16:36 -07001036 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
1037 if (!iommu)
Suresh Siddha1886e8a2008-07-10 11:16:37 -07001038 return -ENOMEM;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07001039
Jiang Liu78d8e702014-11-09 22:47:57 +08001040 if (dmar_alloc_seq_id(iommu) < 0) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001041 pr_err("Failed to allocate seq_id\n");
Jiang Liu78d8e702014-11-09 22:47:57 +08001042 err = -ENOSPC;
1043 goto error;
1044 }
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001045
Donald Dutile6f5cf522012-06-04 17:29:02 -04001046 err = map_iommu(iommu, drhd->reg_base_addr);
1047 if (err) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001048 pr_err("Failed to map %s\n", iommu->name);
Jiang Liu78d8e702014-11-09 22:47:57 +08001049 goto error_free_seq_id;
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001050 }
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001051
Donald Dutile6f5cf522012-06-04 17:29:02 -04001052 err = -EINVAL;
David Woodhouse9bae48c2021-02-02 01:07:06 +01001053 if (cap_sagaw(iommu->cap) == 0) {
1054 pr_info("%s: No supported address widths. Not attempting DMA translation.\n",
1055 iommu->name);
1056 drhd->ignored = 1;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001057 }
David Woodhouse9bae48c2021-02-02 01:07:06 +01001058
1059 if (!drhd->ignored) {
1060 agaw = iommu_calculate_agaw(iommu);
1061 if (agaw < 0) {
1062 pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
1063 iommu->seq_id);
1064 drhd->ignored = 1;
1065 }
1066 }
1067 if (!drhd->ignored) {
1068 msagaw = iommu_calculate_max_sagaw(iommu);
1069 if (msagaw < 0) {
1070 pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
1071 iommu->seq_id);
1072 drhd->ignored = 1;
1073 agaw = -1;
1074 }
Weidong Han1b573682008-12-08 15:34:06 +08001075 }
1076 iommu->agaw = agaw;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001077 iommu->msagaw = msagaw;
David Woodhouse67ccac42014-03-09 13:49:45 -07001078 iommu->segment = drhd->segment;
Weidong Han1b573682008-12-08 15:34:06 +08001079
Suresh Siddhaee34b322009-10-02 11:01:21 -07001080 iommu->node = -1;
1081
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001082 ver = readl(iommu->reg + DMAR_VER_REG);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001083 pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
1084 iommu->name,
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001085 (unsigned long long)drhd->reg_base_addr,
1086 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
1087 (unsigned long long)iommu->cap,
1088 (unsigned long long)iommu->ecap);
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001089
Takao Indoh3a93c842013-04-23 17:35:03 +09001090 /* Reflect status in gcmd */
1091 sts = readl(iommu->reg + DMAR_GSTS_REG);
1092 if (sts & DMA_GSTS_IRES)
1093 iommu->gcmd |= DMA_GCMD_IRE;
1094 if (sts & DMA_GSTS_TES)
1095 iommu->gcmd |= DMA_GCMD_TE;
1096 if (sts & DMA_GSTS_QIES)
1097 iommu->gcmd |= DMA_GCMD_QIE;
1098
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001099 raw_spin_lock_init(&iommu->register_lock);
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001100
David Woodhouse9bae48c2021-02-02 01:07:06 +01001101 if (intel_iommu_enabled && !drhd->ignored) {
Alex Williamsona5459cf2014-06-12 16:12:31 -06001102 iommu->iommu_dev = iommu_device_create(NULL, iommu,
1103 intel_iommu_groups,
Kees Cook2439d4a2015-07-24 16:27:57 -07001104 "%s", iommu->name);
Alex Williamsona5459cf2014-06-12 16:12:31 -06001105
Joerg Roedelbc847452016-01-07 12:16:51 +01001106 if (IS_ERR(iommu->iommu_dev)) {
1107 err = PTR_ERR(iommu->iommu_dev);
1108 goto err_unmap;
1109 }
Nicholas Krause59203372016-01-04 18:27:57 -05001110 }
1111
Joerg Roedelbc847452016-01-07 12:16:51 +01001112 drhd->iommu = iommu;
Bartosz Golaszewski4db445d2021-02-02 01:07:07 +01001113 iommu->drhd = drhd;
Joerg Roedelbc847452016-01-07 12:16:51 +01001114
Suresh Siddha1886e8a2008-07-10 11:16:37 -07001115 return 0;
David Woodhouse08155652009-08-04 09:17:20 +01001116
Jiang Liu78d8e702014-11-09 22:47:57 +08001117err_unmap:
Donald Dutile6f5cf522012-06-04 17:29:02 -04001118 unmap_iommu(iommu);
Jiang Liu78d8e702014-11-09 22:47:57 +08001119error_free_seq_id:
1120 dmar_free_seq_id(iommu);
1121error:
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001122 kfree(iommu);
Donald Dutile6f5cf522012-06-04 17:29:02 -04001123 return err;
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001124}
1125
Jiang Liua868e6b2014-01-06 14:18:20 +08001126static void free_iommu(struct intel_iommu *iommu)
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001127{
Bartosz Golaszewski4db445d2021-02-02 01:07:07 +01001128 if (intel_iommu_enabled && !iommu->drhd->ignored)
David Woodhouse9bae48c2021-02-02 01:07:06 +01001129 iommu_device_destroy(iommu->iommu_dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06001130
Jiang Liua868e6b2014-01-06 14:18:20 +08001131 if (iommu->irq) {
David Woodhouse12082252015-10-07 15:37:03 +01001132 if (iommu->pr_irq) {
1133 free_irq(iommu->pr_irq, iommu);
1134 dmar_free_hwirq(iommu->pr_irq);
1135 iommu->pr_irq = 0;
1136 }
Jiang Liua868e6b2014-01-06 14:18:20 +08001137 free_irq(iommu->irq, iommu);
Thomas Gleixnera553b142014-05-07 15:44:11 +00001138 dmar_free_hwirq(iommu->irq);
Jiang Liu34742db2015-04-13 14:11:41 +08001139 iommu->irq = 0;
Jiang Liua868e6b2014-01-06 14:18:20 +08001140 }
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001141
Jiang Liua84da702014-01-06 14:18:23 +08001142 if (iommu->qi) {
1143 free_page((unsigned long)iommu->qi->desc);
1144 kfree(iommu->qi->desc_status);
1145 kfree(iommu->qi);
1146 }
1147
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001148 if (iommu->reg)
Donald Dutile6f5cf522012-06-04 17:29:02 -04001149 unmap_iommu(iommu);
1150
Jiang Liu78d8e702014-11-09 22:47:57 +08001151 dmar_free_seq_id(iommu);
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001152 kfree(iommu);
1153}
Suresh Siddhafe962e92008-07-10 11:16:42 -07001154
1155/*
1156 * Reclaim all the submitted descriptors which have completed its work.
1157 */
1158static inline void reclaim_free_desc(struct q_inval *qi)
1159{
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001160 while (qi->desc_status[qi->free_tail] == QI_DONE ||
1161 qi->desc_status[qi->free_tail] == QI_ABORT) {
Suresh Siddhafe962e92008-07-10 11:16:42 -07001162 qi->desc_status[qi->free_tail] = QI_FREE;
1163 qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
1164 qi->free_cnt++;
1165 }
1166}
1167
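/*
 * Check the fault status register for queued-invalidation errors
 * (IQE/ITE/ICE) and recover where possible.  Returns -EAGAIN when the
 * wait descriptor was aborted and the caller should resubmit.
 */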
Yu Zhao704126a2009-01-04 16:28:52 +08001168static int qi_check_fault(struct intel_iommu *iommu, int index)
1169{
1170 u32 fault;
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001171 int head, tail;
Yu Zhao704126a2009-01-04 16:28:52 +08001172 struct q_inval *qi = iommu->qi;
1173 int wait_index = (index + 1) % QI_LENGTH;
1174
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001175 if (qi->desc_status[wait_index] == QI_ABORT)
1176 return -EAGAIN;
1177
Yu Zhao704126a2009-01-04 16:28:52 +08001178 fault = readl(iommu->reg + DMAR_FSTS_REG);
1179
1180 /*
1181 * If IQE happens, the head points to the descriptor associated
1182 * with the error. No new descriptors are fetched until the IQE
1183 * is cleared.
1184 */
1185 if (fault & DMA_FSTS_IQE) {
1186 head = readl(iommu->reg + DMAR_IQH_REG);
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001187 if ((head >> DMAR_IQ_SHIFT) == index) {
Donald Dutilebf947fcb2012-06-04 17:29:01 -04001188 pr_err("VT-d detected invalid descriptor: "
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001189 "low=%llx, high=%llx\n",
1190 (unsigned long long)qi->desc[index].low,
1191 (unsigned long long)qi->desc[index].high);
Yu Zhao704126a2009-01-04 16:28:52 +08001192 memcpy(&qi->desc[index], &qi->desc[wait_index],
1193 sizeof(struct qi_desc));
Yu Zhao704126a2009-01-04 16:28:52 +08001194 writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
1195 return -EINVAL;
1196 }
1197 }
1198
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001199 /*
1200 * If ITE happens, all pending wait_desc commands are aborted.
1201 * No new descriptors are fetched until the ITE is cleared.
1202 */
1203 if (fault & DMA_FSTS_ITE) {
1204 head = readl(iommu->reg + DMAR_IQH_REG);
1205 head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
1206 head |= 1;
1207 tail = readl(iommu->reg + DMAR_IQT_REG);
1208 tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
1209
1210 writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
1211
1212 do {
1213 if (qi->desc_status[head] == QI_IN_USE)
1214 qi->desc_status[head] = QI_ABORT;
1215 head = (head - 2 + QI_LENGTH) % QI_LENGTH;
1216 } while (head != tail);
1217
1218 if (qi->desc_status[wait_index] == QI_ABORT)
1219 return -EAGAIN;
1220 }
1221
1222 if (fault & DMA_FSTS_ICE)
1223 writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
1224
Yu Zhao704126a2009-01-04 16:28:52 +08001225 return 0;
1226}
1227
Suresh Siddhafe962e92008-07-10 11:16:42 -07001228/*
1229 * Submit the queued invalidation descriptor to the remapping
1230 * hardware unit and wait for its completion.
1231 */
Yu Zhao704126a2009-01-04 16:28:52 +08001232int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
Suresh Siddhafe962e92008-07-10 11:16:42 -07001233{
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001234 int rc;
Suresh Siddhafe962e92008-07-10 11:16:42 -07001235 struct q_inval *qi = iommu->qi;
1236 struct qi_desc *hw, wait_desc;
1237 int wait_index, index;
1238 unsigned long flags;
1239
1240 if (!qi)
Yu Zhao704126a2009-01-04 16:28:52 +08001241 return 0;
Suresh Siddhafe962e92008-07-10 11:16:42 -07001242
1243 hw = qi->desc;
1244
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001245restart:
1246 rc = 0;
1247
Thomas Gleixner3b8f4042011-07-19 17:02:07 +02001248 raw_spin_lock_irqsave(&qi->q_lock, flags);
Suresh Siddhafe962e92008-07-10 11:16:42 -07001249 while (qi->free_cnt < 3) {
Thomas Gleixner3b8f4042011-07-19 17:02:07 +02001250 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
Suresh Siddhafe962e92008-07-10 11:16:42 -07001251 cpu_relax();
Thomas Gleixner3b8f4042011-07-19 17:02:07 +02001252 raw_spin_lock_irqsave(&qi->q_lock, flags);
Suresh Siddhafe962e92008-07-10 11:16:42 -07001253 }
1254
1255 index = qi->free_head;
1256 wait_index = (index + 1) % QI_LENGTH;
1257
1258 qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;
1259
1260 hw[index] = *desc;
1261
Yu Zhao704126a2009-01-04 16:28:52 +08001262 wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
1263 QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
Suresh Siddhafe962e92008-07-10 11:16:42 -07001264 wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
1265
1266 hw[wait_index] = wait_desc;
1267
Suresh Siddhafe962e92008-07-10 11:16:42 -07001268 qi->free_head = (qi->free_head + 2) % QI_LENGTH;
1269 qi->free_cnt -= 2;
1270
Suresh Siddhafe962e92008-07-10 11:16:42 -07001271 /*
1272 * update the HW tail register indicating the presence of
1273 * new descriptors.
1274 */
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001275 writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);
Suresh Siddhafe962e92008-07-10 11:16:42 -07001276
1277 while (qi->desc_status[wait_index] != QI_DONE) {
Suresh Siddhaf05810c2008-10-16 16:31:54 -07001278 /*
 1279 * We will leave the interrupts disabled, to prevent the interrupt
 1280 * context from queueing another cmd while a cmd is already submitted
1281 * and waiting for completion on this cpu. This is to avoid
1282 * a deadlock where the interrupt context can wait indefinitely
1283 * for free slots in the queue.
1284 */
Yu Zhao704126a2009-01-04 16:28:52 +08001285 rc = qi_check_fault(iommu, index);
1286 if (rc)
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001287 break;
Yu Zhao704126a2009-01-04 16:28:52 +08001288
Thomas Gleixner3b8f4042011-07-19 17:02:07 +02001289 raw_spin_unlock(&qi->q_lock);
Suresh Siddhafe962e92008-07-10 11:16:42 -07001290 cpu_relax();
Thomas Gleixner3b8f4042011-07-19 17:02:07 +02001291 raw_spin_lock(&qi->q_lock);
Suresh Siddhafe962e92008-07-10 11:16:42 -07001292 }
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001293
1294 qi->desc_status[index] = QI_DONE;
Suresh Siddhafe962e92008-07-10 11:16:42 -07001295
1296 reclaim_free_desc(qi);
Thomas Gleixner3b8f4042011-07-19 17:02:07 +02001297 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
Yu Zhao704126a2009-01-04 16:28:52 +08001298
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001299 if (rc == -EAGAIN)
1300 goto restart;
1301
Yu Zhao704126a2009-01-04 16:28:52 +08001302 return rc;
Suresh Siddhafe962e92008-07-10 11:16:42 -07001303}
1304
1305/*
1306 * Flush the global interrupt entry cache.
1307 */
1308void qi_global_iec(struct intel_iommu *iommu)
1309{
1310 struct qi_desc desc;
1311
1312 desc.low = QI_IEC_TYPE;
1313 desc.high = 0;
1314
Yu Zhao704126a2009-01-04 16:28:52 +08001315 /* should never fail */
Suresh Siddhafe962e92008-07-10 11:16:42 -07001316 qi_submit_sync(&desc, iommu);
1317}
1318
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001319void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
1320 u64 type)
Youquan Song3481f212008-10-16 16:31:55 -07001321{
Youquan Song3481f212008-10-16 16:31:55 -07001322 struct qi_desc desc;
1323
Youquan Song3481f212008-10-16 16:31:55 -07001324 desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
1325 | QI_CC_GRAN(type) | QI_CC_TYPE;
1326 desc.high = 0;
1327
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001328 qi_submit_sync(&desc, iommu);
Youquan Song3481f212008-10-16 16:31:55 -07001329}
1330
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001331void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
1332 unsigned int size_order, u64 type)
Youquan Song3481f212008-10-16 16:31:55 -07001333{
1334 u8 dw = 0, dr = 0;
1335
1336 struct qi_desc desc;
1337 int ih = 0;
1338
Youquan Song3481f212008-10-16 16:31:55 -07001339 if (cap_write_drain(iommu->cap))
1340 dw = 1;
1341
1342 if (cap_read_drain(iommu->cap))
1343 dr = 1;
1344
1345 desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
1346 | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
1347 desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
1348 | QI_IOTLB_AM(size_order);
1349
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001350 qi_submit_sync(&desc, iommu);
Youquan Song3481f212008-10-16 16:31:55 -07001351}
1352
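/*
 * Illustrative sketch, not part of the original file: a page-selective
 * IOTLB flush of a 2^4-page region for domain @did. It assumes the
 * DMA_TLB_PSI_FLUSH granularity constant from linux/intel-iommu.h and a
 * caller-supplied IOVA; the helper name is made up.
 */
static void __maybe_unused example_qi_flush_range(struct intel_iommu *iommu,
						  u16 did, u64 iova)
{
	unsigned int size_order = 4;	/* invalidate 2^4 = 16 pages */

	/* The address must be aligned to the size being invalidated. */
	iova &= ~((1ULL << (VTD_PAGE_SHIFT + size_order)) - 1);
	qi_flush_iotlb(iommu, did, iova, size_order, DMA_TLB_PSI_FLUSH);
}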
Jacob Panb68377c2018-06-07 09:57:00 -07001353void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
1354 u16 qdep, u64 addr, unsigned mask)
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001355{
1356 struct qi_desc desc;
1357
1358 if (mask) {
1359 BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
1360 addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
1361 desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
1362 } else
1363 desc.high = QI_DEV_IOTLB_ADDR(addr);
1364
1365 if (qdep >= QI_DEV_IOTLB_MAX_INVS)
1366 qdep = 0;
1367
1368 desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
Jacob Panb68377c2018-06-07 09:57:00 -07001369 QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001370
1371 qi_submit_sync(&desc, iommu);
1372}
1373
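/*
 * Illustrative sketch, not part of the original file: invalidating eight
 * pages from an ATS-capable device's IOTLB. The mask argument is the
 * log2 of the number of pages, so mask = 3 covers 2^3 pages; sid, pfsid
 * and qdep would normally come from the device's PCIe ATS state and are
 * plain parameters here. The helper name is made up.
 */
static void __maybe_unused example_qi_flush_dev_range(struct intel_iommu *iommu,
						       u16 sid, u16 pfsid,
						       u16 qdep, u64 iova)
{
	unsigned int mask = 3;	/* 2^3 = 8 pages */

	/* qi_flush_dev_iotlb() expects @addr aligned to the masked size. */
	iova &= ~((1ULL << (VTD_PAGE_SHIFT + mask)) - 1);
	qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, iova, mask);
}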
Suresh Siddhafe962e92008-07-10 11:16:42 -07001374/*
Suresh Siddhaeba67e52009-03-16 17:04:56 -07001375 * Disable Queued Invalidation interface.
1376 */
1377void dmar_disable_qi(struct intel_iommu *iommu)
1378{
1379 unsigned long flags;
1380 u32 sts;
1381 cycles_t start_time = get_cycles();
1382
1383 if (!ecap_qis(iommu->ecap))
1384 return;
1385
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001386 raw_spin_lock_irqsave(&iommu->register_lock, flags);
Suresh Siddhaeba67e52009-03-16 17:04:56 -07001387
CQ Tangfda3bec2016-01-13 21:15:03 +00001388 sts = readl(iommu->reg + DMAR_GSTS_REG);
Suresh Siddhaeba67e52009-03-16 17:04:56 -07001389 if (!(sts & DMA_GSTS_QIES))
1390 goto end;
1391
1392 /*
 1393	 * Give the HW a chance to complete the pending invalidation requests.
1394 */
1395 while ((readl(iommu->reg + DMAR_IQT_REG) !=
1396 readl(iommu->reg + DMAR_IQH_REG)) &&
1397 (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
1398 cpu_relax();
1399
1400 iommu->gcmd &= ~DMA_GCMD_QIE;
Suresh Siddhaeba67e52009-03-16 17:04:56 -07001401 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1402
1403 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
1404 !(sts & DMA_GSTS_QIES), sts);
1405end:
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001406 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
Suresh Siddhaeba67e52009-03-16 17:04:56 -07001407}
1408
1409/*
Fenghua Yueb4a52b2009-03-27 14:22:43 -07001410 * Enable queued invalidation.
1411 */
1412static void __dmar_enable_qi(struct intel_iommu *iommu)
1413{
David Woodhousec416daa2009-05-10 20:30:58 +01001414 u32 sts;
Fenghua Yueb4a52b2009-03-27 14:22:43 -07001415 unsigned long flags;
1416 struct q_inval *qi = iommu->qi;
1417
1418 qi->free_head = qi->free_tail = 0;
1419 qi->free_cnt = QI_LENGTH;
1420
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001421 raw_spin_lock_irqsave(&iommu->register_lock, flags);
Fenghua Yueb4a52b2009-03-27 14:22:43 -07001422
1423 /* write zero to the tail reg */
1424 writel(0, iommu->reg + DMAR_IQT_REG);
1425
1426 dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
1427
Fenghua Yueb4a52b2009-03-27 14:22:43 -07001428 iommu->gcmd |= DMA_GCMD_QIE;
David Woodhousec416daa2009-05-10 20:30:58 +01001429 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
Fenghua Yueb4a52b2009-03-27 14:22:43 -07001430
1431 /* Make sure hardware complete it */
1432 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
1433
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001434 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
Fenghua Yueb4a52b2009-03-27 14:22:43 -07001435}
1436
1437/*
Suresh Siddhafe962e92008-07-10 11:16:42 -07001438 * Enable the Queued Invalidation interface. This is required to support
 1439 * interrupt-remapping. It is also used by DMA-remapping, which replaces
 1440 * register-based IOTLB invalidation.
1441 */
1442int dmar_enable_qi(struct intel_iommu *iommu)
1443{
Suresh Siddhafe962e92008-07-10 11:16:42 -07001444 struct q_inval *qi;
Suresh Siddha751cafe2009-10-02 11:01:22 -07001445 struct page *desc_page;
Suresh Siddhafe962e92008-07-10 11:16:42 -07001446
1447 if (!ecap_qis(iommu->ecap))
1448 return -ENOENT;
1449
1450 /*
 1451	 * queued invalidation is already set up and enabled.
1452 */
1453 if (iommu->qi)
1454 return 0;
1455
Suresh Siddhafa4b57c2009-03-16 17:05:05 -07001456 iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
Suresh Siddhafe962e92008-07-10 11:16:42 -07001457 if (!iommu->qi)
1458 return -ENOMEM;
1459
1460 qi = iommu->qi;
1461
Suresh Siddha751cafe2009-10-02 11:01:22 -07001462
1463 desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
1464 if (!desc_page) {
Suresh Siddhafe962e92008-07-10 11:16:42 -07001465 kfree(qi);
Jiang Liub707cb02014-01-06 14:18:26 +08001466 iommu->qi = NULL;
Suresh Siddhafe962e92008-07-10 11:16:42 -07001467 return -ENOMEM;
1468 }
1469
Suresh Siddha751cafe2009-10-02 11:01:22 -07001470 qi->desc = page_address(desc_page);
1471
Hannes Reinecke37a40712013-02-06 09:50:10 +01001472 qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
Suresh Siddhafe962e92008-07-10 11:16:42 -07001473 if (!qi->desc_status) {
1474 free_page((unsigned long) qi->desc);
1475 kfree(qi);
Jiang Liub707cb02014-01-06 14:18:26 +08001476 iommu->qi = NULL;
Suresh Siddhafe962e92008-07-10 11:16:42 -07001477 return -ENOMEM;
1478 }
1479
Thomas Gleixner3b8f4042011-07-19 17:02:07 +02001480 raw_spin_lock_init(&qi->q_lock);
Suresh Siddhafe962e92008-07-10 11:16:42 -07001481
Fenghua Yueb4a52b2009-03-27 14:22:43 -07001482 __dmar_enable_qi(iommu);
Suresh Siddhafe962e92008-07-10 11:16:42 -07001483
1484 return 0;
1485}
Suresh Siddha0ac24912009-03-16 17:04:54 -07001486
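/*
 * Illustrative sketch, not part of the original file: the kind of
 * bring-up sequence the interrupt-remapping setup path uses -- enable
 * the invalidation queue, then flush the global interrupt entry cache
 * through it. The helper name is made up.
 */
static int __maybe_unused example_qi_bringup(struct intel_iommu *iommu)
{
	int ret;

	ret = dmar_enable_qi(iommu);
	if (ret)
		return ret;

	qi_global_iec(iommu);
	return 0;
}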
1487/* iommu interrupt handling. Most stuff are MSI-like. */
1488
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001489enum faulttype {
1490 DMA_REMAP,
1491 INTR_REMAP,
1492 UNKNOWN,
1493};
1494
1495static const char *dma_remap_fault_reasons[] =
Suresh Siddha0ac24912009-03-16 17:04:54 -07001496{
1497 "Software",
1498 "Present bit in root entry is clear",
1499 "Present bit in context entry is clear",
1500 "Invalid context entry",
1501 "Access beyond MGAW",
1502 "PTE Write access is not set",
1503 "PTE Read access is not set",
1504 "Next page table ptr is invalid",
1505 "Root table address invalid",
1506 "Context table ptr is invalid",
1507 "non-zero reserved fields in RTP",
1508 "non-zero reserved fields in CTP",
1509 "non-zero reserved fields in PTE",
Li, Zhen-Hua4ecccd92013-03-06 10:43:17 +08001510 "PCE for translation request specifies blocking",
Suresh Siddha0ac24912009-03-16 17:04:54 -07001511};
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001512
Suresh Siddha95a02e92012-03-30 11:47:07 -07001513static const char *irq_remap_fault_reasons[] =
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001514{
1515 "Detected reserved fields in the decoded interrupt-remapped request",
1516 "Interrupt index exceeded the interrupt-remapping table size",
1517 "Present field in the IRTE entry is clear",
1518 "Error accessing interrupt-remapping table pointed by IRTA_REG",
1519 "Detected reserved fields in the IRTE entry",
1520 "Blocked a compatibility format interrupt request",
1521 "Blocked an interrupt request due to source-id verification failure",
1522};
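/*
 * Interrupt-remapping fault reasons are numbered from 0x20 upwards,
 * which is why dmar_get_fault_reason() below subtracts 0x20 before
 * indexing this table.
 */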
1523
Rashika Kheria21004dc2013-12-18 12:01:46 +05301524static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
Suresh Siddha0ac24912009-03-16 17:04:54 -07001525{
Dan Carpenterfefe1ed2012-05-13 20:09:38 +03001526 if (fault_reason >= 0x20 && (fault_reason - 0x20 <
1527 ARRAY_SIZE(irq_remap_fault_reasons))) {
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001528 *fault_type = INTR_REMAP;
Suresh Siddha95a02e92012-03-30 11:47:07 -07001529 return irq_remap_fault_reasons[fault_reason - 0x20];
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001530 } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
1531 *fault_type = DMA_REMAP;
1532 return dma_remap_fault_reasons[fault_reason];
1533 } else {
1534 *fault_type = UNKNOWN;
Suresh Siddha0ac24912009-03-16 17:04:54 -07001535 return "Unknown";
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001536 }
Suresh Siddha0ac24912009-03-16 17:04:54 -07001537}
1538
David Woodhouse12082252015-10-07 15:37:03 +01001539
1540static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
1541{
1542 if (iommu->irq == irq)
1543 return DMAR_FECTL_REG;
1544 else if (iommu->pr_irq == irq)
1545 return DMAR_PECTL_REG;
1546 else
1547 BUG();
1548}
1549
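/*
 * The fault-event and page-request-event interrupt registers form a
 * fixed block: the control register returned by dmar_msi_reg() sits at
 * offset 0, with the data register at +4 and the address/upper-address
 * registers at +8 and +12. That layout is what dmar_msi_write() and
 * dmar_msi_read() below rely on.
 */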
Thomas Gleixner5c2837f2010-09-28 17:15:11 +02001550void dmar_msi_unmask(struct irq_data *data)
Suresh Siddha0ac24912009-03-16 17:04:54 -07001551{
Thomas Gleixnerdced35a2011-03-28 17:49:12 +02001552 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
David Woodhouse12082252015-10-07 15:37:03 +01001553 int reg = dmar_msi_reg(iommu, data->irq);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001554 unsigned long flag;
1555
1556 /* unmask it */
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001557 raw_spin_lock_irqsave(&iommu->register_lock, flag);
David Woodhouse12082252015-10-07 15:37:03 +01001558 writel(0, iommu->reg + reg);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001559 /* Read a reg to force flush the post write */
David Woodhouse12082252015-10-07 15:37:03 +01001560 readl(iommu->reg + reg);
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001561 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001562}
1563
Thomas Gleixner5c2837f2010-09-28 17:15:11 +02001564void dmar_msi_mask(struct irq_data *data)
Suresh Siddha0ac24912009-03-16 17:04:54 -07001565{
Thomas Gleixnerdced35a2011-03-28 17:49:12 +02001566 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
David Woodhouse12082252015-10-07 15:37:03 +01001567 int reg = dmar_msi_reg(iommu, data->irq);
1568 unsigned long flag;
Suresh Siddha0ac24912009-03-16 17:04:54 -07001569
1570 /* mask it */
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001571 raw_spin_lock_irqsave(&iommu->register_lock, flag);
David Woodhouse12082252015-10-07 15:37:03 +01001572 writel(DMA_FECTL_IM, iommu->reg + reg);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001573 /* Read a reg to force flush the post write */
David Woodhouse12082252015-10-07 15:37:03 +01001574 readl(iommu->reg + reg);
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001575 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001576}
1577
1578void dmar_msi_write(int irq, struct msi_msg *msg)
1579{
Thomas Gleixnerdced35a2011-03-28 17:49:12 +02001580 struct intel_iommu *iommu = irq_get_handler_data(irq);
David Woodhouse12082252015-10-07 15:37:03 +01001581 int reg = dmar_msi_reg(iommu, irq);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001582 unsigned long flag;
1583
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001584 raw_spin_lock_irqsave(&iommu->register_lock, flag);
David Woodhouse12082252015-10-07 15:37:03 +01001585 writel(msg->data, iommu->reg + reg + 4);
1586 writel(msg->address_lo, iommu->reg + reg + 8);
1587 writel(msg->address_hi, iommu->reg + reg + 12);
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001588 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001589}
1590
1591void dmar_msi_read(int irq, struct msi_msg *msg)
1592{
Thomas Gleixnerdced35a2011-03-28 17:49:12 +02001593 struct intel_iommu *iommu = irq_get_handler_data(irq);
David Woodhouse12082252015-10-07 15:37:03 +01001594 int reg = dmar_msi_reg(iommu, irq);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001595 unsigned long flag;
1596
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001597 raw_spin_lock_irqsave(&iommu->register_lock, flag);
David Woodhouse12082252015-10-07 15:37:03 +01001598 msg->data = readl(iommu->reg + reg + 4);
1599 msg->address_lo = readl(iommu->reg + reg + 8);
1600 msg->address_hi = readl(iommu->reg + reg + 12);
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001601 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001602}
1603
1604static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
1605 u8 fault_reason, u16 source_id, unsigned long long addr)
1606{
1607 const char *reason;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001608 int fault_type;
Suresh Siddha0ac24912009-03-16 17:04:54 -07001609
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001610 reason = dmar_get_fault_reason(fault_reason, &fault_type);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001611
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001612 if (fault_type == INTR_REMAP)
Alex Williamsona0fe14d2016-03-17 14:12:31 -06001613 pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index %llx [fault reason %02d] %s\n",
1614 source_id >> 8, PCI_SLOT(source_id & 0xFF),
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001615 PCI_FUNC(source_id & 0xFF), addr >> 48,
1616 fault_reason, reason);
1617 else
Alex Williamsona0fe14d2016-03-17 14:12:31 -06001618 pr_err("[%s] Request device [%02x:%02x.%d] fault addr %llx [fault reason %02d] %s\n",
1619 type ? "DMA Read" : "DMA Write",
1620 source_id >> 8, PCI_SLOT(source_id & 0xFF),
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001621 PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001622 return 0;
1623}
1624
1625#define PRIMARY_FAULT_REG_LEN (16)
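/*
 * Each primary fault record is 16 bytes. As read back by dmar_fault()
 * below, the low quadword holds the faulting page address, the dword at
 * offset 8 carries the source-id in its low 16 bits, and the dword at
 * offset 12 carries the fault reason, the request type and the F bit
 * that marks the record as valid.
 */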
Suresh Siddha1531a6a2009-03-16 17:04:57 -07001626irqreturn_t dmar_fault(int irq, void *dev_id)
Suresh Siddha0ac24912009-03-16 17:04:54 -07001627{
1628 struct intel_iommu *iommu = dev_id;
1629 int reg, fault_index;
1630 u32 fault_status;
1631 unsigned long flag;
Alex Williamsonc43fce42016-03-17 14:12:25 -06001632 bool ratelimited;
1633 static DEFINE_RATELIMIT_STATE(rs,
1634 DEFAULT_RATELIMIT_INTERVAL,
1635 DEFAULT_RATELIMIT_BURST);
1636
1637 /* Disable printing, simply clear the fault when ratelimited */
1638 ratelimited = !__ratelimit(&rs);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001639
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001640 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001641 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
Alex Williamsonc43fce42016-03-17 14:12:25 -06001642 if (fault_status && !ratelimited)
Donald Dutilebf947fcb2012-06-04 17:29:01 -04001643 pr_err("DRHD: handling fault status reg %x\n", fault_status);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001644
1645 /* TBD: ignore advanced fault log currently */
1646 if (!(fault_status & DMA_FSTS_PPF))
Li, Zhen-Huabd5cdad2013-03-25 16:20:52 +08001647 goto unlock_exit;
Suresh Siddha0ac24912009-03-16 17:04:54 -07001648
1649 fault_index = dma_fsts_fault_record_index(fault_status);
1650 reg = cap_fault_reg_offset(iommu->cap);
1651 while (1) {
1652 u8 fault_reason;
1653 u16 source_id;
1654 u64 guest_addr;
1655 int type;
1656 u32 data;
1657
1658 /* highest 32 bits */
1659 data = readl(iommu->reg + reg +
1660 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1661 if (!(data & DMA_FRCD_F))
1662 break;
1663
Alex Williamsonc43fce42016-03-17 14:12:25 -06001664 if (!ratelimited) {
1665 fault_reason = dma_frcd_fault_reason(data);
1666 type = dma_frcd_type(data);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001667
Alex Williamsonc43fce42016-03-17 14:12:25 -06001668 data = readl(iommu->reg + reg +
1669 fault_index * PRIMARY_FAULT_REG_LEN + 8);
1670 source_id = dma_frcd_source_id(data);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001671
Alex Williamsonc43fce42016-03-17 14:12:25 -06001672 guest_addr = dmar_readq(iommu->reg + reg +
1673 fault_index * PRIMARY_FAULT_REG_LEN);
1674 guest_addr = dma_frcd_page_addr(guest_addr);
1675 }
1676
Suresh Siddha0ac24912009-03-16 17:04:54 -07001677 /* clear the fault */
1678 writel(DMA_FRCD_F, iommu->reg + reg +
1679 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1680
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001681 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001682
Alex Williamsonc43fce42016-03-17 14:12:25 -06001683 if (!ratelimited)
1684 dmar_fault_do_one(iommu, type, fault_reason,
1685 source_id, guest_addr);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001686
1687 fault_index++;
Troy Heber8211a7b2009-08-19 15:26:11 -06001688 if (fault_index >= cap_num_fault_regs(iommu->cap))
Suresh Siddha0ac24912009-03-16 17:04:54 -07001689 fault_index = 0;
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001690 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001691 }
Suresh Siddha0ac24912009-03-16 17:04:54 -07001692
Li, Zhen-Huabd5cdad2013-03-25 16:20:52 +08001693 writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);
1694
1695unlock_exit:
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001696 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001697 return IRQ_HANDLED;
1698}
1699
1700int dmar_set_interrupt(struct intel_iommu *iommu)
1701{
1702 int irq, ret;
1703
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001704 /*
1705 * Check if the fault interrupt is already initialized.
1706 */
1707 if (iommu->irq)
1708 return 0;
1709
Jiang Liu34742db2015-04-13 14:11:41 +08001710 irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
1711 if (irq > 0) {
1712 iommu->irq = irq;
1713 } else {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001714 pr_err("No free IRQ vectors\n");
Suresh Siddha0ac24912009-03-16 17:04:54 -07001715 return -EINVAL;
1716 }
1717
Thomas Gleixner477694e2011-07-19 16:25:42 +02001718 ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001719 if (ret)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001720 pr_err("Can't request irq\n");
Suresh Siddha0ac24912009-03-16 17:04:54 -07001721 return ret;
1722}
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001723
1724int __init enable_drhd_fault_handling(void)
1725{
1726 struct dmar_drhd_unit *drhd;
Jiang Liu7c919772014-01-06 14:18:18 +08001727 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001728
1729 /*
1730 * Enable fault control interrupt.
1731 */
Jiang Liu7c919772014-01-06 14:18:18 +08001732 for_each_iommu(iommu, drhd) {
Li, Zhen-Huabd5cdad2013-03-25 16:20:52 +08001733 u32 fault_status;
Jiang Liu7c919772014-01-06 14:18:18 +08001734 int ret = dmar_set_interrupt(iommu);
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001735
1736 if (ret) {
Donald Dutilee9071b02012-06-08 17:13:11 -04001737			pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001738 (unsigned long long)drhd->reg_base_addr, ret);
1739 return -1;
1740 }
Suresh Siddha7f99d942010-11-30 22:22:29 -08001741
1742 /*
1743 * Clear any previous faults.
1744 */
1745 dmar_fault(iommu->irq, iommu);
Li, Zhen-Huabd5cdad2013-03-25 16:20:52 +08001746 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1747 writel(fault_status, iommu->reg + DMAR_FSTS_REG);
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001748 }
1749
1750 return 0;
1751}
Fenghua Yueb4a52b2009-03-27 14:22:43 -07001752
1753/*
1754 * Re-enable Queued Invalidation interface.
1755 */
1756int dmar_reenable_qi(struct intel_iommu *iommu)
1757{
1758 if (!ecap_qis(iommu->ecap))
1759 return -ENOENT;
1760
1761 if (!iommu->qi)
1762 return -ENOENT;
1763
1764 /*
1765 * First disable queued invalidation.
1766 */
1767 dmar_disable_qi(iommu);
1768 /*
 1769	 * Then enable queued invalidation again. Since there are no pending
1770 * invalidation requests now, it's safe to re-enable queued
1771 * invalidation.
1772 */
1773 __dmar_enable_qi(iommu);
1774
1775 return 0;
1776}
Youquan Song074835f2009-09-09 12:05:39 -04001777
1778/*
1779 * Check interrupt remapping support in DMAR table description.
1780 */
Luck, Tony0b8973a2009-12-16 22:59:29 +00001781int __init dmar_ir_support(void)
Youquan Song074835f2009-09-09 12:05:39 -04001782{
1783 struct acpi_table_dmar *dmar;
1784 dmar = (struct acpi_table_dmar *)dmar_tbl;
Arnaud Patard4f506e02010-03-25 18:02:58 +00001785 if (!dmar)
1786 return 0;
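	/* Bit 0 of the DMAR table flags is INTR_REMAP: interrupt remapping supported. */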
Youquan Song074835f2009-09-09 12:05:39 -04001787 return dmar->flags & 0x1;
1788}
Jiang Liu694835d2014-01-06 14:18:16 +08001789
Jiang Liu6b197242014-11-09 22:47:58 +08001790/* Check whether DMAR units are in use */
1791static inline bool dmar_in_use(void)
1792{
1793 return irq_remapping_enabled || intel_iommu_enabled;
1794}
1795
Jiang Liua868e6b2014-01-06 14:18:20 +08001796static int __init dmar_free_unused_resources(void)
1797{
1798 struct dmar_drhd_unit *dmaru, *dmaru_n;
1799
Jiang Liu6b197242014-11-09 22:47:58 +08001800 if (dmar_in_use())
Jiang Liua868e6b2014-01-06 14:18:20 +08001801 return 0;
1802
Jiang Liu2e455282014-02-19 14:07:36 +08001803 if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
1804 bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);
Jiang Liu59ce0512014-02-19 14:07:35 +08001805
Jiang Liu3a5670e2014-02-19 14:07:33 +08001806 down_write(&dmar_global_lock);
Jiang Liua868e6b2014-01-06 14:18:20 +08001807 list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
1808 list_del(&dmaru->list);
1809 dmar_free_drhd(dmaru);
1810 }
Jiang Liu3a5670e2014-02-19 14:07:33 +08001811 up_write(&dmar_global_lock);
Jiang Liua868e6b2014-01-06 14:18:20 +08001812
1813 return 0;
1814}
1815
1816late_initcall(dmar_free_unused_resources);
Konrad Rzeszutek Wilk4db77ff2010-08-26 13:58:04 -04001817IOMMU_INIT_POST(detect_intel_iommu);
Jiang Liu6b197242014-11-09 22:47:58 +08001818
1819/*
1820 * DMAR Hotplug Support
1821 * For more details, please refer to Intel(R) Virtualization Technology
 1822 * for Directed-IO Architecture Specification, Rev 2.2, Section 8.8
1823 * "Remapping Hardware Unit Hot Plug".
1824 */
1825static u8 dmar_hp_uuid[] = {
1826 /* 0000 */ 0xA6, 0xA3, 0xC1, 0xD8, 0x9B, 0xBE, 0x9B, 0x4C,
1827 /* 0008 */ 0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF
1828};
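/*
 * This is the DMAR hot-plug _DSM UUID D8C1A3A6-BE9B-4C9B-91BF-C3CB81FC5DAF,
 * stored as the raw byte sequence that acpi_check_dsm() and
 * acpi_evaluate_dsm_typed() expect.
 */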
1829
1830/*
1831 * Currently there's only one revision and BIOS will not check the revision id,
1832 * so use 0 for safety.
1833 */
1834#define DMAR_DSM_REV_ID 0
1835#define DMAR_DSM_FUNC_DRHD 1
1836#define DMAR_DSM_FUNC_ATSR 2
1837#define DMAR_DSM_FUNC_RHSA 3
1838
1839static inline bool dmar_detect_dsm(acpi_handle handle, int func)
1840{
1841 return acpi_check_dsm(handle, dmar_hp_uuid, DMAR_DSM_REV_ID, 1 << func);
1842}
1843
1844static int dmar_walk_dsm_resource(acpi_handle handle, int func,
1845 dmar_res_handler_t handler, void *arg)
1846{
1847 int ret = -ENODEV;
1848 union acpi_object *obj;
1849 struct acpi_dmar_header *start;
1850 struct dmar_res_callback callback;
1851 static int res_type[] = {
1852 [DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
1853 [DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
1854 [DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
1855 };
1856
1857 if (!dmar_detect_dsm(handle, func))
1858 return 0;
1859
1860 obj = acpi_evaluate_dsm_typed(handle, dmar_hp_uuid, DMAR_DSM_REV_ID,
1861 func, NULL, ACPI_TYPE_BUFFER);
1862 if (!obj)
1863 return -ENODEV;
1864
1865 memset(&callback, 0, sizeof(callback));
1866 callback.cb[res_type[func]] = handler;
1867 callback.arg[res_type[func]] = arg;
1868 start = (struct acpi_dmar_header *)obj->buffer.pointer;
1869 ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);
1870
1871 ACPI_FREE(obj);
1872
1873 return ret;
1874}
1875
1876static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
1877{
1878 int ret;
1879 struct dmar_drhd_unit *dmaru;
1880
1881 dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
1882 if (!dmaru)
1883 return -ENODEV;
1884
1885 ret = dmar_ir_hotplug(dmaru, true);
1886 if (ret == 0)
1887 ret = dmar_iommu_hotplug(dmaru, true);
1888
1889 return ret;
1890}
1891
1892static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
1893{
1894 int i, ret;
1895 struct device *dev;
1896 struct dmar_drhd_unit *dmaru;
1897
1898 dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
1899 if (!dmaru)
1900 return 0;
1901
1902 /*
1903 * All PCI devices managed by this unit should have been destroyed.
1904 */
Linus Torvalds194dc872016-07-27 20:03:31 -07001905 if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) {
Jiang Liu6b197242014-11-09 22:47:58 +08001906 for_each_active_dev_scope(dmaru->devices,
1907 dmaru->devices_cnt, i, dev)
1908 return -EBUSY;
Linus Torvalds194dc872016-07-27 20:03:31 -07001909 }
Jiang Liu6b197242014-11-09 22:47:58 +08001910
1911 ret = dmar_ir_hotplug(dmaru, false);
1912 if (ret == 0)
1913 ret = dmar_iommu_hotplug(dmaru, false);
1914
1915 return ret;
1916}
1917
1918static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
1919{
1920 struct dmar_drhd_unit *dmaru;
1921
1922 dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
1923 if (dmaru) {
1924 list_del_rcu(&dmaru->list);
1925 synchronize_rcu();
1926 dmar_free_drhd(dmaru);
1927 }
1928
1929 return 0;
1930}
1931
1932static int dmar_hotplug_insert(acpi_handle handle)
1933{
1934 int ret;
1935 int drhd_count = 0;
1936
1937 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1938 &dmar_validate_one_drhd, (void *)1);
1939 if (ret)
1940 goto out;
1941
1942 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1943 &dmar_parse_one_drhd, (void *)&drhd_count);
1944 if (ret == 0 && drhd_count == 0) {
1945 pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
1946 goto out;
1947 } else if (ret) {
1948 goto release_drhd;
1949 }
1950
1951 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
1952 &dmar_parse_one_rhsa, NULL);
1953 if (ret)
1954 goto release_drhd;
1955
1956 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
1957 &dmar_parse_one_atsr, NULL);
1958 if (ret)
1959 goto release_atsr;
1960
1961 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1962 &dmar_hp_add_drhd, NULL);
1963 if (!ret)
1964 return 0;
1965
1966 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1967 &dmar_hp_remove_drhd, NULL);
1968release_atsr:
1969 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
1970 &dmar_release_one_atsr, NULL);
1971release_drhd:
1972 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1973 &dmar_hp_release_drhd, NULL);
1974out:
1975 return ret;
1976}
1977
1978static int dmar_hotplug_remove(acpi_handle handle)
1979{
1980 int ret;
1981
1982 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
1983 &dmar_check_one_atsr, NULL);
1984 if (ret)
1985 return ret;
1986
1987 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1988 &dmar_hp_remove_drhd, NULL);
1989 if (ret == 0) {
1990 WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
1991 &dmar_release_one_atsr, NULL));
1992 WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1993 &dmar_hp_release_drhd, NULL));
1994 } else {
1995 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1996 &dmar_hp_add_drhd, NULL);
1997 }
1998
1999 return ret;
2000}
2001
Jiang Liud35165a2014-11-09 22:47:59 +08002002static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
2003 void *context, void **retval)
2004{
2005 acpi_handle *phdl = retval;
2006
2007 if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
2008 *phdl = handle;
2009 return AE_CTRL_TERMINATE;
2010 }
2011
2012 return AE_OK;
2013}
2014
Jiang Liu6b197242014-11-09 22:47:58 +08002015static int dmar_device_hotplug(acpi_handle handle, bool insert)
2016{
2017 int ret;
Jiang Liud35165a2014-11-09 22:47:59 +08002018 acpi_handle tmp = NULL;
2019 acpi_status status;
Jiang Liu6b197242014-11-09 22:47:58 +08002020
2021 if (!dmar_in_use())
2022 return 0;
2023
Jiang Liud35165a2014-11-09 22:47:59 +08002024 if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
2025 tmp = handle;
2026 } else {
2027 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
2028 ACPI_UINT32_MAX,
2029 dmar_get_dsm_handle,
2030 NULL, NULL, &tmp);
2031 if (ACPI_FAILURE(status)) {
2032 pr_warn("Failed to locate _DSM method.\n");
2033 return -ENXIO;
2034 }
2035 }
2036 if (tmp == NULL)
Jiang Liu6b197242014-11-09 22:47:58 +08002037 return 0;
2038
2039 down_write(&dmar_global_lock);
2040 if (insert)
Jiang Liud35165a2014-11-09 22:47:59 +08002041 ret = dmar_hotplug_insert(tmp);
Jiang Liu6b197242014-11-09 22:47:58 +08002042 else
Jiang Liud35165a2014-11-09 22:47:59 +08002043 ret = dmar_hotplug_remove(tmp);
Jiang Liu6b197242014-11-09 22:47:58 +08002044 up_write(&dmar_global_lock);
2045
2046 return ret;
2047}
2048
2049int dmar_device_add(acpi_handle handle)
2050{
2051 return dmar_device_hotplug(handle, true);
2052}
2053
2054int dmar_device_remove(acpi_handle handle)
2055{
2056 return dmar_device_hotplug(handle, false);
2057}
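/*
 * Illustrative sketch, not part of the original file: how an ACPI
 * hot-plug notification path might hand a device handle to the DMAR
 * code. The wrapper is hypothetical; only dmar_device_add() and
 * dmar_device_remove() above are the real entry points.
 */
static int __maybe_unused example_dmar_acpi_notify(acpi_handle handle, bool add)
{
	return add ? dmar_device_add(handle) : dmar_device_remove(handle);
}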