// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */

#define pr_fmt(fmt) "DMAR: " fmt

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/numa.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"

typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
struct dmar_res_callback {
	dmar_res_handler_t	cb[ACPI_DMAR_TYPE_RESERVED];
	void			*arg[ACPI_DMAR_TYPE_RESERVED];
	bool			ignore_unhandled;
	bool			print_entry;
};

/*
 * Assumptions:
 * 1) The hotplug framework guarantees that DMAR unit will be hot-added
 *    before IO devices managed by that unit.
 * 2) The hotplug framework guarantees that DMAR unit will be hot-removed
 *    after IO devices managed by that unit.
 * 3) Hotplug events are rare.
 *
 * Locking rules for DMA and interrupt remapping related global data structures:
 * 1) Use dmar_global_lock in process context
 * 2) Use RCU in interrupt context
 */
DECLARE_RWSEM(dmar_global_lock);
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static int dmar_dev_scope_status = 1;
static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];

static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);

extern const struct iommu_ops intel_iommu_ops;

static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so that a scan of the list will
	 * find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
	else
		list_add_rcu(&drhd->list, &dmar_drhd_units);
}

void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
{
	struct acpi_dmar_device_scope *scope;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
			 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
			pr_warn("Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return NULL;

	return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
}

void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
{
	int i;
	struct device *tmp_dev;

	if (*devices && *cnt) {
		for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
			put_device(tmp_dev);
		kfree(*devices);
	}

	*devices = NULL;
	*cnt = 0;
}

/* Optimize out kzalloc()/kfree() for normal cases */
static char dmar_pci_notify_info_buf[64];

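/*
 * Build a notification record for a PCI device event.  For device
 * addition the PCI path from the device up to the root bus is recorded,
 * so it can later be matched against ACPI device-scope entries.
 */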
static struct dmar_pci_notify_info *
dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
{
	int level = 0;
	size_t size;
	struct pci_dev *tmp;
	struct dmar_pci_notify_info *info;

	BUG_ON(dev->is_virtfn);

	/* Only generate path[] for device addition event */
	if (event == BUS_NOTIFY_ADD_DEVICE)
		for (tmp = dev; tmp; tmp = tmp->bus->self)
			level++;

	size = struct_size(info, path, level);
	if (size <= sizeof(dmar_pci_notify_info_buf)) {
		info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
	} else {
		info = kzalloc(size, GFP_KERNEL);
		if (!info) {
			pr_warn("Out of memory when allocating notify_info "
				"for %s.\n", pci_name(dev));
			if (dmar_dev_scope_status == 0)
				dmar_dev_scope_status = -ENOMEM;
			return NULL;
		}
	}

	info->event = event;
	info->dev = dev;
	info->seg = pci_domain_nr(dev->bus);
	info->level = level;
	if (event == BUS_NOTIFY_ADD_DEVICE) {
		for (tmp = dev; tmp; tmp = tmp->bus->self) {
			level--;
			info->path[level].bus = tmp->bus->number;
			info->path[level].device = PCI_SLOT(tmp->devfn);
			info->path[level].function = PCI_FUNC(tmp->devfn);
			if (pci_is_root_bus(tmp->bus))
				info->bus = tmp->bus->number;
		}
	}

	return info;
}

static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
{
	if ((void *)info != dmar_pci_notify_info_buf)
		kfree(info);
}

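/*
 * Match the PCI path recorded in @info against an ACPI device-scope
 * path.  If an exact match fails, fall back to matching only the last
 * path element to work around firmware that emits broken RMRR entries.
 */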
static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
				struct acpi_dmar_pci_path *path, int count)
{
	int i;

	if (info->bus != bus)
		goto fallback;
	if (info->level != count)
		goto fallback;

	for (i = 0; i < count; i++) {
		if (path[i].device != info->path[i].device ||
		    path[i].function != info->path[i].function)
			goto fallback;
	}

	return true;

fallback:

	if (count != 1)
		return false;

	i = info->level - 1;
	if (bus == info->path[i].bus &&
	    path[0].device == info->path[i].device &&
	    path[0].function == info->path[i].function) {
		pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
			bus, path[0].device, path[0].function);
		return true;
	}

	return false;
}

/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
			  void *start, void *end, u16 segment,
			  struct dmar_dev_scope *devices,
			  int devices_cnt)
{
	int i, level;
	struct device *tmp, *dev = &info->dev->dev;
	struct acpi_dmar_device_scope *scope;
	struct acpi_dmar_pci_path *path;

	if (segment != info->seg)
		return 0;

	for (; start < end; start += scope->length) {
		scope = start;
		if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
		    scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			continue;

		path = (struct acpi_dmar_pci_path *)(scope + 1);
		level = (scope->length - sizeof(*scope)) / sizeof(*path);
		if (!dmar_match_pci_path(info, scope->bus, path, level))
			continue;

		/*
		 * We expect devices with endpoint scope to have normal PCI
		 * headers, and devices with bridge scope to have bridge PCI
		 * headers. However PCI NTB devices may be listed in the
		 * DMAR table with bridge scope, even though they have a
		 * normal PCI header. NTB devices are identified by class
		 * "BRIDGE_OTHER" (0680h) - we don't declare a scope mismatch
		 * for this special case.
		 */
		if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
		     info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) ||
		    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
		     (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
		      info->dev->class >> 16 != PCI_BASE_CLASS_BRIDGE))) {
			pr_warn("Device scope type does not match for %s\n",
				pci_name(info->dev));
			return -EINVAL;
		}

		for_each_dev_scope(devices, devices_cnt, i, tmp)
			if (tmp == NULL) {
				devices[i].bus = info->dev->bus->number;
				devices[i].devfn = info->dev->devfn;
				rcu_assign_pointer(devices[i].dev,
						   get_device(dev));
				return 1;
			}
		BUG_ON(i >= devices_cnt);
	}

	return 0;
}

int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
			  struct dmar_dev_scope *devices, int count)
{
	int index;
	struct device *tmp;

	if (info->seg != segment)
		return 0;

	for_each_active_dev_scope(devices, count, index, tmp)
		if (tmp == &info->dev->dev) {
			RCU_INIT_POINTER(devices[index].dev, NULL);
			synchronize_rcu();
			put_device(tmp);
			return 1;
		}

	return 0;
}

static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
{
	int ret = 0;
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	for_each_drhd_unit(dmaru) {
		if (dmaru->include_all)
			continue;

		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit, header);
		ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
				((void *)drhd) + drhd->header.length,
				dmaru->segment,
				dmaru->devices, dmaru->devices_cnt);
		if (ret)
			break;
	}
	if (ret >= 0)
		ret = dmar_iommu_notify_scope_dev(info);
	if (ret < 0 && dmar_dev_scope_status == 0)
		dmar_dev_scope_status = ret;

	return ret;
}

static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
{
	struct dmar_drhd_unit *dmaru;

	for_each_drhd_unit(dmaru)
		if (dmar_remove_dev_scope(info, dmaru->segment,
			dmaru->devices, dmaru->devices_cnt))
			break;
	dmar_iommu_notify_scope_dev(info);
}

static int dmar_pci_bus_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct pci_dev *pdev = to_pci_dev(data);
	struct dmar_pci_notify_info *info;

	/* Only care about add/remove events for physical functions.
	 * For VFs we actually do the lookup based on the corresponding
	 * PF in device_to_iommu() anyway. */
	if (pdev->is_virtfn)
		return NOTIFY_DONE;
	if (action != BUS_NOTIFY_ADD_DEVICE &&
	    action != BUS_NOTIFY_REMOVED_DEVICE)
		return NOTIFY_DONE;

	info = dmar_alloc_pci_notify_info(pdev, action);
	if (!info)
		return NOTIFY_DONE;

	down_write(&dmar_global_lock);
	if (action == BUS_NOTIFY_ADD_DEVICE)
		dmar_pci_bus_add_dev(info);
	else if (action == BUS_NOTIFY_REMOVED_DEVICE)
		dmar_pci_bus_del_dev(info);
	up_write(&dmar_global_lock);

	dmar_free_pci_notify_info(info);

	return NOTIFY_OK;
}

static struct notifier_block dmar_pci_bus_nb = {
	.notifier_call = dmar_pci_bus_notifier,
	.priority = INT_MIN,
};

static struct dmar_drhd_unit *
dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
{
	struct dmar_drhd_unit *dmaru;

	list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list,
				dmar_rcu_check())
		if (dmaru->segment == drhd->segment &&
		    dmaru->reg_base_addr == drhd->address)
			return dmaru;

	return NULL;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = dmar_find_dmaru(drhd);
	if (dmaru)
		goto out;

	dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	/*
	 * If header is allocated from slab by ACPI _DSM method, we need to
	 * copy the content because the memory buffer will be freed on return.
	 */
	dmaru->hdr = (void *)(dmaru + 1);
	memcpy(dmaru->hdr, header, header->length);
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
	dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
					      ((void *)drhd) + drhd->header.length,
					      &dmaru->devices_cnt);
	if (dmaru->devices_cnt && dmaru->devices == NULL) {
		kfree(dmaru);
		return -ENOMEM;
	}

	ret = alloc_iommu(dmaru);
	if (ret) {
		dmar_free_dev_scope(&dmaru->devices,
				    &dmaru->devices_cnt);
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);

out:
	if (arg)
		(*(int *)arg)++;

	return 0;
}

static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
	if (dmaru->devices && dmaru->devices_cnt)
		dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
	if (dmaru->iommu)
		free_iommu(dmaru->iommu);
	kfree(dmaru);
}

static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
				      void *arg)
{
	struct acpi_dmar_andd *andd = (void *)header;

	/* Check for NUL termination within the designated length */
	if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
		pr_warn(FW_BUG
			"Your BIOS is broken; ANDD object name is not NUL-terminated\n"
			"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			dmi_get_system_info(DMI_BIOS_VENDOR),
			dmi_get_system_info(DMI_BIOS_VERSION),
			dmi_get_system_info(DMI_PRODUCT_VERSION));
		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
		return -EINVAL;
	}
	pr_info("ANDD device: %x name: %s\n", andd->device_number,
		andd->device_name);

	return 0;
}

#ifdef CONFIG_ACPI_NUMA
static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = NUMA_NO_NODE;
			drhd->iommu->node = node;
			return 0;
		}
	}
	pr_warn(FW_BUG
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		rhsa->base_address,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);

	return 0;
}
#else
#define dmar_parse_one_rhsa dmar_res_noop
#endif

static void
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info("DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ROOT_ATS:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info("ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
		break;
	case ACPI_DMAR_TYPE_NAMESPACE:
		/* We don't print this here because we need to sanity-check
		   it first. So print it in dmar_parse_one_andd() instead. */
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we could find DMAR table, then there are DMAR devices */
	status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn("Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return ACPI_SUCCESS(status) ? 0 : -ENOENT;
}

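/*
 * Walk the remapping structures that follow the DMAR table header and
 * dispatch each entry to the per-type callback registered in @cb,
 * stopping on malformed entries or callback errors.
 */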
static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
				       size_t len, struct dmar_res_callback *cb)
{
	struct acpi_dmar_header *iter, *next;
	struct acpi_dmar_header *end = ((void *)start) + len;

	for (iter = start; iter < end; iter = next) {
		next = (void *)iter + iter->length;
		if (iter->length == 0) {
			/* Avoid looping forever on bad ACPI tables */
			pr_debug(FW_BUG "Invalid 0-length structure\n");
			break;
		} else if (next > end) {
			/* Avoid passing table end */
			pr_warn(FW_BUG "Record passes table end\n");
			return -EINVAL;
		}

		if (cb->print_entry)
			dmar_table_print_dmar_entry(iter);

		if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
			/* continue for forward compatibility */
			pr_debug("Unknown DMAR structure type %d\n",
				 iter->type);
		} else if (cb->cb[iter->type]) {
			int ret;

			ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
			if (ret)
				return ret;
		} else if (!cb->ignore_unhandled) {
			pr_warn("No handler for DMAR structure type %d\n",
				iter->type);
			return -EINVAL;
		}
	}

	return 0;
}

static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
				       struct dmar_res_callback *cb)
{
	return dmar_walk_remapping_entries((void *)(dmar + 1),
			dmar->header.length - sizeof(*dmar), cb);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	int drhd_count = 0;
	int ret;
	struct dmar_res_callback cb = {
		.print_entry = true,
		.ignore_unhandled = true,
		.arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
		.cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
		.cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
		.cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
		.cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
	};

	/*
	 * Do it again, earlier dmar_tbl mapping could be mapped with
	 * fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use DMAR copy
	 * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info("Host address width %d\n", dmar->width + 1);
	ret = dmar_walk_dmar_table(dmar, &cb);
	if (ret == 0 && drhd_count == 0)
		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");

	return ret;
}

static int dmar_pci_device_match(struct dmar_dev_scope devices[],
				 int cnt, struct pci_dev *dev)
{
	int index;
	struct device *tmp;

	while (dev) {
		for_each_active_dev_scope(devices, cnt, index, tmp)
			if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	rcu_read_lock();
	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			goto out;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			goto out;
	}
	dmaru = NULL;
out:
	rcu_read_unlock();

	return dmaru;
}

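/*
 * Bind an ACPI namespace device (ANDD entry) to the DRHD unit whose
 * device scope lists its enumeration ID, recording it in a free
 * dev_scope slot of that unit.
 */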
static void __init dmar_acpi_insert_dev_scope(u8 device_number,
					      struct acpi_device *adev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	struct device *tmp;
	int i;
	struct acpi_dmar_pci_path *path;

	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		for (scope = (void *)(drhd + 1);
		     (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
		     scope = ((void *)scope) + scope->length) {
			if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
				continue;
			if (scope->enumeration_id != device_number)
				continue;

			path = (void *)(scope + 1);
			pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
				dev_name(&adev->dev), dmaru->reg_base_addr,
				scope->bus, path->device, path->function);
			for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
				if (tmp == NULL) {
					dmaru->devices[i].bus = scope->bus;
					dmaru->devices[i].devfn = PCI_DEVFN(path->device,
									    path->function);
					rcu_assign_pointer(dmaru->devices[i].dev,
							   get_device(&adev->dev));
					return;
				}
			BUG_ON(i >= dmaru->devices_cnt);
		}
	}
	pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
		device_number, dev_name(&adev->dev));
}

static int __init dmar_acpi_dev_scope_init(void)
{
	struct acpi_dmar_andd *andd;

	if (dmar_tbl == NULL)
		return -ENODEV;

	for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
	     ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
	     andd = ((void *)andd) + andd->header.length) {
		if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
			acpi_handle h;
			struct acpi_device *adev;

			if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
							  andd->device_name,
							  &h))) {
				pr_err("Failed to find handle for ACPI object %s\n",
				       andd->device_name);
				continue;
			}
			if (acpi_bus_get_device(h, &adev)) {
				pr_err("Failed to get device for ACPI object %s\n",
				       andd->device_name);
				continue;
			}
			dmar_acpi_insert_dev_scope(andd->device_number, adev);
		}
	}
	return 0;
}

int __init dmar_dev_scope_init(void)
{
	struct pci_dev *dev = NULL;
	struct dmar_pci_notify_info *info;

	if (dmar_dev_scope_status != 1)
		return dmar_dev_scope_status;

	if (list_empty(&dmar_drhd_units)) {
		dmar_dev_scope_status = -ENODEV;
	} else {
		dmar_dev_scope_status = 0;

		dmar_acpi_dev_scope_init();

		for_each_pci_dev(dev) {
			if (dev->is_virtfn)
				continue;

			info = dmar_alloc_pci_notify_info(dev,
					BUS_NOTIFY_ADD_DEVICE);
			if (!info) {
				return dmar_dev_scope_status;
			} else {
				dmar_pci_bus_add_dev(info);
				dmar_free_pci_notify_info(info);
			}
		}
	}

	return dmar_dev_scope_status;
}

void __init dmar_register_bus_notifier(void)
{
	bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
}


int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized == 0) {
		ret = parse_dmar_table();
		if (ret < 0) {
			if (ret != -ENODEV)
				pr_info("Parse DMAR table failure.\n");
		} else if (list_empty(&dmar_drhd_units)) {
			pr_info("No DMAR devices found\n");
			ret = -ENODEV;
		}

		if (ret < 0)
			dmar_table_initialized = ret;
		else
			dmar_table_initialized = 1;
	}

	return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
	pr_warn_once(FW_BUG
		     "Your BIOS is broken; DMAR reported at address %llx%s!\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     addr, message,
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
}

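/*
 * Sanity-check a single DRHD entry during early detection: map its
 * register page and make sure the capability registers do not read
 * back as all ones.
 */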
static int __ref
dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
{
	struct acpi_dmar_hardware_unit *drhd;
	void __iomem *addr;
	u64 cap, ecap;

	drhd = (void *)entry;
	if (!drhd->address) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	if (arg)
		addr = ioremap(drhd->address, VTD_PAGE_SIZE);
	else
		addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
	if (!addr) {
		pr_warn("Can't validate DRHD address: %llx\n", drhd->address);
		return -EINVAL;
	}

	cap = dmar_readq(addr + DMAR_CAP_REG);
	ecap = dmar_readq(addr + DMAR_ECAP_REG);

	if (arg)
		iounmap(addr);
	else
		early_iounmap(addr, VTD_PAGE_SIZE);

	if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
		warn_invalid_dmar(drhd->address, " returns all ones");
		return -EINVAL;
	}

	return 0;
}

int __init detect_intel_iommu(void)
{
	int ret;
	struct dmar_res_callback validate_drhd_cb = {
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
		.ignore_unhandled = true,
	};

	down_write(&dmar_global_lock);
	ret = dmar_table_detect();
	if (!ret)
		ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
					   &validate_drhd_cb);
	if (!ret && !no_iommu && !iommu_detected && !dmar_disabled) {
		iommu_detected = 1;
		/* Make sure ACS will be enabled */
		pci_request_acs();
	}

#ifdef CONFIG_X86
	if (!ret) {
		x86_init.iommu.iommu_init = intel_iommu_init;
		x86_platform.iommu_shutdown = intel_iommu_shutdown;
	}

#endif

	if (dmar_tbl) {
		acpi_put_table(dmar_tbl);
		dmar_tbl = NULL;
	}
	up_write(&dmar_global_lock);

	return ret ? ret : 1;
}

static void unmap_iommu(struct intel_iommu *iommu)
{
	iounmap(iommu->reg);
	release_mem_region(iommu->reg_phys, iommu->reg_size);
}

/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start w/ a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
	int map_size, err = 0;

	iommu->reg_phys = phys_addr;
	iommu->reg_size = VTD_PAGE_SIZE;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("Can't reserve memory\n");
		err = -EBUSY;
		goto out;
	}

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
	if (!iommu->reg) {
		pr_err("Can't map the region\n");
		err = -ENOMEM;
		goto release;
	}

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		err = -EINVAL;
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		iounmap(iommu->reg);
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
					iommu->name)) {
			pr_err("Can't reserve memory\n");
			err = -EBUSY;
			goto out;
		}
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		if (!iommu->reg) {
			pr_err("Can't map the region\n");
			err = -ENOMEM;
			goto release;
		}
	}
	err = 0;
	goto out;

unmap:
	iounmap(iommu->reg);
release:
	release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
	return err;
}

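/* Allocate a unique sequence id and name ("dmar%d") for this IOMMU. */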
static int dmar_alloc_seq_id(struct intel_iommu *iommu)
{
	iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
					    DMAR_UNITS_SUPPORTED);
	if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
		iommu->seq_id = -1;
	} else {
		set_bit(iommu->seq_id, dmar_seq_ids);
		sprintf(iommu->name, "dmar%d", iommu->seq_id);
	}

	return iommu->seq_id;
}

static void dmar_free_seq_id(struct intel_iommu *iommu)
{
	if (iommu->seq_id >= 0) {
		clear_bit(iommu->seq_id, dmar_seq_ids);
		iommu->seq_id = -1;
	}
}

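/*
 * Allocate and initialize the intel_iommu structure for a DRHD unit:
 * map its registers, read capabilities, reflect the current hardware
 * status in gcmd and register the unit with the IOMMU core.
 */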
static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	u32 ver, sts;
	int agaw = 0;
	int msagaw = 0;
	int err;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	if (dmar_alloc_seq_id(iommu) < 0) {
		pr_err("Failed to allocate seq_id\n");
		err = -ENOSPC;
		goto error;
	}

	err = map_iommu(iommu, drhd->reg_base_addr);
	if (err) {
		pr_err("Failed to map %s\n", iommu->name);
		goto error_free_seq_id;
	}

	err = -EINVAL;
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;
	iommu->segment = drhd->segment;

	iommu->node = NUMA_NO_NODE;

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->name,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	/* Reflect status in gcmd */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (sts & DMA_GSTS_IRES)
		iommu->gcmd |= DMA_GCMD_IRE;
	if (sts & DMA_GSTS_TES)
		iommu->gcmd |= DMA_GCMD_TE;
	if (sts & DMA_GSTS_QIES)
		iommu->gcmd |= DMA_GCMD_QIE;

	raw_spin_lock_init(&iommu->register_lock);

	if (intel_iommu_enabled) {
		err = iommu_device_sysfs_add(&iommu->iommu, NULL,
					     intel_iommu_groups,
					     "%s", iommu->name);
		if (err)
			goto err_unmap;

		iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);

		err = iommu_device_register(&iommu->iommu);
		if (err)
			goto err_unmap;
	}

	drhd->iommu = iommu;

	return 0;

err_unmap:
	unmap_iommu(iommu);
error_free_seq_id:
	dmar_free_seq_id(iommu);
error:
	kfree(iommu);
	return err;
}

static void free_iommu(struct intel_iommu *iommu)
{
	if (intel_iommu_enabled) {
		iommu_device_unregister(&iommu->iommu);
		iommu_device_sysfs_remove(&iommu->iommu);
	}

	if (iommu->irq) {
		if (iommu->pr_irq) {
			free_irq(iommu->pr_irq, iommu);
			dmar_free_hwirq(iommu->pr_irq);
			iommu->pr_irq = 0;
		}
		free_irq(iommu->irq, iommu);
		dmar_free_hwirq(iommu->irq);
		iommu->irq = 0;
	}

	if (iommu->qi) {
		free_page((unsigned long)iommu->qi->desc);
		kfree(iommu->qi->desc_status);
		kfree(iommu->qi);
	}

	if (iommu->reg)
		unmap_iommu(iommu);

	dmar_free_seq_id(iommu);
	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

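/*
 * Check the fault status register for queued-invalidation errors and
 * handle Invalidation Queue Errors (IQE), Invalidation Time-out Errors
 * (ITE) and Invalidation Completion Errors (ICE) for the descriptor at
 * @index.
 */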
Yu Zhao704126a2009-01-04 16:28:52 +08001151static int qi_check_fault(struct intel_iommu *iommu, int index)
1152{
1153 u32 fault;
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001154 int head, tail;
Yu Zhao704126a2009-01-04 16:28:52 +08001155 struct q_inval *qi = iommu->qi;
1156 int wait_index = (index + 1) % QI_LENGTH;
Lu Baolu5d308fc2018-12-10 09:58:58 +08001157 int shift = qi_shift(iommu);
Yu Zhao704126a2009-01-04 16:28:52 +08001158
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001159 if (qi->desc_status[wait_index] == QI_ABORT)
1160 return -EAGAIN;
1161
Yu Zhao704126a2009-01-04 16:28:52 +08001162 fault = readl(iommu->reg + DMAR_FSTS_REG);
1163
1164 /*
1165 * If IQE happens, the head points to the descriptor associated
1166 * with the error. No new descriptors are fetched until the IQE
1167 * is cleared.
1168 */
1169 if (fault & DMA_FSTS_IQE) {
1170 head = readl(iommu->reg + DMAR_IQH_REG);
Lu Baolu5d308fc2018-12-10 09:58:58 +08001171 if ((head >> shift) == index) {
1172 struct qi_desc *desc = qi->desc + head;
1173
1174 /*
1175 * desc->qw2 and desc->qw3 are either reserved or
1176 * used by software as private data. We won't print
1177 * out these two qw's for security consideration.
1178 */
1179 pr_err("VT-d detected invalid descriptor: qw0 = %llx, qw1 = %llx\n",
1180 (unsigned long long)desc->qw0,
1181 (unsigned long long)desc->qw1);
1182 memcpy(desc, qi->desc + (wait_index << shift),
1183 1 << shift);
Yu Zhao704126a2009-01-04 16:28:52 +08001184 writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
1185 return -EINVAL;
1186 }
1187 }
1188
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001189 /*
1190 * If ITE happens, all pending wait_desc commands are aborted.
1191 * No new descriptors are fetched until the ITE is cleared.
1192 */
1193 if (fault & DMA_FSTS_ITE) {
1194 head = readl(iommu->reg + DMAR_IQH_REG);
Lu Baolu5d308fc2018-12-10 09:58:58 +08001195 head = ((head >> shift) - 1 + QI_LENGTH) % QI_LENGTH;
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001196 head |= 1;
1197 tail = readl(iommu->reg + DMAR_IQT_REG);
Lu Baolu5d308fc2018-12-10 09:58:58 +08001198 tail = ((tail >> shift) - 1 + QI_LENGTH) % QI_LENGTH;
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001199
1200 writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
1201
1202 do {
1203 if (qi->desc_status[head] == QI_IN_USE)
1204 qi->desc_status[head] = QI_ABORT;
1205 head = (head - 2 + QI_LENGTH) % QI_LENGTH;
1206 } while (head != tail);
1207
1208 if (qi->desc_status[wait_index] == QI_ABORT)
1209 return -EAGAIN;
1210 }
1211
1212 if (fault & DMA_FSTS_ICE)
1213 writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
1214
Yu Zhao704126a2009-01-04 16:28:52 +08001215 return 0;
1216}
1217
Suresh Siddhafe962e92008-07-10 11:16:42 -07001218/*
1219 * Submit the queued invalidation descriptor to the remapping
1220 * hardware unit and wait for its completion.
1221 */
Yu Zhao704126a2009-01-04 16:28:52 +08001222int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
Suresh Siddhafe962e92008-07-10 11:16:42 -07001223{
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001224 int rc;
Suresh Siddhafe962e92008-07-10 11:16:42 -07001225 struct q_inval *qi = iommu->qi;
Lu Baolu5d308fc2018-12-10 09:58:58 +08001226 int offset, shift, length;
1227 struct qi_desc wait_desc;
Suresh Siddhafe962e92008-07-10 11:16:42 -07001228 int wait_index, index;
1229 unsigned long flags;
1230
1231 if (!qi)
Yu Zhao704126a2009-01-04 16:28:52 +08001232 return 0;
Suresh Siddhafe962e92008-07-10 11:16:42 -07001233
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001234restart:
1235 rc = 0;
1236
Thomas Gleixner3b8f4042011-07-19 17:02:07 +02001237 raw_spin_lock_irqsave(&qi->q_lock, flags);
Suresh Siddhafe962e92008-07-10 11:16:42 -07001238 while (qi->free_cnt < 3) {
Thomas Gleixner3b8f4042011-07-19 17:02:07 +02001239 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
Suresh Siddhafe962e92008-07-10 11:16:42 -07001240 cpu_relax();
Thomas Gleixner3b8f4042011-07-19 17:02:07 +02001241 raw_spin_lock_irqsave(&qi->q_lock, flags);
Suresh Siddhafe962e92008-07-10 11:16:42 -07001242 }
1243
1244 index = qi->free_head;
1245 wait_index = (index + 1) % QI_LENGTH;
Lu Baolu5d308fc2018-12-10 09:58:58 +08001246 shift = qi_shift(iommu);
1247 length = 1 << shift;
Suresh Siddhafe962e92008-07-10 11:16:42 -07001248
1249 qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;
1250
Lu Baolu5d308fc2018-12-10 09:58:58 +08001251 offset = index << shift;
1252 memcpy(qi->desc + offset, desc, length);
1253 wait_desc.qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
Yu Zhao704126a2009-01-04 16:28:52 +08001254 QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
Lu Baolu5d308fc2018-12-10 09:58:58 +08001255 wait_desc.qw1 = virt_to_phys(&qi->desc_status[wait_index]);
1256 wait_desc.qw2 = 0;
1257 wait_desc.qw3 = 0;
Suresh Siddhafe962e92008-07-10 11:16:42 -07001258
Lu Baolu5d308fc2018-12-10 09:58:58 +08001259 offset = wait_index << shift;
1260 memcpy(qi->desc + offset, &wait_desc, length);
Suresh Siddhafe962e92008-07-10 11:16:42 -07001261
Suresh Siddhafe962e92008-07-10 11:16:42 -07001262 qi->free_head = (qi->free_head + 2) % QI_LENGTH;
1263 qi->free_cnt -= 2;
1264
Suresh Siddhafe962e92008-07-10 11:16:42 -07001265 /*
1266 * update the HW tail register indicating the presence of
1267 * new descriptors.
1268 */
Lu Baolu5d308fc2018-12-10 09:58:58 +08001269 writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG);
Suresh Siddhafe962e92008-07-10 11:16:42 -07001270
1271 while (qi->desc_status[wait_index] != QI_DONE) {
Suresh Siddhaf05810c2008-10-16 16:31:54 -07001272 /*
1273 * We will leave the interrupts disabled, to prevent interrupt
1274 * context to queue another cmd while a cmd is already submitted
1275 * and waiting for completion on this cpu. This is to avoid
1276 * a deadlock where the interrupt context can wait indefinitely
1277 * for free slots in the queue.
1278 */
Yu Zhao704126a2009-01-04 16:28:52 +08001279 rc = qi_check_fault(iommu, index);
1280 if (rc)
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001281 break;
Yu Zhao704126a2009-01-04 16:28:52 +08001282
Thomas Gleixner3b8f4042011-07-19 17:02:07 +02001283 raw_spin_unlock(&qi->q_lock);
Suresh Siddhafe962e92008-07-10 11:16:42 -07001284 cpu_relax();
Thomas Gleixner3b8f4042011-07-19 17:02:07 +02001285 raw_spin_lock(&qi->q_lock);
Suresh Siddhafe962e92008-07-10 11:16:42 -07001286 }
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001287
1288 qi->desc_status[index] = QI_DONE;
Suresh Siddhafe962e92008-07-10 11:16:42 -07001289
1290 reclaim_free_desc(qi);
Thomas Gleixner3b8f4042011-07-19 17:02:07 +02001291 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
Yu Zhao704126a2009-01-04 16:28:52 +08001292
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001293 if (rc == -EAGAIN)
1294 goto restart;
1295
Yu Zhao704126a2009-01-04 16:28:52 +08001296 return rc;
Suresh Siddhafe962e92008-07-10 11:16:42 -07001297}
1298
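/*
 * Editor's note -- illustrative sketch only, not part of the driver.
 * A caller fills a struct qi_desc and hands it to qi_submit_sync(); an
 * interrupt-entry-cache flush (exactly what qi_global_iec() below does)
 * looks roughly like:
 *
 *	struct qi_desc desc = { .qw0 = QI_IEC_TYPE };
 *
 *	qi_submit_sync(&desc, iommu);
 *
 * Each call consumes two queue slots: one for the request itself and one
 * for a wait descriptor whose status write-back sets
 * qi->desc_status[wait_index] to QI_DONE -- the condition the busy-wait
 * loop above polls for. That is also why the submit path waits for at
 * least three free slots before queueing.
 */
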
1299/*
1300 * Flush the global interrupt entry cache.
1301 */
1302void qi_global_iec(struct intel_iommu *iommu)
1303{
1304 struct qi_desc desc;
1305
Lu Baolu5d308fc2018-12-10 09:58:58 +08001306 desc.qw0 = QI_IEC_TYPE;
1307 desc.qw1 = 0;
1308 desc.qw2 = 0;
1309 desc.qw3 = 0;
Suresh Siddhafe962e92008-07-10 11:16:42 -07001310
Yu Zhao704126a2009-01-04 16:28:52 +08001311 /* should never fail */
Suresh Siddhafe962e92008-07-10 11:16:42 -07001312 qi_submit_sync(&desc, iommu);
1313}
1314
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001315void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
1316 u64 type)
Youquan Song3481f212008-10-16 16:31:55 -07001317{
Youquan Song3481f212008-10-16 16:31:55 -07001318 struct qi_desc desc;
1319
Lu Baolu5d308fc2018-12-10 09:58:58 +08001320 desc.qw0 = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
Youquan Song3481f212008-10-16 16:31:55 -07001321 | QI_CC_GRAN(type) | QI_CC_TYPE;
Lu Baolu5d308fc2018-12-10 09:58:58 +08001322 desc.qw1 = 0;
1323 desc.qw2 = 0;
1324 desc.qw3 = 0;
Youquan Song3481f212008-10-16 16:31:55 -07001325
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001326 qi_submit_sync(&desc, iommu);
Youquan Song3481f212008-10-16 16:31:55 -07001327}
1328
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001329void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
1330 unsigned int size_order, u64 type)
Youquan Song3481f212008-10-16 16:31:55 -07001331{
1332 u8 dw = 0, dr = 0;
1333
1334 struct qi_desc desc;
1335 int ih = 0;
1336
Youquan Song3481f212008-10-16 16:31:55 -07001337 if (cap_write_drain(iommu->cap))
1338 dw = 1;
1339
1340 if (cap_read_drain(iommu->cap))
1341 dr = 1;
1342
Lu Baolu5d308fc2018-12-10 09:58:58 +08001343 desc.qw0 = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
Youquan Song3481f212008-10-16 16:31:55 -07001344 | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
Lu Baolu5d308fc2018-12-10 09:58:58 +08001345 desc.qw1 = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
Youquan Song3481f212008-10-16 16:31:55 -07001346 | QI_IOTLB_AM(size_order);
Lu Baolu5d308fc2018-12-10 09:58:58 +08001347 desc.qw2 = 0;
1348 desc.qw3 = 0;
Youquan Song3481f212008-10-16 16:31:55 -07001349
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001350 qi_submit_sync(&desc, iommu);
Youquan Song3481f212008-10-16 16:31:55 -07001351}
1352
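/*
 * Editor's note -- illustrative only. size_order is the VT-d address-mask
 * (AM) exponent: a call such as
 *
 *	qi_flush_iotlb(iommu, did, addr, 2, DMA_TLB_PSI_FLUSH);
 *
 * requests invalidation of 2^2 = 4 contiguous 4KB pages starting at a
 * suitably aligned addr. Read/write draining (dr/dw) is requested only
 * when the capability register advertises it.
 */
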
Jacob Pan1c48db42018-06-07 09:57:00 -07001353void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
1354 u16 qdep, u64 addr, unsigned mask)
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001355{
1356 struct qi_desc desc;
1357
1358 if (mask) {
Joerg Roedelc8acb282017-08-11 11:42:46 +02001359 addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
Lu Baolu5d308fc2018-12-10 09:58:58 +08001360 desc.qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001361 } else
Lu Baolu5d308fc2018-12-10 09:58:58 +08001362 desc.qw1 = QI_DEV_IOTLB_ADDR(addr);
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001363
1364 if (qdep >= QI_DEV_IOTLB_MAX_INVS)
1365 qdep = 0;
1366
Lu Baolu5d308fc2018-12-10 09:58:58 +08001367 desc.qw0 = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
Jacob Pan1c48db42018-06-07 09:57:00 -07001368 QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
Lu Baolu5d308fc2018-12-10 09:58:58 +08001369 desc.qw2 = 0;
1370 desc.qw3 = 0;
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001371
1372 qi_submit_sync(&desc, iommu);
1373}
1374
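/*
 * Editor's note -- illustrative only. mask expresses the span as a power
 * of two of 4KB pages: with a non-zero mask the size bit
 * (QI_DEV_IOTLB_SIZE) is set and the low address bits are filled in so
 * the device can decode the range from the address field (PCIe ATS
 * invalidate encoding); e.g. mask = 1 covers 2 pages. mask = 0 targets a
 * single page, and a qdep beyond QI_DEV_IOTLB_MAX_INVS is clamped to 0.
 */
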
Lu Baolu33cd6e62020-01-02 08:18:18 +08001375/* PASID-based IOTLB invalidation */
1376void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
1377 unsigned long npages, bool ih)
1378{
1379 struct qi_desc desc = {.qw2 = 0, .qw3 = 0};
1380
1381 /*
1382 * npages == -1 means a PASID-selective invalidation, otherwise,
1383 * a positive value for Page-selective-within-PASID invalidation.
1384 * 0 is not a valid input.
1385 */
1386 if (WARN_ON(!npages)) {
1387 pr_err("Invalid input npages = %ld\n", npages);
1388 return;
1389 }
1390
1391 if (npages == -1) {
1392 desc.qw0 = QI_EIOTLB_PASID(pasid) |
1393 QI_EIOTLB_DID(did) |
1394 QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
1395 QI_EIOTLB_TYPE;
1396 desc.qw1 = 0;
1397 } else {
1398 int mask = ilog2(__roundup_pow_of_two(npages));
1399 unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));
1400
 1401		if (WARN_ON_ONCE(!IS_ALIGNED(addr, align)))
1402 addr &= ~(align - 1);
1403
1404 desc.qw0 = QI_EIOTLB_PASID(pasid) |
1405 QI_EIOTLB_DID(did) |
1406 QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
1407 QI_EIOTLB_TYPE;
1408 desc.qw1 = QI_EIOTLB_ADDR(addr) |
1409 QI_EIOTLB_IH(ih) |
1410 QI_EIOTLB_AM(mask);
1411 }
1412
1413 qi_submit_sync(&desc, iommu);
1414}
1415
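/*
 * Editor's note -- worked example, not part of the driver. For a
 * page-selective flush the page count is rounded up to a power of two,
 * e.g. npages = 7 gives mask = ilog2(8) = 3, so the descriptor covers
 * 2^3 = 8 pages and addr is expected to be aligned to that 32KB span
 * (it is forced down to the boundary otherwise). npages = -1 instead
 * requests a PASID-selective flush with no address qualifier.
 */
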
Suresh Siddhafe962e92008-07-10 11:16:42 -07001416/*
Suresh Siddhaeba67e52009-03-16 17:04:56 -07001417 * Disable Queued Invalidation interface.
1418 */
1419void dmar_disable_qi(struct intel_iommu *iommu)
1420{
1421 unsigned long flags;
1422 u32 sts;
1423 cycles_t start_time = get_cycles();
1424
1425 if (!ecap_qis(iommu->ecap))
1426 return;
1427
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001428 raw_spin_lock_irqsave(&iommu->register_lock, flags);
Suresh Siddhaeba67e52009-03-16 17:04:56 -07001429
CQ Tangfda3bec2016-01-13 21:15:03 +00001430 sts = readl(iommu->reg + DMAR_GSTS_REG);
Suresh Siddhaeba67e52009-03-16 17:04:56 -07001431 if (!(sts & DMA_GSTS_QIES))
1432 goto end;
1433
1434 /*
 1435	 * Give the HW a chance to complete the pending invalidation requests.
1436 */
1437 while ((readl(iommu->reg + DMAR_IQT_REG) !=
1438 readl(iommu->reg + DMAR_IQH_REG)) &&
1439 (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
1440 cpu_relax();
1441
1442 iommu->gcmd &= ~DMA_GCMD_QIE;
Suresh Siddhaeba67e52009-03-16 17:04:56 -07001443 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1444
1445 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
1446 !(sts & DMA_GSTS_QIES), sts);
1447end:
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001448 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
Suresh Siddhaeba67e52009-03-16 17:04:56 -07001449}
1450
1451/*
Fenghua Yueb4a52b2009-03-27 14:22:43 -07001452 * Enable queued invalidation.
1453 */
1454static void __dmar_enable_qi(struct intel_iommu *iommu)
1455{
David Woodhousec416daa2009-05-10 20:30:58 +01001456 u32 sts;
Fenghua Yueb4a52b2009-03-27 14:22:43 -07001457 unsigned long flags;
1458 struct q_inval *qi = iommu->qi;
Lu Baolu5d308fc2018-12-10 09:58:58 +08001459 u64 val = virt_to_phys(qi->desc);
Fenghua Yueb4a52b2009-03-27 14:22:43 -07001460
1461 qi->free_head = qi->free_tail = 0;
1462 qi->free_cnt = QI_LENGTH;
1463
Lu Baolu5d308fc2018-12-10 09:58:58 +08001464 /*
1465 * Set DW=1 and QS=1 in IQA_REG when Scalable Mode capability
1466 * is present.
1467 */
1468 if (ecap_smts(iommu->ecap))
1469 val |= (1 << 11) | 1;
1470
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001471 raw_spin_lock_irqsave(&iommu->register_lock, flags);
Fenghua Yueb4a52b2009-03-27 14:22:43 -07001472
1473 /* write zero to the tail reg */
1474 writel(0, iommu->reg + DMAR_IQT_REG);
1475
Lu Baolu5d308fc2018-12-10 09:58:58 +08001476 dmar_writeq(iommu->reg + DMAR_IQA_REG, val);
Fenghua Yueb4a52b2009-03-27 14:22:43 -07001477
Fenghua Yueb4a52b2009-03-27 14:22:43 -07001478 iommu->gcmd |= DMA_GCMD_QIE;
David Woodhousec416daa2009-05-10 20:30:58 +01001479 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
Fenghua Yueb4a52b2009-03-27 14:22:43 -07001480
1481 /* Make sure hardware complete it */
1482 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
1483
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001484 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
Fenghua Yueb4a52b2009-03-27 14:22:43 -07001485}
1486
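/*
 * Editor's note -- sizing sketch, assuming QS encodes the queue size as
 * 2^QS pages per the VT-d spec. With DW=1 (256-bit descriptors) and QS=1
 * the invalidation queue spans two 4KB pages, i.e. 8192 / 32 = 256 =
 * QI_LENGTH entries; in legacy mode a single page of 128-bit descriptors
 * yields the same 4096 / 16 = 256 entries, which is why dmar_enable_qi()
 * below sizes the allocation by ecap_smts().
 */
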
1487/*
Suresh Siddhafe962e92008-07-10 11:16:42 -07001488 * Enable Queued Invalidation interface. This is a must to support
1489 * interrupt-remapping. Also used by DMA-remapping, which replaces
1490 * register based IOTLB invalidation.
1491 */
1492int dmar_enable_qi(struct intel_iommu *iommu)
1493{
Suresh Siddhafe962e92008-07-10 11:16:42 -07001494 struct q_inval *qi;
Suresh Siddha751cafe2009-10-02 11:01:22 -07001495 struct page *desc_page;
Suresh Siddhafe962e92008-07-10 11:16:42 -07001496
1497 if (!ecap_qis(iommu->ecap))
1498 return -ENOENT;
1499
1500 /*
 1501	 * Queued invalidation is already set up and enabled.
1502 */
1503 if (iommu->qi)
1504 return 0;
1505
Suresh Siddhafa4b57c2009-03-16 17:05:05 -07001506 iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
Suresh Siddhafe962e92008-07-10 11:16:42 -07001507 if (!iommu->qi)
1508 return -ENOMEM;
1509
1510 qi = iommu->qi;
1511
Lu Baolu5d308fc2018-12-10 09:58:58 +08001512 /*
1513 * Need two pages to accommodate 256 descriptors of 256 bits each
1514 * if the remapping hardware supports scalable mode translation.
1515 */
1516 desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
1517 !!ecap_smts(iommu->ecap));
Suresh Siddha751cafe2009-10-02 11:01:22 -07001518 if (!desc_page) {
Suresh Siddhafe962e92008-07-10 11:16:42 -07001519 kfree(qi);
Jiang Liub707cb02014-01-06 14:18:26 +08001520 iommu->qi = NULL;
Suresh Siddhafe962e92008-07-10 11:16:42 -07001521 return -ENOMEM;
1522 }
1523
Suresh Siddha751cafe2009-10-02 11:01:22 -07001524 qi->desc = page_address(desc_page);
1525
Kees Cook6396bb22018-06-12 14:03:40 -07001526 qi->desc_status = kcalloc(QI_LENGTH, sizeof(int), GFP_ATOMIC);
Suresh Siddhafe962e92008-07-10 11:16:42 -07001527 if (!qi->desc_status) {
1528 free_page((unsigned long) qi->desc);
1529 kfree(qi);
Jiang Liub707cb02014-01-06 14:18:26 +08001530 iommu->qi = NULL;
Suresh Siddhafe962e92008-07-10 11:16:42 -07001531 return -ENOMEM;
1532 }
1533
Thomas Gleixner3b8f4042011-07-19 17:02:07 +02001534 raw_spin_lock_init(&qi->q_lock);
Suresh Siddhafe962e92008-07-10 11:16:42 -07001535
Fenghua Yueb4a52b2009-03-27 14:22:43 -07001536 __dmar_enable_qi(iommu);
Suresh Siddhafe962e92008-07-10 11:16:42 -07001537
1538 return 0;
1539}
Suresh Siddha0ac24912009-03-16 17:04:54 -07001540
 1541/* iommu interrupt handling. Most of it is MSI-like. */
1542
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001543enum faulttype {
1544 DMA_REMAP,
1545 INTR_REMAP,
1546 UNKNOWN,
1547};
1548
1549static const char *dma_remap_fault_reasons[] =
Suresh Siddha0ac24912009-03-16 17:04:54 -07001550{
1551 "Software",
1552 "Present bit in root entry is clear",
1553 "Present bit in context entry is clear",
1554 "Invalid context entry",
1555 "Access beyond MGAW",
1556 "PTE Write access is not set",
1557 "PTE Read access is not set",
1558 "Next page table ptr is invalid",
1559 "Root table address invalid",
1560 "Context table ptr is invalid",
1561 "non-zero reserved fields in RTP",
1562 "non-zero reserved fields in CTP",
1563 "non-zero reserved fields in PTE",
Li, Zhen-Hua4ecccd92013-03-06 10:43:17 +08001564 "PCE for translation request specifies blocking",
Suresh Siddha0ac24912009-03-16 17:04:54 -07001565};
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001566
Kyung Min Parkfd730002019-09-06 11:14:02 -07001567static const char * const dma_remap_sm_fault_reasons[] = {
1568 "SM: Invalid Root Table Address",
1569 "SM: TTM 0 for request with PASID",
1570 "SM: TTM 0 for page group request",
1571 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x33-0x37 */
1572 "SM: Error attempting to access Root Entry",
1573 "SM: Present bit in Root Entry is clear",
1574 "SM: Non-zero reserved field set in Root Entry",
1575 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x3B-0x3F */
1576 "SM: Error attempting to access Context Entry",
1577 "SM: Present bit in Context Entry is clear",
1578 "SM: Non-zero reserved field set in the Context Entry",
1579 "SM: Invalid Context Entry",
1580 "SM: DTE field in Context Entry is clear",
1581 "SM: PASID Enable field in Context Entry is clear",
1582 "SM: PASID is larger than the max in Context Entry",
1583 "SM: PRE field in Context-Entry is clear",
1584 "SM: RID_PASID field error in Context-Entry",
1585 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x49-0x4F */
1586 "SM: Error attempting to access the PASID Directory Entry",
1587 "SM: Present bit in Directory Entry is clear",
1588 "SM: Non-zero reserved field set in PASID Directory Entry",
1589 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x53-0x57 */
1590 "SM: Error attempting to access PASID Table Entry",
1591 "SM: Present bit in PASID Table Entry is clear",
1592 "SM: Non-zero reserved field set in PASID Table Entry",
1593 "SM: Invalid Scalable-Mode PASID Table Entry",
1594 "SM: ERE field is clear in PASID Table Entry",
1595 "SM: SRE field is clear in PASID Table Entry",
1596 "Unknown", "Unknown",/* 0x5E-0x5F */
1597 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x60-0x67 */
1598 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x68-0x6F */
1599 "SM: Error attempting to access first-level paging entry",
1600 "SM: Present bit in first-level paging entry is clear",
1601 "SM: Non-zero reserved field set in first-level paging entry",
1602 "SM: Error attempting to access FL-PML4 entry",
1603 "SM: First-level entry address beyond MGAW in Nested translation",
1604 "SM: Read permission error in FL-PML4 entry in Nested translation",
1605 "SM: Read permission error in first-level paging entry in Nested translation",
1606 "SM: Write permission error in first-level paging entry in Nested translation",
1607 "SM: Error attempting to access second-level paging entry",
1608 "SM: Read/Write permission error in second-level paging entry",
1609 "SM: Non-zero reserved field set in second-level paging entry",
1610 "SM: Invalid second-level page table pointer",
1611 "SM: A/D bit update needed in second-level entry when set up in no snoop",
1612 "Unknown", "Unknown", "Unknown", /* 0x7D-0x7F */
1613 "SM: Address in first-level translation is not canonical",
1614 "SM: U/S set 0 for first-level translation with user privilege",
1615 "SM: No execute permission for request with PASID and ER=1",
1616 "SM: Address beyond the DMA hardware max",
1617 "SM: Second-level entry address beyond the max",
1618 "SM: No write permission for Write/AtomicOp request",
1619 "SM: No read permission for Read/AtomicOp request",
1620 "SM: Invalid address-interrupt address",
1621 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x88-0x8F */
1622 "SM: A/D bit update needed in first-level entry when set up in no snoop",
1623};
1624
Suresh Siddha95a02e92012-03-30 11:47:07 -07001625static const char *irq_remap_fault_reasons[] =
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001626{
1627 "Detected reserved fields in the decoded interrupt-remapped request",
1628 "Interrupt index exceeded the interrupt-remapping table size",
1629 "Present field in the IRTE entry is clear",
1630 "Error accessing interrupt-remapping table pointed by IRTA_REG",
1631 "Detected reserved fields in the IRTE entry",
1632 "Blocked a compatibility format interrupt request",
1633 "Blocked an interrupt request due to source-id verification failure",
1634};
1635
Rashika Kheria21004dc2013-12-18 12:01:46 +05301636static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
Suresh Siddha0ac24912009-03-16 17:04:54 -07001637{
Dan Carpenterfefe1ed2012-05-13 20:09:38 +03001638 if (fault_reason >= 0x20 && (fault_reason - 0x20 <
1639 ARRAY_SIZE(irq_remap_fault_reasons))) {
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001640 *fault_type = INTR_REMAP;
Suresh Siddha95a02e92012-03-30 11:47:07 -07001641 return irq_remap_fault_reasons[fault_reason - 0x20];
Kyung Min Parkfd730002019-09-06 11:14:02 -07001642 } else if (fault_reason >= 0x30 && (fault_reason - 0x30 <
1643 ARRAY_SIZE(dma_remap_sm_fault_reasons))) {
1644 *fault_type = DMA_REMAP;
1645 return dma_remap_sm_fault_reasons[fault_reason - 0x30];
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001646 } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
1647 *fault_type = DMA_REMAP;
1648 return dma_remap_fault_reasons[fault_reason];
1649 } else {
1650 *fault_type = UNKNOWN;
Suresh Siddha0ac24912009-03-16 17:04:54 -07001651 return "Unknown";
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001652 }
Suresh Siddha0ac24912009-03-16 17:04:54 -07001653}
1654
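/*
 * Editor's note -- decode examples, illustrative only: reason 0x05 maps
 * to dma_remap_fault_reasons[5] ("PTE Write access is not set", DMA_REMAP),
 * reason 0x25 to irq_remap_fault_reasons[5] ("Blocked a compatibility
 * format interrupt request", INTR_REMAP), and reasons from 0x30 up index
 * the scalable-mode table; anything else is reported as "Unknown".
 */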
David Woodhouse12082252015-10-07 15:37:03 +01001655
1656static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
1657{
1658 if (iommu->irq == irq)
1659 return DMAR_FECTL_REG;
1660 else if (iommu->pr_irq == irq)
1661 return DMAR_PECTL_REG;
1662 else
1663 BUG();
1664}
1665
Thomas Gleixner5c2837f2010-09-28 17:15:11 +02001666void dmar_msi_unmask(struct irq_data *data)
Suresh Siddha0ac24912009-03-16 17:04:54 -07001667{
Thomas Gleixnerdced35a2011-03-28 17:49:12 +02001668 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
David Woodhouse12082252015-10-07 15:37:03 +01001669 int reg = dmar_msi_reg(iommu, data->irq);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001670 unsigned long flag;
1671
1672 /* unmask it */
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001673 raw_spin_lock_irqsave(&iommu->register_lock, flag);
David Woodhouse12082252015-10-07 15:37:03 +01001674 writel(0, iommu->reg + reg);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001675 /* Read a reg to force flush the post write */
David Woodhouse12082252015-10-07 15:37:03 +01001676 readl(iommu->reg + reg);
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001677 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001678}
1679
Thomas Gleixner5c2837f2010-09-28 17:15:11 +02001680void dmar_msi_mask(struct irq_data *data)
Suresh Siddha0ac24912009-03-16 17:04:54 -07001681{
Thomas Gleixnerdced35a2011-03-28 17:49:12 +02001682 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
David Woodhouse12082252015-10-07 15:37:03 +01001683 int reg = dmar_msi_reg(iommu, data->irq);
1684 unsigned long flag;
Suresh Siddha0ac24912009-03-16 17:04:54 -07001685
1686 /* mask it */
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001687 raw_spin_lock_irqsave(&iommu->register_lock, flag);
David Woodhouse12082252015-10-07 15:37:03 +01001688 writel(DMA_FECTL_IM, iommu->reg + reg);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001689 /* Read a reg to force flush the post write */
David Woodhouse12082252015-10-07 15:37:03 +01001690 readl(iommu->reg + reg);
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001691 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001692}
1693
1694void dmar_msi_write(int irq, struct msi_msg *msg)
1695{
Thomas Gleixnerdced35a2011-03-28 17:49:12 +02001696 struct intel_iommu *iommu = irq_get_handler_data(irq);
David Woodhouse12082252015-10-07 15:37:03 +01001697 int reg = dmar_msi_reg(iommu, irq);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001698 unsigned long flag;
1699
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001700 raw_spin_lock_irqsave(&iommu->register_lock, flag);
David Woodhouse12082252015-10-07 15:37:03 +01001701 writel(msg->data, iommu->reg + reg + 4);
1702 writel(msg->address_lo, iommu->reg + reg + 8);
1703 writel(msg->address_hi, iommu->reg + reg + 12);
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001704 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001705}
1706
1707void dmar_msi_read(int irq, struct msi_msg *msg)
1708{
Thomas Gleixnerdced35a2011-03-28 17:49:12 +02001709 struct intel_iommu *iommu = irq_get_handler_data(irq);
David Woodhouse12082252015-10-07 15:37:03 +01001710 int reg = dmar_msi_reg(iommu, irq);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001711 unsigned long flag;
1712
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001713 raw_spin_lock_irqsave(&iommu->register_lock, flag);
David Woodhouse12082252015-10-07 15:37:03 +01001714 msg->data = readl(iommu->reg + reg + 4);
1715 msg->address_lo = readl(iommu->reg + reg + 8);
1716 msg->address_hi = readl(iommu->reg + reg + 12);
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001717 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001718}
1719
1720static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
Kyung Min Parkfd730002019-09-06 11:14:02 -07001721 u8 fault_reason, int pasid, u16 source_id,
1722 unsigned long long addr)
Suresh Siddha0ac24912009-03-16 17:04:54 -07001723{
1724 const char *reason;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001725 int fault_type;
Suresh Siddha0ac24912009-03-16 17:04:54 -07001726
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001727 reason = dmar_get_fault_reason(fault_reason, &fault_type);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001728
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001729 if (fault_type == INTR_REMAP)
Alex Williamsona0fe14d2016-03-17 14:12:31 -06001730 pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index %llx [fault reason %02d] %s\n",
1731 source_id >> 8, PCI_SLOT(source_id & 0xFF),
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001732 PCI_FUNC(source_id & 0xFF), addr >> 48,
1733 fault_reason, reason);
1734 else
Kyung Min Parkfd730002019-09-06 11:14:02 -07001735 pr_err("[%s] Request device [%02x:%02x.%d] PASID %x fault addr %llx [fault reason %02d] %s\n",
Alex Williamsona0fe14d2016-03-17 14:12:31 -06001736 type ? "DMA Read" : "DMA Write",
1737 source_id >> 8, PCI_SLOT(source_id & 0xFF),
Kyung Min Parkfd730002019-09-06 11:14:02 -07001738 PCI_FUNC(source_id & 0xFF), pasid, addr,
1739 fault_reason, reason);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001740 return 0;
1741}
1742
1743#define PRIMARY_FAULT_REG_LEN (16)
Suresh Siddha1531a6a2009-03-16 17:04:57 -07001744irqreturn_t dmar_fault(int irq, void *dev_id)
Suresh Siddha0ac24912009-03-16 17:04:54 -07001745{
1746 struct intel_iommu *iommu = dev_id;
1747 int reg, fault_index;
1748 u32 fault_status;
1749 unsigned long flag;
Alex Williamsonc43fce42016-03-17 14:12:25 -06001750 static DEFINE_RATELIMIT_STATE(rs,
1751 DEFAULT_RATELIMIT_INTERVAL,
1752 DEFAULT_RATELIMIT_BURST);
1753
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001754 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001755 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
Dmitry Safonov6c50d792018-03-31 01:33:11 +01001756 if (fault_status && __ratelimit(&rs))
Donald Dutilebf947fcb2012-06-04 17:29:01 -04001757 pr_err("DRHD: handling fault status reg %x\n", fault_status);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001758
1759 /* TBD: ignore advanced fault log currently */
1760 if (!(fault_status & DMA_FSTS_PPF))
Li, Zhen-Huabd5cdad2013-03-25 16:20:52 +08001761 goto unlock_exit;
Suresh Siddha0ac24912009-03-16 17:04:54 -07001762
1763 fault_index = dma_fsts_fault_record_index(fault_status);
1764 reg = cap_fault_reg_offset(iommu->cap);
1765 while (1) {
Dmitry Safonov6c50d792018-03-31 01:33:11 +01001766 /* Disable printing, simply clear the fault when ratelimited */
1767 bool ratelimited = !__ratelimit(&rs);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001768 u8 fault_reason;
1769 u16 source_id;
1770 u64 guest_addr;
Kyung Min Parkfd730002019-09-06 11:14:02 -07001771 int type, pasid;
Suresh Siddha0ac24912009-03-16 17:04:54 -07001772 u32 data;
Kyung Min Parkfd730002019-09-06 11:14:02 -07001773 bool pasid_present;
Suresh Siddha0ac24912009-03-16 17:04:54 -07001774
1775 /* highest 32 bits */
1776 data = readl(iommu->reg + reg +
1777 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1778 if (!(data & DMA_FRCD_F))
1779 break;
1780
Alex Williamsonc43fce42016-03-17 14:12:25 -06001781 if (!ratelimited) {
1782 fault_reason = dma_frcd_fault_reason(data);
1783 type = dma_frcd_type(data);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001784
Kyung Min Parkfd730002019-09-06 11:14:02 -07001785 pasid = dma_frcd_pasid_value(data);
Alex Williamsonc43fce42016-03-17 14:12:25 -06001786 data = readl(iommu->reg + reg +
1787 fault_index * PRIMARY_FAULT_REG_LEN + 8);
1788 source_id = dma_frcd_source_id(data);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001789
Kyung Min Parkfd730002019-09-06 11:14:02 -07001790 pasid_present = dma_frcd_pasid_present(data);
Alex Williamsonc43fce42016-03-17 14:12:25 -06001791 guest_addr = dmar_readq(iommu->reg + reg +
1792 fault_index * PRIMARY_FAULT_REG_LEN);
1793 guest_addr = dma_frcd_page_addr(guest_addr);
1794 }
1795
Suresh Siddha0ac24912009-03-16 17:04:54 -07001796 /* clear the fault */
1797 writel(DMA_FRCD_F, iommu->reg + reg +
1798 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1799
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001800 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001801
Alex Williamsonc43fce42016-03-17 14:12:25 -06001802 if (!ratelimited)
Kyung Min Parkfd730002019-09-06 11:14:02 -07001803 /* Using pasid -1 if pasid is not present */
Alex Williamsonc43fce42016-03-17 14:12:25 -06001804 dmar_fault_do_one(iommu, type, fault_reason,
Kyung Min Parkfd730002019-09-06 11:14:02 -07001805 pasid_present ? pasid : -1,
Alex Williamsonc43fce42016-03-17 14:12:25 -06001806 source_id, guest_addr);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001807
1808 fault_index++;
Troy Heber8211a7b2009-08-19 15:26:11 -06001809 if (fault_index >= cap_num_fault_regs(iommu->cap))
Suresh Siddha0ac24912009-03-16 17:04:54 -07001810 fault_index = 0;
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001811 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001812 }
Suresh Siddha0ac24912009-03-16 17:04:54 -07001813
Lu Baolu973b5462017-11-03 10:51:33 -06001814 writel(DMA_FSTS_PFO | DMA_FSTS_PPF | DMA_FSTS_PRO,
1815 iommu->reg + DMAR_FSTS_REG);
Li, Zhen-Huabd5cdad2013-03-25 16:20:52 +08001816
1817unlock_exit:
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001818 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001819 return IRQ_HANDLED;
1820}
1821
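/*
 * Editor's note -- fault-record walk, summarising the loop above: each
 * primary fault record is PRIMARY_FAULT_REG_LEN (16) bytes starting at
 * cap_fault_reg_offset(); the low 64 bits hold the faulting page address,
 * the words at +8 and +12 carry the source-id, PASID information, fault
 * reason, request type and the F (valid) bit, and writing DMA_FRCD_F back
 * to +12 retires the record. The index wraps at cap_num_fault_regs().
 */
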
1822int dmar_set_interrupt(struct intel_iommu *iommu)
1823{
1824 int irq, ret;
1825
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001826 /*
1827 * Check if the fault interrupt is already initialized.
1828 */
1829 if (iommu->irq)
1830 return 0;
1831
Jiang Liu34742db2015-04-13 14:11:41 +08001832 irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
1833 if (irq > 0) {
1834 iommu->irq = irq;
1835 } else {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001836 pr_err("No free IRQ vectors\n");
Suresh Siddha0ac24912009-03-16 17:04:54 -07001837 return -EINVAL;
1838 }
1839
Thomas Gleixner477694e2011-07-19 16:25:42 +02001840 ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001841 if (ret)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001842 pr_err("Can't request irq\n");
Suresh Siddha0ac24912009-03-16 17:04:54 -07001843 return ret;
1844}
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001845
1846int __init enable_drhd_fault_handling(void)
1847{
1848 struct dmar_drhd_unit *drhd;
Jiang Liu7c919772014-01-06 14:18:18 +08001849 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001850
1851 /*
1852 * Enable fault control interrupt.
1853 */
Jiang Liu7c919772014-01-06 14:18:18 +08001854 for_each_iommu(iommu, drhd) {
Li, Zhen-Huabd5cdad2013-03-25 16:20:52 +08001855 u32 fault_status;
Jiang Liu7c919772014-01-06 14:18:18 +08001856 int ret = dmar_set_interrupt(iommu);
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001857
1858 if (ret) {
Donald Dutilee9071b02012-06-08 17:13:11 -04001859			pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001860 (unsigned long long)drhd->reg_base_addr, ret);
1861 return -1;
1862 }
Suresh Siddha7f99d942010-11-30 22:22:29 -08001863
1864 /*
1865 * Clear any previous faults.
1866 */
1867 dmar_fault(iommu->irq, iommu);
Li, Zhen-Huabd5cdad2013-03-25 16:20:52 +08001868 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1869 writel(fault_status, iommu->reg + DMAR_FSTS_REG);
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001870 }
1871
1872 return 0;
1873}
Fenghua Yueb4a52b2009-03-27 14:22:43 -07001874
1875/*
1876 * Re-enable Queued Invalidation interface.
1877 */
1878int dmar_reenable_qi(struct intel_iommu *iommu)
1879{
1880 if (!ecap_qis(iommu->ecap))
1881 return -ENOENT;
1882
1883 if (!iommu->qi)
1884 return -ENOENT;
1885
1886 /*
1887 * First disable queued invalidation.
1888 */
1889 dmar_disable_qi(iommu);
1890 /*
1891 * Then enable queued invalidation again. Since there is no pending
1892 * invalidation requests now, it's safe to re-enable queued
1893 * invalidation.
1894 */
1895 __dmar_enable_qi(iommu);
1896
1897 return 0;
1898}
Youquan Song074835f2009-09-09 12:05:39 -04001899
1900/*
1901 * Check interrupt remapping support in DMAR table description.
1902 */
Luck, Tony0b8973a2009-12-16 22:59:29 +00001903int __init dmar_ir_support(void)
Youquan Song074835f2009-09-09 12:05:39 -04001904{
1905 struct acpi_table_dmar *dmar;
1906 dmar = (struct acpi_table_dmar *)dmar_tbl;
Arnaud Patard4f506e02010-03-25 18:02:58 +00001907 if (!dmar)
1908 return 0;
Youquan Song074835f2009-09-09 12:05:39 -04001909 return dmar->flags & 0x1;
1910}
Jiang Liu694835d2014-01-06 14:18:16 +08001911
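/*
 * Editor's note -- bit 0 of the DMAR table flags is the INTR_REMAP
 * indication, so e.g. flags = 0x01 reports interrupt-remapping support.
 * Other platform hints (such as the DMA control opt-in consumed by
 * dmar_platform_optin() at the end of this file) live in the same flags
 * field.
 */
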
Jiang Liu6b197242014-11-09 22:47:58 +08001912/* Check whether DMAR units are in use */
1913static inline bool dmar_in_use(void)
1914{
1915 return irq_remapping_enabled || intel_iommu_enabled;
1916}
1917
Jiang Liua868e6b2014-01-06 14:18:20 +08001918static int __init dmar_free_unused_resources(void)
1919{
1920 struct dmar_drhd_unit *dmaru, *dmaru_n;
1921
Jiang Liu6b197242014-11-09 22:47:58 +08001922 if (dmar_in_use())
Jiang Liua868e6b2014-01-06 14:18:20 +08001923 return 0;
1924
Jiang Liu2e455282014-02-19 14:07:36 +08001925 if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
1926 bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);
Jiang Liu59ce0512014-02-19 14:07:35 +08001927
Jiang Liu3a5670e2014-02-19 14:07:33 +08001928 down_write(&dmar_global_lock);
Jiang Liua868e6b2014-01-06 14:18:20 +08001929 list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
1930 list_del(&dmaru->list);
1931 dmar_free_drhd(dmaru);
1932 }
Jiang Liu3a5670e2014-02-19 14:07:33 +08001933 up_write(&dmar_global_lock);
Jiang Liua868e6b2014-01-06 14:18:20 +08001934
1935 return 0;
1936}
1937
1938late_initcall(dmar_free_unused_resources);
Konrad Rzeszutek Wilk4db77ff2010-08-26 13:58:04 -04001939IOMMU_INIT_POST(detect_intel_iommu);
Jiang Liu6b197242014-11-09 22:47:58 +08001940
1941/*
1942 * DMAR Hotplug Support
1943 * For more details, please refer to Intel(R) Virtualization Technology
 1944 * for Directed-IO Architecture Specification, Rev 2.2, Section 8.8
1945 * "Remapping Hardware Unit Hot Plug".
1946 */
Andy Shevchenko94116f82017-06-05 19:40:46 +03001947static guid_t dmar_hp_guid =
1948 GUID_INIT(0xD8C1A3A6, 0xBE9B, 0x4C9B,
1949 0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF);
Jiang Liu6b197242014-11-09 22:47:58 +08001950
1951/*
1952 * Currently there's only one revision and BIOS will not check the revision id,
1953 * so use 0 for safety.
1954 */
1955#define DMAR_DSM_REV_ID 0
1956#define DMAR_DSM_FUNC_DRHD 1
1957#define DMAR_DSM_FUNC_ATSR 2
1958#define DMAR_DSM_FUNC_RHSA 3
1959
1960static inline bool dmar_detect_dsm(acpi_handle handle, int func)
1961{
Andy Shevchenko94116f82017-06-05 19:40:46 +03001962 return acpi_check_dsm(handle, &dmar_hp_guid, DMAR_DSM_REV_ID, 1 << func);
Jiang Liu6b197242014-11-09 22:47:58 +08001963}
1964
1965static int dmar_walk_dsm_resource(acpi_handle handle, int func,
1966 dmar_res_handler_t handler, void *arg)
1967{
1968 int ret = -ENODEV;
1969 union acpi_object *obj;
1970 struct acpi_dmar_header *start;
1971 struct dmar_res_callback callback;
1972 static int res_type[] = {
1973 [DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
1974 [DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
1975 [DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
1976 };
1977
1978 if (!dmar_detect_dsm(handle, func))
1979 return 0;
1980
Andy Shevchenko94116f82017-06-05 19:40:46 +03001981 obj = acpi_evaluate_dsm_typed(handle, &dmar_hp_guid, DMAR_DSM_REV_ID,
Jiang Liu6b197242014-11-09 22:47:58 +08001982 func, NULL, ACPI_TYPE_BUFFER);
1983 if (!obj)
1984 return -ENODEV;
1985
1986 memset(&callback, 0, sizeof(callback));
1987 callback.cb[res_type[func]] = handler;
1988 callback.arg[res_type[func]] = arg;
1989 start = (struct acpi_dmar_header *)obj->buffer.pointer;
1990 ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);
1991
1992 ACPI_FREE(obj);
1993
1994 return ret;
1995}
1996
1997static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
1998{
1999 int ret;
2000 struct dmar_drhd_unit *dmaru;
2001
2002 dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
2003 if (!dmaru)
2004 return -ENODEV;
2005
2006 ret = dmar_ir_hotplug(dmaru, true);
2007 if (ret == 0)
2008 ret = dmar_iommu_hotplug(dmaru, true);
2009
2010 return ret;
2011}
2012
2013static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
2014{
2015 int i, ret;
2016 struct device *dev;
2017 struct dmar_drhd_unit *dmaru;
2018
2019 dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
2020 if (!dmaru)
2021 return 0;
2022
2023 /*
2024 * All PCI devices managed by this unit should have been destroyed.
2025 */
Linus Torvalds194dc872016-07-27 20:03:31 -07002026 if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) {
Jiang Liu6b197242014-11-09 22:47:58 +08002027 for_each_active_dev_scope(dmaru->devices,
2028 dmaru->devices_cnt, i, dev)
2029 return -EBUSY;
Linus Torvalds194dc872016-07-27 20:03:31 -07002030 }
Jiang Liu6b197242014-11-09 22:47:58 +08002031
2032 ret = dmar_ir_hotplug(dmaru, false);
2033 if (ret == 0)
2034 ret = dmar_iommu_hotplug(dmaru, false);
2035
2036 return ret;
2037}
2038
2039static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
2040{
2041 struct dmar_drhd_unit *dmaru;
2042
2043 dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
2044 if (dmaru) {
2045 list_del_rcu(&dmaru->list);
2046 synchronize_rcu();
2047 dmar_free_drhd(dmaru);
2048 }
2049
2050 return 0;
2051}
2052
2053static int dmar_hotplug_insert(acpi_handle handle)
2054{
2055 int ret;
2056 int drhd_count = 0;
2057
2058 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2059 &dmar_validate_one_drhd, (void *)1);
2060 if (ret)
2061 goto out;
2062
2063 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2064 &dmar_parse_one_drhd, (void *)&drhd_count);
2065 if (ret == 0 && drhd_count == 0) {
2066 pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
2067 goto out;
2068 } else if (ret) {
2069 goto release_drhd;
2070 }
2071
2072 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
2073 &dmar_parse_one_rhsa, NULL);
2074 if (ret)
2075 goto release_drhd;
2076
2077 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
2078 &dmar_parse_one_atsr, NULL);
2079 if (ret)
2080 goto release_atsr;
2081
2082 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2083 &dmar_hp_add_drhd, NULL);
2084 if (!ret)
2085 return 0;
2086
2087 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2088 &dmar_hp_remove_drhd, NULL);
2089release_atsr:
2090 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
2091 &dmar_release_one_atsr, NULL);
2092release_drhd:
2093 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2094 &dmar_hp_release_drhd, NULL);
2095out:
2096 return ret;
2097}
2098
2099static int dmar_hotplug_remove(acpi_handle handle)
2100{
2101 int ret;
2102
2103 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
2104 &dmar_check_one_atsr, NULL);
2105 if (ret)
2106 return ret;
2107
2108 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2109 &dmar_hp_remove_drhd, NULL);
2110 if (ret == 0) {
2111 WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
2112 &dmar_release_one_atsr, NULL));
2113 WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2114 &dmar_hp_release_drhd, NULL));
2115 } else {
2116 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2117 &dmar_hp_add_drhd, NULL);
2118 }
2119
2120 return ret;
2121}
2122
Jiang Liud35165a2014-11-09 22:47:59 +08002123static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
2124 void *context, void **retval)
2125{
2126 acpi_handle *phdl = retval;
2127
2128 if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
2129 *phdl = handle;
2130 return AE_CTRL_TERMINATE;
2131 }
2132
2133 return AE_OK;
2134}
2135
Jiang Liu6b197242014-11-09 22:47:58 +08002136static int dmar_device_hotplug(acpi_handle handle, bool insert)
2137{
2138 int ret;
Jiang Liud35165a2014-11-09 22:47:59 +08002139 acpi_handle tmp = NULL;
2140 acpi_status status;
Jiang Liu6b197242014-11-09 22:47:58 +08002141
2142 if (!dmar_in_use())
2143 return 0;
2144
Jiang Liud35165a2014-11-09 22:47:59 +08002145 if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
2146 tmp = handle;
2147 } else {
2148 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
2149 ACPI_UINT32_MAX,
2150 dmar_get_dsm_handle,
2151 NULL, NULL, &tmp);
2152 if (ACPI_FAILURE(status)) {
2153 pr_warn("Failed to locate _DSM method.\n");
2154 return -ENXIO;
2155 }
2156 }
2157 if (tmp == NULL)
Jiang Liu6b197242014-11-09 22:47:58 +08002158 return 0;
2159
2160 down_write(&dmar_global_lock);
2161 if (insert)
Jiang Liud35165a2014-11-09 22:47:59 +08002162 ret = dmar_hotplug_insert(tmp);
Jiang Liu6b197242014-11-09 22:47:58 +08002163 else
Jiang Liud35165a2014-11-09 22:47:59 +08002164 ret = dmar_hotplug_remove(tmp);
Jiang Liu6b197242014-11-09 22:47:58 +08002165 up_write(&dmar_global_lock);
2166
2167 return ret;
2168}
2169
2170int dmar_device_add(acpi_handle handle)
2171{
2172 return dmar_device_hotplug(handle, true);
2173}
2174
2175int dmar_device_remove(acpi_handle handle)
2176{
2177 return dmar_device_hotplug(handle, false);
2178}
Lu Baolu89a60792018-10-23 15:45:01 +08002179
2180/*
2181 * dmar_platform_optin - Is %DMA_CTRL_PLATFORM_OPT_IN_FLAG set in DMAR table
2182 *
2183 * Returns true if the platform has %DMA_CTRL_PLATFORM_OPT_IN_FLAG set in
2184 * the ACPI DMAR table. This means that the platform boot firmware has made
2185 * sure no device can issue DMA outside of RMRR regions.
2186 */
2187bool dmar_platform_optin(void)
2188{
2189 struct acpi_table_dmar *dmar;
2190 acpi_status status;
2191 bool ret;
2192
2193 status = acpi_get_table(ACPI_SIG_DMAR, 0,
2194 (struct acpi_table_header **)&dmar);
2195 if (ACPI_FAILURE(status))
2196 return false;
2197
2198 ret = !!(dmar->flags & DMAR_PLATFORM_OPT_IN);
2199 acpi_put_table((struct acpi_table_header *)dmar);
2200
2201 return ret;
2202}
2203EXPORT_SYMBOL_GPL(dmar_platform_optin);