/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"

/*
 * Assumptions:
 * 1) The hotplug framework guarantees that DMAR unit will be hot-added
 *    before IO devices managed by that unit.
 * 2) The hotplug framework guarantees that DMAR unit will be hot-removed
 *    after IO devices managed by that unit.
 * 3) Hotplug events are rare.
 *
 * Locking rules for DMA and interrupt remapping related global data structures:
 * 1) Use dmar_global_lock in process context
 * 2) Use RCU in interrupt context
 */
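/*
 * Illustrative sketch of the rules above (based on the helpers used later
 * in this file, e.g. dmar_pci_bus_notifier() and
 * dmar_find_matched_drhd_unit()); not additional API, just the intended
 * usage pattern:
 *
 *	// process context
 *	down_read(&dmar_global_lock);
 *	for_each_drhd_unit(drhd)
 *		...;
 *	up_read(&dmar_global_lock);
 *
 *	// interrupt/atomic context
 *	rcu_read_lock();
 *	for_each_active_dev_scope(devices, cnt, i, dev)
 *		...;
 *	rcu_read_unlock();
 */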
DECLARE_RWSEM(dmar_global_lock);
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
static int dmar_dev_scope_status = 1;

static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so scan the list will find it at
	 * the very end.
	 */
	if (drhd->include_all)
		list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
	else
		list_add_rcu(&drhd->list, &dmar_drhd_units);
}

void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
{
	struct acpi_dmar_device_scope *scope;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
			 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
			pr_warn("Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return NULL;

	return kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
}

void dmar_free_dev_scope(struct pci_dev __rcu ***devices, int *cnt)
{
	int i;
	struct pci_dev *tmp_dev;

	if (*devices && *cnt) {
		for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
			pci_dev_put(tmp_dev);
		kfree(*devices);
	}

	*devices = NULL;
	*cnt = 0;
}

/* Optimize out kzalloc()/kfree() for normal cases */
static char dmar_pci_notify_info_buf[64];

static struct dmar_pci_notify_info *
dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
{
	int level = 0;
	size_t size;
	struct pci_dev *tmp;
	struct dmar_pci_notify_info *info;

	BUG_ON(dev->is_virtfn);

	/* Only generate path[] for device addition event */
	if (event == BUS_NOTIFY_ADD_DEVICE)
		for (tmp = dev; tmp; tmp = tmp->bus->self)
			level++;

	size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
	if (size <= sizeof(dmar_pci_notify_info_buf)) {
		info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
	} else {
		info = kzalloc(size, GFP_KERNEL);
		if (!info) {
			pr_warn("Out of memory when allocating notify_info "
				"for %s.\n", pci_name(dev));
			if (dmar_dev_scope_status == 0)
				dmar_dev_scope_status = -ENOMEM;
			return NULL;
		}
	}

	info->event = event;
	info->dev = dev;
	info->seg = pci_domain_nr(dev->bus);
	info->level = level;
	if (event == BUS_NOTIFY_ADD_DEVICE) {
		for (tmp = dev, level--; tmp; tmp = tmp->bus->self) {
			info->path[level].device = PCI_SLOT(tmp->devfn);
			info->path[level].function = PCI_FUNC(tmp->devfn);
			if (pci_is_root_bus(tmp->bus))
				info->bus = tmp->bus->number;
		}
	}

	return info;
}

static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
{
	if ((void *)info != dmar_pci_notify_info_buf)
		kfree(info);
}

static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
				struct acpi_dmar_pci_path *path, int count)
{
	int i;

	if (info->bus != bus)
		return false;
	if (info->level != count)
		return false;

	for (i = 0; i < count; i++) {
		if (path[i].device != info->path[i].device ||
		    path[i].function != info->path[i].function)
			return false;
	}

	return true;
}

/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
			  void *start, void *end, u16 segment,
			  struct pci_dev __rcu **devices, int devices_cnt)
{
	int i, level;
	struct pci_dev *tmp, *dev = info->dev;
	struct acpi_dmar_device_scope *scope;
	struct acpi_dmar_pci_path *path;

	if (segment != info->seg)
		return 0;

	for (; start < end; start += scope->length) {
		scope = start;
		if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
		    scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			continue;

		path = (struct acpi_dmar_pci_path *)(scope + 1);
		level = (scope->length - sizeof(*scope)) / sizeof(*path);
		if (!dmar_match_pci_path(info, scope->bus, path, level))
			continue;

		if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT) ^
		    (dev->hdr_type == PCI_HEADER_TYPE_NORMAL)) {
			pr_warn("Device scope type does not match for %s\n",
				pci_name(dev));
			return -EINVAL;
		}

		for_each_dev_scope(devices, devices_cnt, i, tmp)
			if (tmp == NULL) {
				rcu_assign_pointer(devices[i],
						   pci_dev_get(dev));
				return 1;
			}
		BUG_ON(i >= devices_cnt);
	}

	return 0;
}

int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
			  struct pci_dev __rcu **devices, int count)
{
	int index;
	struct pci_dev *tmp;

	if (info->seg != segment)
		return 0;

	for_each_active_dev_scope(devices, count, index, tmp)
		if (tmp == info->dev) {
			rcu_assign_pointer(devices[index], NULL);
			synchronize_rcu();
			pci_dev_put(tmp);
			return 1;
		}

	return 0;
}

static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
{
	int ret = 0;
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	for_each_drhd_unit(dmaru) {
		if (dmaru->include_all)
			continue;

		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit, header);
		ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
				((void *)drhd) + drhd->header.length,
				dmaru->segment,
				dmaru->devices, dmaru->devices_cnt);
		if (ret != 0)
			break;
	}
	if (ret >= 0)
		ret = dmar_iommu_notify_scope_dev(info);
	if (ret < 0 && dmar_dev_scope_status == 0)
		dmar_dev_scope_status = ret;

	return ret;
}

static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
{
	struct dmar_drhd_unit *dmaru;

	for_each_drhd_unit(dmaru)
		if (dmar_remove_dev_scope(info, dmaru->segment,
			dmaru->devices, dmaru->devices_cnt))
			break;
	dmar_iommu_notify_scope_dev(info);
}

static int dmar_pci_bus_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct pci_dev *pdev = to_pci_dev(data);
	struct dmar_pci_notify_info *info;

	/* Only care about add/remove events for physical functions */
	if (pdev->is_virtfn)
		return NOTIFY_DONE;
	if (action != BUS_NOTIFY_ADD_DEVICE && action != BUS_NOTIFY_DEL_DEVICE)
		return NOTIFY_DONE;

	info = dmar_alloc_pci_notify_info(pdev, action);
	if (!info)
		return NOTIFY_DONE;

	down_write(&dmar_global_lock);
	if (action == BUS_NOTIFY_ADD_DEVICE)
		dmar_pci_bus_add_dev(info);
	else if (action == BUS_NOTIFY_DEL_DEVICE)
		dmar_pci_bus_del_dev(info);
	up_write(&dmar_global_lock);

	dmar_free_pci_notify_info(info);

	return NOTIFY_OK;
}

static struct notifier_block dmar_pci_bus_nb = {
	.notifier_call = dmar_pci_bus_notifier,
	.priority = INT_MIN,
};

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represent one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
	if (!dmaru->include_all) {
		dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
					((void *)drhd) + drhd->header.length,
					&dmaru->devices_cnt);
		if (dmaru->devices_cnt && dmaru->devices == NULL) {
			kfree(dmaru);
			return -ENOMEM;
		}
	}

	ret = alloc_iommu(dmaru);
	if (ret) {
		if (!dmaru->include_all)
			dmar_free_dev_scope(&dmaru->devices,
					    &dmaru->devices_cnt);
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
	if (dmaru->devices && dmaru->devices_cnt)
		dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
	if (dmaru->iommu)
		free_iommu(dmaru->iommu);
	kfree(dmaru);
}

#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		drhd->reg_base_addr,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info("DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info("ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we could find DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn("Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;
	int drhd_count = 0;

	/*
	 * Do it again, earlier dmar_tbl mapping could be mapped with
	 * fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use DMAR copy
	 * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info("Host address width %d\n", dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			drhd_count++;
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
			ret = dmar_parse_one_rmrr(entry_header);
			break;
		case ACPI_DMAR_TYPE_ATSR:
			ret = dmar_parse_one_atsr(entry_header);
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
			ret = dmar_parse_one_rhsa(entry_header);
#endif
			break;
		default:
			pr_warn("Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	if (drhd_count == 0)
		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
	return ret;
}

static int dmar_pci_device_match(struct pci_dev __rcu *devices[], int cnt,
				 struct pci_dev *dev)
{
	int index;
	struct pci_dev *tmp;

	while (dev) {
		for_each_active_dev_scope(devices, cnt, index, tmp)
			if (dev == tmp)
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	rcu_read_lock();
	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			goto out;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			goto out;
	}
	dmaru = NULL;
out:
	rcu_read_unlock();

	return dmaru;
}

int __init dmar_dev_scope_init(void)
{
	struct pci_dev *dev = NULL;
	struct dmar_pci_notify_info *info;

	if (dmar_dev_scope_status != 1)
		return dmar_dev_scope_status;

	if (list_empty(&dmar_drhd_units)) {
		dmar_dev_scope_status = -ENODEV;
	} else {
		dmar_dev_scope_status = 0;

		for_each_pci_dev(dev) {
			if (dev->is_virtfn)
				continue;

			info = dmar_alloc_pci_notify_info(dev,
					BUS_NOTIFY_ADD_DEVICE);
			if (!info) {
				return dmar_dev_scope_status;
			} else {
				dmar_pci_bus_add_dev(info);
				dmar_free_pci_notify_info(info);
			}
		}

		bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
	}

	return dmar_dev_scope_status;
}


int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized == 0) {
		ret = parse_dmar_table();
		if (ret < 0) {
			if (ret != -ENODEV)
				pr_info("parse DMAR table failure.\n");
		} else if (list_empty(&dmar_drhd_units)) {
			pr_info("No DMAR devices found\n");
			ret = -ENODEV;
		}

		if (ret < 0)
			dmar_table_initialized = ret;
		else
			dmar_table_initialized = 1;
	}

	return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}

static int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			void __iomem *addr;
			u64 cap, ecap;

			drhd = (void *)entry_header;
			if (!drhd->address) {
				warn_invalid_dmar(0, "");
				goto failed;
			}

			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
			if (!addr) {
				printk("IOMMU: can't validate: %llx\n", drhd->address);
				goto failed;
			}
			cap = dmar_readq(addr + DMAR_CAP_REG);
			ecap = dmar_readq(addr + DMAR_ECAP_REG);
			early_iounmap(addr, VTD_PAGE_SIZE);
			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
				warn_invalid_dmar(drhd->address,
						  " returns all ones");
				goto failed;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;

failed:
	return 0;
}

int __init detect_intel_iommu(void)
{
	int ret;

	down_write(&dmar_global_lock);
	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();
	{
		if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
			iommu_detected = 1;
			/* Make sure ACS will be enabled */
			pci_request_acs();
		}

#ifdef CONFIG_X86
		if (ret)
			x86_init.iommu.iommu_init = intel_iommu_init;
#endif
	}
	early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
	up_write(&dmar_global_lock);

	return ret ? 1 : -ENODEV;
}


static void unmap_iommu(struct intel_iommu *iommu)
{
	iounmap(iommu->reg);
	release_mem_region(iommu->reg_phys, iommu->reg_size);
}

/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers. Start w/ a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
	int map_size, err = 0;

	iommu->reg_phys = phys_addr;
	iommu->reg_size = VTD_PAGE_SIZE;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("IOMMU: can't reserve memory\n");
		err = -EBUSY;
		goto out;
	}

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
	if (!iommu->reg) {
		pr_err("IOMMU: can't map the region\n");
		err = -ENOMEM;
		goto release;
	}

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		err = -EINVAL;
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		iounmap(iommu->reg);
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
					iommu->name)) {
			pr_err("IOMMU: can't reserve memory\n");
			err = -EBUSY;
			goto out;
		}
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		if (!iommu->reg) {
			pr_err("IOMMU: can't map the region\n");
			err = -ENOMEM;
			goto release;
		}
	}
	err = 0;
	goto out;

unmap:
	iounmap(iommu->reg);
release:
	release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
	return err;
}

static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	u32 ver, sts;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;
	int err;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	err = map_iommu(iommu, drhd->reg_base_addr);
	if (err) {
		pr_err("IOMMU: failed to map %s\n", iommu->name);
		goto error;
	}

	err = -EINVAL;
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
			iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
			iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	iommu->node = -1;

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->seq_id,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	/* Reflect status in gcmd */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (sts & DMA_GSTS_IRES)
		iommu->gcmd |= DMA_GCMD_IRE;
	if (sts & DMA_GSTS_TES)
		iommu->gcmd |= DMA_GCMD_TE;
	if (sts & DMA_GSTS_QIES)
		iommu->gcmd |= DMA_GCMD_QIE;

	raw_spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

 err_unmap:
	unmap_iommu(iommu);
 error:
	kfree(iommu);
	return err;
}

static void free_iommu(struct intel_iommu *iommu)
{
	if (iommu->irq) {
		free_irq(iommu->irq, iommu);
		irq_set_handler_data(iommu->irq, NULL);
		destroy_irq(iommu->irq);
	}

	if (iommu->qi) {
		free_page((unsigned long)iommu->qi->desc);
		kfree(iommu->qi->desc_status);
		kfree(iommu->qi);
	}

	if (iommu->reg)
		unmap_iommu(iommu);

	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed its work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			pr_err("VT-d detected invalid descriptor: "
				"low=%llx, high=%llx\n",
				(unsigned long long)qi->desc[index].low,
				(unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
					sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent interrupt
		 * context to queue another cmd while a cmd is already submitted
		 * and waiting for completion on this cpu. This is to avoid
		 * a deadlock where the interrupt context can wait indefinitely
		 * for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;


	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}
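/*
 * Usage sketch (illustrative): once dmar_enable_qi() has succeeded,
 * invalidation requests are built as qi_desc entries and pushed through
 * qi_submit_sync(), e.g.:
 *
 *	struct qi_desc desc;
 *
 *	desc.low = QI_IEC_TYPE;
 *	desc.high = 0;
 *	qi_submit_sync(&desc, iommu);
 *
 * which is exactly what qi_global_iec() above does to flush the global
 * interrupt entry cache.
 */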

/* iommu interrupt handling. Most stuff are MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
	"PCE for translation request specifies blocking",
};

static const char *irq_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
					ARRAY_SIZE(irq_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return irq_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	unsigned long flag;

	/* unmask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
	unsigned long flag;
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);

	/* mask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
1361 else
Donald Dutilebf947fcb2012-06-04 17:29:01 -04001362 pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001363 "fault addr %llx \n"
1364 "DMAR:[fault reason %02d] %s\n",
1365 (type ? "DMA Read" : "DMA Write"),
1366 (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1367 PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001368 return 0;
1369}
1370
1371#define PRIMARY_FAULT_REG_LEN (16)
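/*
 * Fault-event interrupt handler: starting at the index reported in the
 * Fault Status register, walk the primary fault recording registers,
 * log and clear every record whose fault bit is set (dropping the
 * register lock around the logging path), then clear the overall
 * overflow and pending-fault status bits.
 */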
Suresh Siddha1531a6a2009-03-16 17:04:57 -07001372irqreturn_t dmar_fault(int irq, void *dev_id)
Suresh Siddha0ac24912009-03-16 17:04:54 -07001373{
1374 struct intel_iommu *iommu = dev_id;
1375 int reg, fault_index;
1376 u32 fault_status;
1377 unsigned long flag;
1378
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001379 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001380 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001381 if (fault_status)
Donald Dutilebf947fcb2012-06-04 17:29:01 -04001382 pr_err("DRHD: handling fault status reg %x\n", fault_status);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001383
1384 /* TBD: ignore advanced fault log currently */
1385 if (!(fault_status & DMA_FSTS_PPF))
Li, Zhen-Huabd5cdad2013-03-25 16:20:52 +08001386 goto unlock_exit;
Suresh Siddha0ac24912009-03-16 17:04:54 -07001387
1388 fault_index = dma_fsts_fault_record_index(fault_status);
1389 reg = cap_fault_reg_offset(iommu->cap);
1390 while (1) {
1391 u8 fault_reason;
1392 u16 source_id;
1393 u64 guest_addr;
1394 int type;
1395 u32 data;
1396
1397 /* highest 32 bits */
1398 data = readl(iommu->reg + reg +
1399 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1400 if (!(data & DMA_FRCD_F))
1401 break;
1402
1403 fault_reason = dma_frcd_fault_reason(data);
1404 type = dma_frcd_type(data);
1405
1406 data = readl(iommu->reg + reg +
1407 fault_index * PRIMARY_FAULT_REG_LEN + 8);
1408 source_id = dma_frcd_source_id(data);
1409
1410 guest_addr = dmar_readq(iommu->reg + reg +
1411 fault_index * PRIMARY_FAULT_REG_LEN);
1412 guest_addr = dma_frcd_page_addr(guest_addr);
1413 /* clear the fault */
1414 writel(DMA_FRCD_F, iommu->reg + reg +
1415 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1416
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001417 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001418
1419 dmar_fault_do_one(iommu, type, fault_reason,
1420 source_id, guest_addr);
1421
1422 fault_index++;
Troy Heber8211a7b2009-08-19 15:26:11 -06001423 if (fault_index >= cap_num_fault_regs(iommu->cap))
Suresh Siddha0ac24912009-03-16 17:04:54 -07001424 fault_index = 0;
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001425 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001426 }
Suresh Siddha0ac24912009-03-16 17:04:54 -07001427
Li, Zhen-Huabd5cdad2013-03-25 16:20:52 +08001428 writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);
1429
1430unlock_exit:
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001431 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001432 return IRQ_HANDLED;
1433}
1434
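/*
 * Allocate and wire up the fault-event interrupt for an IOMMU: create
 * an irq, attach the iommu as handler data, let the architecture set up
 * the DMAR MSI, and install dmar_fault() as a non-threaded handler.
 * Callers invoke this once per DRHD unit, as enable_drhd_fault_handling()
 * does below.
 */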
1435int dmar_set_interrupt(struct intel_iommu *iommu)
1436{
1437 int irq, ret;
1438
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001439 /*
1440 * Check if the fault interrupt is already initialized.
1441 */
1442 if (iommu->irq)
1443 return 0;
1444
Suresh Siddha0ac24912009-03-16 17:04:54 -07001445 irq = create_irq();
1446 if (!irq) {
Donald Dutilebf947fcb2012-06-04 17:29:01 -04001447 pr_err("IOMMU: no free vectors\n");
Suresh Siddha0ac24912009-03-16 17:04:54 -07001448 return -EINVAL;
1449 }
1450
Thomas Gleixnerdced35a2011-03-28 17:49:12 +02001451 irq_set_handler_data(irq, iommu);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001452 iommu->irq = irq;
1453
1454 ret = arch_setup_dmar_msi(irq);
1455 if (ret) {
Thomas Gleixnerdced35a2011-03-28 17:49:12 +02001456 irq_set_handler_data(irq, NULL);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001457 iommu->irq = 0;
1458 destroy_irq(irq);
Chris Wrightdd726432009-05-13 15:55:52 -07001459 return ret;
Suresh Siddha0ac24912009-03-16 17:04:54 -07001460 }
1461
Thomas Gleixner477694e2011-07-19 16:25:42 +02001462 ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001463 if (ret)
Donald Dutilebf947fcb2012-06-04 17:29:01 -04001464 pr_err("IOMMU: can't request irq\n");
Suresh Siddha0ac24912009-03-16 17:04:54 -07001465 return ret;
1466}
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001467
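/*
 * Register the fault handler on every DRHD unit at boot, then clear any
 * faults that firmware or earlier boot code may have left pending.
 */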
1468int __init enable_drhd_fault_handling(void)
1469{
1470 struct dmar_drhd_unit *drhd;
Jiang Liu7c919772014-01-06 14:18:18 +08001471 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001472
1473 /*
1474 * Enable fault control interrupt.
1475 */
Jiang Liu7c919772014-01-06 14:18:18 +08001476 for_each_iommu(iommu, drhd) {
Li, Zhen-Huabd5cdad2013-03-25 16:20:52 +08001477 u32 fault_status;
Jiang Liu7c919772014-01-06 14:18:18 +08001478 int ret = dmar_set_interrupt(iommu);
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001479
1480 if (ret) {
Donald Dutilee9071b02012-06-08 17:13:11 -04001481 pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001482 (unsigned long long)drhd->reg_base_addr, ret);
1483 return -1;
1484 }
Suresh Siddha7f99d942010-11-30 22:22:29 -08001485
1486 /*
1487 * Clear any previous faults.
1488 */
1489 dmar_fault(iommu->irq, iommu);
Li, Zhen-Huabd5cdad2013-03-25 16:20:52 +08001490 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1491 writel(fault_status, iommu->reg + DMAR_FSTS_REG);
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001492 }
1493
1494 return 0;
1495}
Fenghua Yueb4a52b2009-03-27 14:22:43 -07001496
1497/*
1498 * Re-enable Queued Invalidation interface.
1499 */
1500int dmar_reenable_qi(struct intel_iommu *iommu)
1501{
1502 if (!ecap_qis(iommu->ecap))
1503 return -ENOENT;
1504
1505 if (!iommu->qi)
1506 return -ENOENT;
1507
1508 /*
1509 * First disable queued invalidation.
1510 */
1511 dmar_disable_qi(iommu);
1512 /*
1513 * Then enable queued invalidation again. Since there are no pending
1514 * invalidation requests now, it's safe to re-enable queued
1515 * invalidation.
1516 */
1517 __dmar_enable_qi(iommu);
1518
1519 return 0;
1520}
Youquan Song074835f2009-09-09 12:05:39 -04001521
1522/*
1523 * Check interrupt remapping support in DMAR table description.
1524 */
Luck, Tony0b8973a2009-12-16 22:59:29 +00001525int __init dmar_ir_support(void)
Youquan Song074835f2009-09-09 12:05:39 -04001526{
1527 struct acpi_table_dmar *dmar;
1528 dmar = (struct acpi_table_dmar *)dmar_tbl;
Arnaud Patard4f506e02010-03-25 18:02:58 +00001529 if (!dmar)
1530 return 0;
Youquan Song074835f2009-09-09 12:05:39 -04001531 return dmar->flags & 0x1;
1532}
Jiang Liu694835d2014-01-06 14:18:16 +08001533
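/*
 * Late initcall: if neither DMA remapping nor interrupt remapping ended
 * up being enabled, unregister the DMAR PCI bus notifier (when it was
 * registered) and free the parsed DRHD structures, since nothing will
 * ever use them.
 */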
Jiang Liua868e6b2014-01-06 14:18:20 +08001534static int __init dmar_free_unused_resources(void)
1535{
1536 struct dmar_drhd_unit *dmaru, *dmaru_n;
1537
1538 /* DMAR units are in use */
1539 if (irq_remapping_enabled || intel_iommu_enabled)
1540 return 0;
1541
Jiang Liu2e455282014-02-19 14:07:36 +08001542 if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
1543 bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);
Jiang Liu59ce0512014-02-19 14:07:35 +08001544
Jiang Liu3a5670e2014-02-19 14:07:33 +08001545 down_write(&dmar_global_lock);
Jiang Liua868e6b2014-01-06 14:18:20 +08001546 list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
1547 list_del(&dmaru->list);
1548 dmar_free_drhd(dmaru);
1549 }
Jiang Liu3a5670e2014-02-19 14:07:33 +08001550 up_write(&dmar_global_lock);
Jiang Liua868e6b2014-01-06 14:18:20 +08001551
1552 return 0;
1553}
1554
1555late_initcall(dmar_free_unused_resources);
Konrad Rzeszutek Wilk4db77ff2010-08-26 13:58:04 -04001556IOMMU_INIT_POST(detect_intel_iommu);