/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>
 *          Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)     "DMAR: " fmt

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/crash_dump.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"

#define ROOT_SIZE               VTD_PAGE_SIZE
#define CONTEXT_SIZE            VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START      (0xfee00000)
#define IOAPIC_RANGE_END        (0xfeefffff)
#define IOVA_START_ADDR         (0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH      (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)     ((unsigned long) min_t(uint64_t, \
                                __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)    (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
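
/*
 * Sanity check of the arithmetic, assuming VTD_PAGE_SHIFT == 12: with the
 * default 48-bit address width, __DOMAIN_MAX_PFN(48) == (1ULL << 36) - 1,
 * which fits in an unsigned long on 64-bit builds, so DOMAIN_MAX_PFN(48)
 * is simply 2^36 - 1; on a 32-bit build the min_t() clamps it to
 * ULONG_MAX instead.
 */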

/* IO virtual address start page frame number */
#define IOVA_START_PFN          (1)

#define IOVA_PFN(addr)          ((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN           IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN           IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE            (9)
#define LEVEL_MASK              (((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is an order of a 4KiB page and that the
 * mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES     (~0xFFFUL)
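
/*
 * ~0xFFFUL has every bit from bit 12 upwards set, i.e. it advertises
 * every power-of-two size from 4KiB up, matching the "all orders of
 * 4KiB" behaviour described above.
 */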

static inline int agaw_to_level(int agaw)
{
        return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
        return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
        return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}
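
/*
 * How these helpers fit together, assuming the usual 9-bit stride: a
 * 48-bit address width gives width_to_agaw(48) == 2, agaw_to_width(2) == 48
 * and agaw_to_level(2) == 4, i.e. a 4-level page table; a 39-bit width
 * maps to agaw 1 and a 3-level table.
 */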

static inline unsigned int level_to_offset_bits(int level)
{
        return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
        return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
        return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
        return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
        return (pfn + level_size(level) - 1) & level_mask(level);
}
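
/*
 * Worked example: at level 2 the offset field is 9 bits wide
 * (level_to_offset_bits(2) == 9), so level_size(2) == 512 pages (2MiB of
 * 4KiB VT-d pages) and align_to_level(pfn, 2) rounds pfn up to the next
 * 512-page boundary.
 */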

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
        return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
        return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
        return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
        return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
        return page_to_dma_pfn(virt_to_page(p));
}
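
/*
 * On x86 with 4KiB kernel pages, PAGE_SHIFT and VTD_PAGE_SHIFT are both
 * 12, so the mm<->dma pfn conversions above are identity shifts; they
 * only do real work when the MM page size is larger than the 4KiB VT-d
 * page size.
 */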

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic kernel if can't successfully enable VT-d
 * (used when kernel is launched w/ TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
        u64     lo;
        u64     hi;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))

/*
 * Take a root_entry and return the Lower Context Table Pointer (LCTP)
 * if marked present.
 */
static phys_addr_t root_entry_lctp(struct root_entry *re)
{
        if (!(re->lo & 1))
                return 0;

        return re->lo & VTD_PAGE_MASK;
}

/*
 * Take a root_entry and return the Upper Context Table Pointer (UCTP)
 * if marked present.
 */
static phys_addr_t root_entry_uctp(struct root_entry *re)
{
        if (!(re->hi & 1))
                return 0;

        return re->hi & VTD_PAGE_MASK;
}
/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
        u64 lo;
        u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
        return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
        context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
        context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
                                                unsigned long value)
{
        context->lo &= (((u64)-1) << 4) | 3;
        context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
                                            unsigned long value)
{
        context->lo &= ~VTD_PAGE_MASK;
        context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
                                             unsigned long value)
{
        context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
                                         unsigned long value)
{
        context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline int context_domain_id(struct context_entry *c)
{
        return((c->hi >> 8) & 0xffff);
}

static inline void context_clear_entry(struct context_entry *context)
{
        context->lo = 0;
        context->hi = 0;
}

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
        u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
        pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
        return pte->val & VTD_PAGE_MASK;
#else
        /* Must have a full atomic 64-bit read */
        return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
        return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
        return (pte->val & DMA_PTE_LARGE_PAGE);
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
        return !((unsigned long)pte & ~VTD_PAGE_MASK);
}
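
/*
 * A page-table page holds VTD_PAGE_SIZE / sizeof(struct dma_pte) == 512
 * entries, so first_pte_in_page() is just a page-alignment check on the
 * PTE pointer; the walkers below use it to notice when they have stepped
 * off the end of the current table.
 */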

/*
 * This domain is a statically identity mapping domain.
 *      1. This domain creates a static 1:1 mapping to all usable memory.
 *      2. It maps to each iommu if successful.
 *      3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* domain represents a virtual machine, more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE     (1 << 0)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY     (1 << 1)

struct dmar_domain {
        int     id;                     /* domain id */
        int     nid;                    /* node id */
        DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
                                        /* bitmap of iommus this domain uses */

        struct list_head devices;       /* all devices' list */
        struct iova_domain iovad;       /* iova's that belong to this domain */

        struct dma_pte  *pgd;           /* virtual address */
        int             gaw;            /* max guest address width */

        /* adjusted guest address width, 0 is level 2 30-bit */
        int             agaw;

        int             flags;          /* flags to find out type of domain */

        int             iommu_coherency;/* indicate coherency of iommu access */
        int             iommu_snooping; /* indicate snooping control feature */
        int             iommu_count;    /* reference count of iommu */
        int             iommu_superpage;/* Level of superpages supported:
                                           0 == 4KiB (no superpages), 1 == 2MiB,
                                           2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
        spinlock_t      iommu_lock;     /* protect iommu set in domain */
        u64             max_addr;       /* maximum mapped address */

        struct iommu_domain domain;     /* generic domain data structure for
                                           iommu core */
};

/* PCI domain-device relationship */
struct device_domain_info {
        struct list_head link;  /* link to domain siblings */
        struct list_head global; /* link to global list */
        u8 bus;                 /* PCI bus number */
        u8 devfn;               /* PCI devfn number */
        struct device *dev;     /* it's NULL for PCIe-to-PCI bridge */
        struct intel_iommu *iommu; /* IOMMU used by this device */
        struct dmar_domain *domain; /* pointer to domain */
};

struct dmar_rmrr_unit {
        struct list_head list;          /* list of rmrr units   */
        struct acpi_dmar_header *hdr;   /* ACPI header          */
        u64     base_address;           /* reserved base address */
        u64     end_address;            /* reserved end address */
        struct dmar_dev_scope *devices; /* target devices */
        int     devices_cnt;            /* target device count */
};

struct dmar_atsr_unit {
        struct list_head list;          /* list of ATSR units */
        struct acpi_dmar_header *hdr;   /* ACPI header */
        struct dmar_dev_scope *devices; /* target devices */
        int devices_cnt;                /* target device count */
        u8 include_all:1;               /* include all ports */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);

#define for_each_rmrr_units(rmrr) \
        list_for_each_entry(rmrr, &dmar_rmrr_units, list)

static void flush_unmaps_timeout(unsigned long data);

static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
        int next;
        struct iova *iova[HIGH_WATER_MARK];
        struct dmar_domain *domain[HIGH_WATER_MARK];
        struct page *freelist[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void domain_remove_one_dev_info(struct dmar_domain *domain,
                                       struct device *dev);
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
                                           struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
                               struct intel_iommu *iommu);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int intel_iommu_ecs = 1;

/* We only actually use ECS when PASID support (on the new bit 40)
 * is also advertised. Some early implementations — the ones with
 * PASID support on bit 28 — have issues even when we *only* use
 * extended root/context tables. */
#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
                            ecap_pasid(iommu->ecap))

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static const struct iommu_ops intel_iommu_ops;

static bool translation_pre_enabled(struct intel_iommu *iommu)
{
        return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
}

static void clear_translation_pre_enabled(struct intel_iommu *iommu)
{
        iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
}

static void init_translation_status(struct intel_iommu *iommu)
{
        u32 gsts;

        gsts = readl(iommu->reg + DMAR_GSTS_REG);
        if (gsts & DMA_GSTS_TES)
                iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
}

/* Convert a generic 'struct iommu_domain' to the private 'struct dmar_domain' */
static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
{
        return container_of(dom, struct dmar_domain, domain);
}

static int __init intel_iommu_setup(char *str)
{
        if (!str)
                return -EINVAL;
        while (*str) {
                if (!strncmp(str, "on", 2)) {
                        dmar_disabled = 0;
                        pr_info("IOMMU enabled\n");
                } else if (!strncmp(str, "off", 3)) {
                        dmar_disabled = 1;
                        pr_info("IOMMU disabled\n");
                } else if (!strncmp(str, "igfx_off", 8)) {
                        dmar_map_gfx = 0;
                        pr_info("Disable GFX device mapping\n");
                } else if (!strncmp(str, "forcedac", 8)) {
                        pr_info("Forcing DAC for PCI devices\n");
                        dmar_forcedac = 1;
                } else if (!strncmp(str, "strict", 6)) {
                        pr_info("Disable batched IOTLB flush\n");
                        intel_iommu_strict = 1;
                } else if (!strncmp(str, "sp_off", 6)) {
                        pr_info("Disable supported super page\n");
                        intel_iommu_superpage = 0;
                } else if (!strncmp(str, "ecs_off", 7)) {
                        printk(KERN_INFO
                                "Intel-IOMMU: disable extended context table support\n");
                        intel_iommu_ecs = 0;
                }

                str += strcspn(str, ",");
                while (*str == ',')
                        str++;
        }
        return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;

static inline void *alloc_pgtable_page(int node)
{
        struct page *page;
        void *vaddr = NULL;

        page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
        if (page)
                vaddr = page_address(page);
        return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
        free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
        return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
        kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
        return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
        kmem_cache_free(iommu_devinfo_cache, vaddr);
}

static inline int domain_type_is_vm(struct dmar_domain *domain)
{
        return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
}

static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
{
        return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
                                DOMAIN_FLAG_STATIC_IDENTITY);
}

static inline int domain_pfn_supported(struct dmar_domain *domain,
                                       unsigned long pfn)
{
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

        return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
        unsigned long sagaw;
        int agaw = -1;

        sagaw = cap_sagaw(iommu->cap);
        for (agaw = width_to_agaw(max_gaw);
             agaw >= 0; agaw--) {
                if (test_bit(agaw, &sagaw))
                        break;
        }

        return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
        return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * Calculate agaw for each iommu.
 * "SAGAW" may be different across iommus, so use a default agaw and fall
 * back to a supported smaller agaw for iommus that don't support the
 * default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
        return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
        int iommu_id;

        /* si_domain and vm domain should not get here. */
        BUG_ON(domain_type_is_vm_or_si(domain));
        iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
        if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
                return NULL;

        return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
        bool found = false;
        int i;

        domain->iommu_coherency = 1;

        for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
                found = true;
                if (!ecap_coherent(g_iommus[i]->ecap)) {
                        domain->iommu_coherency = 0;
                        break;
                }
        }
        if (found)
                return;

        /* No hardware attached; use lowest common denominator */
        rcu_read_lock();
        for_each_active_iommu(iommu, drhd) {
                if (!ecap_coherent(iommu->ecap)) {
                        domain->iommu_coherency = 0;
                        break;
                }
        }
        rcu_read_unlock();
}

static int domain_update_iommu_snooping(struct intel_iommu *skip)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
        int ret = 1;

        rcu_read_lock();
        for_each_active_iommu(iommu, drhd) {
                if (iommu != skip) {
                        if (!ecap_sc_support(iommu->ecap)) {
                                ret = 0;
                                break;
                        }
                }
        }
        rcu_read_unlock();

        return ret;
}

static int domain_update_iommu_superpage(struct intel_iommu *skip)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
        int mask = 0xf;

        if (!intel_iommu_superpage) {
                return 0;
        }

        /* set iommu_superpage to the smallest common denominator */
        rcu_read_lock();
        for_each_active_iommu(iommu, drhd) {
                if (iommu != skip) {
                        mask &= cap_super_page_val(iommu->cap);
                        if (!mask)
                                break;
                }
        }
        rcu_read_unlock();

        return fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
        domain_update_iommu_coherency(domain);
        domain->iommu_snooping = domain_update_iommu_snooping(NULL);
        domain->iommu_superpage = domain_update_iommu_superpage(NULL);
}

static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
                                                       u8 bus, u8 devfn, int alloc)
{
        struct root_entry *root = &iommu->root_entry[bus];
        struct context_entry *context;
        u64 *entry;

        if (ecs_enabled(iommu)) {
                if (devfn >= 0x80) {
                        devfn -= 0x80;
                        entry = &root->hi;
                }
                devfn *= 2;
        }
        entry = &root->lo;
        if (*entry & 1)
                context = phys_to_virt(*entry & VTD_PAGE_MASK);
        else {
                unsigned long phy_addr;
                if (!alloc)
                        return NULL;

                context = alloc_pgtable_page(iommu->node);
                if (!context)
                        return NULL;

                __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
                phy_addr = virt_to_phys((void *)context);
                *entry = phy_addr | 1;
                __iommu_flush_cache(iommu, entry, sizeof(*entry));
        }
        return &context[devfn];
}
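
/*
 * Layout note: in legacy mode each root entry points to a single 4KiB
 * context table of 256 entries, one per devfn. The extended-context
 * layout that the devfn >= 0x80 branch above handles splits the root
 * entry into lower and upper halves covering 128 devfns each, with
 * context entries twice the size, which is why devfn is folded into the
 * 0x00-0x7f range and then doubled before indexing.
 */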

static int iommu_dummy(struct device *dev)
{
        return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
        struct dmar_drhd_unit *drhd = NULL;
        struct intel_iommu *iommu;
        struct device *tmp;
        struct pci_dev *ptmp, *pdev = NULL;
        u16 segment = 0;
        int i;

        if (iommu_dummy(dev))
                return NULL;

        if (dev_is_pci(dev)) {
                pdev = to_pci_dev(dev);
                segment = pci_domain_nr(pdev->bus);
        } else if (has_acpi_companion(dev))
                dev = &ACPI_COMPANION(dev)->dev;

        rcu_read_lock();
        for_each_active_iommu(iommu, drhd) {
                if (pdev && segment != drhd->segment)
                        continue;

                for_each_active_dev_scope(drhd->devices,
                                          drhd->devices_cnt, i, tmp) {
                        if (tmp == dev) {
                                *bus = drhd->devices[i].bus;
                                *devfn = drhd->devices[i].devfn;
                                goto out;
                        }

                        if (!pdev || !dev_is_pci(tmp))
                                continue;

                        ptmp = to_pci_dev(tmp);
                        if (ptmp->subordinate &&
                            ptmp->subordinate->number <= pdev->bus->number &&
                            ptmp->subordinate->busn_res.end >= pdev->bus->number)
                                goto got_pdev;
                }

                if (pdev && drhd->include_all) {
                got_pdev:
                        *bus = pdev->bus->number;
                        *devfn = pdev->devfn;
                        goto out;
                }
        }
        iommu = NULL;
 out:
        rcu_read_unlock();

        return iommu;
}

static void domain_flush_cache(struct dmar_domain *domain,
                               void *addr, int size)
{
        if (!domain->iommu_coherency)
                clflush_cache_range(addr, size);
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
        struct context_entry *context;
        int ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);
        context = iommu_context_addr(iommu, bus, devfn, 0);
        if (context)
                ret = context_present(context);
        spin_unlock_irqrestore(&iommu->lock, flags);
        return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
        struct context_entry *context;
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);
        context = iommu_context_addr(iommu, bus, devfn, 0);
        if (context) {
                context_clear_entry(context);
                __iommu_flush_cache(iommu, context, sizeof(*context));
        }
        spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
        int i;
        unsigned long flags;
        struct context_entry *context;

        spin_lock_irqsave(&iommu->lock, flags);
        if (!iommu->root_entry) {
                goto out;
        }
        for (i = 0; i < ROOT_ENTRY_NR; i++) {
                context = iommu_context_addr(iommu, i, 0, 0);
                if (context)
                        free_pgtable_page(context);

                if (!ecs_enabled(iommu))
                        continue;

                context = iommu_context_addr(iommu, i, 0x80, 0);
                if (context)
                        free_pgtable_page(context);

        }
        free_pgtable_page(iommu->root_entry);
        iommu->root_entry = NULL;
out:
        spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
                                      unsigned long pfn, int *target_level)
{
        struct dma_pte *parent, *pte = NULL;
        int level = agaw_to_level(domain->agaw);
        int offset;

        BUG_ON(!domain->pgd);

        if (!domain_pfn_supported(domain, pfn))
                /* Address beyond IOMMU's addressing capabilities. */
                return NULL;

        parent = domain->pgd;

        while (1) {
                void *tmp_page;

                offset = pfn_level_offset(pfn, level);
                pte = &parent[offset];
                if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
                        break;
                if (level == *target_level)
                        break;

                if (!dma_pte_present(pte)) {
                        uint64_t pteval;

                        tmp_page = alloc_pgtable_page(domain->nid);

                        if (!tmp_page)
                                return NULL;

                        domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
                        pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
                        if (cmpxchg64(&pte->val, 0ULL, pteval))
                                /* Someone else set it while we were thinking; use theirs. */
                                free_pgtable_page(tmp_page);
                        else
                                domain_flush_cache(domain, pte, sizeof(*pte));
                }
                if (level == 1)
                        break;

                parent = phys_to_virt(dma_pte_addr(pte));
                level--;
        }

        if (!*target_level)
                *target_level = level;

        return pte;
}
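
/*
 * In short: the walk starts at domain->pgd (the level given by
 * agaw_to_level()) and descends one level per iteration, allocating any
 * missing intermediate table and installing it with cmpxchg64() so that
 * a racing mapper's table is reused rather than leaked. It stops early
 * at a superpage or non-present entry when the caller passed
 * *target_level == 0, otherwise at the requested target level.
 */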


/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
                                         unsigned long pfn,
                                         int level, int *large_page)
{
        struct dma_pte *parent, *pte = NULL;
        int total = agaw_to_level(domain->agaw);
        int offset;

        parent = domain->pgd;
        while (level <= total) {
                offset = pfn_level_offset(pfn, total);
                pte = &parent[offset];
                if (level == total)
                        return pte;

                if (!dma_pte_present(pte)) {
                        *large_page = total;
                        break;
                }

                if (dma_pte_superpage(pte)) {
                        *large_page = total;
                        return pte;
                }

                parent = phys_to_virt(dma_pte_addr(pte));
                total--;
        }
        return NULL;
}

/* clear last level pte, a tlb flush should be followed */
static void dma_pte_clear_range(struct dmar_domain *domain,
                                unsigned long start_pfn,
                                unsigned long last_pfn)
{
        unsigned int large_page = 1;
        struct dma_pte *first_pte, *pte;

        BUG_ON(!domain_pfn_supported(domain, start_pfn));
        BUG_ON(!domain_pfn_supported(domain, last_pfn));
        BUG_ON(start_pfn > last_pfn);

        /* we don't need lock here; nobody else touches the iova range */
        do {
                large_page = 1;
                first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
                if (!pte) {
                        start_pfn = align_to_level(start_pfn + 1, large_page + 1);
                        continue;
                }
                do {
                        dma_clear_pte(pte);
                        start_pfn += lvl_to_nr_pages(large_page);
                        pte++;
                } while (start_pfn <= last_pfn && !first_pte_in_page(pte));

                domain_flush_cache(domain, first_pte,
                                   (void *)pte - (void *)first_pte);

        } while (start_pfn && start_pfn <= last_pfn);
}

static void dma_pte_free_level(struct dmar_domain *domain, int level,
                               struct dma_pte *pte, unsigned long pfn,
                               unsigned long start_pfn, unsigned long last_pfn)
{
        pfn = max(start_pfn, pfn);
        pte = &pte[pfn_level_offset(pfn, level)];

        do {
                unsigned long level_pfn;
                struct dma_pte *level_pte;

                if (!dma_pte_present(pte) || dma_pte_superpage(pte))
                        goto next;

                level_pfn = pfn & level_mask(level - 1);
                level_pte = phys_to_virt(dma_pte_addr(pte));

                if (level > 2)
                        dma_pte_free_level(domain, level - 1, level_pte,
                                           level_pfn, start_pfn, last_pfn);

                /* If range covers entire pagetable, free it */
                if (!(start_pfn > level_pfn ||
                      last_pfn < level_pfn + level_size(level) - 1)) {
                        dma_clear_pte(pte);
                        domain_flush_cache(domain, pte, sizeof(*pte));
                        free_pgtable_page(level_pte);
                }
next:
                pfn += level_size(level);
        } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
                                   unsigned long start_pfn,
                                   unsigned long last_pfn)
{
        BUG_ON(!domain_pfn_supported(domain, start_pfn));
        BUG_ON(!domain_pfn_supported(domain, last_pfn));
        BUG_ON(start_pfn > last_pfn);

        dma_pte_clear_range(domain, start_pfn, last_pfn);

        /* We don't need lock here; nobody else touches the iova range */
        dma_pte_free_level(domain, agaw_to_level(domain->agaw),
                           domain->pgd, 0, start_pfn, last_pfn);

        /* free pgd */
        if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
                free_pgtable_page(domain->pgd);
                domain->pgd = NULL;
        }
}

/* When a page at a given level is being unlinked from its parent, we don't
   need to *modify* it at all. All we need to do is make a list of all the
   pages which can be freed just as soon as we've flushed the IOTLB and we
   know the hardware page-walk will no longer touch them.
   The 'pte' argument is the *parent* PTE, pointing to the page that is to
   be freed. */
static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
                                            int level, struct dma_pte *pte,
                                            struct page *freelist)
{
        struct page *pg;

        pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
        pg->freelist = freelist;
        freelist = pg;

        if (level == 1)
                return freelist;

        pte = page_address(pg);
        do {
                if (dma_pte_present(pte) && !dma_pte_superpage(pte))
                        freelist = dma_pte_list_pagetables(domain, level - 1,
                                                           pte, freelist);
                pte++;
        } while (!first_pte_in_page(pte));

        return freelist;
}

static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
                                        struct dma_pte *pte, unsigned long pfn,
                                        unsigned long start_pfn,
                                        unsigned long last_pfn,
                                        struct page *freelist)
{
        struct dma_pte *first_pte = NULL, *last_pte = NULL;

        pfn = max(start_pfn, pfn);
        pte = &pte[pfn_level_offset(pfn, level)];

        do {
                unsigned long level_pfn;

                if (!dma_pte_present(pte))
                        goto next;

                level_pfn = pfn & level_mask(level);

                /* If range covers entire pagetable, free it */
                if (start_pfn <= level_pfn &&
                    last_pfn >= level_pfn + level_size(level) - 1) {
                        /* These subordinate page tables are going away entirely. Don't
                           bother to clear them; we're just going to *free* them. */
                        if (level > 1 && !dma_pte_superpage(pte))
                                freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);

                        dma_clear_pte(pte);
                        if (!first_pte)
                                first_pte = pte;
                        last_pte = pte;
                } else if (level > 1) {
                        /* Recurse down into a level that isn't *entirely* obsolete */
                        freelist = dma_pte_clear_level(domain, level - 1,
                                                       phys_to_virt(dma_pte_addr(pte)),
                                                       level_pfn, start_pfn, last_pfn,
                                                       freelist);
                }
next:
                pfn += level_size(level);
        } while (!first_pte_in_page(++pte) && pfn <= last_pfn);

        if (first_pte)
                domain_flush_cache(domain, first_pte,
                                   (void *)++last_pte - (void *)first_pte);

        return freelist;
}

/* We can't just free the pages because the IOMMU may still be walking
   the page tables, and may have cached the intermediate levels. The
   pages can only be freed after the IOTLB flush has been done. */
struct page *domain_unmap(struct dmar_domain *domain,
                          unsigned long start_pfn,
                          unsigned long last_pfn)
{
        struct page *freelist = NULL;

        BUG_ON(!domain_pfn_supported(domain, start_pfn));
        BUG_ON(!domain_pfn_supported(domain, last_pfn));
        BUG_ON(start_pfn > last_pfn);

        /* we don't need lock here; nobody else touches the iova range */
        freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
                                       domain->pgd, 0, start_pfn, last_pfn, NULL);

        /* free pgd */
        if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
                struct page *pgd_page = virt_to_page(domain->pgd);
                pgd_page->freelist = freelist;
                freelist = pgd_page;

                domain->pgd = NULL;
        }

        return freelist;
}

void dma_free_pagelist(struct page *freelist)
{
        struct page *pg;

        while ((pg = freelist)) {
                freelist = pg->freelist;
                free_pgtable_page(page_address(pg));
        }
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
        struct root_entry *root;
        unsigned long flags;

        root = (struct root_entry *)alloc_pgtable_page(iommu->node);
        if (!root) {
                pr_err("Allocating root entry for %s failed\n",
                        iommu->name);
                return -ENOMEM;
        }

        __iommu_flush_cache(iommu, root, ROOT_SIZE);

        spin_lock_irqsave(&iommu->lock, flags);
        iommu->root_entry = root;
        spin_unlock_irqrestore(&iommu->lock, flags);

        return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
        u64 addr;
        u32 sts;
        unsigned long flag;

        addr = virt_to_phys(iommu->root_entry);
        if (ecs_enabled(iommu))
                addr |= DMA_RTADDR_RTT;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);

        writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware complete it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_RTPS), sts);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
        u32 val;
        unsigned long flag;

        if (!rwbf_quirk && !cap_rwbf(iommu->cap))
                return;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware complete it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (!(val & DMA_GSTS_WBFS)), val);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
                                  u16 did, u16 source_id, u8 function_mask,
                                  u64 type)
{
        u64 val = 0;
        unsigned long flag;

        switch (type) {
        case DMA_CCMD_GLOBAL_INVL:
                val = DMA_CCMD_GLOBAL_INVL;
                break;
        case DMA_CCMD_DOMAIN_INVL:
                val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
                break;
        case DMA_CCMD_DEVICE_INVL:
                val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
                        | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
                break;
        default:
                BUG();
        }
        val |= DMA_CCMD_ICC;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

        /* Make sure hardware complete it */
        IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
                      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
1268
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001269/* return value determine if we need a write buffer flush */
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001270static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1271 u64 addr, unsigned int size_order, u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001272{
1273 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1274 u64 val = 0, val_iva = 0;
1275 unsigned long flag;
1276
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001277 switch (type) {
1278 case DMA_TLB_GLOBAL_FLUSH:
1279	case DMA_TLB_GLOBAL_FLUSH:
1280		/* global flush doesn't need to set IVA_REG */
1280 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1281 break;
1282 case DMA_TLB_DSI_FLUSH:
1283 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1284 break;
1285 case DMA_TLB_PSI_FLUSH:
1286 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
David Woodhouseea8ea462014-03-05 17:09:32 +00001287 /* IH bit is passed in as part of address */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001288 val_iva = size_order | addr;
1289 break;
1290 default:
1291 BUG();
1292 }
1293 /* Note: set drain read/write */
1294#if 0
1295 /*
1296	 * This is probably just extra caution. It looks like we can
1297	 * ignore it without any impact.
1298 */
1299 if (cap_read_drain(iommu->cap))
1300 val |= DMA_TLB_READ_DRAIN;
1301#endif
1302 if (cap_write_drain(iommu->cap))
1303 val |= DMA_TLB_WRITE_DRAIN;
1304
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001305 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001306	/* Note: Only the first TLB reg is currently used */
1307 if (val_iva)
1308 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1309 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1310
1311	/* Make sure the hardware completes it */
1312 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1313 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1314
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001315 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001316
1317 /* check IOTLB invalidation granularity */
1318 if (DMA_TLB_IAIG(val) == 0)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001319 pr_err("Flush IOTLB failed\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001320 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001321 pr_debug("TLB flush request %Lx, actual %Lx\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001322 (unsigned long long)DMA_TLB_IIRG(type),
1323 (unsigned long long)DMA_TLB_IAIG(val));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001324}
1325
David Woodhouse64ae8922014-03-09 12:52:30 -07001326static struct device_domain_info *
1327iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
1328 u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001329{
Quentin Lambert2f119c72015-02-06 10:59:53 +01001330 bool found = false;
Yu Zhao93a23a72009-05-18 13:51:37 +08001331 unsigned long flags;
1332 struct device_domain_info *info;
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001333 struct pci_dev *pdev;
Yu Zhao93a23a72009-05-18 13:51:37 +08001334
1335 if (!ecap_dev_iotlb_support(iommu->ecap))
1336 return NULL;
1337
1338 if (!iommu->qi)
1339 return NULL;
1340
1341 spin_lock_irqsave(&device_domain_lock, flags);
1342 list_for_each_entry(info, &domain->devices, link)
Jiang Liuc3b497c2014-07-11 14:19:25 +08001343 if (info->iommu == iommu && info->bus == bus &&
1344 info->devfn == devfn) {
Quentin Lambert2f119c72015-02-06 10:59:53 +01001345 found = true;
Yu Zhao93a23a72009-05-18 13:51:37 +08001346 break;
1347 }
1348 spin_unlock_irqrestore(&device_domain_lock, flags);
1349
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001350 if (!found || !info->dev || !dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001351 return NULL;
1352
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001353 pdev = to_pci_dev(info->dev);
1354
1355 if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
Yu Zhao93a23a72009-05-18 13:51:37 +08001356 return NULL;
1357
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001358 if (!dmar_find_matched_atsr_unit(pdev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001359 return NULL;
1360
Yu Zhao93a23a72009-05-18 13:51:37 +08001361 return info;
1362}
1363
1364static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1365{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001366 if (!info || !dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001367 return;
1368
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001369 pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
Yu Zhao93a23a72009-05-18 13:51:37 +08001370}
1371
1372static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1373{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001374 if (!info->dev || !dev_is_pci(info->dev) ||
1375 !pci_ats_enabled(to_pci_dev(info->dev)))
Yu Zhao93a23a72009-05-18 13:51:37 +08001376 return;
1377
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001378 pci_disable_ats(to_pci_dev(info->dev));
Yu Zhao93a23a72009-05-18 13:51:37 +08001379}
1380
1381static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1382 u64 addr, unsigned mask)
1383{
1384 u16 sid, qdep;
1385 unsigned long flags;
1386 struct device_domain_info *info;
1387
1388 spin_lock_irqsave(&device_domain_lock, flags);
1389 list_for_each_entry(info, &domain->devices, link) {
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001390 struct pci_dev *pdev;
1391 if (!info->dev || !dev_is_pci(info->dev))
1392 continue;
1393
1394 pdev = to_pci_dev(info->dev);
1395 if (!pci_ats_enabled(pdev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001396 continue;
1397
1398 sid = info->bus << 8 | info->devfn;
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001399 qdep = pci_ats_queue_depth(pdev);
Yu Zhao93a23a72009-05-18 13:51:37 +08001400 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1401 }
1402 spin_unlock_irqrestore(&device_domain_lock, flags);
1403}
1404
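/*
 * Page-selective IOTLB flush for 'pages' pages starting at 'pfn'. The page
 * count is rounded up to a power of two to build the address mask; if the
 * hardware lacks page-selective invalidation, or the mask exceeds what it
 * supports, fall back to a domain-selective flush. The device IOTLB is
 * flushed as well, unless this is a map operation on caching-mode hardware.
 */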
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001405static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
David Woodhouseea8ea462014-03-05 17:09:32 +00001406 unsigned long pfn, unsigned int pages, int ih, int map)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001407{
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001408 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
David Woodhouse03d6a242009-06-28 15:33:46 +01001409 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001410
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001411 BUG_ON(pages == 0);
1412
David Woodhouseea8ea462014-03-05 17:09:32 +00001413 if (ih)
1414 ih = 1 << 6;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001415 /*
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001416	 * Fall back to a domain-selective flush if there is no PSI support or
1417	 * the size is too big.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001418	 * PSI requires the page size to be 2 ^ x, and the base address to be
1419	 * naturally aligned to that size.
1420 */
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001421 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1422 iommu->flush.flush_iotlb(iommu, did, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001423 DMA_TLB_DSI_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001424 else
David Woodhouseea8ea462014-03-05 17:09:32 +00001425 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001426 DMA_TLB_PSI_FLUSH);
Yu Zhaobf92df32009-06-29 11:31:45 +08001427
1428 /*
Nadav Amit82653632010-04-01 13:24:40 +03001429	 * In caching mode, changes of pages from non-present to present require
1430	 * a flush. However, the device IOTLB doesn't need to be flushed in this case.
Yu Zhaobf92df32009-06-29 11:31:45 +08001431 */
Nadav Amit82653632010-04-01 13:24:40 +03001432 if (!cap_caching_mode(iommu->cap) || !map)
Yu Zhao93a23a72009-05-18 13:51:37 +08001433 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001434}
1435
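/*
 * Clear the Enable Protected Memory bit in DMAR_PMEN_REG so that DMA to
 * the protected low/high memory regions is no longer blocked, then wait
 * for the Protected Region Status (PRS) bit to clear.
 */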
mark grossf8bab732008-02-08 04:18:38 -08001436static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1437{
1438 u32 pmen;
1439 unsigned long flags;
1440
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001441 raw_spin_lock_irqsave(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001442 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1443 pmen &= ~DMA_PMEN_EPM;
1444 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1445
1446 /* wait for the protected region status bit to clear */
1447 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1448 readl, !(pmen & DMA_PMEN_PRS), pmen);
1449
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001450 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001451}
1452
Jiang Liu2a41cce2014-07-11 14:19:33 +08001453static void iommu_enable_translation(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001454{
1455 u32 sts;
1456 unsigned long flags;
1457
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001458 raw_spin_lock_irqsave(&iommu->register_lock, flags);
David Woodhousec416daa2009-05-10 20:30:58 +01001459 iommu->gcmd |= DMA_GCMD_TE;
1460 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001461
1462	/* Make sure the hardware completes it */
1463 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001464 readl, (sts & DMA_GSTS_TES), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001465
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001466 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001467}
1468
Jiang Liu2a41cce2014-07-11 14:19:33 +08001469static void iommu_disable_translation(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001470{
1471 u32 sts;
1472 unsigned long flag;
1473
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001474 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001475 iommu->gcmd &= ~DMA_GCMD_TE;
1476 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1477
1478	/* Make sure the hardware completes it */
1479 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001480 readl, (!(sts & DMA_GSTS_TES)), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001481
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001482 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001483}
1484
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07001485
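/*
 * Allocate the per-IOMMU domain bookkeeping: a bitmap of domain ids in use
 * (bounded by cap_ndoms()) and an array mapping each id to its
 * struct dmar_domain. Domain id 0 is pre-allocated when caching mode is
 * set, because the hardware tags invalid translations with it.
 */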
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001486static int iommu_init_domains(struct intel_iommu *iommu)
1487{
1488 unsigned long ndomains;
1489 unsigned long nlongs;
1490
1491 ndomains = cap_ndoms(iommu->cap);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001492 pr_debug("%s: Number of Domains supported <%ld>\n",
1493 iommu->name, ndomains);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001494 nlongs = BITS_TO_LONGS(ndomains);
1495
Donald Dutile94a91b502009-08-20 16:51:34 -04001496 spin_lock_init(&iommu->lock);
1497
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001498	/* TBD: there might be 64K domains;
1499	 * consider a different allocation scheme for future chips
1500 */
1501 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1502 if (!iommu->domain_ids) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001503 pr_err("%s: Allocating domain id array failed\n",
1504 iommu->name);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001505 return -ENOMEM;
1506 }
1507 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1508 GFP_KERNEL);
1509 if (!iommu->domains) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001510 pr_err("%s: Allocating domain array failed\n",
1511 iommu->name);
Jiang Liu852bdb02014-01-06 14:18:11 +08001512 kfree(iommu->domain_ids);
1513 iommu->domain_ids = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001514 return -ENOMEM;
1515 }
1516
1517 /*
1518	 * If Caching Mode is set, then invalid translations are tagged
1519	 * with domain id 0. Hence we need to pre-allocate it.
1520 */
1521 if (cap_caching_mode(iommu->cap))
1522 set_bit(0, iommu->domain_ids);
1523 return 0;
1524}
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001525
Jiang Liuffebeb42014-11-09 22:48:02 +08001526static void disable_dmar_iommu(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001527{
1528 struct dmar_domain *domain;
Jiang Liu2a46ddf2014-07-11 14:19:30 +08001529 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001530
Donald Dutile94a91b502009-08-20 16:51:34 -04001531 if ((iommu->domains) && (iommu->domain_ids)) {
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001532 for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
Jiang Liua4eaa862014-02-19 14:07:30 +08001533 /*
1534 * Domain id 0 is reserved for invalid translation
1535 * if hardware supports caching mode.
1536 */
1537 if (cap_caching_mode(iommu->cap) && i == 0)
1538 continue;
1539
Donald Dutile94a91b502009-08-20 16:51:34 -04001540 domain = iommu->domains[i];
1541 clear_bit(i, iommu->domain_ids);
Jiang Liu129ad282014-07-11 14:19:31 +08001542 if (domain_detach_iommu(domain, iommu) == 0 &&
1543 !domain_type_is_vm(domain))
Jiang Liu92d03cc2014-02-19 14:07:28 +08001544 domain_exit(domain);
Weidong Han5e98c4b2008-12-08 23:03:27 +08001545 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001546 }
1547
1548 if (iommu->gcmd & DMA_GCMD_TE)
1549 iommu_disable_translation(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08001550}
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001551
Jiang Liuffebeb42014-11-09 22:48:02 +08001552static void free_dmar_iommu(struct intel_iommu *iommu)
1553{
1554 if ((iommu->domains) && (iommu->domain_ids)) {
1555 kfree(iommu->domains);
1556 kfree(iommu->domain_ids);
1557 iommu->domains = NULL;
1558 iommu->domain_ids = NULL;
1559 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001560
Weidong Hand9630fe2008-12-08 11:06:32 +08001561 g_iommus[iommu->seq_id] = NULL;
1562
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001563 /* free context mapping */
1564 free_context_table(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001565}
1566
Jiang Liuab8dfe22014-07-11 14:19:27 +08001567static struct dmar_domain *alloc_domain(int flags)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001568{
Jiang Liu92d03cc2014-02-19 14:07:28 +08001569	/* domain id for virtual machines; it won't be set in a context entry */
1570 static atomic_t vm_domid = ATOMIC_INIT(0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001571 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001572
1573 domain = alloc_domain_mem();
1574 if (!domain)
1575 return NULL;
1576
Jiang Liuab8dfe22014-07-11 14:19:27 +08001577 memset(domain, 0, sizeof(*domain));
Suresh Siddha4c923d42009-10-02 11:01:24 -07001578 domain->nid = -1;
Jiang Liuab8dfe22014-07-11 14:19:27 +08001579 domain->flags = flags;
Jiang Liu92d03cc2014-02-19 14:07:28 +08001580 spin_lock_init(&domain->iommu_lock);
1581 INIT_LIST_HEAD(&domain->devices);
Jiang Liuab8dfe22014-07-11 14:19:27 +08001582 if (flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
Jiang Liu92d03cc2014-02-19 14:07:28 +08001583 domain->id = atomic_inc_return(&vm_domid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001584
1585 return domain;
1586}
1587
Jiang Liufb170fb2014-07-11 14:19:28 +08001588static int __iommu_attach_domain(struct dmar_domain *domain,
1589 struct intel_iommu *iommu)
1590{
1591 int num;
1592 unsigned long ndomains;
1593
1594 ndomains = cap_ndoms(iommu->cap);
1595 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1596 if (num < ndomains) {
1597 set_bit(num, iommu->domain_ids);
1598 iommu->domains[num] = domain;
1599 } else {
1600 num = -ENOSPC;
1601 }
1602
1603 return num;
1604}
1605
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001606static int iommu_attach_domain(struct dmar_domain *domain,
1607 struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001608{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001609 int num;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001610 unsigned long flags;
1611
Weidong Han8c11e792008-12-08 15:29:22 +08001612 spin_lock_irqsave(&iommu->lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08001613 num = __iommu_attach_domain(domain, iommu);
Jiang Liu44bde612014-07-11 14:19:29 +08001614 spin_unlock_irqrestore(&iommu->lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08001615 if (num < 0)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001616 pr_err("%s: No free domain ids\n", iommu->name);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001617
Jiang Liufb170fb2014-07-11 14:19:28 +08001618 return num;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001619}
1620
Jiang Liu44bde612014-07-11 14:19:29 +08001621static int iommu_attach_vm_domain(struct dmar_domain *domain,
1622 struct intel_iommu *iommu)
1623{
1624 int num;
1625 unsigned long ndomains;
1626
1627 ndomains = cap_ndoms(iommu->cap);
1628 for_each_set_bit(num, iommu->domain_ids, ndomains)
1629 if (iommu->domains[num] == domain)
1630 return num;
1631
1632 return __iommu_attach_domain(domain, iommu);
1633}
1634
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001635static void iommu_detach_domain(struct dmar_domain *domain,
1636 struct intel_iommu *iommu)
1637{
1638 unsigned long flags;
1639 int num, ndomains;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001640
1641 spin_lock_irqsave(&iommu->lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08001642 if (domain_type_is_vm_or_si(domain)) {
1643 ndomains = cap_ndoms(iommu->cap);
1644 for_each_set_bit(num, iommu->domain_ids, ndomains) {
1645 if (iommu->domains[num] == domain) {
1646 clear_bit(num, iommu->domain_ids);
1647 iommu->domains[num] = NULL;
1648 break;
1649 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001650 }
Jiang Liufb170fb2014-07-11 14:19:28 +08001651 } else {
1652 clear_bit(domain->id, iommu->domain_ids);
1653 iommu->domains[domain->id] = NULL;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001654 }
Weidong Han8c11e792008-12-08 15:29:22 +08001655 spin_unlock_irqrestore(&iommu->lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001656}
1657
Jiang Liufb170fb2014-07-11 14:19:28 +08001658static void domain_attach_iommu(struct dmar_domain *domain,
1659 struct intel_iommu *iommu)
1660{
1661 unsigned long flags;
1662
1663 spin_lock_irqsave(&domain->iommu_lock, flags);
1664 if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
1665 domain->iommu_count++;
1666 if (domain->iommu_count == 1)
1667 domain->nid = iommu->node;
1668 domain_update_iommu_cap(domain);
1669 }
1670 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1671}
1672
1673static int domain_detach_iommu(struct dmar_domain *domain,
1674 struct intel_iommu *iommu)
1675{
1676 unsigned long flags;
1677 int count = INT_MAX;
1678
1679 spin_lock_irqsave(&domain->iommu_lock, flags);
1680 if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) {
1681 count = --domain->iommu_count;
1682 domain_update_iommu_cap(domain);
1683 }
1684 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1685
1686 return count;
1687}
1688
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001689static struct iova_domain reserved_iova_list;
Mark Gross8a443df2008-03-04 14:59:31 -08001690static struct lock_class_key reserved_rbtree_key;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001691
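/*
 * Build the global list of IOVA ranges that must never be handed out for
 * DMA: the IOAPIC MMIO window and every PCI MMIO resource, so that
 * mappings cannot alias device registers via peer-to-peer accesses.
 */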
Joseph Cihula51a63e62011-03-21 11:04:24 -07001692static int dmar_init_reserved_ranges(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001693{
1694 struct pci_dev *pdev = NULL;
1695 struct iova *iova;
1696 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001697
Robin Murphy0fb5fe82015-01-12 17:51:16 +00001698 init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
1699 DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001700
Mark Gross8a443df2008-03-04 14:59:31 -08001701 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1702 &reserved_rbtree_key);
1703
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001704 /* IOAPIC ranges shouldn't be accessed by DMA */
1705 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1706 IOVA_PFN(IOAPIC_RANGE_END));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001707 if (!iova) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001708 pr_err("Reserve IOAPIC range failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001709 return -ENODEV;
1710 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001711
1712 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1713 for_each_pci_dev(pdev) {
1714 struct resource *r;
1715
1716 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1717 r = &pdev->resource[i];
1718 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1719 continue;
David Woodhouse1a4a4552009-06-28 16:00:42 +01001720 iova = reserve_iova(&reserved_iova_list,
1721 IOVA_PFN(r->start),
1722 IOVA_PFN(r->end));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001723 if (!iova) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001724 pr_err("Reserve iova failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001725 return -ENODEV;
1726 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001727 }
1728 }
Joseph Cihula51a63e62011-03-21 11:04:24 -07001729 return 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001730}
1731
1732static void domain_reserve_special_ranges(struct dmar_domain *domain)
1733{
1734 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1735}
1736
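/*
 * Round a guest address width up to the next width the page-table layout
 * can represent: 12 bits of page offset plus a whole number of 9-bit
 * levels, capped at 64. For example, gaw = 40 gives r = (40 - 12) % 9 = 1
 * and therefore agaw = 40 + 9 - 1 = 48.
 */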
1737static inline int guestwidth_to_adjustwidth(int gaw)
1738{
1739 int agaw;
1740 int r = (gaw - 12) % 9;
1741
1742 if (r == 0)
1743 agaw = gaw;
1744 else
1745 agaw = gaw + 9 - r;
1746 if (agaw > 64)
1747 agaw = 64;
1748 return agaw;
1749}
1750
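/*
 * Second-stage initialization of a newly allocated domain: set up its IOVA
 * allocator, clamp the guest address width to what the IOMMU supports,
 * pick a matching AGAW, record the coherency/snooping/superpage
 * capabilities and allocate the top-level page directory.
 */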
1751static int domain_init(struct dmar_domain *domain, int guest_width)
1752{
1753 struct intel_iommu *iommu;
1754 int adjust_width, agaw;
1755 unsigned long sagaw;
1756
Robin Murphy0fb5fe82015-01-12 17:51:16 +00001757 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
1758 DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001759 domain_reserve_special_ranges(domain);
1760
1761 /* calculate AGAW */
Weidong Han8c11e792008-12-08 15:29:22 +08001762 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001763 if (guest_width > cap_mgaw(iommu->cap))
1764 guest_width = cap_mgaw(iommu->cap);
1765 domain->gaw = guest_width;
1766 adjust_width = guestwidth_to_adjustwidth(guest_width);
1767 agaw = width_to_agaw(adjust_width);
1768 sagaw = cap_sagaw(iommu->cap);
1769 if (!test_bit(agaw, &sagaw)) {
1770 /* hardware doesn't support it, choose a bigger one */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001771 pr_debug("Hardware doesn't support agaw %d\n", agaw);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001772 agaw = find_next_bit(&sagaw, 5, agaw);
1773 if (agaw >= 5)
1774 return -ENODEV;
1775 }
1776 domain->agaw = agaw;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001777
Weidong Han8e6040972008-12-08 15:49:06 +08001778 if (ecap_coherent(iommu->ecap))
1779 domain->iommu_coherency = 1;
1780 else
1781 domain->iommu_coherency = 0;
1782
Sheng Yang58c610b2009-03-18 15:33:05 +08001783 if (ecap_sc_support(iommu->ecap))
1784 domain->iommu_snooping = 1;
1785 else
1786 domain->iommu_snooping = 0;
1787
David Woodhouse214e39a2014-03-19 10:38:49 +00001788 if (intel_iommu_superpage)
1789 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1790 else
1791 domain->iommu_superpage = 0;
1792
Suresh Siddha4c923d42009-10-02 11:01:24 -07001793 domain->nid = iommu->node;
Weidong Hanc7151a82008-12-08 22:51:37 +08001794
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001795 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07001796 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001797 if (!domain->pgd)
1798 return -ENOMEM;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001799 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001800 return 0;
1801}
1802
1803static void domain_exit(struct dmar_domain *domain)
1804{
David Woodhouseea8ea462014-03-05 17:09:32 +00001805 struct page *freelist = NULL;
Alex Williamson71684402015-03-04 11:30:10 -07001806 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001807
1808	/* Domain 0 is reserved, so don't process it */
1809 if (!domain)
1810 return;
1811
Alex Williamson7b668352011-05-24 12:02:41 +01001812 /* Flush any lazy unmaps that may reference this domain */
1813 if (!intel_iommu_strict)
1814 flush_unmaps_timeout(0);
1815
Jiang Liu92d03cc2014-02-19 14:07:28 +08001816 /* remove associated devices */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001817 domain_remove_dev_info(domain);
Jiang Liu92d03cc2014-02-19 14:07:28 +08001818
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001819 /* destroy iovas */
1820 put_iova_domain(&domain->iovad);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001821
David Woodhouseea8ea462014-03-05 17:09:32 +00001822 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001823
Jiang Liu92d03cc2014-02-19 14:07:28 +08001824 /* clear attached or cached domains */
Jiang Liu0e242612014-02-19 14:07:34 +08001825 rcu_read_lock();
Alex Williamson71684402015-03-04 11:30:10 -07001826 for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus)
1827 iommu_detach_domain(domain, g_iommus[i]);
Jiang Liu0e242612014-02-19 14:07:34 +08001828 rcu_read_unlock();
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001829
David Woodhouseea8ea462014-03-05 17:09:32 +00001830 dma_free_pagelist(freelist);
1831
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001832 free_domain_mem(domain);
1833}
1834
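/*
 * Install the context entry for (bus, devfn) on this IOMMU so that DMA
 * from the device is translated through the domain's page tables (or, for
 * CONTEXT_TT_PASS_THROUGH, passed through untranslated). On caching-mode
 * hardware the context cache and IOTLB are invalidated afterwards;
 * otherwise flushing the write buffer is sufficient.
 */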
David Woodhouse64ae8922014-03-09 12:52:30 -07001835static int domain_context_mapping_one(struct dmar_domain *domain,
1836 struct intel_iommu *iommu,
1837 u8 bus, u8 devfn, int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001838{
1839 struct context_entry *context;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001840 unsigned long flags;
Weidong Hanea6606b2008-12-08 23:08:15 +08001841 struct dma_pte *pgd;
Weidong Hanea6606b2008-12-08 23:08:15 +08001842 int id;
1843 int agaw;
Yu Zhao93a23a72009-05-18 13:51:37 +08001844 struct device_domain_info *info = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001845
1846 pr_debug("Set context mapping for %02x:%02x.%d\n",
1847 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001848
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001849 BUG_ON(!domain->pgd);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001850 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1851 translation != CONTEXT_TT_MULTI_LEVEL);
Weidong Han5331fe62008-12-08 23:00:00 +08001852
David Woodhouse03ecc322015-02-13 14:35:21 +00001853 spin_lock_irqsave(&iommu->lock, flags);
1854 context = iommu_context_addr(iommu, bus, devfn, 1);
1855 spin_unlock_irqrestore(&iommu->lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001856 if (!context)
1857 return -ENOMEM;
1858 spin_lock_irqsave(&iommu->lock, flags);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001859 if (context_present(context)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001860 spin_unlock_irqrestore(&iommu->lock, flags);
1861 return 0;
1862 }
1863
Weidong Hanea6606b2008-12-08 23:08:15 +08001864 id = domain->id;
1865 pgd = domain->pgd;
1866
Jiang Liuab8dfe22014-07-11 14:19:27 +08001867 if (domain_type_is_vm_or_si(domain)) {
Jiang Liu44bde612014-07-11 14:19:29 +08001868 if (domain_type_is_vm(domain)) {
1869 id = iommu_attach_vm_domain(domain, iommu);
Jiang Liufb170fb2014-07-11 14:19:28 +08001870 if (id < 0) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001871 spin_unlock_irqrestore(&iommu->lock, flags);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001872 pr_err("%s: No free domain ids\n", iommu->name);
Weidong Hanea6606b2008-12-08 23:08:15 +08001873 return -EFAULT;
1874 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001875 }
1876
1877		/* Skip top levels of page tables for an
1878		 * iommu which has a smaller agaw than the default.
Chris Wright1672af12009-12-02 12:06:34 -08001879 * Unnecessary for PT mode.
Weidong Hanea6606b2008-12-08 23:08:15 +08001880 */
Chris Wright1672af12009-12-02 12:06:34 -08001881 if (translation != CONTEXT_TT_PASS_THROUGH) {
1882 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1883 pgd = phys_to_virt(dma_pte_addr(pgd));
1884 if (!dma_pte_present(pgd)) {
1885 spin_unlock_irqrestore(&iommu->lock, flags);
1886 return -ENOMEM;
1887 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001888 }
1889 }
1890 }
1891
1892 context_set_domain_id(context, id);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001893
Yu Zhao93a23a72009-05-18 13:51:37 +08001894 if (translation != CONTEXT_TT_PASS_THROUGH) {
David Woodhouse64ae8922014-03-09 12:52:30 -07001895 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
Yu Zhao93a23a72009-05-18 13:51:37 +08001896 translation = info ? CONTEXT_TT_DEV_IOTLB :
1897 CONTEXT_TT_MULTI_LEVEL;
1898 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001899 /*
1900	 * In pass-through mode, AW must be programmed to indicate the largest
1901	 * AGAW value supported by hardware, and ASR is ignored by hardware.
1902 */
Yu Zhao93a23a72009-05-18 13:51:37 +08001903 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001904 context_set_address_width(context, iommu->msagaw);
Yu Zhao93a23a72009-05-18 13:51:37 +08001905 else {
1906 context_set_address_root(context, virt_to_phys(pgd));
1907 context_set_address_width(context, iommu->agaw);
1908 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001909
1910 context_set_translation_type(context, translation);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001911 context_set_fault_enable(context);
1912 context_set_present(context);
Weidong Han5331fe62008-12-08 23:00:00 +08001913 domain_flush_cache(domain, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001914
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001915 /*
1916 * It's a non-present to present mapping. If hardware doesn't cache
1917	 * non-present entries we only need to flush the write-buffer. If it
1918	 * _does_ cache non-present entries, then it does so in the special
1919 * domain #0, which we have to flush:
1920 */
1921 if (cap_caching_mode(iommu->cap)) {
1922 iommu->flush.flush_context(iommu, 0,
1923 (((u16)bus) << 8) | devfn,
1924 DMA_CCMD_MASK_NOBIT,
1925 DMA_CCMD_DEVICE_INVL);
Jiang Liu18fd7792014-07-11 14:19:26 +08001926 iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001927 } else {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001928 iommu_flush_write_buffer(iommu);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001929 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001930 iommu_enable_dev_iotlb(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001931 spin_unlock_irqrestore(&iommu->lock, flags);
Weidong Hanc7151a82008-12-08 22:51:37 +08001932
Jiang Liufb170fb2014-07-11 14:19:28 +08001933 domain_attach_iommu(domain, iommu);
1934
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001935 return 0;
1936}
1937
Alex Williamson579305f2014-07-03 09:51:43 -06001938struct domain_context_mapping_data {
1939 struct dmar_domain *domain;
1940 struct intel_iommu *iommu;
1941 int translation;
1942};
1943
1944static int domain_context_mapping_cb(struct pci_dev *pdev,
1945 u16 alias, void *opaque)
1946{
1947 struct domain_context_mapping_data *data = opaque;
1948
1949 return domain_context_mapping_one(data->domain, data->iommu,
1950 PCI_BUS_NUM(alias), alias & 0xff,
1951 data->translation);
1952}
1953
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001954static int
David Woodhousee1f167f2014-03-09 15:24:46 -07001955domain_context_mapping(struct dmar_domain *domain, struct device *dev,
1956 int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001957{
David Woodhouse64ae8922014-03-09 12:52:30 -07001958 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07001959 u8 bus, devfn;
Alex Williamson579305f2014-07-03 09:51:43 -06001960 struct domain_context_mapping_data data;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001961
David Woodhousee1f167f2014-03-09 15:24:46 -07001962 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse64ae8922014-03-09 12:52:30 -07001963 if (!iommu)
1964 return -ENODEV;
1965
Alex Williamson579305f2014-07-03 09:51:43 -06001966 if (!dev_is_pci(dev))
1967 return domain_context_mapping_one(domain, iommu, bus, devfn,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001968 translation);
Alex Williamson579305f2014-07-03 09:51:43 -06001969
1970 data.domain = domain;
1971 data.iommu = iommu;
1972 data.translation = translation;
1973
1974 return pci_for_each_dma_alias(to_pci_dev(dev),
1975 &domain_context_mapping_cb, &data);
1976}
1977
1978static int domain_context_mapped_cb(struct pci_dev *pdev,
1979 u16 alias, void *opaque)
1980{
1981 struct intel_iommu *iommu = opaque;
1982
1983 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001984}
1985
David Woodhousee1f167f2014-03-09 15:24:46 -07001986static int domain_context_mapped(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001987{
Weidong Han5331fe62008-12-08 23:00:00 +08001988 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07001989 u8 bus, devfn;
Weidong Han5331fe62008-12-08 23:00:00 +08001990
David Woodhousee1f167f2014-03-09 15:24:46 -07001991 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001992 if (!iommu)
1993 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001994
Alex Williamson579305f2014-07-03 09:51:43 -06001995 if (!dev_is_pci(dev))
1996 return device_context_mapped(iommu, bus, devfn);
David Woodhousee1f167f2014-03-09 15:24:46 -07001997
Alex Williamson579305f2014-07-03 09:51:43 -06001998 return !pci_for_each_dma_alias(to_pci_dev(dev),
1999 domain_context_mapped_cb, iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002000}
2001
Fenghua Yuf5329592009-08-04 15:09:37 -07002002/* Returns a number of VTD pages, but aligned to MM page size */
2003static inline unsigned long aligned_nrpages(unsigned long host_addr,
2004 size_t size)
2005{
2006 host_addr &= ~PAGE_MASK;
2007 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
2008}
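/*
 * Example: with 4KiB pages on both sides, host_addr = 0x1234 and
 * size = 0x2000 leave an offset of 0x234 into the first page, so
 * PAGE_ALIGN(0x234 + 0x2000) = 0x3000 and the result is 3 VT-d pages.
 */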
2009
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002010/* Return largest possible superpage level for a given mapping */
2011static inline int hardware_largepage_caps(struct dmar_domain *domain,
2012 unsigned long iov_pfn,
2013 unsigned long phy_pfn,
2014 unsigned long pages)
2015{
2016 int support, level = 1;
2017 unsigned long pfnmerge;
2018
2019 support = domain->iommu_superpage;
2020
2021 /* To use a large page, the virtual *and* physical addresses
2022 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
2023 of them will mean we have to use smaller pages. So just
2024 merge them and check both at once. */
2025 pfnmerge = iov_pfn | phy_pfn;
2026
2027 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
2028 pages >>= VTD_STRIDE_SHIFT;
2029 if (!pages)
2030 break;
2031 pfnmerge >>= VTD_STRIDE_SHIFT;
2032 level++;
2033 support--;
2034 }
2035 return level;
2036}
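/*
 * Example: if the domain supports one superpage level and both iov_pfn and
 * phy_pfn have their low 9 bits clear (2MiB aligned) with pages >= 512,
 * the loop runs once and returns level 2, i.e. a 2MiB page can be used.
 */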
2037
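/*
 * Core mapping routine: populate PTEs for nr_pages starting at iov_pfn,
 * taking the physical addresses either from a scatterlist or from a
 * contiguous phys_pfn. Superpages are used whenever alignment and the
 * remaining length allow, and the CPU cache is flushed for each page-table
 * page worth of PTEs written.
 */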
David Woodhouse9051aa02009-06-29 12:30:54 +01002038static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2039 struct scatterlist *sg, unsigned long phys_pfn,
2040 unsigned long nr_pages, int prot)
David Woodhousee1605492009-06-29 11:17:38 +01002041{
2042 struct dma_pte *first_pte = NULL, *pte = NULL;
David Woodhouse9051aa02009-06-29 12:30:54 +01002043 phys_addr_t uninitialized_var(pteval);
Jiang Liucc4f14a2014-11-26 09:42:10 +08002044 unsigned long sg_res = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002045 unsigned int largepage_lvl = 0;
2046 unsigned long lvl_pages = 0;
David Woodhousee1605492009-06-29 11:17:38 +01002047
Jiang Liu162d1b12014-07-11 14:19:35 +08002048 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
David Woodhousee1605492009-06-29 11:17:38 +01002049
2050 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
2051 return -EINVAL;
2052
2053 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
2054
Jiang Liucc4f14a2014-11-26 09:42:10 +08002055 if (!sg) {
2056 sg_res = nr_pages;
David Woodhouse9051aa02009-06-29 12:30:54 +01002057 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
2058 }
2059
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002060 while (nr_pages > 0) {
David Woodhousec85994e2009-07-01 19:21:24 +01002061 uint64_t tmp;
2062
David Woodhousee1605492009-06-29 11:17:38 +01002063 if (!sg_res) {
Fenghua Yuf5329592009-08-04 15:09:37 -07002064 sg_res = aligned_nrpages(sg->offset, sg->length);
David Woodhousee1605492009-06-29 11:17:38 +01002065 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
2066 sg->dma_length = sg->length;
2067 pteval = page_to_phys(sg_page(sg)) | prot;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002068 phys_pfn = pteval >> VTD_PAGE_SHIFT;
David Woodhousee1605492009-06-29 11:17:38 +01002069 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002070
David Woodhousee1605492009-06-29 11:17:38 +01002071 if (!pte) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002072 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2073
David Woodhouse5cf0a762014-03-19 16:07:49 +00002074 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
David Woodhousee1605492009-06-29 11:17:38 +01002075 if (!pte)
2076 return -ENOMEM;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002077			/* It is a large page */
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002078 if (largepage_lvl > 1) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002079 pteval |= DMA_PTE_LARGE_PAGE;
Jiang Liud41a4ad2014-07-11 14:19:34 +08002080 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2081 /*
2082 * Ensure that old small page tables are
2083 * removed to make room for superpage,
2084 * if they exist.
2085 */
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002086 dma_pte_free_pagetable(domain, iov_pfn,
Jiang Liud41a4ad2014-07-11 14:19:34 +08002087 iov_pfn + lvl_pages - 1);
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002088 } else {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002089 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002090 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002091
David Woodhousee1605492009-06-29 11:17:38 +01002092 }
2093		/* We don't need a lock here; nobody else
2094 * touches the iova range
2095 */
David Woodhouse7766a3f2009-07-01 20:27:03 +01002096 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
David Woodhousec85994e2009-07-01 19:21:24 +01002097 if (tmp) {
David Woodhouse1bf20f02009-06-29 22:06:43 +01002098 static int dumps = 5;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002099 pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2100 iov_pfn, tmp, (unsigned long long)pteval);
David Woodhouse1bf20f02009-06-29 22:06:43 +01002101 if (dumps) {
2102 dumps--;
2103 debug_dma_dump_mappings(NULL);
2104 }
2105 WARN_ON(1);
2106 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002107
2108 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2109
2110 BUG_ON(nr_pages < lvl_pages);
2111 BUG_ON(sg_res < lvl_pages);
2112
2113 nr_pages -= lvl_pages;
2114 iov_pfn += lvl_pages;
2115 phys_pfn += lvl_pages;
2116 pteval += lvl_pages * VTD_PAGE_SIZE;
2117 sg_res -= lvl_pages;
2118
2119 /* If the next PTE would be the first in a new page, then we
2120 need to flush the cache on the entries we've just written.
2121 And then we'll need to recalculate 'pte', so clear it and
2122 let it get set again in the if (!pte) block above.
2123
2124 If we're done (!nr_pages) we need to flush the cache too.
2125
2126 Also if we've been setting superpages, we may need to
2127 recalculate 'pte' and switch back to smaller pages for the
2128 end of the mapping, if the trailing size is not enough to
2129 use another superpage (i.e. sg_res < lvl_pages). */
David Woodhousee1605492009-06-29 11:17:38 +01002130 pte++;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002131 if (!nr_pages || first_pte_in_page(pte) ||
2132 (largepage_lvl > 1 && sg_res < lvl_pages)) {
David Woodhousee1605492009-06-29 11:17:38 +01002133 domain_flush_cache(domain, first_pte,
2134 (void *)pte - (void *)first_pte);
2135 pte = NULL;
2136 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002137
2138 if (!sg_res && nr_pages)
David Woodhousee1605492009-06-29 11:17:38 +01002139 sg = sg_next(sg);
2140 }
2141 return 0;
2142}
2143
David Woodhouse9051aa02009-06-29 12:30:54 +01002144static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2145 struct scatterlist *sg, unsigned long nr_pages,
2146 int prot)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002147{
David Woodhouse9051aa02009-06-29 12:30:54 +01002148 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2149}
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002150
David Woodhouse9051aa02009-06-29 12:30:54 +01002151static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2152 unsigned long phys_pfn, unsigned long nr_pages,
2153 int prot)
2154{
2155 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002156}
2157
Weidong Hanc7151a82008-12-08 22:51:37 +08002158static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002159{
Weidong Hanc7151a82008-12-08 22:51:37 +08002160 if (!iommu)
2161 return;
Weidong Han8c11e792008-12-08 15:29:22 +08002162
2163 clear_context_table(iommu, bus, devfn);
2164 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002165 DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002166 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002167}
2168
David Woodhouse109b9b02012-05-25 17:43:02 +01002169static inline void unlink_domain_info(struct device_domain_info *info)
2170{
2171 assert_spin_locked(&device_domain_lock);
2172 list_del(&info->link);
2173 list_del(&info->global);
2174 if (info->dev)
David Woodhouse0bcb3e22014-03-06 17:12:03 +00002175 info->dev->archdata.iommu = NULL;
David Woodhouse109b9b02012-05-25 17:43:02 +01002176}
2177
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002178static void domain_remove_dev_info(struct dmar_domain *domain)
2179{
Yijing Wang3a74ca02014-05-20 20:37:47 +08002180 struct device_domain_info *info, *tmp;
Jiang Liufb170fb2014-07-11 14:19:28 +08002181 unsigned long flags;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002182
2183 spin_lock_irqsave(&device_domain_lock, flags);
Yijing Wang3a74ca02014-05-20 20:37:47 +08002184 list_for_each_entry_safe(info, tmp, &domain->devices, link) {
David Woodhouse109b9b02012-05-25 17:43:02 +01002185 unlink_domain_info(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002186 spin_unlock_irqrestore(&device_domain_lock, flags);
2187
Yu Zhao93a23a72009-05-18 13:51:37 +08002188 iommu_disable_dev_iotlb(info);
David Woodhouse7c7faa12014-03-09 13:33:06 -07002189 iommu_detach_dev(info->iommu, info->bus, info->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002190
Jiang Liuab8dfe22014-07-11 14:19:27 +08002191 if (domain_type_is_vm(domain)) {
David Woodhouse7c7faa12014-03-09 13:33:06 -07002192 iommu_detach_dependent_devices(info->iommu, info->dev);
Jiang Liufb170fb2014-07-11 14:19:28 +08002193 domain_detach_iommu(domain, info->iommu);
Jiang Liu92d03cc2014-02-19 14:07:28 +08002194 }
2195
2196 free_devinfo_mem(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002197 spin_lock_irqsave(&device_domain_lock, flags);
2198 }
2199 spin_unlock_irqrestore(&device_domain_lock, flags);
2200}
2201
2202/*
2203 * find_domain
David Woodhouse1525a292014-03-06 16:19:30 +00002204 * Note: the domain info is stored in struct device->archdata.iommu
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002205 */
David Woodhouse1525a292014-03-06 16:19:30 +00002206static struct dmar_domain *find_domain(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002207{
2208 struct device_domain_info *info;
2209
2210 /* No lock here, assumes no domain exit in normal case */
David Woodhouse1525a292014-03-06 16:19:30 +00002211 info = dev->archdata.iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002212 if (info)
2213 return info->domain;
2214 return NULL;
2215}
2216
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002217static inline struct device_domain_info *
Jiang Liu745f2582014-02-19 14:07:26 +08002218dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2219{
2220 struct device_domain_info *info;
2221
2222 list_for_each_entry(info, &device_domain_list, global)
David Woodhouse41e80dca2014-03-09 13:55:54 -07002223 if (info->iommu->segment == segment && info->bus == bus &&
Jiang Liu745f2582014-02-19 14:07:26 +08002224 info->devfn == devfn)
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002225 return info;
Jiang Liu745f2582014-02-19 14:07:26 +08002226
2227 return NULL;
2228}
2229
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002230static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
David Woodhouse41e80dca2014-03-09 13:55:54 -07002231 int bus, int devfn,
David Woodhouseb718cd32014-03-09 13:11:33 -07002232 struct device *dev,
2233 struct dmar_domain *domain)
Jiang Liu745f2582014-02-19 14:07:26 +08002234{
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002235 struct dmar_domain *found = NULL;
Jiang Liu745f2582014-02-19 14:07:26 +08002236 struct device_domain_info *info;
2237 unsigned long flags;
2238
2239 info = alloc_devinfo_mem();
2240 if (!info)
David Woodhouseb718cd32014-03-09 13:11:33 -07002241 return NULL;
Jiang Liu745f2582014-02-19 14:07:26 +08002242
Jiang Liu745f2582014-02-19 14:07:26 +08002243 info->bus = bus;
2244 info->devfn = devfn;
2245 info->dev = dev;
2246 info->domain = domain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002247 info->iommu = iommu;
Jiang Liu745f2582014-02-19 14:07:26 +08002248
2249 spin_lock_irqsave(&device_domain_lock, flags);
2250 if (dev)
David Woodhouse0bcb3e22014-03-06 17:12:03 +00002251 found = find_domain(dev);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002252 else {
2253 struct device_domain_info *info2;
David Woodhouse41e80dca2014-03-09 13:55:54 -07002254 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002255 if (info2)
2256 found = info2->domain;
2257 }
Jiang Liu745f2582014-02-19 14:07:26 +08002258 if (found) {
2259 spin_unlock_irqrestore(&device_domain_lock, flags);
2260 free_devinfo_mem(info);
David Woodhouseb718cd32014-03-09 13:11:33 -07002261 /* Caller must free the original domain */
2262 return found;
Jiang Liu745f2582014-02-19 14:07:26 +08002263 }
2264
David Woodhouseb718cd32014-03-09 13:11:33 -07002265 list_add(&info->link, &domain->devices);
2266 list_add(&info->global, &device_domain_list);
2267 if (dev)
2268 dev->archdata.iommu = info;
2269 spin_unlock_irqrestore(&device_domain_lock, flags);
2270
2271 return domain;
Jiang Liu745f2582014-02-19 14:07:26 +08002272}
2273
Alex Williamson579305f2014-07-03 09:51:43 -06002274static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2275{
2276 *(u16 *)opaque = alias;
2277 return 0;
2278}
2279
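/*
 * Find or create the dmar_domain a device should use. For PCI devices the
 * DMA alias is looked up first so that all functions sharing an alias end
 * up in the same domain. If no existing domain is found, a new one is
 * allocated, attached to the IOMMU, initialized with the requested guest
 * address width and registered for the DMA alias as well.
 */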
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002280/* the returned domain is initialized */
David Woodhouse146922e2014-03-09 15:44:17 -07002281static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002282{
Alex Williamson579305f2014-07-03 09:51:43 -06002283 struct dmar_domain *domain, *tmp;
2284 struct intel_iommu *iommu;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002285 struct device_domain_info *info;
Alex Williamson579305f2014-07-03 09:51:43 -06002286 u16 dma_alias;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002287 unsigned long flags;
Yijing Wangaa4d0662014-05-26 20:14:06 +08002288 u8 bus, devfn;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002289
David Woodhouse146922e2014-03-09 15:44:17 -07002290 domain = find_domain(dev);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002291 if (domain)
2292 return domain;
2293
David Woodhouse146922e2014-03-09 15:44:17 -07002294 iommu = device_to_iommu(dev, &bus, &devfn);
2295 if (!iommu)
Alex Williamson579305f2014-07-03 09:51:43 -06002296 return NULL;
2297
2298 if (dev_is_pci(dev)) {
2299 struct pci_dev *pdev = to_pci_dev(dev);
2300
2301 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2302
2303 spin_lock_irqsave(&device_domain_lock, flags);
2304 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2305 PCI_BUS_NUM(dma_alias),
2306 dma_alias & 0xff);
2307 if (info) {
2308 iommu = info->iommu;
2309 domain = info->domain;
2310 }
2311 spin_unlock_irqrestore(&device_domain_lock, flags);
2312
2313		/* DMA alias already has a domain, use it */
2314 if (info)
2315 goto found_domain;
2316 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002317
David Woodhouse146922e2014-03-09 15:44:17 -07002318 /* Allocate and initialize new domain for the device */
Jiang Liuab8dfe22014-07-11 14:19:27 +08002319 domain = alloc_domain(0);
Jiang Liu745f2582014-02-19 14:07:26 +08002320 if (!domain)
Alex Williamson579305f2014-07-03 09:51:43 -06002321 return NULL;
Jiang Liu44bde612014-07-11 14:19:29 +08002322 domain->id = iommu_attach_domain(domain, iommu);
2323 if (domain->id < 0) {
Alex Williamson2fe9723d2011-03-04 14:52:30 -07002324 free_domain_mem(domain);
Alex Williamson579305f2014-07-03 09:51:43 -06002325 return NULL;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002326 }
Jiang Liufb170fb2014-07-11 14:19:28 +08002327 domain_attach_iommu(domain, iommu);
Alex Williamson579305f2014-07-03 09:51:43 -06002328 if (domain_init(domain, gaw)) {
2329 domain_exit(domain);
2330 return NULL;
2331 }
2332
2333 /* register PCI DMA alias device */
2334 if (dev_is_pci(dev)) {
2335 tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2336 dma_alias & 0xff, NULL, domain);
2337
2338 if (!tmp || tmp != domain) {
2339 domain_exit(domain);
2340 domain = tmp;
2341 }
2342
David Woodhouseb718cd32014-03-09 13:11:33 -07002343 if (!domain)
Alex Williamson579305f2014-07-03 09:51:43 -06002344 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002345 }
2346
2347found_domain:
Alex Williamson579305f2014-07-03 09:51:43 -06002348 tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
2349
2350 if (!tmp || tmp != domain) {
2351 domain_exit(domain);
2352 domain = tmp;
2353 }
David Woodhouseb718cd32014-03-09 13:11:33 -07002354
2355 return domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002356}
2357
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002358static int iommu_identity_mapping;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002359#define IDENTMAP_ALL 1
2360#define IDENTMAP_GFX 2
2361#define IDENTMAP_AZALIA 4
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002362
David Woodhouseb2132032009-06-26 18:50:28 +01002363static int iommu_domain_identity_map(struct dmar_domain *domain,
2364 unsigned long long start,
2365 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002366{
David Woodhousec5395d52009-06-28 16:35:56 +01002367 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2368 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002369
David Woodhousec5395d52009-06-28 16:35:56 +01002370 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2371 dma_to_mm_pfn(last_vpfn))) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002372 pr_err("Reserving iova failed\n");
David Woodhouseb2132032009-06-26 18:50:28 +01002373 return -ENOMEM;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002374 }
2375
David Woodhousec5395d52009-06-28 16:35:56 +01002376 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2377 start, end, domain->id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002378 /*
2379	 * RMRR range might overlap with the physical memory range,
2380 * clear it first
2381 */
David Woodhousec5395d52009-06-28 16:35:56 +01002382 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002383
David Woodhousec5395d52009-06-28 16:35:56 +01002384 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2385 last_vpfn - first_vpfn + 1,
David Woodhouse61df7442009-06-28 11:55:58 +01002386 DMA_PTE_READ|DMA_PTE_WRITE);
David Woodhouseb2132032009-06-26 18:50:28 +01002387}
2388
David Woodhouse0b9d9752014-03-09 15:48:15 -07002389static int iommu_prepare_identity_map(struct device *dev,
David Woodhouseb2132032009-06-26 18:50:28 +01002390 unsigned long long start,
2391 unsigned long long end)
2392{
2393 struct dmar_domain *domain;
2394 int ret;
2395
David Woodhouse0b9d9752014-03-09 15:48:15 -07002396 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
David Woodhouseb2132032009-06-26 18:50:28 +01002397 if (!domain)
2398 return -ENOMEM;
2399
David Woodhouse19943b02009-08-04 16:19:20 +01002400 /* For _hardware_ passthrough, don't bother. But for software
2401 passthrough, we do it anyway -- it may indicate a memory
2402	   range which is reserved in E820 and so didn't get set
2403	   up in si_domain to start with */
2404 if (domain == si_domain && hw_pass_through) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002405 pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2406 dev_name(dev), start, end);
David Woodhouse19943b02009-08-04 16:19:20 +01002407 return 0;
2408 }
2409
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002410 pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2411 dev_name(dev), start, end);
2412
David Woodhouse5595b522009-12-02 09:21:55 +00002413 if (end < start) {
2414 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2415 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2416 dmi_get_system_info(DMI_BIOS_VENDOR),
2417 dmi_get_system_info(DMI_BIOS_VERSION),
2418 dmi_get_system_info(DMI_PRODUCT_VERSION));
2419 ret = -EIO;
2420 goto error;
2421 }
2422
David Woodhouse2ff729f2009-08-26 14:25:41 +01002423 if (end >> agaw_to_width(domain->agaw)) {
2424 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2425 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2426 agaw_to_width(domain->agaw),
2427 dmi_get_system_info(DMI_BIOS_VENDOR),
2428 dmi_get_system_info(DMI_BIOS_VERSION),
2429 dmi_get_system_info(DMI_PRODUCT_VERSION));
2430 ret = -EIO;
2431 goto error;
2432 }
David Woodhouse19943b02009-08-04 16:19:20 +01002433
David Woodhouseb2132032009-06-26 18:50:28 +01002434 ret = iommu_domain_identity_map(domain, start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002435 if (ret)
2436 goto error;
2437
2438 /* context entry init */
David Woodhouse0b9d9752014-03-09 15:48:15 -07002439 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
David Woodhouseb2132032009-06-26 18:50:28 +01002440 if (ret)
2441 goto error;
2442
2443 return 0;
2444
2445 error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002446 domain_exit(domain);
2447 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002448}
2449
2450static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
David Woodhouse0b9d9752014-03-09 15:48:15 -07002451 struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002452{
David Woodhouse0b9d9752014-03-09 15:48:15 -07002453 if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002454 return 0;
David Woodhouse0b9d9752014-03-09 15:48:15 -07002455 return iommu_prepare_identity_map(dev, rmrr->base_address,
2456 rmrr->end_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002457}
2458
Suresh Siddhad3f13812011-08-23 17:05:25 -07002459#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002460static inline void iommu_prepare_isa(void)
2461{
2462 struct pci_dev *pdev;
2463 int ret;
2464
2465 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2466 if (!pdev)
2467 return;
2468
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002469 pr_info("Prepare 0-16MiB unity mapping for LPC\n");
David Woodhouse0b9d9752014-03-09 15:48:15 -07002470 ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002471
2472 if (ret)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002473 pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002474
Yijing Wang9b27e822014-05-20 20:37:52 +08002475 pci_dev_put(pdev);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002476}
2477#else
2478static inline void iommu_prepare_isa(void)
2479{
2480 return;
2481}
Suresh Siddhad3f13812011-08-23 17:05:25 -07002482#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002483
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002484static int md_domain_init(struct dmar_domain *domain, int guest_width);
David Woodhousec7ab48d2009-06-26 19:10:36 +01002485
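/*
 * Set up the static identity (si) domain used for identity-mapped devices.
 * It is attached to every active IOMMU (all of them must agree on a single
 * domain id) and, unless hardware pass-through is in use, every online
 * memory range is identity-mapped into it.
 */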
Matt Kraai071e1372009-08-23 22:30:22 -07002486static int __init si_domain_init(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002487{
2488 struct dmar_drhd_unit *drhd;
2489 struct intel_iommu *iommu;
David Woodhousec7ab48d2009-06-26 19:10:36 +01002490 int nid, ret = 0;
Jiang Liu44bde612014-07-11 14:19:29 +08002491 bool first = true;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002492
Jiang Liuab8dfe22014-07-11 14:19:27 +08002493 si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002494 if (!si_domain)
2495 return -EFAULT;
2496
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002497 for_each_active_iommu(iommu, drhd) {
2498 ret = iommu_attach_domain(si_domain, iommu);
Jiang Liufb170fb2014-07-11 14:19:28 +08002499 if (ret < 0) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002500 domain_exit(si_domain);
2501 return -EFAULT;
Jiang Liu44bde612014-07-11 14:19:29 +08002502 } else if (first) {
2503 si_domain->id = ret;
2504 first = false;
2505 } else if (si_domain->id != ret) {
2506 domain_exit(si_domain);
2507 return -EFAULT;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002508 }
Jiang Liufb170fb2014-07-11 14:19:28 +08002509 domain_attach_iommu(si_domain, iommu);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002510 }
2511
2512 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2513 domain_exit(si_domain);
2514 return -EFAULT;
2515 }
2516
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002517 pr_debug("Identity mapping domain is domain %d\n",
Jiang Liu9544c002014-01-06 14:18:13 +08002518 si_domain->id);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002519
David Woodhouse19943b02009-08-04 16:19:20 +01002520 if (hw)
2521 return 0;
2522
David Woodhousec7ab48d2009-06-26 19:10:36 +01002523 for_each_online_node(nid) {
Tejun Heod4bbf7e2011-11-28 09:46:22 -08002524 unsigned long start_pfn, end_pfn;
2525 int i;
2526
2527 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2528 ret = iommu_domain_identity_map(si_domain,
2529 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2530 if (ret)
2531 return ret;
2532 }
David Woodhousec7ab48d2009-06-26 19:10:36 +01002533 }
2534
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002535 return 0;
2536}
2537
David Woodhouse9b226622014-03-09 14:03:28 -07002538static int identity_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002539{
2540 struct device_domain_info *info;
2541
2542 if (likely(!iommu_identity_mapping))
2543 return 0;
2544
David Woodhouse9b226622014-03-09 14:03:28 -07002545 info = dev->archdata.iommu;
Mike Traviscb452a42011-05-28 13:15:03 -05002546 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2547 return (info->domain == si_domain);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002548
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002549 return 0;
2550}
2551
2552static int domain_add_dev_info(struct dmar_domain *domain,
David Woodhouse5913c9b2014-03-09 16:27:31 -07002553 struct device *dev, int translation)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002554{
David Woodhouse0ac72662014-03-09 13:19:22 -07002555 struct dmar_domain *ndomain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002556 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07002557 u8 bus, devfn;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002558 int ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002559
David Woodhouse5913c9b2014-03-09 16:27:31 -07002560 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002561 if (!iommu)
2562 return -ENODEV;
2563
David Woodhouse5913c9b2014-03-09 16:27:31 -07002564 ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
David Woodhouse0ac72662014-03-09 13:19:22 -07002565 if (ndomain != domain)
2566 return -EBUSY;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002567
David Woodhouse5913c9b2014-03-09 16:27:31 -07002568 ret = domain_context_mapping(domain, dev, translation);
David Woodhousee2ad23d2012-05-25 17:42:54 +01002569 if (ret) {
David Woodhouse5913c9b2014-03-09 16:27:31 -07002570 domain_remove_one_dev_info(domain, dev);
David Woodhousee2ad23d2012-05-25 17:42:54 +01002571 return ret;
2572 }
2573
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002574 return 0;
2575}
2576
David Woodhouse0b9d9752014-03-09 15:48:15 -07002577static bool device_has_rmrr(struct device *dev)
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002578{
2579 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002580 struct device *tmp;
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002581 int i;
2582
Jiang Liu0e242612014-02-19 14:07:34 +08002583 rcu_read_lock();
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002584 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002585 /*
2586 * Return TRUE if this RMRR contains the device that
2587 * is passed in.
2588 */
2589 for_each_active_dev_scope(rmrr->devices,
2590 rmrr->devices_cnt, i, tmp)
David Woodhouse0b9d9752014-03-09 15:48:15 -07002591 if (tmp == dev) {
Jiang Liu0e242612014-02-19 14:07:34 +08002592 rcu_read_unlock();
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002593 return true;
Jiang Liub683b232014-02-19 14:07:32 +08002594 }
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002595 }
Jiang Liu0e242612014-02-19 14:07:34 +08002596 rcu_read_unlock();
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002597 return false;
2598}
2599
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002600/*
2601 * There are a couple cases where we need to restrict the functionality of
2602 * devices associated with RMRRs. The first is when evaluating a device for
2603 * identity mapping because problems exist when devices are moved in and out
2604 * of domains and their respective RMRR information is lost. This means that
2605 * a device with associated RMRRs will never be in a "passthrough" domain.
2606 * The second is use of the device through the IOMMU API. This interface
2607 * expects to have full control of the IOVA space for the device. We cannot
2608 * satisfy both the requirement that RMRR access is maintained and have an
2609 * unencumbered IOVA space. We also have no ability to quiesce the device's
2610 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2611 * We therefore prevent devices associated with an RMRR from participating in
2612 * the IOMMU API, which eliminates them from device assignment.
2613 *
2614 * In both cases we assume that PCI USB devices with RMRRs have them largely
2615 * for historical reasons and that the RMRR space is not actively used post
2616 * boot. This exclusion may change if vendors begin to abuse it.
David Woodhouse18436af2015-03-25 15:05:47 +00002617 *
2618 * The same exception is made for graphics devices, with the requirement that
2619 * any use of the RMRR regions will be torn down before assigning the device
2620 * to a guest.
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002621 */
2622static bool device_is_rmrr_locked(struct device *dev)
2623{
2624 if (!device_has_rmrr(dev))
2625 return false;
2626
2627 if (dev_is_pci(dev)) {
2628 struct pci_dev *pdev = to_pci_dev(dev);
2629
David Woodhouse18436af2015-03-25 15:05:47 +00002630 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002631 return false;
2632 }
2633
2634 return true;
2635}
2636
David Woodhouse3bdb2592014-03-09 16:03:08 -07002637static int iommu_should_identity_map(struct device *dev, int startup)
David Woodhouse6941af22009-07-04 18:24:27 +01002638{
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002639
David Woodhouse3bdb2592014-03-09 16:03:08 -07002640 if (dev_is_pci(dev)) {
2641 struct pci_dev *pdev = to_pci_dev(dev);
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002642
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002643 if (device_is_rmrr_locked(dev))
David Woodhouse3bdb2592014-03-09 16:03:08 -07002644 return 0;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002645
David Woodhouse3bdb2592014-03-09 16:03:08 -07002646 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2647 return 1;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002648
David Woodhouse3bdb2592014-03-09 16:03:08 -07002649 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2650 return 1;
2651
2652 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2653 return 0;
2654
2655 /*
2656 * We want to start off with all devices in the 1:1 domain, and
2657 * take them out later if we find they can't access all of memory.
2658 *
2659 * However, we can't do this for PCI devices behind bridges,
2660 * because all PCI devices behind the same bridge will end up
2661 * with the same source-id on their transactions.
2662 *
2663 * Practically speaking, we can't change things around for these
2664 * devices at run-time, because we can't be sure there'll be no
2665 * DMA transactions in flight for any of their siblings.
2666 *
2667 * So PCI devices (unless they're on the root bus) as well as
2668 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2669 * the 1:1 domain, just in _case_ one of their siblings turns out
2670 * not to be able to map all of memory.
2671 */
2672 if (!pci_is_pcie(pdev)) {
2673 if (!pci_is_root_bus(pdev->bus))
2674 return 0;
2675 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2676 return 0;
2677 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2678 return 0;
2679 } else {
2680 if (device_has_rmrr(dev))
2681 return 0;
2682 }
David Woodhouse6941af22009-07-04 18:24:27 +01002683
David Woodhouse3dfc8132009-07-04 19:11:08 +01002684 /*
David Woodhouse3dfc8132009-07-04 19:11:08 +01002685 * At boot time, we don't yet know if devices will be 64-bit capable.
David Woodhouse3bdb2592014-03-09 16:03:08 -07002686 * Assume that they will — if they turn out not to be, then we can
David Woodhouse3dfc8132009-07-04 19:11:08 +01002687 * take them out of the 1:1 domain later.
2688 */
Chris Wright8fcc5372011-05-28 13:15:02 -05002689 if (!startup) {
2690 /*
2691 * If the device's dma_mask is less than the system's memory
2692 * size then this is not a candidate for identity mapping.
2693 */
David Woodhouse3bdb2592014-03-09 16:03:08 -07002694 u64 dma_mask = *dev->dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002695
David Woodhouse3bdb2592014-03-09 16:03:08 -07002696 if (dev->coherent_dma_mask &&
2697 dev->coherent_dma_mask < dma_mask)
2698 dma_mask = dev->coherent_dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002699
David Woodhouse3bdb2592014-03-09 16:03:08 -07002700 return dma_mask >= dma_get_required_mask(dev);
Chris Wright8fcc5372011-05-28 13:15:02 -05002701 }
David Woodhouse6941af22009-07-04 18:24:27 +01002702
2703 return 1;
2704}
2705
David Woodhousecf04eee2014-03-21 16:49:04 +00002706static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2707{
2708 int ret;
2709
2710 if (!iommu_should_identity_map(dev, 1))
2711 return 0;
2712
2713 ret = domain_add_dev_info(si_domain, dev,
2714 hw ? CONTEXT_TT_PASS_THROUGH :
2715 CONTEXT_TT_MULTI_LEVEL);
2716 if (!ret)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002717 pr_info("%s identity mapping for device %s\n",
2718 hw ? "Hardware" : "Software", dev_name(dev));
David Woodhousecf04eee2014-03-21 16:49:04 +00002719 else if (ret == -ENODEV)
2720 /* device not associated with an iommu */
2721 ret = 0;
2722
2723 return ret;
2724}
2725
2726
Matt Kraai071e1372009-08-23 22:30:22 -07002727static int __init iommu_prepare_static_identity_mapping(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002728{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002729 struct pci_dev *pdev = NULL;
David Woodhousecf04eee2014-03-21 16:49:04 +00002730 struct dmar_drhd_unit *drhd;
2731 struct intel_iommu *iommu;
2732 struct device *dev;
2733 int i;
2734 int ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002735
David Woodhouse19943b02009-08-04 16:19:20 +01002736 ret = si_domain_init(hw);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002737 if (ret)
2738 return -EFAULT;
2739
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002740 for_each_pci_dev(pdev) {
David Woodhousecf04eee2014-03-21 16:49:04 +00002741 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2742 if (ret)
2743 return ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002744 }
2745
David Woodhousecf04eee2014-03-21 16:49:04 +00002746 for_each_active_iommu(iommu, drhd)
2747 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2748 struct acpi_device_physical_node *pn;
2749 struct acpi_device *adev;
2750
2751 if (dev->bus != &acpi_bus_type)
2752 continue;
2753
2755	2754		adev = to_acpi_device(dev);
2755 mutex_lock(&adev->physical_node_lock);
2756 list_for_each_entry(pn, &adev->physical_node_list, node) {
2757 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2758 if (ret)
2759 break;
2760 }
2761 mutex_unlock(&adev->physical_node_lock);
2762 if (ret)
2763 return ret;
2764 }
2765
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002766 return 0;
2767}
2768
Jiang Liuffebeb42014-11-09 22:48:02 +08002769static void intel_iommu_init_qi(struct intel_iommu *iommu)
2770{
2771 /*
2772 * Start from the sane iommu hardware state.
2773 * If the queued invalidation is already initialized by us
2774 * (for example, while enabling interrupt-remapping) then
2775 * we got the things already rolling from a sane state.
2776 */
2777 if (!iommu->qi) {
2778 /*
2779 * Clear any previous faults.
2780 */
2781 dmar_fault(-1, iommu);
2782 /*
2783 * Disable queued invalidation if supported and already enabled
2784 * before OS handover.
2785 */
2786 dmar_disable_qi(iommu);
2787 }
2788
2789 if (dmar_enable_qi(iommu)) {
2790 /*
2791 * Queued Invalidate not enabled, use Register Based Invalidate
2792 */
2793 iommu->flush.flush_context = __iommu_flush_context;
2794 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002795 pr_info("%s: Using Register based invalidation\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08002796 iommu->name);
2797 } else {
2798 iommu->flush.flush_context = qi_flush_context;
2799 iommu->flush.flush_iotlb = qi_flush_iotlb;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002800 pr_info("%s: Using Queued invalidation\n", iommu->name);
Jiang Liuffebeb42014-11-09 22:48:02 +08002801 }
2802}
2803
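/*
 * Copy one bus worth of context entries from the tables left behind by the
 * previous kernel (e.g. when booting a kdump kernel with translation still
 * enabled).  The old context table is ioremapped from its physical address
 * and copied into freshly allocated pages, and any domain IDs found in use
 * are reserved in iommu->domain_ids.  With extended root entries each bus
 * has two context tables (lower pointer for devfn 0-0x7f, upper pointer for
 * 0x80-0xff), hence the doubled bus and devfn indices below.
 */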
Joerg Roedel091d42e2015-06-12 11:56:10 +02002804static int copy_context_table(struct intel_iommu *iommu,
2805 struct root_entry *old_re,
2806 struct context_entry **tbl,
2807 int bus, bool ext)
2808{
2809 struct context_entry *old_ce = NULL, *new_ce = NULL, ce;
Joerg Roedeldbcd8612015-06-12 12:02:09 +02002810 int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
Joerg Roedel091d42e2015-06-12 11:56:10 +02002811 phys_addr_t old_ce_phys;
2812
2813 tbl_idx = ext ? bus * 2 : bus;
2814
2815 for (devfn = 0; devfn < 256; devfn++) {
2816 /* First calculate the correct index */
2817 idx = (ext ? devfn * 2 : devfn) % 256;
2818
2819 if (idx == 0) {
2820 /* First save what we may have and clean up */
2821 if (new_ce) {
2822 tbl[tbl_idx] = new_ce;
2823 __iommu_flush_cache(iommu, new_ce,
2824 VTD_PAGE_SIZE);
2825 pos = 1;
2826 }
2827
2828 if (old_ce)
2829 iounmap(old_ce);
2830
2831 ret = 0;
2832 if (devfn < 0x80)
2833 old_ce_phys = root_entry_lctp(old_re);
2834 else
2835 old_ce_phys = root_entry_uctp(old_re);
2836
2837 if (!old_ce_phys) {
2838 if (ext && devfn == 0) {
2839 /* No LCTP, try UCTP */
2840 devfn = 0x7f;
2841 continue;
2842 } else {
2843 goto out;
2844 }
2845 }
2846
2847 ret = -ENOMEM;
2848 old_ce = ioremap_cache(old_ce_phys, PAGE_SIZE);
2849 if (!old_ce)
2850 goto out;
2851
2852 new_ce = alloc_pgtable_page(iommu->node);
2853 if (!new_ce)
2854 goto out_unmap;
2855
2856 ret = 0;
2857 }
2858
2859 /* Now copy the context entry */
2860 ce = old_ce[idx];
2861
2862 if (!context_present(&ce))
2863 continue;
2864
Joerg Roedeldbcd8612015-06-12 12:02:09 +02002865 did = context_domain_id(&ce);
2866 if (did >= 0 && did < cap_ndoms(iommu->cap))
2867 set_bit(did, iommu->domain_ids);
2868
Joerg Roedel091d42e2015-06-12 11:56:10 +02002869 new_ce[idx] = ce;
2870 }
2871
2872 tbl[tbl_idx + pos] = new_ce;
2873
2874 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
2875
2876out_unmap:
2877 iounmap(old_ce);
2878
2879out:
2880 return ret;
2881}
2882
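/*
 * Copy the root and context tables installed by the previous kernel.  The
 * old root table address is read back from DMAR_RTADDR_REG, the per-bus
 * context tables are duplicated via copy_context_table(), and the copies
 * are then hooked into this kernel's root_entry table under iommu->lock.
 * Called when an IOMMU is found with translation already enabled.
 */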
2883static int copy_translation_tables(struct intel_iommu *iommu)
2884{
2885 struct context_entry **ctxt_tbls;
2886 struct root_entry *old_rt;
2887 phys_addr_t old_rt_phys;
2888 int ctxt_table_entries;
2889 unsigned long flags;
2890 u64 rtaddr_reg;
2891 int bus, ret;
2892 bool ext;
2893
2894 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
2895 ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
2896
2897 old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
2898 if (!old_rt_phys)
2899 return -EINVAL;
2900
2901 old_rt = ioremap_cache(old_rt_phys, PAGE_SIZE);
2902 if (!old_rt)
2903 return -ENOMEM;
2904
2905 /* This is too big for the stack - allocate it from slab */
2906 ctxt_table_entries = ext ? 512 : 256;
2907 ret = -ENOMEM;
2908 ctxt_tbls = kzalloc(ctxt_table_entries * sizeof(void *), GFP_KERNEL);
2909 if (!ctxt_tbls)
2910 goto out_unmap;
2911
2912 for (bus = 0; bus < 256; bus++) {
2913 ret = copy_context_table(iommu, &old_rt[bus],
2914 ctxt_tbls, bus, ext);
2915 if (ret) {
2916 pr_err("%s: Failed to copy context table for bus %d\n",
2917 iommu->name, bus);
2918 continue;
2919 }
2920 }
2921
2922 spin_lock_irqsave(&iommu->lock, flags);
2923
2924 /* Context tables are copied, now write them to the root_entry table */
2925 for (bus = 0; bus < 256; bus++) {
2926 int idx = ext ? bus * 2 : bus;
2927 u64 val;
2928
2929 if (ctxt_tbls[idx]) {
2930 val = virt_to_phys(ctxt_tbls[idx]) | 1;
2931 iommu->root_entry[bus].lo = val;
2932 }
2933
2934 if (!ext || !ctxt_tbls[idx + 1])
2935 continue;
2936
2937 val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
2938 iommu->root_entry[bus].hi = val;
2939 }
2940
2941 spin_unlock_irqrestore(&iommu->lock, flags);
2942
2943 kfree(ctxt_tbls);
2944
2945 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
2946
2947 ret = 0;
2948
2949out_unmap:
2950 iounmap(old_rt);
2951
2952 return ret;
2953}
2954
Joseph Cihulab7792602011-05-03 00:08:37 -07002955static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002956{
2957 struct dmar_drhd_unit *drhd;
2958 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002959 struct device *dev;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002960 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07002961 int i, ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002962
2963 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002964 * for each drhd
2965 * allocate root
2966 * initialize and program root entry to not present
2967 * endfor
2968 */
2969 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08002970 /*
2971 * lock not needed as this is only incremented in the single
2972 * threaded kernel __init code path all other access are read
2973 * only
2974 */
Jiang Liu78d8e702014-11-09 22:47:57 +08002975 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
Mike Travis1b198bb2012-03-05 15:05:16 -08002976 g_num_of_iommus++;
2977 continue;
2978 }
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002979 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
mark gross5e0d2a62008-03-04 15:22:08 -08002980 }
2981
Jiang Liuffebeb42014-11-09 22:48:02 +08002982 /* Preallocate enough resources for IOMMU hot-addition */
2983 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
2984 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
2985
Weidong Hand9630fe2008-12-08 11:06:32 +08002986 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2987 GFP_KERNEL);
2988 if (!g_iommus) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002989 pr_err("Allocating global iommu array failed\n");
Weidong Hand9630fe2008-12-08 11:06:32 +08002990 ret = -ENOMEM;
2991 goto error;
2992 }
2993
mark gross80b20dd2008-04-18 13:53:58 -07002994 deferred_flush = kzalloc(g_num_of_iommus *
2995 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2996 if (!deferred_flush) {
mark gross5e0d2a62008-03-04 15:22:08 -08002997 ret = -ENOMEM;
Jiang Liu989d51f2014-02-19 14:07:21 +08002998 goto free_g_iommus;
mark gross5e0d2a62008-03-04 15:22:08 -08002999 }
3000
Jiang Liu7c919772014-01-06 14:18:18 +08003001 for_each_active_iommu(iommu, drhd) {
Weidong Hand9630fe2008-12-08 11:06:32 +08003002 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003003
Joerg Roedelb63d80d2015-06-12 09:14:34 +02003004 intel_iommu_init_qi(iommu);
3005
Suresh Siddhae61d98d2008-07-10 11:16:35 -07003006 ret = iommu_init_domains(iommu);
3007 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08003008 goto free_iommu;
Suresh Siddhae61d98d2008-07-10 11:16:35 -07003009
Joerg Roedel4158c2e2015-06-12 10:14:02 +02003010 init_translation_status(iommu);
3011
Joerg Roedel091d42e2015-06-12 11:56:10 +02003012 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
3013 iommu_disable_translation(iommu);
3014 clear_translation_pre_enabled(iommu);
3015 pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
3016 iommu->name);
3017 }
Joerg Roedel4158c2e2015-06-12 10:14:02 +02003018
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003019 /*
3020 * TBD:
3021 * we could share the same root & context tables
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003022		 * among all IOMMUs. Need to split it later.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003023 */
3024 ret = iommu_alloc_root_entry(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08003025 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08003026 goto free_iommu;
Joerg Roedel5f0a7f72015-06-12 09:18:53 +02003027
Joerg Roedel091d42e2015-06-12 11:56:10 +02003028 if (translation_pre_enabled(iommu)) {
3029 pr_info("Translation already enabled - trying to copy translation structures\n");
3030
3031 ret = copy_translation_tables(iommu);
3032 if (ret) {
3033 /*
3034 * We found the IOMMU with translation
3035 * enabled - but failed to copy over the
3036 * old root-entry table. Try to proceed
3037 * by disabling translation now and
3038 * allocating a clean root-entry table.
3039 * This might cause DMAR faults, but
3040 * probably the dump will still succeed.
3041 */
3042 pr_err("Failed to copy translation tables from previous kernel for %s\n",
3043 iommu->name);
3044 iommu_disable_translation(iommu);
3045 clear_translation_pre_enabled(iommu);
3046 } else {
3047 pr_info("Copied translation tables from previous kernel for %s\n",
3048 iommu->name);
3049 }
3050 }
3051
Joerg Roedel5f0a7f72015-06-12 09:18:53 +02003052 iommu_flush_write_buffer(iommu);
3053 iommu_set_root_entry(iommu);
3054 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3055 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3056
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003057 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01003058 hw_pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003059 }
3060
David Woodhouse19943b02009-08-04 16:19:20 +01003061 if (iommu_pass_through)
David Woodhousee0fc7e02009-09-30 09:12:17 -07003062 iommu_identity_mapping |= IDENTMAP_ALL;
3063
Suresh Siddhad3f13812011-08-23 17:05:25 -07003064#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
David Woodhousee0fc7e02009-09-30 09:12:17 -07003065 iommu_identity_mapping |= IDENTMAP_GFX;
David Woodhouse19943b02009-08-04 16:19:20 +01003066#endif
David Woodhousee0fc7e02009-09-30 09:12:17 -07003067
3068 check_tylersburg_isoch();
3069
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003070 /*
3071 * If pass through is not set or not enabled, setup context entries for
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003072 * identity mappings for rmrr, gfx, and isa and may fall back to static
3073 * identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003074 */
David Woodhouse19943b02009-08-04 16:19:20 +01003075 if (iommu_identity_mapping) {
3076 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
3077 if (ret) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003078 pr_crit("Failed to setup IOMMU pass-through\n");
Jiang Liu989d51f2014-02-19 14:07:21 +08003079 goto free_iommu;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003080 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003081 }
David Woodhouse19943b02009-08-04 16:19:20 +01003082 /*
3083 * For each rmrr
3084 * for each dev attached to rmrr
3085 * do
3086 * locate drhd for dev, alloc domain for dev
3087 * allocate free domain
3088 * allocate page table entries for rmrr
3089 * if context not allocated for bus
3090 * allocate and init context
3091 * set present in root table for this bus
3092 * init context with domain, translation etc
3093 * endfor
3094 * endfor
3095 */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003096 pr_info("Setting RMRR:\n");
David Woodhouse19943b02009-08-04 16:19:20 +01003097 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08003098 /* some BIOS lists non-exist devices in DMAR table. */
3099 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
David Woodhouse832bd852014-03-07 15:08:36 +00003100 i, dev) {
David Woodhouse0b9d9752014-03-09 15:48:15 -07003101 ret = iommu_prepare_rmrr_dev(rmrr, dev);
David Woodhouse19943b02009-08-04 16:19:20 +01003102 if (ret)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003103 pr_err("Mapping reserved region failed\n");
David Woodhouse19943b02009-08-04 16:19:20 +01003104 }
3105 }
3106
3107 iommu_prepare_isa();
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07003108
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003109 /*
3110 * for each drhd
3111 * enable fault log
3112 * global invalidate context cache
3113 * global invalidate iotlb
3114 * enable translation
3115 */
Jiang Liu7c919772014-01-06 14:18:18 +08003116 for_each_iommu(iommu, drhd) {
Joseph Cihula51a63e62011-03-21 11:04:24 -07003117 if (drhd->ignored) {
3118 /*
3119 * we always have to disable PMRs or DMA may fail on
3120 * this device
3121 */
3122 if (force_on)
Jiang Liu7c919772014-01-06 14:18:18 +08003123 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003124 continue;
Joseph Cihula51a63e62011-03-21 11:04:24 -07003125 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003126
3127 iommu_flush_write_buffer(iommu);
3128
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07003129 ret = dmar_set_interrupt(iommu);
3130 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08003131 goto free_iommu;
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07003132
Jiang Liu2a41cce2014-07-11 14:19:33 +08003133 iommu_enable_translation(iommu);
David Woodhouseb94996c2009-09-19 15:28:12 -07003134 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003135 }
3136
3137 return 0;
Jiang Liu989d51f2014-02-19 14:07:21 +08003138
3139free_iommu:
Jiang Liuffebeb42014-11-09 22:48:02 +08003140 for_each_active_iommu(iommu, drhd) {
3141 disable_dmar_iommu(iommu);
Jiang Liua868e6b2014-01-06 14:18:20 +08003142 free_dmar_iommu(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08003143 }
Jiang Liu9bdc5312014-01-06 14:18:27 +08003144 kfree(deferred_flush);
Jiang Liu989d51f2014-02-19 14:07:21 +08003145free_g_iommus:
Weidong Hand9630fe2008-12-08 11:06:32 +08003146 kfree(g_iommus);
Jiang Liu989d51f2014-02-19 14:07:21 +08003147error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003148 return ret;
3149}
3150
David Woodhouse5a5e02a2009-07-04 09:35:44 +01003151/* This takes a number of _MM_ pages, not VTD pages */
David Woodhouse875764d2009-06-28 21:20:51 +01003152static struct iova *intel_alloc_iova(struct device *dev,
3153 struct dmar_domain *domain,
3154 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003155{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003156 struct iova *iova = NULL;
3157
David Woodhouse875764d2009-06-28 21:20:51 +01003158 /* Restrict dma_mask to the width that the iommu can handle */
3159 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
3160
3161 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003162 /*
3163 * First try to allocate an io virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07003164 * DMA_BIT_MASK(32) and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08003165 * from higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003166 */
David Woodhouse875764d2009-06-28 21:20:51 +01003167 iova = alloc_iova(&domain->iovad, nrpages,
3168 IOVA_PFN(DMA_BIT_MASK(32)), 1);
3169 if (iova)
3170 return iova;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003171 }
David Woodhouse875764d2009-06-28 21:20:51 +01003172 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
3173 if (unlikely(!iova)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003174		pr_err("Allocating %ld-page iova for %s failed\n",
David Woodhouse207e3592014-03-09 16:12:32 -07003175 nrpages, dev_name(dev));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003176 return NULL;
3177 }
3178
3179 return iova;
3180}
3181
David Woodhoused4b709f2014-03-09 16:07:40 -07003182static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003183{
3184 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003185 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003186
David Woodhoused4b709f2014-03-09 16:07:40 -07003187 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003188 if (!domain) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003189 pr_err("Allocating domain for %s failed\n",
David Woodhoused4b709f2014-03-09 16:07:40 -07003190 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00003191 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003192 }
3193
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003194 /* make sure context mapping is ok */
David Woodhoused4b709f2014-03-09 16:07:40 -07003195 if (unlikely(!domain_context_mapped(dev))) {
3196 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003197 if (ret) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003198 pr_err("Domain context map for %s failed\n",
David Woodhoused4b709f2014-03-09 16:07:40 -07003199 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00003200 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003201 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003202 }
3203
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003204 return domain;
3205}
3206
David Woodhoused4b709f2014-03-09 16:07:40 -07003207static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
David Woodhouse147202a2009-07-07 19:43:20 +01003208{
3209 struct device_domain_info *info;
3210
3211 /* No lock here, assumes no domain exit in normal case */
David Woodhoused4b709f2014-03-09 16:07:40 -07003212 info = dev->archdata.iommu;
David Woodhouse147202a2009-07-07 19:43:20 +01003213 if (likely(info))
3214 return info->domain;
3215
3216 return __get_valid_domain_for_dev(dev);
3217}
3218
David Woodhouseecb509e2014-03-09 16:29:55 -07003219/* Check if the dev needs to go through the non-identity map and unmap process. */
David Woodhouse73676832009-07-04 14:08:36 +01003220static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003221{
3222 int found;
3223
David Woodhouse3d891942014-03-06 15:59:26 +00003224 if (iommu_dummy(dev))
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003225 return 1;
3226
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003227 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003228 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003229
David Woodhouse9b226622014-03-09 14:03:28 -07003230 found = identity_mapping(dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003231 if (found) {
David Woodhouseecb509e2014-03-09 16:29:55 -07003232 if (iommu_should_identity_map(dev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003233 return 1;
3234 else {
3235 /*
3236 * 32 bit DMA is removed from si_domain and fall back
3237 * to non-identity mapping.
3238 */
David Woodhousebf9c9ed2014-03-09 16:19:13 -07003239 domain_remove_one_dev_info(si_domain, dev);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003240 pr_info("32bit %s uses non-identity mapping\n",
3241 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003242 return 0;
3243 }
3244 } else {
3245 /*
3246 * In case of a detached 64 bit DMA device from vm, the device
3247 * is put into si_domain for identity mapping.
3248 */
David Woodhouseecb509e2014-03-09 16:29:55 -07003249 if (iommu_should_identity_map(dev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003250 int ret;
David Woodhouse5913c9b2014-03-09 16:27:31 -07003251 ret = domain_add_dev_info(si_domain, dev,
David Woodhouse5fe60f42009-08-09 10:53:41 +01003252 hw_pass_through ?
3253 CONTEXT_TT_PASS_THROUGH :
3254 CONTEXT_TT_MULTI_LEVEL);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003255 if (!ret) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003256 pr_info("64bit %s uses identity mapping\n",
3257 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003258 return 1;
3259 }
3260 }
3261 }
3262
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003263 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003264}
3265
David Woodhouse5040a912014-03-09 16:14:00 -07003266static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003267 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003268{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003269 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003270 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003271 struct iova *iova;
3272 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003273 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08003274 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07003275 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003276
3277 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003278
David Woodhouse5040a912014-03-09 16:14:00 -07003279 if (iommu_no_mapping(dev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003280 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003281
David Woodhouse5040a912014-03-09 16:14:00 -07003282 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003283 if (!domain)
3284 return 0;
3285
Weidong Han8c11e792008-12-08 15:29:22 +08003286 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01003287 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003288
David Woodhouse5040a912014-03-09 16:14:00 -07003289 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003290 if (!iova)
3291 goto error;
3292
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003293 /*
3294 * Check if DMAR supports zero-length reads on write only
3295 * mappings..
3296 */
3297 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003298 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003299 prot |= DMA_PTE_READ;
3300 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3301 prot |= DMA_PTE_WRITE;
3302 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003303 * paddr - (paddr + size) might be partial page, we should map the whole
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003304 * page. Note: if two part of one page are separately mapped, we
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003305 * might have two guest_addr mapping to the same host paddr, but this
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003306 * is not a big problem
3307 */
David Woodhouse0ab36de2009-06-28 14:01:43 +01003308 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
Fenghua Yu33041ec2009-08-04 15:10:59 -07003309 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003310 if (ret)
3311 goto error;
3312
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003313 /* it's a non-present to present mapping. Only flush if caching mode */
3314 if (cap_caching_mode(iommu->cap))
David Woodhouseea8ea462014-03-05 17:09:32 +00003315 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003316 else
Weidong Han8c11e792008-12-08 15:29:22 +08003317 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003318
David Woodhouse03d6a242009-06-28 15:33:46 +01003319 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
3320 start_paddr += paddr & ~PAGE_MASK;
3321 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003322
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003323error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003324 if (iova)
3325 __free_iova(&domain->iovad, iova);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003326 pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
David Woodhouse5040a912014-03-09 16:14:00 -07003327 dev_name(dev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003328 return 0;
3329}
3330
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003331static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3332 unsigned long offset, size_t size,
3333 enum dma_data_direction dir,
3334 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003335{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003336 return __intel_map_single(dev, page_to_phys(page) + offset, size,
David Woodhouse46333e32014-03-10 20:01:21 -07003337 dir, *dev->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003338}
3339
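/*
 * Deferred unmap handling: instead of flushing the IOTLB on every unmap,
 * intel_unmap() queues freed IOVAs (and their page-table freelists) per
 * IOMMU via add_unmap().  flush_unmaps() drains all queues, using a single
 * global IOTLB flush per IOMMU (or per-entry PSI flushes in caching mode),
 * and is triggered either by a 10ms timer or when the queue reaches
 * HIGH_WATER_MARK entries.
 */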
mark gross5e0d2a62008-03-04 15:22:08 -08003340static void flush_unmaps(void)
3341{
mark gross80b20dd2008-04-18 13:53:58 -07003342 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08003343
mark gross5e0d2a62008-03-04 15:22:08 -08003344 timer_on = 0;
3345
3346 /* just flush them all */
3347 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08003348 struct intel_iommu *iommu = g_iommus[i];
3349 if (!iommu)
3350 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003351
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003352 if (!deferred_flush[i].next)
3353 continue;
3354
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003355 /* In caching mode, global flushes turn emulation expensive */
3356 if (!cap_caching_mode(iommu->cap))
3357 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08003358 DMA_TLB_GLOBAL_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003359 for (j = 0; j < deferred_flush[i].next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08003360 unsigned long mask;
3361 struct iova *iova = deferred_flush[i].iova[j];
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003362 struct dmar_domain *domain = deferred_flush[i].domain[j];
Yu Zhao93a23a72009-05-18 13:51:37 +08003363
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003364 /* On real hardware multiple invalidations are expensive */
3365 if (cap_caching_mode(iommu->cap))
3366 iommu_flush_iotlb_psi(iommu, domain->id,
Jiang Liua156ef92014-07-11 14:19:36 +08003367 iova->pfn_lo, iova_size(iova),
David Woodhouseea8ea462014-03-05 17:09:32 +00003368 !deferred_flush[i].freelist[j], 0);
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003369 else {
Jiang Liua156ef92014-07-11 14:19:36 +08003370 mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003371 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
3372 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
3373 }
Yu Zhao93a23a72009-05-18 13:51:37 +08003374 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003375 if (deferred_flush[i].freelist[j])
3376 dma_free_pagelist(deferred_flush[i].freelist[j]);
mark gross80b20dd2008-04-18 13:53:58 -07003377 }
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003378 deferred_flush[i].next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003379 }
3380
mark gross5e0d2a62008-03-04 15:22:08 -08003381 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003382}
3383
3384static void flush_unmaps_timeout(unsigned long data)
3385{
mark gross80b20dd2008-04-18 13:53:58 -07003386 unsigned long flags;
3387
3388 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003389 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07003390 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003391}
3392
David Woodhouseea8ea462014-03-05 17:09:32 +00003393static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
mark gross5e0d2a62008-03-04 15:22:08 -08003394{
3395 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07003396 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08003397 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08003398
3399 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07003400 if (list_size == HIGH_WATER_MARK)
3401 flush_unmaps();
3402
Weidong Han8c11e792008-12-08 15:29:22 +08003403 iommu = domain_get_iommu(dom);
3404 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003405
mark gross80b20dd2008-04-18 13:53:58 -07003406 next = deferred_flush[iommu_id].next;
3407 deferred_flush[iommu_id].domain[next] = dom;
3408 deferred_flush[iommu_id].iova[next] = iova;
David Woodhouseea8ea462014-03-05 17:09:32 +00003409 deferred_flush[iommu_id].freelist[next] = freelist;
mark gross80b20dd2008-04-18 13:53:58 -07003410 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08003411
3412 if (!timer_on) {
3413 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
3414 timer_on = 1;
3415 }
3416 list_size++;
3417 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3418}
3419
Jiang Liud41a4ad2014-07-11 14:19:34 +08003420static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003421{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003422 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01003423 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003424 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08003425 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00003426 struct page *freelist;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003427
David Woodhouse73676832009-07-04 14:08:36 +01003428 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003429 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003430
David Woodhouse1525a292014-03-06 16:19:30 +00003431 domain = find_domain(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003432 BUG_ON(!domain);
3433
Weidong Han8c11e792008-12-08 15:29:22 +08003434 iommu = domain_get_iommu(domain);
3435
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003436 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
David Woodhouse85b98272009-07-01 19:27:53 +01003437 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
3438 (unsigned long long)dev_addr))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003439 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003440
David Woodhoused794dc92009-06-28 00:27:49 +01003441 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3442 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003443
David Woodhoused794dc92009-06-28 00:27:49 +01003444 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
David Woodhouse207e3592014-03-09 16:12:32 -07003445 dev_name(dev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003446
David Woodhouseea8ea462014-03-05 17:09:32 +00003447 freelist = domain_unmap(domain, start_pfn, last_pfn);
David Woodhoused794dc92009-06-28 00:27:49 +01003448
mark gross5e0d2a62008-03-04 15:22:08 -08003449 if (intel_iommu_strict) {
David Woodhouse03d6a242009-06-28 15:33:46 +01003450 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
David Woodhouseea8ea462014-03-05 17:09:32 +00003451 last_pfn - start_pfn + 1, !freelist, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08003452 /* free iova */
3453 __free_iova(&domain->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003454 dma_free_pagelist(freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003455 } else {
David Woodhouseea8ea462014-03-05 17:09:32 +00003456 add_unmap(domain, iova, freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003457 /*
3459	3458		 * queue up the release of the unmap to save the roughly 1/6th of
3460	3459		 * CPU time otherwise spent in the iotlb flush operation...
3460 */
mark gross5e0d2a62008-03-04 15:22:08 -08003461 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003462}
3463
Jiang Liud41a4ad2014-07-11 14:19:34 +08003464static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3465 size_t size, enum dma_data_direction dir,
3466 struct dma_attrs *attrs)
3467{
3468 intel_unmap(dev, dev_addr);
3469}
3470
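/*
 * Coherent allocations first try the contiguous (CMA) allocator when the
 * caller may sleep, falling back to alloc_pages(), and then map the buffer
 * through __intel_map_single() against the device's coherent DMA mask.
 * A CMA page that ends above the coherent mask of an identity-mapped
 * device is released and the allocation falls back to alloc_pages().
 */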
David Woodhouse5040a912014-03-09 16:14:00 -07003471static void *intel_alloc_coherent(struct device *dev, size_t size,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003472 dma_addr_t *dma_handle, gfp_t flags,
3473 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003474{
Akinobu Mita36746432014-06-04 16:06:51 -07003475 struct page *page = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003476 int order;
3477
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003478 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003479 order = get_order(size);
Alex Williamsone8bb9102009-11-04 15:59:34 -07003480
David Woodhouse5040a912014-03-09 16:14:00 -07003481 if (!iommu_no_mapping(dev))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003482 flags &= ~(GFP_DMA | GFP_DMA32);
David Woodhouse5040a912014-03-09 16:14:00 -07003483 else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3484 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003485 flags |= GFP_DMA;
3486 else
3487 flags |= GFP_DMA32;
3488 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003489
Akinobu Mita36746432014-06-04 16:06:51 -07003490 if (flags & __GFP_WAIT) {
3491 unsigned int count = size >> PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003492
Akinobu Mita36746432014-06-04 16:06:51 -07003493 page = dma_alloc_from_contiguous(dev, count, order);
3494 if (page && iommu_no_mapping(dev) &&
3495 page_to_phys(page) + size > dev->coherent_dma_mask) {
3496 dma_release_from_contiguous(dev, page, count);
3497 page = NULL;
3498 }
3499 }
3500
3501 if (!page)
3502 page = alloc_pages(flags, order);
3503 if (!page)
3504 return NULL;
3505 memset(page_address(page), 0, size);
3506
3507 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003508 DMA_BIDIRECTIONAL,
David Woodhouse5040a912014-03-09 16:14:00 -07003509 dev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003510 if (*dma_handle)
Akinobu Mita36746432014-06-04 16:06:51 -07003511 return page_address(page);
3512 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3513 __free_pages(page, order);
3514
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003515 return NULL;
3516}
3517
David Woodhouse5040a912014-03-09 16:14:00 -07003518static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003519 dma_addr_t dma_handle, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003520{
3521 int order;
Akinobu Mita36746432014-06-04 16:06:51 -07003522 struct page *page = virt_to_page(vaddr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003523
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003524 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003525 order = get_order(size);
3526
Jiang Liud41a4ad2014-07-11 14:19:34 +08003527 intel_unmap(dev, dma_handle);
Akinobu Mita36746432014-06-04 16:06:51 -07003528 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3529 __free_pages(page, order);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003530}
3531
David Woodhouse5040a912014-03-09 16:14:00 -07003532static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003533 int nelems, enum dma_data_direction dir,
3534 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003535{
Jiang Liud41a4ad2014-07-11 14:19:34 +08003536 intel_unmap(dev, sglist[0].dma_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003537}
3538
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003539static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003540 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003541{
3542 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003543 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003544
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003545 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003546 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00003547 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003548 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003549 }
3550 return nelems;
3551}
3552
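/*
 * Map a scatterlist with a single IOVA allocation: the aligned page counts
 * of all entries are summed, one IOVA range of that size is allocated, and
 * domain_sg_mapping() fills the page tables for the whole list in one pass.
 */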
David Woodhouse5040a912014-03-09 16:14:00 -07003553static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003554 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003555{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003556 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003557 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003558 size_t size = 0;
3559 int prot = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003560 struct iova *iova = NULL;
3561 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003562 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01003563 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003564 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003565
3566 BUG_ON(dir == DMA_NONE);
David Woodhouse5040a912014-03-09 16:14:00 -07003567 if (iommu_no_mapping(dev))
3568 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003569
David Woodhouse5040a912014-03-09 16:14:00 -07003570 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003571 if (!domain)
3572 return 0;
3573
Weidong Han8c11e792008-12-08 15:29:22 +08003574 iommu = domain_get_iommu(domain);
3575
David Woodhouseb536d242009-06-28 14:49:31 +01003576 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01003577 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003578
David Woodhouse5040a912014-03-09 16:14:00 -07003579 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3580 *dev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003581 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003582 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003583 return 0;
3584 }
3585
3586 /*
3587 * Check if DMAR supports zero-length reads on write only
3588 * mappings..
3589 */
3590 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003591 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003592 prot |= DMA_PTE_READ;
3593 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3594 prot |= DMA_PTE_WRITE;
3595
David Woodhouseb536d242009-06-28 14:49:31 +01003596 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
David Woodhousee1605492009-06-29 11:17:38 +01003597
Fenghua Yuf5329592009-08-04 15:09:37 -07003598 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01003599 if (unlikely(ret)) {
David Woodhousee1605492009-06-29 11:17:38 +01003600 dma_pte_free_pagetable(domain, start_vpfn,
3601 start_vpfn + size - 1);
David Woodhousee1605492009-06-29 11:17:38 +01003602 __free_iova(&domain->iovad, iova);
3603 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003604 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003605
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003606 /* it's a non-present to present mapping. Only flush if caching mode */
3607 if (cap_caching_mode(iommu->cap))
David Woodhouseea8ea462014-03-05 17:09:32 +00003608 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003609 else
Weidong Han8c11e792008-12-08 15:29:22 +08003610 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003611
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003612 return nelems;
3613}
3614
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003615static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3616{
3617 return !dma_addr;
3618}
3619
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09003620struct dma_map_ops intel_dma_ops = {
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003621 .alloc = intel_alloc_coherent,
3622 .free = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003623 .map_sg = intel_map_sg,
3624 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003625 .map_page = intel_map_page,
3626 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003627 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003628};
3629
3630static inline int iommu_domain_cache_init(void)
3631{
3632 int ret = 0;
3633
3634 iommu_domain_cache = kmem_cache_create("iommu_domain",
3635 sizeof(struct dmar_domain),
3636 0,
3637 SLAB_HWCACHE_ALIGN,
3639 NULL);
3640 if (!iommu_domain_cache) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003641 pr_err("Couldn't create iommu_domain cache\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003642 ret = -ENOMEM;
3643 }
3644
3645 return ret;
3646}
3647
3648static inline int iommu_devinfo_cache_init(void)
3649{
3650 int ret = 0;
3651
3652 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3653 sizeof(struct device_domain_info),
3654 0,
3655 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003656 NULL);
3657 if (!iommu_devinfo_cache) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003658 pr_err("Couldn't create devinfo cache\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003659 ret = -ENOMEM;
3660 }
3661
3662 return ret;
3663}
3664
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003665static int __init iommu_init_mempool(void)
3666{
3667 int ret;
3668 ret = iommu_iova_cache_init();
3669 if (ret)
3670 return ret;
3671
3672 ret = iommu_domain_cache_init();
3673 if (ret)
3674 goto domain_error;
3675
3676 ret = iommu_devinfo_cache_init();
3677 if (!ret)
3678 return ret;
3679
3680 kmem_cache_destroy(iommu_domain_cache);
3681domain_error:
Robin Murphy85b45452015-01-12 17:51:14 +00003682 iommu_iova_cache_destroy();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003683
3684 return -ENOMEM;
3685}
3686
3687static void __init iommu_exit_mempool(void)
3688{
3689 kmem_cache_destroy(iommu_devinfo_cache);
3690 kmem_cache_destroy(iommu_domain_cache);
Robin Murphy85b45452015-01-12 17:51:14 +00003691 iommu_iova_cache_destroy();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003692}
3693
Dan Williams556ab452010-07-23 15:47:56 -07003694static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3695{
3696 struct dmar_drhd_unit *drhd;
3697 u32 vtbar;
3698 int rc;
3699
3700 /* We know that this device on this chipset has its own IOMMU.
3701 * If we find it under a different IOMMU, then the BIOS is lying
3702 * to us. Hope that the IOMMU for this device is actually
3703 * disabled, and it needs no translation...
3704 */
3705 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3706 if (rc) {
3707 /* "can't" happen */
3708 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3709 return;
3710 }
3711 vtbar &= 0xffff0000;
3712
3714	3713	/* we know that this iommu should be at offset 0xa000 from vtbar */
3714 drhd = dmar_find_matched_drhd_unit(pdev);
3715 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3716 TAINT_FIRMWARE_WORKAROUND,
3717 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3718 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3719}
3720DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3721
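/*
 * Mark DRHD units that translate nothing useful as ignored: units whose
 * device scope is empty, and (unless dmar_map_gfx is set) units that cover
 * only graphics devices.
 */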
static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;
	struct device *dev;
	int i;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			for_each_active_dev_scope(drhd->devices,
						  drhd->devices_cnt, i, dev)
				break;
			/* ignore DMAR unit if no devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	for_each_active_drhd_unit(drhd) {
		if (drhd->include_all)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, dev)
			if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
				break;
		if (i < drhd->devices_cnt)
			continue;

		/* This IOMMU has *only* gfx devices. Either bypass it or
		   set the gfx_mapped flag, as appropriate */
		if (dmar_map_gfx) {
			intel_iommu_gfx_mapped = 1;
		} else {
			drhd->ignored = 1;
			for_each_active_dev_scope(drhd->devices,
						  drhd->devices_cnt, i, dev)
				dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
		}
	}
}

#ifdef CONFIG_SUSPEND
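/*
 * Called on resume: re-enable queued invalidation and translation on every
 * active IOMMU; ignored units only get their protected memory regions
 * disabled.
 */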
static int init_iommu_hw(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	for_each_active_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	for_each_iommu(iommu, drhd) {
		if (drhd->ignored) {
			/*
			 * we always have to disable PMRs or DMA may fail on
			 * this device
			 */
			if (force_on)
				iommu_disable_protect_mem_regions(iommu);
			continue;
		}

		iommu_flush_write_buffer(iommu);

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
		iommu_enable_translation(iommu);
		iommu_disable_protect_mem_regions(iommu);
	}

	return 0;
}

static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
	}
}

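/*
 * Save the fault-event registers (FECTL/FEDATA/FEADDR/FEUADDR) of every
 * active IOMMU and disable translation before the system suspends.
 */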
static int iommu_suspend(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	for_each_active_iommu(iommu, drhd) {
		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
					     GFP_ATOMIC);
		if (!iommu->iommu_state)
			goto nomem;
	}

	iommu_flush_all();

	for_each_active_iommu(iommu, drhd) {
		iommu_disable_translation(iommu);

		raw_spin_lock_irqsave(&iommu->register_lock, flag);

		iommu->iommu_state[SR_DMAR_FECTL_REG] =
			readl(iommu->reg + DMAR_FECTL_REG);
		iommu->iommu_state[SR_DMAR_FEDATA_REG] =
			readl(iommu->reg + DMAR_FEDATA_REG);
		iommu->iommu_state[SR_DMAR_FEADDR_REG] =
			readl(iommu->reg + DMAR_FEADDR_REG);
		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
			readl(iommu->reg + DMAR_FEUADDR_REG);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	}
	return 0;

nomem:
	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return -ENOMEM;
}

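/*
 * Counterpart of iommu_suspend(): re-initialize the hardware and restore
 * the saved fault-event registers on resume.
 */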
static void iommu_resume(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	if (init_iommu_hw()) {
		if (force_on)
			panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
		else
			WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
		return;
	}

	for_each_active_iommu(iommu, drhd) {

		raw_spin_lock_irqsave(&iommu->register_lock, flag);

		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
			iommu->reg + DMAR_FECTL_REG);
		writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
			iommu->reg + DMAR_FEDATA_REG);
		writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
			iommu->reg + DMAR_FEADDR_REG);
		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
			iommu->reg + DMAR_FEUADDR_REG);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	}

	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);
}

static struct syscore_ops iommu_syscore_ops = {
	.resume		= iommu_resume,
	.suspend	= iommu_suspend,
};

static void __init init_iommu_pm_ops(void)
{
	register_syscore_ops(&iommu_syscore_ops);
}

#else
static inline void init_iommu_pm_ops(void) {}
#endif /* CONFIG_SUSPEND */

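/*
 * Parse one ACPI RMRR (Reserved Memory Region Reporting) structure into a
 * dmar_rmrr_unit, including its device scope, and add it to the global list.
 */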
int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;
	rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
				((void *)rmrr) + rmrr->header.length,
				&rmrru->devices_cnt);
	if (rmrru->devices_cnt && rmrru->devices == NULL) {
		kfree(rmrru);
		return -ENOMEM;
	}

	list_add(&rmrru->list, &dmar_rmrr_units);

	return 0;
}

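/* Look up an already-registered ATSR unit matching the given ACPI entry. */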
static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
{
	struct dmar_atsr_unit *atsru;
	struct acpi_dmar_atsr *tmp;

	list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
		tmp = (struct acpi_dmar_atsr *)atsru->hdr;
		if (atsr->segment != tmp->segment)
			continue;
		if (atsr->header.length != tmp->header.length)
			continue;
		if (memcmp(atsr, tmp, atsr->header.length) == 0)
			return atsru;
	}

	return NULL;
}

int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
		return 0;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = dmar_find_atsr(atsr);
	if (atsru)
		return 0;

	atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	/*
	 * If memory is allocated from slab by ACPI _DSM method, we need to
	 * copy the memory content because the memory buffer will be freed
	 * on return.
	 */
	atsru->hdr = (void *)(atsru + 1);
	memcpy(atsru->hdr, hdr, hdr->length);
	atsru->include_all = atsr->flags & 0x1;
	if (!atsru->include_all) {
		atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
						(void *)atsr + atsr->header.length,
						&atsru->devices_cnt);
		if (atsru->devices_cnt && atsru->devices == NULL) {
			kfree(atsru);
			return -ENOMEM;
		}
	}

	list_add_rcu(&atsru->list, &dmar_atsr_units);

	return 0;
}

static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
{
	dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
	kfree(atsru);
}

int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = dmar_find_atsr(atsr);
	if (atsru) {
		list_del_rcu(&atsru->list);
		synchronize_rcu();
		intel_iommu_free_atsr(atsru);
	}

	return 0;
}

int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
	int i;
	struct device *dev;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = dmar_find_atsr(atsr);
	if (!atsru)
		return 0;

	if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
		for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
					  i, dev)
			return -EBUSY;

	return 0;
}

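/*
 * Bring up a hot-added DMAR unit: check its capabilities against the rest of
 * the system, allocate domains and a root entry, and enable translation (or
 * just disable PMRs if the unit is ignored).
 */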
static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
{
	int sp, ret = 0;
	struct intel_iommu *iommu = dmaru->iommu;

	if (g_iommus[iommu->seq_id])
		return 0;

	if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
		pr_warn("%s: Doesn't support hardware pass through.\n",
			iommu->name);
		return -ENXIO;
	}
	if (!ecap_sc_support(iommu->ecap) &&
	    domain_update_iommu_snooping(iommu)) {
		pr_warn("%s: Doesn't support snooping.\n",
			iommu->name);
		return -ENXIO;
	}
	sp = domain_update_iommu_superpage(iommu) - 1;
	if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
		pr_warn("%s: Doesn't support large page.\n",
			iommu->name);
		return -ENXIO;
	}

	/*
	 * Disable translation if already enabled prior to OS handover.
	 */
	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	g_iommus[iommu->seq_id] = iommu;
	ret = iommu_init_domains(iommu);
	if (ret == 0)
		ret = iommu_alloc_root_entry(iommu);
	if (ret)
		goto out;

	if (dmaru->ignored) {
		/*
		 * we always have to disable PMRs or DMA may fail on this device
		 */
		if (force_on)
			iommu_disable_protect_mem_regions(iommu);
		return 0;
	}

	intel_iommu_init_qi(iommu);
	iommu_flush_write_buffer(iommu);
	ret = dmar_set_interrupt(iommu);
	if (ret)
		goto disable_iommu;

	iommu_set_root_entry(iommu);
	iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
	iommu_enable_translation(iommu);

	if (si_domain) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret < 0 || si_domain->id != ret)
			goto disable_iommu;
		domain_attach_iommu(si_domain, iommu);
	}

	iommu_disable_protect_mem_regions(iommu);
	return 0;

disable_iommu:
	disable_dmar_iommu(iommu);
out:
	free_dmar_iommu(iommu);
	return ret;
}

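/*
 * DMAR unit hotplug entry point: add a new unit via intel_iommu_add(), or
 * tear it down on removal.
 */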
int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
	int ret = 0;
	struct intel_iommu *iommu = dmaru->iommu;

	if (!intel_iommu_enabled)
		return 0;
	if (iommu == NULL)
		return -EINVAL;

	if (insert) {
		ret = intel_iommu_add(dmaru);
	} else {
		disable_dmar_iommu(iommu);
		free_dmar_iommu(iommu);
	}

	return ret;
}

static void intel_iommu_free_dmars(void)
{
	struct dmar_rmrr_unit *rmrru, *rmrr_n;
	struct dmar_atsr_unit *atsru, *atsr_n;

	list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
		list_del(&rmrru->list);
		dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
		kfree(rmrru);
	}

	list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
		list_del(&atsru->list);
		intel_iommu_free_atsr(atsru);
	}
}

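/*
 * Return 1 if ATS may be used for @dev: walk up to the PCIe root port and
 * check whether it is listed in (or covered by an include_all) ATSR unit for
 * this segment; return 0 otherwise.
 */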
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i, ret = 1;
	struct pci_bus *bus;
	struct pci_dev *bridge = NULL;
	struct device *tmp;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	dev = pci_physfn(dev);
	for (bus = dev->bus; bus; bus = bus->parent) {
		bridge = bus->self;
		if (!bridge || !pci_is_pcie(bridge) ||
		    pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;
		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
			break;
	}
	if (!bridge)
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment != pci_domain_nr(dev->bus))
			continue;

		for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
			if (tmp == &bridge->dev)
				goto out;

		if (atsru->include_all)
			goto out;
	}
	ret = 0;
out:
	rcu_read_unlock();

	return ret;
}

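/*
 * Keep the cached RMRR and ATSR device scopes in sync when PCI devices are
 * hot-added to or removed from the bus.
 */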
int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
	int ret = 0;
	struct dmar_rmrr_unit *rmrru;
	struct dmar_atsr_unit *atsru;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_reserved_memory *rmrr;

	if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
		return 0;

	list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
		rmrr = container_of(rmrru->hdr,
				    struct acpi_dmar_reserved_memory, header);
		if (info->event == BUS_NOTIFY_ADD_DEVICE) {
			ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
				((void *)rmrr) + rmrr->header.length,
				rmrr->segment, rmrru->devices,
				rmrru->devices_cnt);
			if (ret < 0)
				return ret;
		} else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
			dmar_remove_dev_scope(info, rmrr->segment,
				rmrru->devices, rmrru->devices_cnt);
		}
	}

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		if (atsru->include_all)
			continue;

		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (info->event == BUS_NOTIFY_ADD_DEVICE) {
			ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
					(void *)atsr + atsr->header.length,
					atsr->segment, atsru->devices,
					atsru->devices_cnt);
			if (ret > 0)
				break;
			else if (ret < 0)
				return ret;
		} else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
			if (dmar_remove_dev_scope(info, atsr->segment,
					atsru->devices, atsru->devices_cnt))
				break;
		}
	}

	return 0;
}

/*
 * We only act here when a device is removed. A newly added device is not
 * attached to its DMAR domain yet; that happens when the device is first
 * mapped to an iova.
 */
static int device_notifier(struct notifier_block *nb,
			   unsigned long action, void *data)
{
	struct device *dev = data;
	struct dmar_domain *domain;

	if (iommu_dummy(dev))
		return 0;

	if (action != BUS_NOTIFY_REMOVED_DEVICE)
		return 0;

	domain = find_domain(dev);
	if (!domain)
		return 0;

	down_read(&dmar_global_lock);
	domain_remove_one_dev_info(domain, dev);
	if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
		domain_exit(domain);
	up_read(&dmar_global_lock);

	return 0;
}

static struct notifier_block device_nb = {
	.notifier_call = device_notifier,
};

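/*
 * Memory hotplug notifier: keep the static identity map (si_domain) in sync,
 * mapping newly onlined ranges and tearing down mappings for ranges that go
 * offline.
 */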
static int intel_iommu_memory_notifier(struct notifier_block *nb,
				       unsigned long val, void *v)
{
	struct memory_notify *mhp = v;
	unsigned long long start, end;
	unsigned long start_vpfn, last_vpfn;

	switch (val) {
	case MEM_GOING_ONLINE:
		start = mhp->start_pfn << PAGE_SHIFT;
		end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
		if (iommu_domain_identity_map(si_domain, start, end)) {
			pr_warn("Failed to build identity map for [%llx-%llx]\n",
				start, end);
			return NOTIFY_BAD;
		}
		break;

	case MEM_OFFLINE:
	case MEM_CANCEL_ONLINE:
		start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
		last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
		while (start_vpfn <= last_vpfn) {
			struct iova *iova;
			struct dmar_drhd_unit *drhd;
			struct intel_iommu *iommu;
			struct page *freelist;

			iova = find_iova(&si_domain->iovad, start_vpfn);
			if (iova == NULL) {
				pr_debug("Failed to get IOVA for PFN %lx\n",
					 start_vpfn);
				break;
			}

			iova = split_and_remove_iova(&si_domain->iovad, iova,
						     start_vpfn, last_vpfn);
			if (iova == NULL) {
				pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
					start_vpfn, last_vpfn);
				return NOTIFY_BAD;
			}

			freelist = domain_unmap(si_domain, iova->pfn_lo,
						iova->pfn_hi);

			rcu_read_lock();
			for_each_active_iommu(iommu, drhd)
				iommu_flush_iotlb_psi(iommu, si_domain->id,
					iova->pfn_lo, iova_size(iova),
					!freelist, 0);
			rcu_read_unlock();
			dma_free_pagelist(freelist);

			start_vpfn = iova->pfn_hi + 1;
			free_iova_mem(iova);
		}
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block intel_iommu_memory_nb = {
	.notifier_call = intel_iommu_memory_notifier,
	.priority = 0
};

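/* sysfs attributes exposing each IOMMU's version, register base, and (extended) capabilities */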
static ssize_t intel_iommu_show_version(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	u32 ver = readl(iommu->reg + DMAR_VER_REG);
	return sprintf(buf, "%d:%d\n",
		       DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
}
static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);

static ssize_t intel_iommu_show_address(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->reg_phys);
}
static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);

static ssize_t intel_iommu_show_cap(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->cap);
}
static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);

static ssize_t intel_iommu_show_ecap(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->ecap);
}
static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);

static struct attribute *intel_iommu_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_address.attr,
	&dev_attr_cap.attr,
	&dev_attr_ecap.attr,
	NULL,
};

static struct attribute_group intel_iommu_group = {
	.name = "intel-iommu",
	.attrs = intel_iommu_attrs,
};

const struct attribute_group *intel_iommu_groups[] = {
	&intel_iommu_group,
	NULL,
};

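/*
 * Main entry point: parse the DMAR table, initialize all IOMMUs, install the
 * DMA API ops, and register the bus, memory-hotplug, and sysfs hooks.
 */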
int __init intel_iommu_init(void)
{
	int ret = -ENODEV;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* VT-d is required for a TXT/tboot launch, so enforce that */
	force_on = tboot_force_iommu();

	if (iommu_init_mempool()) {
		if (force_on)
			panic("tboot: Failed to initialize iommu memory\n");
		return -ENOMEM;
	}

	down_write(&dmar_global_lock);
	if (dmar_table_init()) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR table\n");
		goto out_free_dmar;
	}

	/*
	 * Disable translation if already enabled prior to OS handover.
	 */
	for_each_active_iommu(iommu, drhd)
		if (iommu->gcmd & DMA_GCMD_TE)
			iommu_disable_translation(iommu);

	if (dmar_dev_scope_init() < 0) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR device scope\n");
		goto out_free_dmar;
	}

	if (no_iommu || dmar_disabled)
		goto out_free_dmar;

	if (list_empty(&dmar_rmrr_units))
		pr_info("No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		pr_info("No ATSR found\n");

	if (dmar_init_reserved_ranges()) {
		if (force_on)
			panic("tboot: Failed to reserve iommu ranges\n");
		goto out_free_reserved_range;
	}

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		if (force_on)
			panic("tboot: Failed to initialize DMARs\n");
		pr_err("Initialization failed\n");
		goto out_free_reserved_range;
	}
	up_write(&dmar_global_lock);
	pr_info("Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
#ifdef CONFIG_SWIOTLB
	swiotlb = 0;
#endif
	dma_ops = &intel_dma_ops;

	init_iommu_pm_ops();

	for_each_active_iommu(iommu, drhd)
		iommu->iommu_dev = iommu_device_create(NULL, iommu,
						       intel_iommu_groups,
						       iommu->name);

	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
	bus_register_notifier(&pci_bus_type, &device_nb);
	if (si_domain && !hw_pass_through)
		register_memory_notifier(&intel_iommu_memory_nb);

	intel_iommu_enabled = 1;

	return 0;

out_free_reserved_range:
	put_iova_domain(&reserved_iova_list);
out_free_dmar:
	intel_iommu_free_dmars();
	up_write(&dmar_global_lock);
	iommu_exit_mempool();
	return ret;
}

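/* Callback for pci_for_each_dma_alias(): detach one DMA alias from the IOMMU. */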
static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct intel_iommu *iommu = opaque;

	iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
	return 0;
}

/*
 * NB - intel-iommu lacks any sort of reference counting for the users of
 * dependent devices. If multiple endpoints have intersecting dependent
 * devices, unbinding the driver from any one of them will possibly leave
 * the others unable to operate.
 */
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct device *dev)
{
	if (!iommu || !dev || !dev_is_pci(dev))
		return;

	pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
}

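/*
 * Detach @dev from @domain, tear down its context entry and device IOTLB,
 * and drop the IOMMU from the domain when no other device still uses it.
 */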
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct device *dev)
{
	struct device_domain_info *info, *tmp;
	struct intel_iommu *iommu;
	unsigned long flags;
	bool found = false;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry_safe(info, tmp, &domain->devices, link) {
		if (info->iommu == iommu && info->bus == bus &&
		    info->devfn == devfn) {
			unlink_domain_info(info);
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_disable_dev_iotlb(info);
			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, dev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/* If no other device owned by this domain sits behind the
		 * same IOMMU, clear this IOMMU from iommu_bmp and update the
		 * IOMMU count and coherency.
		 */
		if (info->iommu == iommu)
			found = true;
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found) {
		domain_detach_iommu(domain, iommu);
		if (!domain_type_is_vm_or_si(domain))
			iommu_detach_domain(domain, iommu);
	}
}

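/*
 * Initialize a domain created through the IOMMU API (as opposed to one built
 * for the DMA API): set up its IOVA space, address width, and top-level page
 * table.
 */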
static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
			 DMA_32BIT_PFN);
	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	domain->iommu_coherency = 0;
	domain->iommu_snooping = 0;
	domain->iommu_superpage = 0;
	domain->max_addr = 0;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}

static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
{
	struct dmar_domain *dmar_domain;
	struct iommu_domain *domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
	if (!dmar_domain) {
		pr_err("Can't allocate dmar_domain\n");
		return NULL;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		pr_err("Domain initialization failed\n");
		domain_exit(dmar_domain);
		return NULL;
	}
	domain_update_iommu_cap(dmar_domain);

	domain = &dmar_domain->domain;
	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
	domain->geometry.force_aperture = true;

	return domain;
}

static void intel_iommu_domain_free(struct iommu_domain *domain)
{
	domain_exit(to_dmar_domain(domain));
}

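/*
 * Attach @dev to an IOMMU-API domain. If the device is currently mapped into
 * another domain (e.g. the DMA API one), detach it first, then trim the
 * domain's page-table depth if this IOMMU supports fewer levels.
 */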
static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct intel_iommu *iommu;
	int addr_width;
	u8 bus, devfn;

	if (device_is_rmrr_locked(dev)) {
		dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement.  Contact your platform vendor.\n");
		return -EPERM;
	}

	/* normally dev is not mapped */
	if (unlikely(domain_context_mapped(dev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(dev);
		if (old_domain) {
			if (domain_type_is_vm_or_si(dmar_domain))
				domain_remove_one_dev_info(old_domain, dev);
			else
				domain_remove_dev_info(old_domain);

			if (!domain_type_is_vm_or_si(old_domain) &&
			    list_empty(&old_domain->devices))
				domain_exit(old_domain);
		}
	}

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	if (addr_width > cap_mgaw(iommu->cap))
		addr_width = cap_mgaw(iommu->cap);

	if (dmar_domain->max_addr > (1LL << addr_width)) {
		pr_err("%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
		       __func__, addr_width, dmar_domain->max_addr);
		return -EFAULT;
	}
	dmar_domain->gaw = addr_width;

	/*
	 * Knock out extra levels of page tables if necessary
	 */
	while (iommu->agaw < dmar_domain->agaw) {
		struct dma_pte *pte;

		pte = dmar_domain->pgd;
		if (dma_pte_present(pte)) {
			dmar_domain->pgd = (struct dma_pte *)
				phys_to_virt(dma_pte_addr(pte));
			free_pgtable_page(pte);
		}
		dmar_domain->agaw--;
	}

	return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
}

static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	domain_remove_one_dev_info(to_dmar_domain(domain), dev);
}

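/*
 * IOMMU-API map callback: translate IOMMU_* protection flags into DMA PTE
 * bits, grow the domain's recorded max address if needed, and install the
 * mapping.
 */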
static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
			   size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	u64 max_addr;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
		if (end < max_addr) {
			pr_err("%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
			       __func__, dmar_domain->gaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}

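/*
 * IOMMU-API unmap callback: tear down the page-table range, flush the IOTLB
 * on every IOMMU that holds this domain, and free the page-table pages.
 */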
static size_t intel_iommu_unmap(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct page *freelist = NULL;
	struct intel_iommu *iommu;
	unsigned long start_pfn, last_pfn;
	unsigned int npages;
	int iommu_id, num, ndomains, level = 0;

	/* Cope with horrid API which requires us to unmap more than the
	   size argument if it happens to be a large-page mapping. */
	if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
		BUG();

	if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
		size = VTD_PAGE_SIZE << level_to_offset_bits(level);

	start_pfn = iova >> VTD_PAGE_SHIFT;
	last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;

	freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);

	npages = last_pfn - start_pfn + 1;

	for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
		iommu = g_iommus[iommu_id];

		/*
		 * find bit position of dmar_domain
		 */
		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(num, iommu->domain_ids, ndomains) {
			if (iommu->domains[num] == dmar_domain)
				iommu_flush_iotlb_psi(iommu, num, start_pfn,
						      npages, !freelist, 0);
		}

	}

	dma_free_pagelist(freelist);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;

	return size;
}

static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct dma_pte *pte;
	int level = 0;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

static bool intel_iommu_capable(enum iommu_cap cap)
{
	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return domain_update_iommu_snooping(NULL) == 1;
	if (cap == IOMMU_CAP_INTR_REMAP)
		return irq_remapping_enabled == 1;

	return false;
}

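/*
 * Bus hooks: link a newly added device to its IOMMU's sysfs node and place it
 * in an IOMMU group; undo both on removal.
 */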
static int intel_iommu_add_device(struct device *dev)
{
	struct intel_iommu *iommu;
	struct iommu_group *group;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	iommu_device_link(iommu->iommu_dev, dev);

	group = iommu_group_get_for_dev(dev);

	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void intel_iommu_remove_device(struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return;

	iommu_group_remove_device(dev);

	iommu_device_unlink(iommu->iommu_dev, dev);
}

static const struct iommu_ops intel_iommu_ops = {
	.capable	= intel_iommu_capable,
	.domain_alloc	= intel_iommu_domain_alloc,
	.domain_free	= intel_iommu_domain_free,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map,
	.unmap		= intel_iommu_unmap,
	.map_sg		= default_iommu_map_sg,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.add_device	= intel_iommu_add_device,
	.remove_device	= intel_iommu_remove_device,
	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
};

static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
{
	/* G4x/GM45 integrated gfx dmar support is totally busted. */
	pr_info("Disabling IOMMU for graphics on this chipset\n");
	dmar_map_gfx = 0;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);

static void quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it. Same seems to hold for the desktop versions.
	 */
	pr_info("Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);

#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)

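/*
 * Calpella/Ironlake integrated graphics: if the BIOS allocated no shadow GTT
 * space, graphics DMA cannot be remapped, so disable the graphics IOMMU;
 * otherwise force strict (unbatched) IOTLB flushing.
 */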
static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		pr_info("Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);

/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that. We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
		vtisochctrl);
}