/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"
#include "pci.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size was a power-of-two multiple of 4KiB and
 * that the mapping had natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are a power-of-two multiple of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}
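
/*
 * Worked example for the three helpers above (derived directly from the
 * formulas): agaw 0 is a 2-level table covering 30 bits of address; each
 * additional agaw step adds one level and LEVEL_STRIDE (9) bits, so agaw 1
 * gives 3 levels / 39 bits and agaw 2 gives 4 levels / 48 bits, capped at
 * MAX_AGAW_WIDTH.
 */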

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}
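
/*
 * Example values (a sketch): at level 1 (the leaf) the offset field starts
 * at bit 0 and level_size() is 1 page; at level 2 it starts at bit 9 and
 * level_size() is 512 pages (2MiB with 4KiB pages). pfn_level_offset()
 * extracts the 9-bit index into the page-table page at that level.
 */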

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
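
/*
 * Note (an assumption about typical configurations): with 4KiB MM pages,
 * PAGE_SHIFT == VTD_PAGE_SHIFT == 12 and the pfn conversions above are
 * identity operations; the shifts only do real work on kernels built
 * with larger MM page sizes.
 */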

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic kernel if can't successfully enable VT-d
 * (used when kernel is launched w/ TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root) ? phys_to_virt(
		root->val & VTD_PAGE_MASK) :
			NULL);
}
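
/*
 * Layout note (from the definitions above): the root table is one 4KiB
 * page of 16-byte entries, i.e. ROOT_ENTRY_NR == 256 entries, one per
 * PCI bus number; each present entry points to a context table indexed
 * by the device/function number.
 */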

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: avail
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}
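
/*
 * Usage sketch (an assumption about how the context-mapping code later in
 * this file drives the helpers above): set the domain id, address width
 * and page-table root, pick a translation type, and only then mark the
 * entry present, so the hardware never observes a half-built entry.
 */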

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & (1 << 7));
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}
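
/*
 * first_pte_in_page() relies on each page-table page holding exactly
 * VTD_PAGE_SIZE / sizeof(struct dma_pte) == 512 PTEs: a PTE pointer with
 * no offset bits below VTD_PAGE_MASK set is the first entry of its page,
 * which the range-walking loops below use to detect page boundaries.
 */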

/*
 * This domain is a static identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

/* define the limit of IOMMUs supported in each domain */
#ifdef CONFIG_X86
# define	IOMMU_UNITS_SUPPORTED	MAX_IO_APICS
#else
# define	IOMMU_UNITS_SUPPORTED	64
#endif

struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
					/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 256TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev;	/* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

struct dmar_rmrr_unit {
	struct list_head list;		/* list of rmrr units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	u64	base_address;		/* reserved base address */
	u64	end_address;		/* reserved end address */
	struct pci_dev **devices;	/* target devices */
	int	devices_cnt;		/* target device count */
};

struct dmar_atsr_unit {
	struct list_head list;		/* list of ATSR units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	struct pci_dev **devices;	/* target devices */
	int devices_cnt;		/* target device count */
	u8 include_all:1;		/* include all ports */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);

#define for_each_rmrr_units(rmrr) \
	list_for_each_entry(rmrr, &dmar_rmrr_units, list)
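
/*
 * Iteration sketch for the macro above (the pr_info() is illustrative
 * only):
 *
 *	struct dmar_rmrr_unit *rmrr;
 *
 *	for_each_rmrr_units(rmrr)
 *		pr_info("RMRR %llx-%llx\n",
 *			rmrr->base_address, rmrr->end_address);
 */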

static void flush_unmaps_timeout(unsigned long data);

static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable supported super page\n");
			intel_iommu_superpage = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
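
/*
 * Example (derived from the parser above): booting with
 * "intel_iommu=on,strict,sp_off" enables the IOMMU, disables batched
 * IOTLB flushing, and turns off superpage support; unknown tokens are
 * silently skipped.
 */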

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}
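
/*
 * Worked example (a sketch): if cap_sagaw() reports a bitmap with only
 * bit 2 set, the loop above walks down from width_to_agaw(max_gaw) and
 * settles on agaw 2, i.e. a 4-level, 48-bit table; -1 means the hardware
 * supports none of the widths we asked for.
 */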

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus, so use a default agaw and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	i = find_first_bit(domain->iommu_bmp, g_num_of_iommus);

	domain->iommu_coherency = i < g_num_of_iommus ? 1 : 0;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
	}
}

static void domain_update_iommu_superpage(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		domain->iommu_superpage = 0;
		return;
	}

	/* set iommu_superpage to the smallest common denominator */
	for_each_active_iommu(iommu, drhd) {
		mask &= cap_super_page_val(iommu->cap);
		if (!mask)
			break;
	}
	domain->iommu_superpage = fls(mask);
}
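
/*
 * Example (from the mask arithmetic above): if every active IOMMU reports
 * 2MiB and 1GiB superpage support, mask ends up 0x3 and fls() sets
 * iommu_superpage to 2 (1GiB); if any unit supports no superpages at all,
 * mask becomes 0 and only 4KiB mappings are used.
 */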

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
	domain_update_iommu_superpage(domain);
}

static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_active_drhd_unit(drhd) {
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->busn_res.end >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)
				alloc_pgtable_page(iommu->node);
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
				    sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry)
		goto out;

	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int target_level)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);

	if (addr_width < BITS_PER_LONG && pfn >> addr_width)
		/* Address beyond IOMMU's addressing capabilities. */
		return NULL;

	parent = domain->pgd;

	while (level > 0) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval)) {
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			} else {
				dma_pte_addr(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	return pte;
}
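
/*
 * Walk summary (describing the function above): starting from the
 * domain's pgd, each iteration picks the 9-bit slot for the current
 * level and descends; missing intermediate tables are allocated lazily,
 * and the cmpxchg64() lets two CPUs race to install the same table with
 * the loser simply freeing its page.
 */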

/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (pte->val & DMA_PTE_LARGE_PAGE) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte, a tlb flush should follow */
static int dma_pte_clear_range(struct dmar_domain *domain,
			       unsigned long start_pfn,
			       unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);

	return min_t(int, (large_page - 1) * 9, MAX_AGAW_PFN_WIDTH);
}

static void dma_pte_free_level(struct dmar_domain *domain, int level,
			       struct dma_pte *pte, unsigned long pfn,
			       unsigned long start_pfn, unsigned long last_pfn)
{
	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;
		struct dma_pte *level_pte;

		if (!dma_pte_present(pte) || dma_pte_superpage(pte))
			goto next;

		level_pfn = pfn & level_mask(level - 1);
		level_pte = phys_to_virt(dma_pte_addr(pte));

		if (level > 2)
			dma_pte_free_level(domain, level - 1, level_pte,
					   level_pfn, start_pfn, last_pfn);

		/* If range covers entire pagetable, free it */
		if (!(start_pfn > level_pfn ||
		      last_pfn < level_pfn + level_size(level) - 1)) {
			dma_clear_pte(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
			free_pgtable_page(level_pte);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	dma_pte_free_level(domain, agaw_to_level(domain->agaw),
			   domain->pgd, 0, start_pfn, last_pfn);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}
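
/*
 * Encoding note (from the PSI case above): for page-selective
 * invalidation, "addr" is page aligned, so its low bits are free and
 * size_order (log2 of the number of pages to invalidate) is packed into
 * them before being written to the IOTLB invalidate address register.
 */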

static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages, int map)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	/*
	 * Fallback to domain selective flush if no PSI support or the size is
	 * too big.
	 * PSI requires page size to be 2 ^ x, and the base address is naturally
	 * aligned to the size
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
					 DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
					 DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, changes of pages from non-present to present require
	 * flush. However, device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
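
/*
 * Worked example (a sketch of the mask arithmetic above): flushing
 * pages == 3 rounds up to 4, so mask == 2 and the hardware invalidates
 * a 4-page (16KiB) naturally aligned region; a request larger than
 * cap_max_amask_val() allows degrades to a full domain-selective flush.
 */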

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		      readl, !(pmen & DMA_PMEN_PRS), pmen);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}

static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
		 iommu->seq_id, ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	spin_lock_init(&iommu->lock);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		pr_err("IOMMU%d: allocating domain id array failed\n",
		       iommu->seq_id);
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
				 GFP_KERNEL);
	if (!iommu->domains) {
		pr_err("IOMMU%d: allocating domain array failed\n",
		       iommu->seq_id);
		kfree(iommu->domain_ids);
		iommu->domain_ids = NULL;
		return -ENOMEM;
	}

	/*
	 * if Caching mode is set, then invalid translations are tagged
	 * with domainid 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}
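
/*
 * Sizing note (from cap_ndoms() and the 16-bit domain-id field in the
 * context entry): an implementation can expose up to 64K domain ids, so
 * the bitmap-based allocator above is kept deliberately simple; id 0 is
 * reserved whenever caching mode is set.
 */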


static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

static void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i, count;
	unsigned long flags;

	if ((iommu->domains) && (iommu->domain_ids)) {
		for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
			domain = iommu->domains[i];
			clear_bit(i, iommu->domain_ids);

			spin_lock_irqsave(&domain->iommu_lock, flags);
			count = --domain->iommu_count;
			spin_unlock_irqrestore(&domain->iommu_lock, flags);
			if (count == 0) {
				if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
					vm_domain_exit(domain);
				else
					domain_exit(domain);
			}
		}
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	kfree(iommu->domains);
	kfree(iommu->domain_ids);
	iommu->domains = NULL;
	iommu->domain_ids = NULL;

	g_iommus[iommu->seq_id] = NULL;

	/* free context mapping */
	free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->nid = -1;
	memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
	domain->flags = 0;

	return domain;
}

static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	domain->id = num;
	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;
	int found = 0;

	spin_lock_irqsave(&iommu->lock, flags);
	ndomains = cap_ndoms(iommu->cap);
	for_each_set_bit(num, iommu->domain_ids, ndomains) {
		if (iommu->domains[num] == domain) {
			found = 1;
			break;
		}
	}

	if (found) {
		clear_bit(num, iommu->domain_ids);
		clear_bit(iommu->seq_id, domain->iommu_bmp);
		iommu->domains[num] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

static int dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
			  &reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
			    IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova) {
		printk(KERN_ERR "Reserve IOAPIC range failed\n");
		return -ENODEV;
	}

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova) {
				printk(KERN_ERR "Reserve iova failed\n");
				return -ENODEV;
			}
		}
	}
	return 0;
}
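
/*
 * Worked example (editor's note, not from the original source): with 4KiB
 * pages, IOVA_PFN(IOAPIC_RANGE_START) == 0xfee00000 >> 12 == 0xfee00 and
 * IOVA_PFN(IOAPIC_RANGE_END) == 0xfeefffff >> 12 == 0xfeeff, so the
 * reservation above pins exactly 256 page frames of IOVA space.
 */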

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}
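
/*
 * Editor's sketch (not part of the original source): a minimal check of the
 * rounding arithmetic above.  The adjusted width is the guest address width
 * rounded up to the next 12 + 9*n boundary (nine translation bits per
 * page-table level above the 12-bit page offset), capped at 64.
 */
static void __maybe_unused guestwidth_to_adjustwidth_selftest(void)
{
	WARN_ON(guestwidth_to_adjustwidth(48) != 48);	/* 48 == 12 + 9*4 */
	WARN_ON(guestwidth_to_adjustwidth(40) != 48);	/* r == 1, round up */
	WARN_ON(guestwidth_to_adjustwidth(66) != 64);	/* clamped to 64 */
}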

static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;
	INIT_LIST_HEAD(&domain->devices);

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
	domain->iommu_count = 1;
	domain->nid = iommu->node;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}
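
/*
 * Worked example (editor's note, not from the original source), assuming
 * width_to_agaw() maps an adjusted width w to (w - 30) / 9 as defined
 * earlier in this file: a 48-bit guest width is already on a stride
 * boundary, so adjust_width == 48 and agaw == (48 - 30) / 9 == 2, i.e. a
 * 4-level page table.  cap_sagaw() is a 5-bit mask of supported AGAWs; if
 * bit 2 were clear, the code above would fall forward to the next larger
 * supported AGAW or fail with -ENODEV.
 */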

static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	/* Flush any lazy unmaps that may reference this domain */
	if (!intel_iommu_strict)
		flush_unmaps_timeout(0);

	domain_remove_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	for_each_active_iommu(iommu, drhd)
		if (test_bit(iommu->seq_id, domain->iommu_bmp))
			iommu_detach_domain(domain, iommu);

	free_domain_mem(domain);
}
1537
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001538static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1539 u8 bus, u8 devfn, int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001540{
1541 struct context_entry *context;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001542 unsigned long flags;
Weidong Han5331fe62008-12-08 23:00:00 +08001543 struct intel_iommu *iommu;
Weidong Hanea6606b2008-12-08 23:08:15 +08001544 struct dma_pte *pgd;
1545 unsigned long num;
1546 unsigned long ndomains;
1547 int id;
1548 int agaw;
Yu Zhao93a23a72009-05-18 13:51:37 +08001549 struct device_domain_info *info = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001550
1551 pr_debug("Set context mapping for %02x:%02x.%d\n",
1552 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001553
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001554 BUG_ON(!domain->pgd);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001555 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1556 translation != CONTEXT_TT_MULTI_LEVEL);
Weidong Han5331fe62008-12-08 23:00:00 +08001557
David Woodhouse276dbf992009-04-04 01:45:37 +01001558 iommu = device_to_iommu(segment, bus, devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001559 if (!iommu)
1560 return -ENODEV;
1561
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001562 context = device_to_context_entry(iommu, bus, devfn);
1563 if (!context)
1564 return -ENOMEM;
1565 spin_lock_irqsave(&iommu->lock, flags);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001566 if (context_present(context)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001567 spin_unlock_irqrestore(&iommu->lock, flags);
1568 return 0;
1569 }
1570
Weidong Hanea6606b2008-12-08 23:08:15 +08001571 id = domain->id;
1572 pgd = domain->pgd;
1573
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001574 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1575 domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001576 int found = 0;
1577
1578 /* find an available domain id for this device in iommu */
1579 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001580 for_each_set_bit(num, iommu->domain_ids, ndomains) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001581 if (iommu->domains[num] == domain) {
1582 id = num;
1583 found = 1;
1584 break;
1585 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001586 }
1587
1588 if (found == 0) {
1589 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1590 if (num >= ndomains) {
1591 spin_unlock_irqrestore(&iommu->lock, flags);
1592 printk(KERN_ERR "IOMMU: no free domain ids\n");
1593 return -EFAULT;
1594 }
1595
1596 set_bit(num, iommu->domain_ids);
1597 iommu->domains[num] = domain;
1598 id = num;
1599 }
1600
1601 /* Skip top levels of page tables for
1602 * iommu which has less agaw than default.
Chris Wright1672af12009-12-02 12:06:34 -08001603 * Unnecessary for PT mode.
Weidong Hanea6606b2008-12-08 23:08:15 +08001604 */
Chris Wright1672af12009-12-02 12:06:34 -08001605 if (translation != CONTEXT_TT_PASS_THROUGH) {
1606 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1607 pgd = phys_to_virt(dma_pte_addr(pgd));
1608 if (!dma_pte_present(pgd)) {
1609 spin_unlock_irqrestore(&iommu->lock, flags);
1610 return -ENOMEM;
1611 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001612 }
1613 }
1614 }
1615
1616 context_set_domain_id(context, id);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001617
Yu Zhao93a23a72009-05-18 13:51:37 +08001618 if (translation != CONTEXT_TT_PASS_THROUGH) {
1619 info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
1620 translation = info ? CONTEXT_TT_DEV_IOTLB :
1621 CONTEXT_TT_MULTI_LEVEL;
1622 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001623 /*
1624 * In pass through mode, AW must be programmed to indicate the largest
1625 * AGAW value supported by hardware. And ASR is ignored by hardware.
1626 */
Yu Zhao93a23a72009-05-18 13:51:37 +08001627 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001628 context_set_address_width(context, iommu->msagaw);
Yu Zhao93a23a72009-05-18 13:51:37 +08001629 else {
1630 context_set_address_root(context, virt_to_phys(pgd));
1631 context_set_address_width(context, iommu->agaw);
1632 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001633
1634 context_set_translation_type(context, translation);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001635 context_set_fault_enable(context);
1636 context_set_present(context);
Weidong Han5331fe62008-12-08 23:00:00 +08001637 domain_flush_cache(domain, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001638
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001639 /*
1640 * It's a non-present to present mapping. If hardware doesn't cache
1641 * non-present entry we only need to flush the write-buffer. If the
1642 * _does_ cache non-present entries, then it does so in the special
1643 * domain #0, which we have to flush:
1644 */
1645 if (cap_caching_mode(iommu->cap)) {
1646 iommu->flush.flush_context(iommu, 0,
1647 (((u16)bus) << 8) | devfn,
1648 DMA_CCMD_MASK_NOBIT,
1649 DMA_CCMD_DEVICE_INVL);
Nadav Amit82653632010-04-01 13:24:40 +03001650 iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001651 } else {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001652 iommu_flush_write_buffer(iommu);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001653 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001654 iommu_enable_dev_iotlb(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001655 spin_unlock_irqrestore(&iommu->lock, flags);
Weidong Hanc7151a82008-12-08 22:51:37 +08001656
1657 spin_lock_irqsave(&domain->iommu_lock, flags);
Mike Travis1b198bb2012-03-05 15:05:16 -08001658 if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
Weidong Hanc7151a82008-12-08 22:51:37 +08001659 domain->iommu_count++;
Suresh Siddha4c923d42009-10-02 11:01:24 -07001660 if (domain->iommu_count == 1)
1661 domain->nid = iommu->node;
Sheng Yang58c610b2009-03-18 15:33:05 +08001662 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08001663 }
1664 spin_unlock_irqrestore(&domain->iommu_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001665 return 0;
1666}
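
/*
 * Editor's note (not from the original source): the 16-bit source-id used
 * in the device-scope context flush above is just (bus << 8) | devfn.  For
 * device 00:1f.2, devfn == (0x1f << 3) | 2 == 0xfa, so the source-id is
 * 0x00fa.
 */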

static int
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
		       int translation)
{
	int ret;
	struct pci_dev *tmp, *parent;

	ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
					 pdev->bus->number, pdev->devfn,
					 translation);
	if (ret)
		return ret;

	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return 0;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = domain_context_mapping_one(domain,
						 pci_domain_nr(parent->bus),
						 parent->bus->number,
						 parent->devfn, translation);
		if (ret)
			return ret;
		parent = parent->bus->self;
	}
	if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->subordinate),
					tmp->subordinate->number, 0,
					translation);
	else /* this is a legacy PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->bus),
					tmp->bus->number,
					tmp->devfn,
					translation);
}

static int domain_context_mapped(struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;
	struct intel_iommu *iommu;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
	if (!ret)
		return ret;
	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return ret;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = device_context_mapped(iommu, parent->bus->number,
					    parent->devfn);
		if (!ret)
			return ret;
		parent = parent->bus->self;
	}
	if (pci_is_pcie(tmp))
		return device_context_mapped(iommu, tmp->subordinate->number,
					     0);
	else
		return device_context_mapped(iommu, tmp->bus->number,
					     tmp->devfn);
}

/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}
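
/*
 * Editor's sketch (not part of the original source): with 4KiB pages, a
 * buffer that starts at the last byte of one page and runs for 0x1000
 * bytes straddles two pages, because the intra-page offset is added back
 * before rounding up.
 */
static void __maybe_unused aligned_nrpages_example(void)
{
	/* PAGE_ALIGN(0xfff + 0x1000) >> 12 == 0x2000 >> 12 == 2 */
	WARN_ON(aligned_nrpages(0xfff, 0x1000) != 2);
}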

/* Return largest possible superpage level for a given mapping */
static inline int hardware_largepage_caps(struct dmar_domain *domain,
					  unsigned long iov_pfn,
					  unsigned long phy_pfn,
					  unsigned long pages)
{
	int support, level = 1;
	unsigned long pfnmerge;

	support = domain->iommu_superpage;

	/* To use a large page, the virtual *and* physical addresses
	   must be aligned to 2MiB/1GiB/etc. Lower bits set in either
	   of them will mean we have to use smaller pages. So just
	   merge them and check both at once. */
	pfnmerge = iov_pfn | phy_pfn;

	while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
		pages >>= VTD_STRIDE_SHIFT;
		if (!pages)
			break;
		pfnmerge >>= VTD_STRIDE_SHIFT;
		level++;
		support--;
	}
	return level;
}
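
/*
 * Worked example (editor's note, not from the original source), assuming
 * the usual 9-bit stride: mapping 0x400 pages at iov_pfn 0x200 and phy_pfn
 * 0x400 merges to pfnmerge == 0x600, whose low nine bits are zero, so one
 * loop iteration runs (pages becomes 0x2) and level 2 -- 2MiB superpages --
 * is returned, provided the domain advertises superpage support.  A
 * misaligned pfn in either argument leaves level at 1 (4KiB pages only).
 */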

static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			    struct scatterlist *sg, unsigned long phys_pfn,
			    unsigned long nr_pages, int prot)
{
	struct dma_pte *first_pte = NULL, *pte = NULL;
	phys_addr_t uninitialized_var(pteval);
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned long sg_res;
	unsigned int largepage_lvl = 0;
	unsigned long lvl_pages = 0;

	BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

	if (sg)
		sg_res = 0;
	else {
		sg_res = nr_pages + 1;
		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
	}

	while (nr_pages > 0) {
		uint64_t tmp;

		if (!sg_res) {
			sg_res = aligned_nrpages(sg->offset, sg->length);
			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
			sg->dma_length = sg->length;
			pteval = page_to_phys(sg_page(sg)) | prot;
			phys_pfn = pteval >> VTD_PAGE_SHIFT;
		}

		if (!pte) {
			largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);

			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
			if (!pte)
				return -ENOMEM;
			/* It is a large page */
			if (largepage_lvl > 1) {
				pteval |= DMA_PTE_LARGE_PAGE;
				/* Ensure that old small page tables are removed
				   to make room for the superpage, if they exist. */
				dma_pte_clear_range(domain, iov_pfn,
						    iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
				dma_pte_free_pagetable(domain, iov_pfn,
						       iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
			} else {
				pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
			}
		}
		/* We don't need a lock here; nobody else
		 * touches the iova range
		 */
		tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
		if (tmp) {
			static int dumps = 5;
			printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
			       iov_pfn, tmp, (unsigned long long)pteval);
			if (dumps) {
				dumps--;
				debug_dma_dump_mappings(NULL);
			}
			WARN_ON(1);
		}

		lvl_pages = lvl_to_nr_pages(largepage_lvl);

		BUG_ON(nr_pages < lvl_pages);
		BUG_ON(sg_res < lvl_pages);

		nr_pages -= lvl_pages;
		iov_pfn += lvl_pages;
		phys_pfn += lvl_pages;
		pteval += lvl_pages * VTD_PAGE_SIZE;
		sg_res -= lvl_pages;

		/* If the next PTE would be the first in a new page, then we
		   need to flush the cache on the entries we've just written.
		   And then we'll need to recalculate 'pte', so clear it and
		   let it get set again in the if (!pte) block above.

		   If we're done (!nr_pages) we need to flush the cache too.

		   Also if we've been setting superpages, we may need to
		   recalculate 'pte' and switch back to smaller pages for the
		   end of the mapping, if the trailing size is not enough to
		   use another superpage (i.e. sg_res < lvl_pages). */
		pte++;
		if (!nr_pages || first_pte_in_page(pte) ||
		    (largepage_lvl > 1 && sg_res < lvl_pages)) {
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
			pte = NULL;
		}

		if (!sg_res && nr_pages)
			sg = sg_next(sg);
	}
	return 0;
}

static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
				    int prot)
{
	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
				     int prot)
{
	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}
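
/*
 * Editor's usage sketch (not in the original source): identity-map a single
 * 4KiB page, readable and writable, into an already-initialized domain.
 * The pfn value is purely illustrative.
 */
static int __maybe_unused example_identity_map_one_page(struct dmar_domain *domain)
{
	return domain_pfn_mapping(domain, 0x1234, 0x1234, 1,
				  DMA_PTE_READ | DMA_PTE_WRITE);
}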

static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
				   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}

static inline void unlink_domain_info(struct device_domain_info *info)
{
	assert_spin_locked(&device_domain_lock);
	list_del(&info->link);
	list_del(&info->global);
	if (info->dev)
		info->dev->dev.archdata.iommu = NULL;
}

static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	unsigned long flags;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
				  struct device_domain_info, link);
		unlink_domain_info(info);
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		free_devinfo_mem(info);

		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

/*
 * find_domain
 * Note: struct pci_dev->dev.archdata.iommu stores the device_domain_info
 */
static struct dmar_domain *
find_domain(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = pdev->dev.archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}

/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
{
	struct dmar_domain *domain, *found = NULL;
	struct intel_iommu *iommu;
	struct dmar_drhd_unit *drhd;
	struct device_domain_info *info, *tmp;
	struct pci_dev *dev_tmp;
	unsigned long flags;
	int bus = 0, devfn = 0;
	int segment;
	int ret;

	domain = find_domain(pdev);
	if (domain)
		return domain;

	segment = pci_domain_nr(pdev->bus);

	dev_tmp = pci_find_upstream_pcie_bridge(pdev);
	if (dev_tmp) {
		if (pci_is_pcie(dev_tmp)) {
			bus = dev_tmp->subordinate->number;
			devfn = 0;
		} else {
			bus = dev_tmp->bus->number;
			devfn = dev_tmp->devfn;
		}
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(info, &device_domain_list, global) {
			if (info->segment == segment &&
			    info->bus == bus && info->devfn == devfn) {
				found = info->domain;
				break;
			}
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
		/* the PCIe-to-PCI bridge already has a domain, use it */
		if (found) {
			domain = found;
			goto found_domain;
		}
	}

	domain = alloc_domain();
	if (!domain)
		goto error;

	/* Allocate new domain for the device */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (!drhd) {
		printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
		       pci_name(pdev));
		free_domain_mem(domain);
		return NULL;
	}
	iommu = drhd->iommu;

	ret = iommu_attach_domain(domain, iommu);
	if (ret) {
		free_domain_mem(domain);
		goto error;
	}

	if (domain_init(domain, gaw)) {
		domain_exit(domain);
		goto error;
	}

	/* register the PCIe-to-PCI bridge */
	if (dev_tmp) {
		info = alloc_devinfo_mem();
		if (!info) {
			domain_exit(domain);
			goto error;
		}
		info->segment = segment;
		info->bus = bus;
		info->devfn = devfn;
		info->dev = NULL;
		info->domain = domain;
		/* This domain is shared by devices under the p2p bridge */
		domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;

		/* the PCIe-to-PCI bridge may already have a domain; use it */
		found = NULL;
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(tmp, &device_domain_list, global) {
			if (tmp->segment == segment &&
			    tmp->bus == bus && tmp->devfn == devfn) {
				found = tmp->domain;
				break;
			}
		}
		if (found) {
			spin_unlock_irqrestore(&device_domain_lock, flags);
			free_devinfo_mem(info);
			domain_exit(domain);
			domain = found;
		} else {
			list_add(&info->link, &domain->devices);
			list_add(&info->global, &device_domain_list);
			spin_unlock_irqrestore(&device_domain_lock, flags);
		}
	}

found_domain:
	info = alloc_devinfo_mem();
	if (!info)
		goto error;
	info->segment = segment;
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;
	spin_lock_irqsave(&device_domain_lock, flags);
	/* somebody else may have beaten us to it */
	found = find_domain(pdev);
	if (found != NULL) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		if (found != domain) {
			domain_exit(domain);
			domain = found;
		}
		free_devinfo_mem(info);
		return domain;
	}
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);
	return domain;
error:
	/* recheck it here; maybe somebody else set it */
	return find_domain(pdev);
}

static int iommu_identity_mapping;
#define IDENTMAP_ALL		1
#define IDENTMAP_GFX		2
#define IDENTMAP_AZALIA		4

static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long long start,
				     unsigned long long end)
{
	unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
	unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;

	if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
			  dma_to_mm_pfn(last_vpfn))) {
		printk(KERN_ERR "IOMMU: reserve iova failed\n");
		return -ENOMEM;
	}

	pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
		 start, end, domain->id);
	/*
	 * The RMRR range might overlap an existing physical memory
	 * mapping; clear it first.
	 */
	dma_pte_clear_range(domain, first_vpfn, last_vpfn);

	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
				  last_vpfn - first_vpfn + 1,
				  DMA_PTE_READ|DMA_PTE_WRITE);
}
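
/*
 * Worked example (editor's note, not from the original source): an RMRR
 * covering 0x100000-0x1fffff gives first_vpfn == 0x100 and last_vpfn ==
 * 0x1ff, so 0x100 pages are reserved and identity-mapped (IOVA pfn ==
 * physical pfn) with read/write permission.
 */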

static int iommu_prepare_identity_map(struct pci_dev *pdev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	/* For _hardware_ passthrough, don't bother. But for software
	   passthrough, we do it anyway -- it may indicate a memory
	   range which is reserved in E820 and so didn't get set up
	   to start with in si_domain */
	if (domain == si_domain && hw_pass_through) {
		printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
		       pci_name(pdev), start, end);
		return 0;
	}

	printk(KERN_INFO
	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
	       pci_name(pdev), start, end);

	if (end < start) {
		WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
			"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	if (end >> agaw_to_width(domain->agaw)) {
		WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     agaw_to_width(domain->agaw),
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	ret = iommu_domain_identity_map(domain, start, end);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
	if (ret)
		goto error;

	return 0;

 error:
	domain_exit(domain);
	return ret;
}

static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
					 struct pci_dev *pdev)
{
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;
	return iommu_prepare_identity_map(pdev, rmrr->base_address,
					  rmrr->end_address);
}

#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);

	if (ret)
		printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
		       "floppy might not work\n");
}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */

static int md_domain_init(struct dmar_domain *domain, int guest_width);

static int __init si_domain_init(int hw)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int nid, ret = 0;

	si_domain = alloc_domain();
	if (!si_domain)
		return -EFAULT;

	for_each_active_iommu(iommu, drhd) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret) {
			domain_exit(si_domain);
			return -EFAULT;
		}
	}

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

	si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
	pr_debug("IOMMU: identity mapping domain is domain %d\n",
		 si_domain->id);

	if (hw)
		return 0;

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		int i;

		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
			ret = iommu_domain_identity_map(si_domain,
					PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int identity_mapping(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	info = pdev->dev.archdata.iommu;
	if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
		return (info->domain == si_domain);

	return 0;
}

static int domain_add_dev_info(struct dmar_domain *domain,
			       struct pci_dev *pdev,
			       int translation)
{
	struct device_domain_info *info;
	unsigned long flags;
	int ret;

	info = alloc_devinfo_mem();
	if (!info)
		return -ENOMEM;

	info->segment = pci_domain_nr(pdev->bus);
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	ret = domain_context_mapping(domain, pdev, translation);
	if (ret) {
		spin_lock_irqsave(&device_domain_lock, flags);
		unlink_domain_info(info);
		spin_unlock_irqrestore(&device_domain_lock, flags);
		free_devinfo_mem(info);
		return ret;
	}

	return 0;
}

static bool device_has_rmrr(struct pci_dev *dev)
{
	struct dmar_rmrr_unit *rmrr;
	int i;

	for_each_rmrr_units(rmrr) {
		for (i = 0; i < rmrr->devices_cnt; i++) {
			/*
			 * Return TRUE if this RMRR contains the device that
			 * is passed in.
			 */
			if (rmrr->devices[i] == dev)
				return true;
		}
	}
	return false;
}

static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
{
	/*
	 * We want to prevent any device associated with an RMRR from
	 * getting placed into the SI Domain. This is done because
	 * problems exist when devices are moved in and out of domains
	 * and their respective RMRR info is lost. We exempt USB devices
	 * from this process due to their usage of RMRRs that are known
	 * to not be needed after BIOS hand-off to OS.
	 */
	if (device_has_rmrr(pdev) &&
	    (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
		return 0;

	if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
		return 1;

	if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
		return 1;

	if (!(iommu_identity_mapping & IDENTMAP_ALL))
		return 0;

	/*
	 * We want to start off with all devices in the 1:1 domain, and
	 * take them out later if we find they can't access all of memory.
	 *
	 * However, we can't do this for PCI devices behind bridges,
	 * because all PCI devices behind the same bridge will end up
	 * with the same source-id on their transactions.
	 *
	 * Practically speaking, we can't change things around for these
	 * devices at run-time, because we can't be sure there'll be no
	 * DMA transactions in flight for any of their siblings.
	 *
	 * So PCI devices (unless they're on the root bus) as well as
	 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
	 * the 1:1 domain, just in _case_ one of their siblings turns out
	 * not to be able to map all of memory.
	 */
	if (!pci_is_pcie(pdev)) {
		if (!pci_is_root_bus(pdev->bus))
			return 0;
		if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
			return 0;
	} else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
		return 0;

	/*
	 * At boot time, we don't yet know if devices will be 64-bit capable.
	 * Assume that they will -- if they turn out not to be, then we can
	 * take them out of the 1:1 domain later.
	 */
	if (!startup) {
		/*
		 * If the device's dma_mask is less than the system's memory
		 * size then this is not a candidate for identity mapping.
		 */
		u64 dma_mask = pdev->dma_mask;

		if (pdev->dev.coherent_dma_mask &&
		    pdev->dev.coherent_dma_mask < dma_mask)
			dma_mask = pdev->dev.coherent_dma_mask;

		return dma_mask >= dma_get_required_mask(&pdev->dev);
	}

	return 1;
}
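
/*
 * Editor's summary (not from the original source): with IDENTMAP_ALL set,
 * a 32-bit-only PCIe endpoint on the root bus is optimistically
 * identity-mapped at startup (return 1), but the later runtime check
 * compares its DMA mask against dma_get_required_mask(); on a machine with
 * more than 4GiB of RAM that check returns 0 and the device is moved to a
 * private DMA domain.
 */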

static int __init iommu_prepare_static_identity_mapping(int hw)
{
	struct pci_dev *pdev = NULL;
	int ret;

	ret = si_domain_init(hw);
	if (ret)
		return -EFAULT;

	for_each_pci_dev(pdev) {
		if (iommu_should_identity_map(pdev, 1)) {
			ret = domain_add_dev_info(si_domain, pdev,
					  hw ? CONTEXT_TT_PASS_THROUGH :
					       CONTEXT_TT_MULTI_LEVEL);
			if (ret) {
				/* device not associated with an iommu */
				if (ret == -ENODEV)
					continue;
				return ret;
			}
			pr_info("IOMMU: %s identity mapping for device %s\n",
				hw ? "hardware" : "software", pci_name(pdev));
		}
	}

	return 0;
}

static int __init init_dmars(void)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
	struct pci_dev *pdev;
	struct intel_iommu *iommu;
	int i, ret;

	/*
	 * for each drhd
	 *    allocate root
	 *    initialize and program root entry to not present
	 * endfor
	 */
	for_each_drhd_unit(drhd) {
		/*
		 * lock not needed: this is only incremented in the
		 * single-threaded kernel __init code path; all other
		 * accesses are read-only
		 */
		if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
			g_num_of_iommus++;
			continue;
		}
		printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
			    IOMMU_UNITS_SUPPORTED);
	}

	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
			   GFP_KERNEL);
	if (!g_iommus) {
		printk(KERN_ERR "Allocating global iommu array failed\n");
		ret = -ENOMEM;
		goto error;
	}

	deferred_flush = kzalloc(g_num_of_iommus *
		sizeof(struct deferred_flush_tables), GFP_KERNEL);
	if (!deferred_flush) {
		ret = -ENOMEM;
		goto free_g_iommus;
	}

	for_each_active_iommu(iommu, drhd) {
		g_iommus[iommu->seq_id] = iommu;

		ret = iommu_init_domains(iommu);
		if (ret)
			goto free_iommu;

		/*
		 * TBD:
		 * we could share the same root & context tables
		 * among all IOMMUs; need to split it later.
		 */
		ret = iommu_alloc_root_entry(iommu);
		if (ret) {
			printk(KERN_ERR "IOMMU: allocate root entry failed\n");
			goto free_iommu;
		}
		if (!ecap_pass_through(iommu->ecap))
			hw_pass_through = 0;
	}

	/*
	 * Start from a sane iommu hardware state.
	 */
	for_each_active_iommu(iommu, drhd) {
		/*
		 * If the queued invalidation is already initialized by us
		 * (for example, while enabling interrupt-remapping) then
		 * we got the things already rolling from a sane state.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(-1, iommu);
		/*
		 * Disable queued invalidation if supported and already enabled
		 * before OS handover.
		 */
		dmar_disable_qi(iommu);
	}

	for_each_active_iommu(iommu, drhd) {
		if (dmar_enable_qi(iommu)) {
			/*
			 * Queued Invalidate not enabled, use Register Based
			 * Invalidate
			 */
			iommu->flush.flush_context = __iommu_flush_context;
			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
			printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based invalidation\n",
			       iommu->seq_id,
			       (unsigned long long)drhd->reg_base_addr);
		} else {
			iommu->flush.flush_context = qi_flush_context;
			iommu->flush.flush_iotlb = qi_flush_iotlb;
			printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued invalidation\n",
			       iommu->seq_id,
			       (unsigned long long)drhd->reg_base_addr);
		}
	}

	if (iommu_pass_through)
		iommu_identity_mapping |= IDENTMAP_ALL;

#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
	iommu_identity_mapping |= IDENTMAP_GFX;
#endif

	check_tylersburg_isoch();

	/*
	 * If pass through is not set or not enabled, set up context entries
	 * for the rmrr, gfx and isa identity mappings; this may fall back to
	 * the static identity mapping if iommu_identity_mapping is set.
	 */
	if (iommu_identity_mapping) {
		ret = iommu_prepare_static_identity_mapping(hw_pass_through);
		if (ret) {
			printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
			goto free_iommu;
		}
	}
	/*
	 * For each rmrr
	 *   for each dev attached to rmrr
	 *   do
	 *     locate drhd for dev, alloc domain for dev
	 *     allocate free domain
	 *     allocate page table entries for rmrr
	 *     if context not allocated for bus
	 *       allocate and init context
	 *       set present in root table for this bus
	 *     init context with domain, translation etc
	 *   endfor
	 * endfor
	 */
	printk(KERN_INFO "IOMMU: Setting RMRR:\n");
	for_each_rmrr_units(rmrr) {
		for (i = 0; i < rmrr->devices_cnt; i++) {
			pdev = rmrr->devices[i];
			/*
			 * some BIOSes list non-existent devices in the
			 * DMAR table.
			 */
			if (!pdev)
				continue;
			ret = iommu_prepare_rmrr_dev(rmrr, pdev);
			if (ret)
				printk(KERN_ERR
				       "IOMMU: mapping reserved region failed\n");
		}
	}

	iommu_prepare_isa();

	/*
	 * for each drhd
	 *   enable fault log
	 *   global invalidate context cache
	 *   global invalidate iotlb
	 *   enable translation
	 */
	for_each_iommu(iommu, drhd) {
		if (drhd->ignored) {
			/*
			 * we always have to disable PMRs or DMA may fail on
			 * this device
			 */
			if (force_on)
				iommu_disable_protect_mem_regions(iommu);
			continue;
		}

		iommu_flush_write_buffer(iommu);

		ret = dmar_set_interrupt(iommu);
		if (ret)
			goto free_iommu;

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);

		ret = iommu_enable_translation(iommu);
		if (ret)
			goto free_iommu;

		iommu_disable_protect_mem_regions(iommu);
	}

	return 0;

free_iommu:
	for_each_active_iommu(iommu, drhd)
		free_dmar_iommu(iommu);
	kfree(deferred_flush);
free_g_iommus:
	kfree(g_iommus);
error:
	return ret;
}
2648
/* This takes a number of _MM_ pages, not VTD pages */
static struct iova *intel_alloc_iova(struct device *dev,
				     struct dmar_domain *domain,
				     unsigned long nrpages, uint64_t dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct iova *iova = NULL;

	/* Restrict dma_mask to the width that the iommu can handle */
	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);

	if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
		/*
		 * First try to allocate an io virtual address in
		 * DMA_BIT_MASK(32), and if that fails then try
		 * allocating from the higher range.
		 */
		iova = alloc_iova(&domain->iovad, nrpages,
				  IOVA_PFN(DMA_BIT_MASK(32)), 1);
		if (iova)
			return iova;
	}
	iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
	if (unlikely(!iova)) {
		printk(KERN_ERR "Allocating %ld-page iova for %s failed",
		       nrpages, pci_name(pdev));
		return NULL;
	}

	return iova;
}

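/*
 * Slow path for get_valid_domain_for_dev() below: find or allocate the
 * dmar_domain for this device and make sure a context entry mapping the
 * device to that domain is in place. Returns NULL on failure.
 */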
static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev,
				    DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain) {
		printk(KERN_ERR
		       "Allocating domain for %s failed", pci_name(pdev));
		return NULL;
	}

	/* make sure context mapping is ok */
	if (unlikely(!domain_context_mapped(pdev))) {
		ret = domain_context_mapping(domain, pdev,
					     CONTEXT_TT_MULTI_LEVEL);
		if (ret) {
			printk(KERN_ERR
			       "Domain context map for %s failed",
			       pci_name(pdev));
			return NULL;
		}
	}

	return domain;
}

static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = dev->dev.archdata.iommu;
	if (likely(info))
		return info->domain;

	return __get_valid_domain_for_dev(dev);
}

static int iommu_dummy(struct pci_dev *pdev)
{
	return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

/* Check if the pdev needs to go through the non-identity map/unmap process. */
static int iommu_no_mapping(struct device *dev)
{
	struct pci_dev *pdev;
	int found;

	if (unlikely(!dev_is_pci(dev)))
		return 1;

	pdev = to_pci_dev(dev);
	if (iommu_dummy(pdev))
		return 1;

	if (!iommu_identity_mapping)
		return 0;

	found = identity_mapping(pdev);
	if (found) {
		if (iommu_should_identity_map(pdev, 0))
			return 1;
		else {
			/*
			 * The device only does 32-bit DMA: remove it from
			 * si_domain and fall back to a non-identity mapping.
			 */
			domain_remove_one_dev_info(si_domain, pdev);
			printk(KERN_INFO "32bit %s uses non-identity mapping\n",
			       pci_name(pdev));
			return 0;
		}
	} else {
		/*
		 * A 64-bit DMA device detached from a VM is put back
		 * into si_domain for identity mapping.
		 */
		if (iommu_should_identity_map(pdev, 0)) {
			int ret;
			ret = domain_add_dev_info(si_domain, pdev,
						  hw_pass_through ?
						  CONTEXT_TT_PASS_THROUGH :
						  CONTEXT_TT_MULTI_LEVEL);
			if (!ret) {
				printk(KERN_INFO "64bit %s uses identity mapping\n",
				       pci_name(pdev));
				return 1;
			}
		}
	}

	return 0;
}

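/*
 * Map a physically contiguous range for DMA: allocate an iova below the
 * device's dma_mask, install page table entries with the permissions
 * implied by the DMA direction, and flush the IOTLB (caching mode) or
 * the write buffers so the hardware sees the new mapping. Returns the
 * bus address, or 0 on failure.
 */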
static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
				     size_t size, int dir, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	phys_addr_t start_paddr;
	struct iova *iova;
	int prot = 0;
	int ret;
	struct intel_iommu *iommu;
	unsigned long paddr_pfn = paddr >> PAGE_SHIFT;

	BUG_ON(dir == DMA_NONE);

	if (iommu_no_mapping(hwdev))
		return paddr;

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);
	size = aligned_nrpages(paddr, size);

	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
	if (!iova)
		goto error;

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;
	/*
	 * paddr to (paddr + size) may span a partial page, so map the
	 * whole page. Note: if two parts of one page are mapped
	 * separately, we might end up with two guest addresses mapping
	 * to the same host paddr, but this is not a big problem.
	 */
	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
				 mm_to_dma_pfn(paddr_pfn), size, prot);
	if (ret)
		goto error;

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
	else
		iommu_flush_write_buffer(iommu);

	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
	start_paddr += paddr & ~PAGE_MASK;
	return start_paddr;

error:
	if (iova)
		__free_iova(&domain->iovad, iova);
	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
	       pci_name(pdev), size, (unsigned long long)paddr, dir);
	return 0;
}

static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
				  dir, to_pci_dev(dev)->dma_mask);
}

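/*
 * Drain the per-IOMMU deferred-unmap queues: invalidate the IOTLB for
 * the queued ranges (one global flush per IOMMU when not in caching
 * mode), then return the iovas to the allocator. Called with
 * async_umap_flush_lock held.
 */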
static void flush_unmaps(void)
{
	int i, j;

	timer_on = 0;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
		struct intel_iommu *iommu = g_iommus[i];
		if (!iommu)
			continue;

		if (!deferred_flush[i].next)
			continue;

		/* In caching mode, global flushes turn emulation expensive */
		if (!cap_caching_mode(iommu->cap))
			iommu->flush.flush_iotlb(iommu, 0, 0, 0,
						 DMA_TLB_GLOBAL_FLUSH);
		for (j = 0; j < deferred_flush[i].next; j++) {
			unsigned long mask;
			struct iova *iova = deferred_flush[i].iova[j];
			struct dmar_domain *domain = deferred_flush[i].domain[j];

			/* On real hardware multiple invalidations are expensive */
			if (cap_caching_mode(iommu->cap))
				iommu_flush_iotlb_psi(iommu, domain->id,
					iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
			else {
				mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
				iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
					(uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
			}
			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
		}
		deferred_flush[i].next = 0;
	}

	list_size = 0;
}

static void flush_unmaps_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	flush_unmaps();
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

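/*
 * Queue an iova for deferred freeing rather than flushing the IOTLB
 * synchronously. The queue is drained by the unmap timer, or right away
 * once HIGH_WATER_MARK entries have accumulated.
 */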
static void add_unmap(struct dmar_domain *dom, struct iova *iova)
{
	unsigned long flags;
	int next, iommu_id;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	if (list_size == HIGH_WATER_MARK)
		flush_unmaps();

	iommu = domain_get_iommu(dom);
	iommu_id = iommu->seq_id;

	next = deferred_flush[iommu_id].next;
	deferred_flush[iommu_id].domain[next] = dom;
	deferred_flush[iommu_id].iova[next] = iova;
	deferred_flush[iommu_id].next++;

	if (!timer_on) {
		mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
		timer_on = 1;
	}
	list_size++;
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

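/*
 * Tear down the mapping behind a DMA handle: clear the page table
 * entries, free the page table pages covering the range, then either
 * flush the IOTLB and free the iova immediately (strict mode) or queue
 * the iova for batched freeing.
 */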
static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(dev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
	if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
		      (unsigned long long)dev_addr))
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	pr_debug("Device %s unmapping: pfn %lx-%lx\n",
		 pci_name(pdev), start_pfn, last_pfn);

	/* clear the whole page */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* free page tables */
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1, 0);
		/* free iova */
		__free_iova(&domain->iovad, iova);
	} else {
		add_unmap(domain, iova);
		/*
		 * Queue up the release of the unmap to save the 1/6th of
		 * the CPU time used up by the iotlb flush operation...
		 */
mark gross5e0d2a62008-03-04 15:22:08 -08002975 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002976}
2977
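/*
 * Allocate and map a coherent buffer. When the device is translated by
 * the IOMMU, the GFP_DMA/GFP_DMA32 zone restrictions can be dropped,
 * since the iova allocated below already honours coherent_dma_mask;
 * identity-mapped devices keep the zone restriction instead.
 */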
static void *intel_alloc_coherent(struct device *hwdev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  struct dma_attrs *attrs)
{
	void *vaddr;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	if (!iommu_no_mapping(hwdev))
		flags &= ~(GFP_DMA | GFP_DMA32);
	else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
		if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
			flags |= GFP_DMA;
		else
			flags |= GFP_DMA32;
	}

	vaddr = (void *)__get_free_pages(flags, order);
	if (!vaddr)
		return NULL;
	memset(vaddr, 0, size);

	*dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
					 DMA_BIDIRECTIONAL,
					 hwdev->coherent_dma_mask);
	if (*dma_handle)
		return vaddr;
	free_pages((unsigned long)vaddr, order);
	return NULL;
}

static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
				dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
	free_pages((unsigned long)vaddr, order);
}

static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(hwdev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
	if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
		      (unsigned long long)sglist[0].dma_address))
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	/* clear the whole page */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* free page tables */
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1, 0);
		/* free iova */
		__free_iova(&domain->iovad, iova);
	} else {
		add_unmap(domain, iova);
		/*
		 * Queue up the release of the unmap to save the 1/6th of
		 * the CPU time used up by the iotlb flush operation...
		 */
	}
}

static int intel_nontranslate_map_sg(struct device *hddev,
				     struct scatterlist *sglist, int nelems, int dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i) {
		BUG_ON(!sg_page(sg));
		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
		sg->dma_length = sg->length;
	}
	return nelems;
}

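/*
 * Map a scatterlist: reserve one iova region large enough for all
 * segments, map the segments contiguously into it, and flush the IOTLB
 * (caching mode) or the write buffers. On failure the partially built
 * page tables and the iova are torn down and 0 is returned.
 */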
static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	size_t size = 0;
	int prot = 0;
	struct iova *iova = NULL;
	int ret;
	struct scatterlist *sg;
	unsigned long start_vpfn;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);
	if (iommu_no_mapping(hwdev))
		return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);

	for_each_sg(sglist, sg, nelems, i)
		size += aligned_nrpages(sg->offset, sg->length);

	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
				pdev->dma_mask);
	if (!iova) {
		sglist->dma_length = 0;
		return 0;
	}

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;

	start_vpfn = mm_to_dma_pfn(iova->pfn_lo);

	ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
	if (unlikely(ret)) {
		/* clear the page */
		dma_pte_clear_range(domain, start_vpfn,
				    start_vpfn + size - 1);
		/* free page tables */
		dma_pte_free_pagetable(domain, start_vpfn,
				       start_vpfn + size - 1);
		/* free iova */
		__free_iova(&domain->iovad, iova);
		return 0;
	}

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
	else
		iommu_flush_write_buffer(iommu);

	return nelems;
}

static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return !dma_addr;
}

struct dma_map_ops intel_dma_ops = {
	.alloc = intel_alloc_coherent,
	.free = intel_free_coherent,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
};

static inline int iommu_domain_cache_init(void)
{
	int ret = 0;

	iommu_domain_cache = kmem_cache_create("iommu_domain",
					       sizeof(struct dmar_domain),
					       0,
					       SLAB_HWCACHE_ALIGN,
					       NULL);
	if (!iommu_domain_cache) {
		printk(KERN_ERR "Couldn't create iommu_domain cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_devinfo_cache_init(void)
{
	int ret = 0;

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
						sizeof(struct device_domain_info),
						0,
						SLAB_HWCACHE_ALIGN,
						NULL);
	if (!iommu_devinfo_cache) {
		printk(KERN_ERR "Couldn't create devinfo cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_iova_cache_init(void)
{
	int ret = 0;

	iommu_iova_cache = kmem_cache_create("iommu_iova",
					     sizeof(struct iova),
					     0,
					     SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!iommu_iova_cache) {
		printk(KERN_ERR "Couldn't create iova cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

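/*
 * Create the slab caches for the allocation-heavy objects (iovas,
 * domains, device_domain_infos), unwinding the caches already created
 * if a later one fails.
 */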
static int __init iommu_init_mempool(void)
{
	int ret;
	ret = iommu_iova_cache_init();
	if (ret)
		return ret;

	ret = iommu_domain_cache_init();
	if (ret)
		goto domain_error;

	ret = iommu_devinfo_cache_init();
	if (!ret)
		return ret;

	kmem_cache_destroy(iommu_domain_cache);
domain_error:
	kmem_cache_destroy(iommu_iova_cache);

	return -ENOMEM;
}

static void __init iommu_exit_mempool(void)
{
	kmem_cache_destroy(iommu_devinfo_cache);
	kmem_cache_destroy(iommu_domain_cache);
	kmem_cache_destroy(iommu_iova_cache);
}

static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
{
	struct dmar_drhd_unit *drhd;
	u32 vtbar;
	int rc;

	/* We know that this device on this chipset has its own IOMMU.
	 * If we find it under a different IOMMU, then the BIOS is lying
	 * to us. Hope that the IOMMU for this device is actually
	 * disabled, and it needs no translation...
	 */
	rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
	if (rc) {
		/* "can't" happen */
		dev_info(&pdev->dev, "failed to run vt-d quirk\n");
		return;
	}
	vtbar &= 0xffff0000;
	/* we know that this IOMMU should be at offset 0xa000 from vtbar */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
			    TAINT_FIRMWARE_WORKAROUND,
			    "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
		pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);

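/*
 * Mark DMAR units with no PCI devices behind them as ignored, and
 * handle graphics-only units: either leave them active (recording
 * intel_iommu_gfx_mapped) or, when gfx mapping is disabled, ignore them
 * and give their devices the dummy identity state.
 */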
static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			int i;
			for (i = 0; i < drhd->devices_cnt; i++)
				if (drhd->devices[i] != NULL)
					break;
			/* ignore DMAR unit if no pci devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	for_each_active_drhd_unit(drhd) {
		int i;
		if (drhd->include_all)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++)
			if (drhd->devices[i] &&
			    !IS_GFX_DEVICE(drhd->devices[i]))
				break;

		if (i < drhd->devices_cnt)
			continue;

		/* This IOMMU has *only* gfx devices. Either bypass it or
		   set the gfx_mapped flag, as appropriate */
		if (dmar_map_gfx) {
			intel_iommu_gfx_mapped = 1;
		} else {
			drhd->ignored = 1;
			for (i = 0; i < drhd->devices_cnt; i++) {
				if (!drhd->devices[i])
					continue;
				drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
			}
		}
	}
}

#ifdef CONFIG_SUSPEND
static int init_iommu_hw(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	for_each_active_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	for_each_iommu(iommu, drhd) {
		if (drhd->ignored) {
			/*
			 * we always have to disable PMRs or DMA may fail on
			 * this device
			 */
			if (force_on)
				iommu_disable_protect_mem_regions(iommu);
			continue;
		}

		iommu_flush_write_buffer(iommu);

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		if (iommu_enable_translation(iommu))
			return 1;
		iommu_disable_protect_mem_regions(iommu);
	}

	return 0;
}

static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
	}
}

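/*
 * Save the fault-event registers of every active IOMMU and disable
 * translation on the way down; iommu_resume() below restores them after
 * re-enabling the hardware via init_iommu_hw().
 */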
static int iommu_suspend(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	for_each_active_iommu(iommu, drhd) {
		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
					     GFP_ATOMIC);
		if (!iommu->iommu_state)
			goto nomem;
	}

	iommu_flush_all();

	for_each_active_iommu(iommu, drhd) {
		iommu_disable_translation(iommu);

		raw_spin_lock_irqsave(&iommu->register_lock, flag);

		iommu->iommu_state[SR_DMAR_FECTL_REG] =
			readl(iommu->reg + DMAR_FECTL_REG);
		iommu->iommu_state[SR_DMAR_FEDATA_REG] =
			readl(iommu->reg + DMAR_FEDATA_REG);
		iommu->iommu_state[SR_DMAR_FEADDR_REG] =
			readl(iommu->reg + DMAR_FEADDR_REG);
		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
			readl(iommu->reg + DMAR_FEUADDR_REG);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	}
	return 0;

nomem:
	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return -ENOMEM;
}

static void iommu_resume(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	if (init_iommu_hw()) {
		if (force_on)
			panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
		else
			WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
		return;
	}

	for_each_active_iommu(iommu, drhd) {

		raw_spin_lock_irqsave(&iommu->register_lock, flag);

		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
		       iommu->reg + DMAR_FECTL_REG);
		writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
		       iommu->reg + DMAR_FEDATA_REG);
		writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
		       iommu->reg + DMAR_FEADDR_REG);
		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
		       iommu->reg + DMAR_FEUADDR_REG);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	}

	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);
}

static struct syscore_ops iommu_syscore_ops = {
	.resume = iommu_resume,
	.suspend = iommu_suspend,
};

static void __init init_iommu_pm_ops(void)
{
	register_syscore_ops(&iommu_syscore_ops);
}

#else
static inline void init_iommu_pm_ops(void) {}
#endif /* CONFIG_SUSPEND */

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}

int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;

	rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
	return dmar_parse_dev_scope((void *)(rmrr + 1),
				    ((void *)rmrr) + rmrr->header.length,
				    &rmrru->devices_cnt, &rmrru->devices,
				    rmrr->segment);
}

int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	atsru->hdr = hdr;
	atsru->include_all = atsr->flags & 0x1;

	list_add(&atsru->list, &dmar_atsr_units);

	return 0;
}

static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
	struct acpi_dmar_atsr *atsr;

	if (atsru->include_all)
		return 0;

	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
	return dmar_parse_dev_scope((void *)(atsr + 1),
				    (void *)atsr + atsr->header.length,
				    &atsru->devices_cnt, &atsru->devices,
				    atsr->segment);
}

static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
{
	dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
	kfree(atsru);
}

static void intel_iommu_free_dmars(void)
{
	struct dmar_rmrr_unit *rmrru, *rmrr_n;
	struct dmar_atsr_unit *atsru, *atsr_n;

	list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
		list_del(&rmrru->list);
		dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
		kfree(rmrru);
	}

	list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
		list_del(&atsru->list);
		intel_iommu_free_atsr(atsru);
	}
}

int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i;
	struct pci_bus *bus;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	dev = pci_physfn(dev);

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment == pci_domain_nr(dev->bus))
			goto found;
	}

	return 0;

found:
	for (bus = dev->bus; bus; bus = bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (!bridge || !pci_is_pcie(bridge) ||
		    pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;

		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) {
			for (i = 0; i < atsru->devices_cnt; i++)
				if (atsru->devices[i] == bridge)
					return 1;
			break;
		}
	}

	if (atsru->include_all)
		return 1;

	return 0;
}

int __init dmar_parse_rmrr_atsr_dev(void)
{
	struct dmar_rmrr_unit *rmrr;
	struct dmar_atsr_unit *atsr;
	int ret = 0;

	list_for_each_entry(rmrr, &dmar_rmrr_units, list) {
		ret = rmrr_parse_dev(rmrr);
		if (ret)
			return ret;
	}

	list_for_each_entry(atsr, &dmar_atsr_units, list) {
		ret = atsr_parse_dev(atsr);
		if (ret)
			return ret;
	}

	return ret;
}

/*
 * Here we only respond to a device being unbound from its driver or
 * removed from the bus.
 *
 * A newly added device is not attached to its DMAR domain here yet; that
 * happens when the device is first mapped to an iova.
 */
static int device_notifier(struct notifier_block *nb,
			   unsigned long action, void *data)
{
	struct device *dev = data;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct dmar_domain *domain;

	if (iommu_dummy(pdev))
		return 0;

	if (action != BUS_NOTIFY_UNBOUND_DRIVER &&
	    action != BUS_NOTIFY_DEL_DEVICE)
		return 0;

	domain = find_domain(pdev);
	if (!domain)
		return 0;

	domain_remove_one_dev_info(domain, pdev);
	if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
	    !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
	    list_empty(&domain->devices))
		domain_exit(domain);

	return 0;
}

static struct notifier_block device_nb = {
	.notifier_call = device_notifier,
};

int __init intel_iommu_init(void)
{
	int ret = -ENODEV;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* VT-d is required for a TXT/tboot launch, so enforce that */
	force_on = tboot_force_iommu();

	if (dmar_table_init()) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR table\n");
		goto out_free_dmar;
	}

	/*
	 * Disable translation if already enabled prior to OS handover.
	 */
	for_each_active_iommu(iommu, drhd)
		if (iommu->gcmd & DMA_GCMD_TE)
			iommu_disable_translation(iommu);

	if (dmar_dev_scope_init() < 0) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR device scope\n");
		goto out_free_dmar;
	}

	if (no_iommu || dmar_disabled)
		goto out_free_dmar;

	if (iommu_init_mempool()) {
		if (force_on)
			panic("tboot: Failed to initialize iommu memory\n");
		goto out_free_dmar;
	}

	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO "DMAR: No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO "DMAR: No ATSR found\n");

	if (dmar_init_reserved_ranges()) {
		if (force_on)
			panic("tboot: Failed to reserve iommu ranges\n");
		goto out_free_mempool;
	}

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		if (force_on)
			panic("tboot: Failed to initialize DMARs\n");
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		goto out_free_reserved_range;
	}
	printk(KERN_INFO
	       "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
#ifdef CONFIG_SWIOTLB
	swiotlb = 0;
#endif
	dma_ops = &intel_dma_ops;

	init_iommu_pm_ops();

	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);

	bus_register_notifier(&pci_bus_type, &device_nb);

	intel_iommu_enabled = 1;

	return 0;

out_free_reserved_range:
	put_iova_domain(&reserved_iova_list);
out_free_mempool:
	iommu_exit_mempool();
out_free_dmar:
	intel_iommu_free_dmars();
	return ret;
}

static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct pci_dev *pdev)
{
	struct pci_dev *tmp, *parent;

	if (!iommu || !pdev)
		return;

	/* dependent device detach */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	/* Secondary interface's bus number and devfn 0 */
	if (tmp) {
		parent = pdev->bus->self;
		while (parent != tmp) {
			iommu_detach_dev(iommu, parent->bus->number,
					 parent->devfn);
			parent = parent->bus->self;
		}
		if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
			iommu_detach_dev(iommu,
					 tmp->subordinate->number, 0);
		else /* this is a legacy PCI bridge */
			iommu_detach_dev(iommu, tmp->bus->number,
					 tmp->devfn);
	}
}

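/*
 * Detach one device from a domain: remove its device_domain_info,
 * disable its device IOTLB, clear the context entries for it (and for
 * any bridges it sits behind) and, if it was the last device on its
 * IOMMU, drop that IOMMU from the domain and recompute the domain's
 * capabilities.
 */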
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev)
{
	struct device_domain_info *info, *tmp;
	struct intel_iommu *iommu;
	unsigned long flags;
	int found = 0;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry_safe(info, tmp, &domain->devices, link) {
		if (info->segment == pci_domain_nr(pdev->bus) &&
		    info->bus == pdev->bus->number &&
		    info->devfn == pdev->devfn) {
			unlink_domain_info(info);
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_disable_dev_iotlb(info);
			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, pdev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/*
		 * If there are no other devices under the same iommu owned
		 * by this domain, clear this iommu in iommu_bmp and update
		 * the iommu count and coherency.
		 */
		if (iommu == device_to_iommu(info->segment, info->bus,
					     info->devfn))
			found = 1;
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (found == 0) {
		unsigned long tmp_flags;
		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
		clear_bit(iommu->seq_id, domain->iommu_bmp);
		domain->iommu_count--;
		domain_update_iommu_cap(domain);
		spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);

		if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
		    !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
			spin_lock_irqsave(&iommu->lock, tmp_flags);
			clear_bit(domain->id, iommu->domain_ids);
			iommu->domains[domain->id] = NULL;
			spin_unlock_irqrestore(&iommu->lock, tmp_flags);
		}
	}
}

static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags1, flags2;

	spin_lock_irqsave(&device_domain_lock, flags1);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
				  struct device_domain_info, link);
		unlink_domain_info(info);
		spin_unlock_irqrestore(&device_domain_lock, flags1);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		iommu_detach_dependent_devices(iommu, info->dev);

		/* clear this iommu in iommu_bmp, update iommu count
		 * and capabilities
		 */
		spin_lock_irqsave(&domain->iommu_lock, flags2);
		if (test_and_clear_bit(iommu->seq_id,
				       domain->iommu_bmp)) {
			domain->iommu_count--;
			domain_update_iommu_cap(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags2);

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags1);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags1);
}

/* domain ids for virtual machines; these are never set in a context entry */
static atomic_t vm_domid = ATOMIC_INIT(0);

static struct dmar_domain *iommu_alloc_vm_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->id = atomic_inc_return(&vm_domid);
	domain->nid = -1;
	memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
	domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;

	return domain;
}

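/*
 * Minimal domain setup for externally managed (VM) domains: initialise
 * the iova allocator and reserved ranges, derive the AGAW from the
 * requested guest address width, and allocate the top-level page
 * directory.
 */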
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003876static int md_domain_init(struct dmar_domain *domain, int guest_width)
Weidong Han5e98c4b2008-12-08 23:03:27 +08003877{
3878 int adjust_width;
3879
3880 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003881 spin_lock_init(&domain->iommu_lock);
3882
3883 domain_reserve_special_ranges(domain);
3884
3885 /* calculate AGAW */
3886 domain->gaw = guest_width;
3887 adjust_width = guestwidth_to_adjustwidth(guest_width);
3888 domain->agaw = width_to_agaw(adjust_width);
3889
3890 INIT_LIST_HEAD(&domain->devices);
3891
3892 domain->iommu_count = 0;
3893 domain->iommu_coherency = 0;
Sheng Yangc5b15252009-08-06 13:31:56 +08003894 domain->iommu_snooping = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01003895 domain->iommu_superpage = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003896 domain->max_addr = 0;
Suresh Siddha4c923d42009-10-02 11:01:24 -07003897 domain->nid = -1;
Weidong Han5e98c4b2008-12-08 23:03:27 +08003898
3899 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07003900 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003901 if (!domain->pgd)
3902 return -ENOMEM;
3903 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3904 return 0;
3905}
3906
3907static void iommu_free_vm_domain(struct dmar_domain *domain)
3908{
3909 unsigned long flags;
3910 struct dmar_drhd_unit *drhd;
3911 struct intel_iommu *iommu;
3912 unsigned long i;
3913 unsigned long ndomains;
3914
Jiang Liu7c919772014-01-06 14:18:18 +08003915 for_each_active_iommu(iommu, drhd) {
Weidong Han5e98c4b2008-12-08 23:03:27 +08003916 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08003917 for_each_set_bit(i, iommu->domain_ids, ndomains) {
Weidong Han5e98c4b2008-12-08 23:03:27 +08003918 if (iommu->domains[i] == domain) {
3919 spin_lock_irqsave(&iommu->lock, flags);
3920 clear_bit(i, iommu->domain_ids);
3921 iommu->domains[i] = NULL;
3922 spin_unlock_irqrestore(&iommu->lock, flags);
3923 break;
3924 }
Weidong Han5e98c4b2008-12-08 23:03:27 +08003925 }
3926 }
3927}
3928
3929static void vm_domain_exit(struct dmar_domain *domain)
3930{
Weidong Han5e98c4b2008-12-08 23:03:27 +08003931 /* Domain 0 is reserved, so dont process it */
3932 if (!domain)
3933 return;
3934
3935 vm_domain_remove_all_dev_info(domain);
3936 /* destroy iovas */
3937 put_iova_domain(&domain->iovad);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003938
3939 /* clear ptes */
David Woodhouse595badf52009-06-27 22:09:11 +01003940 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003941
3942 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01003943 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003944
3945 iommu_free_vm_domain(domain);
3946 free_domain_mem(domain);
3947}
3948
Joerg Roedel5d450802008-12-03 14:52:32 +01003949static int intel_iommu_domain_init(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03003950{
Joerg Roedel5d450802008-12-03 14:52:32 +01003951 struct dmar_domain *dmar_domain;
Kay, Allen M38717942008-09-09 18:37:29 +03003952
Joerg Roedel5d450802008-12-03 14:52:32 +01003953 dmar_domain = iommu_alloc_vm_domain();
3954 if (!dmar_domain) {
Kay, Allen M38717942008-09-09 18:37:29 +03003955 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003956 "intel_iommu_domain_init: dmar_domain == NULL\n");
3957 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003958 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003959 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Kay, Allen M38717942008-09-09 18:37:29 +03003960 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003961 "intel_iommu_domain_init() failed\n");
3962 vm_domain_exit(dmar_domain);
3963 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003964 }
Allen Kay8140a952011-10-14 12:32:17 -07003965 domain_update_iommu_cap(dmar_domain);
Joerg Roedel5d450802008-12-03 14:52:32 +01003966 domain->priv = dmar_domain;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003967
Joerg Roedel8a0e7152012-01-26 19:40:54 +01003968 domain->geometry.aperture_start = 0;
3969 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
3970 domain->geometry.force_aperture = true;
3971
Joerg Roedel5d450802008-12-03 14:52:32 +01003972 return 0;
Kay, Allen M38717942008-09-09 18:37:29 +03003973}
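
/*
 * Callers reach the init/destroy pair above through the generic IOMMU
 * API. A minimal sketch of a VFIO/KVM-style consumer (error handling
 * omitted; 'dev' is an assumed struct device behind a DMAR unit):
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);
 *
 *	if (dom && iommu_attach_device(dom, dev))
 *		iommu_domain_free(dom);
 */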

static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain->priv = NULL;
	vm_domain_exit(dmar_domain);
}

static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_iommu *iommu;
	int addr_width;

	/* normally pdev is not mapped */
	if (unlikely(domain_context_mapped(pdev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(pdev);
		if (old_domain) {
			if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
			    dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
				domain_remove_one_dev_info(old_domain, pdev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	if (addr_width > cap_mgaw(iommu->cap))
		addr_width = cap_mgaw(iommu->cap);

	if (dmar_domain->max_addr > (1LL << addr_width)) {
		printk(KERN_ERR "%s: iommu width (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, addr_width, dmar_domain->max_addr);
		return -EFAULT;
	}
	dmar_domain->gaw = addr_width;

	/*
	 * Knock out extra levels of page tables if necessary
	 */
	while (iommu->agaw < dmar_domain->agaw) {
		struct dma_pte *pte;

		pte = dmar_domain->pgd;
		if (dma_pte_present(pte)) {
			dmar_domain->pgd = (struct dma_pte *)
				phys_to_virt(dma_pte_addr(pte));
			free_pgtable_page(pte);
		}
		dmar_domain->agaw--;
	}

	return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
}
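
/*
 * Illustration of the level trimming above (hypothetical widths): a
 * domain built with AGAW 3 (57-bit, 5-level tables) attached to an
 * IOMMU that only supports AGAW 2 (48-bit, 4-level) drops its top
 * level once, and the remaining 4-level tree is what ends up in the
 * context entry.
 */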

static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);

	domain_remove_one_dev_info(dmar_domain, pdev);
}

static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
			   size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu width (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, dmar_domain->gaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to the next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}
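
/*
 * Example of the rounding above (hypothetical addresses): mapping
 * size 0x1000 at hpa 0x2800 touches bytes 0x2800-0x37ff, i.e. two
 * 4KiB pages, so aligned_nrpages() yields 2 pages even though the
 * requested size is only one page long.
 */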

static size_t intel_iommu_unmap(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;
	int order;

	order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
				    (iova + size - 1) >> VTD_PAGE_SHIFT);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;

	return PAGE_SIZE << order;
}
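
/*
 * The value returned above is what was actually unmapped, which can
 * exceed the requested size when a superpage backs the range; the
 * generic iommu_unmap() loop uses it to advance its cursor. A minimal
 * caller sketch (assuming 'dom' and a previously mapped 'iova'):
 *
 *	size_t unmapped = iommu_unmap(dom, iova, SZ_4K);
 */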

static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}
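
/*
 * Note the page granularity above: the low VTD_PAGE_SHIFT bits of
 * 'iova' are not folded back into the result, so looking up iova
 * 0x1234 in a domain that maps page 0x1000 to phys 0x9000 returns
 * 0x9000, not 0x9234 (hypothetical addresses).
 */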

static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;
	if (cap == IOMMU_CAP_INTR_REMAP)
		return irq_remapping_enabled;

	return 0;
}
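
/*
 * Consumers probe these capabilities through the generic API, e.g. a
 * VFIO-style user deciding whether it may set IOMMU_CACHE on its
 * mappings (sketch; 'dom' and 'prot' assumed):
 *
 *	if (iommu_domain_has_cap(dom, IOMMU_CAP_CACHE_COHERENCY))
 *		prot |= IOMMU_CACHE;
 */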

#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

static int intel_iommu_add_device(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_dev *bridge, *dma_pdev = NULL;
	struct iommu_group *group;
	int ret;

	if (!device_to_iommu(pci_domain_nr(pdev->bus),
			     pdev->bus->number, pdev->devfn))
		return -ENODEV;

	bridge = pci_find_upstream_pcie_bridge(pdev);
	if (bridge) {
		if (pci_is_pcie(bridge))
			dma_pdev = pci_get_domain_bus_and_slot(
						pci_domain_nr(pdev->bus),
						bridge->subordinate->number, 0);
		if (!dma_pdev)
			dma_pdev = pci_dev_get(bridge);
	} else
		dma_pdev = pci_dev_get(pdev);

	/* Account for quirked devices */
	swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));

	/*
	 * If it's a multifunction device that does not support our
	 * required ACS flags, add to the same group as the lowest-numbered
	 * function that also does not support the required ACS flags.
	 */
	if (dma_pdev->multifunction &&
	    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) {
		u8 i, slot = PCI_SLOT(dma_pdev->devfn);

		for (i = 0; i < 8; i++) {
			struct pci_dev *tmp;

			tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i));
			if (!tmp)
				continue;

			if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) {
				swap_pci_ref(&dma_pdev, tmp);
				break;
			}
			pci_dev_put(tmp);
		}
	}

	/*
	 * Devices on the root bus go through the iommu.  If that's not us,
	 * find the next upstream device and test ACS up to the root bus.
	 * Finding the next device may require skipping virtual buses.
	 */
	while (!pci_is_root_bus(dma_pdev->bus)) {
		struct pci_bus *bus = dma_pdev->bus;

		while (!bus->self) {
			if (!pci_is_root_bus(bus))
				bus = bus->parent;
			else
				goto root_bus;
		}

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
	}

root_bus:
	group = iommu_group_get(&dma_pdev->dev);
	pci_dev_put(dma_pdev);
	if (!group) {
		group = iommu_group_alloc();
		if (IS_ERR(group))
			return PTR_ERR(group);
	}

	ret = iommu_group_add_device(group, dev);

	iommu_group_put(group);
	return ret;
}
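
/*
 * Grouping example for the ACS walk above (hypothetical topology): if
 * functions 0-3 of a multifunction device at 00:1c.x all lack the
 * SV/RR/CR/UF ACS controls, every function lands in function 0's
 * iommu_group, and a userspace driver such as VFIO must then claim
 * them as one isolation unit.
 */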

static void intel_iommu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy	= intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map,
	.unmap		= intel_iommu_unmap,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.domain_has_cap	= intel_iommu_domain_has_cap,
	.add_device	= intel_iommu_add_device,
	.remove_device	= intel_iommu_remove_device,
	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
};
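
/*
 * These ops back the generic IOMMU API for PCI devices once the init
 * path registers them, in effect:
 *
 *	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
 *
 * after which iommu_domain_alloc(&pci_bus_type) and friends route to
 * the intel_iommu_* callbacks above.
 */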

static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
{
	/* G4x/GM45 integrated gfx dmar support is totally busted. */
	printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
	dmar_map_gfx = 0;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);

static void quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it. Same seems to hold for the desktop versions.
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);

#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)

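/*
 * Decode example (hypothetical register value): a GGC read of 0x0b50
 * has bits 11:8 equal to 0xb (GGC_MEMORY_SIZE_4M_VT), so the
 * VT-enabled bit (0x8 << 8) is set and the quirk below leaves the
 * graphics IOMMU alone; bits 11:8 equal to 0x1 (GGC_MEMORY_SIZE_1M)
 * would trip the no-shadow-GTT path instead.
 */
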
static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);

/*
 * On Tylersburg chipsets, some BIOSes have been known to enable the
 * ISOCH DMAR unit for the Azalia sound device, but not give it any
 * TLB entries, which causes it to deadlock. Check for that. We do
 * this in a function called from init_dmars(), instead of in a PCI
 * quirk, because we don't want to print the obnoxious "BIOS broken"
 * message if VT-d is actually disabled.
 */
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
	       vtisochctrl);
}
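
/*
 * Worked example (values hypothetical; layout inferred from the masks
 * above): a VTISOCHCTRL read of 0x11 has bit 0 set, so Azalia uses
 * the non-isoch unit and we return early; 0x10 masks to 0x10 == 16
 * TLB entries, also fine; 0x00 is the broken-BIOS case that forces
 * identity mapping for Azalia via IDENTMAP_AZALIA.
 */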