/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/sysdev.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
#include "pci.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define DOMAIN_MAX_ADDR(gaw)	((((u64)1) << gaw) - 1)
#define DOMAIN_MAX_PFN(gaw)	((((u64)1) << (gaw-VTD_PAGE_SHIFT)) - 1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}

static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}

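/*
 * Worked example (informational sketch, assuming VTD_PAGE_SHIFT == 12):
 * with 4KiB kernel pages the shift above is zero and the two pfn spaces
 * coincide; with 64KiB kernel pages (PAGE_SHIFT == 16) one MM pfn spans
 * sixteen VT-d pfns, so mm_to_dma_pfn() shifts left by 4.
 */
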
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static int rwbf_quirk;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root) ?
		 phys_to_virt(root->val & VTD_PAGE_MASK) : NULL);
}

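/*
 * Note (informational): with 4KiB pages and 16-byte root entries,
 * ROOT_ENTRY_NR is 4096/16 = 256 -- one root entry per PCI bus number.
 * Each root entry points to a context table, itself indexed by devfn
 * and allocated lazily in device_to_context_entry() below.
 */
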
/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(pte, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}

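/*
 * Informational: a page-table page holds VTD_PAGE_SIZE/sizeof(struct
 * dma_pte) = 512 entries, so the check above is simply "does this pte
 * sit at the start of its page?".  The range-walking loops below use it
 * to notice when a linear pte++ walk crosses into the next table page.
 */
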
/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
struct dmar_domain *si_domain;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

struct dmar_domain {
	int	id;			/* domain id */
	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev;	/* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* number of intel_iommus, used to size g_iommus[] */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

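/*
 * Informational sketch: rather than flushing the IOTLB on every unmap,
 * freed IOVAs are queued in the deferred_flush tables and released in
 * batches by flush_unmaps_timeout(), either when unmap_timer fires or
 * when a table fills to HIGH_WATER_MARK entries.  Booting with
 * intel_iommu=strict (see intel_iommu_setup() below) disables this
 * batching in favour of synchronous flushes.
 */
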
static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_DMAR_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_DMAR_DEFAULT_ON*/

static int __initdata dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

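/*
 * Usage sketch (kernel command line): options may be combined with
 * commas, e.g.
 *
 *	intel_iommu=on,strict
 *	intel_iommu=igfx_off,forcedac
 *
 * since the parser above consumes one comma-separated token per pass.
 */
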
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}

static inline void *alloc_pgtable_page(void)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_domain_cache);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_devinfo_cache);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_iova_cache);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}

static inline int width_to_agaw(int width);

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * Calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw, and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

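/*
 * Worked example (informational): with the default 48-bit width,
 * width_to_agaw() below gives (48 - 30) / 9 = 2, and agaw_to_level()
 * maps that to a 4-level page table.  If the hardware's SAGAW field
 * lacks that bit, the loop in __iommu_calculate_agaw() steps down to
 * the next agaw the iommu actually supports.
 */
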
/* This function only returns the single iommu in a domain. */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	domain->iommu_coherency = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
}

static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->subordinate >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
						     u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)alloc_pgtable_page();
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
				    sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry)
		goto out;

	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

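/*
 * Worked example (informational): each level resolves LEVEL_STRIDE = 9
 * bits of the dma pfn.  At level 1 a pte maps a single 4KiB VT-d page;
 * at level 2, level_to_offset_bits() is 9 and level_size() is 512
 * pages (2MiB); at level 3 it is 512 * 512 pages (1GiB); and so on.
 * pfn_level_offset() extracts the 9-bit table index for a given level.
 */
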
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);
	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
	parent = domain->pgd;

	while (level > 0) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (level == 1)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page();

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = (virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval)) {
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			} else {
				dma_pte_addr(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	return pte;
}

/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte))
			break;
		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte, a tlb flush should be followed */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *first_pte, *pte;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);

	/* we don't need lock here; nobody else touches the iova range */
	while (start_pfn <= last_pfn) {
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, 2);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn++;
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);
	}
}

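/*
 * Informational: the loop above clears leaf ptes in runs bounded by
 * page-table pages -- the inner do/while advances pte linearly until
 * first_pte_in_page() reports a page crossing, then a single
 * domain_flush_cache() covers the whole run instead of flushing each
 * 8-byte pte individually.
 */
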
/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *first_pte, *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	unsigned long tmp;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);

	/* We don't need lock here; nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start_pfn, level);

		/* If we can't even clear one PTE at this level, we're done */
		if (tmp + level_size(level) - 1 > last_pfn)
			return;

		while (tmp + level_size(level) - 1 <= last_pfn) {
			first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
			if (!pte) {
				tmp = align_to_level(tmp + 1, level + 1);
				continue;
			}
			do {
				if (dma_pte_present(pte)) {
					free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
					dma_clear_pte(pte);
				}
				pte++;
				tmp += level_size(level);
			} while (!first_pte_in_page(pte) &&
				 tmp + level_size(level) - 1 <= last_pfn);

			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
		}
		level++;
	}
	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page();
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}

static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	/*
	 * Fall back to domain selective flush if no PSI support or the size
	 * is too big.
	 * PSI requires page size to be 2 ^ x, and the base address is
	 * naturally aligned to the size.
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
					 DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
					 DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, domain ID 0 is reserved for non-present to present
	 * mapping flush. Device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || did)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}

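/*
 * Worked example (informational): for pages = 5,
 * __roundup_pow_of_two(5) = 8 and mask = ilog2(8) = 3, so the PSI
 * request asks the hardware to invalidate a naturally aligned run of
 * 2^3 = 8 VT-d pages covering the mapped range -- over-invalidating
 * slightly rather than issuing several smaller flushes.
 */
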
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		      readl, !(pmen & DMA_PMEN_PRS), pmen);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}

static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("Number of Domains supported <%ld>\n", ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
				 GFP_KERNEL);
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");
		kfree(iommu->domain_ids);
		return -ENOMEM;
	}

	spin_lock_init(&iommu->lock);

	/*
	 * If Caching mode is set, then invalid translations are tagged
	 * with domain id 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}

static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;
	unsigned long flags;

	i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
	for (; i < cap_ndoms(iommu->cap); ) {
		domain = iommu->domains[i];
		clear_bit(i, iommu->domain_ids);

		spin_lock_irqsave(&domain->iommu_lock, flags);
		if (--domain->iommu_count == 0) {
			if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
				vm_domain_exit(domain);
			else
				domain_exit(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags);

		i = find_next_bit(iommu->domain_ids,
				  cap_ndoms(iommu->cap), i+1);
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	if (iommu->irq) {
		set_irq_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);
	}

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}

	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = 0;

	return domain;
}

static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	domain->id = num;
	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, &domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;
	int found = 0;

	spin_lock_irqsave(&iommu->lock, flags);
	ndomains = cap_ndoms(iommu->cap);
	num = find_first_bit(iommu->domain_ids, ndomains);
	for (; num < ndomains; ) {
		if (iommu->domains[num] == domain) {
			found = 1;
			break;
		}
		num = find_next_bit(iommu->domain_ids,
				    cap_ndoms(iommu->cap), num+1);
	}

	if (found) {
		clear_bit(num, iommu->domain_ids);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		iommu->domains[num] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

static void dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
			  &reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
			    IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova)
		printk(KERN_ERR "Reserve IOAPIC range failed\n");

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova)
				printk(KERN_ERR "Reserve iova failed\n");
		}
	}
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}

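/*
 * Worked example (informational): page-table levels resolve 9 bits
 * each above the 12-bit page offset, so the adjusted width must
 * satisfy (width - 12) % 9 == 0.  A guest width of 40 gives
 * r = (40 - 12) % 9 = 1 and is rounded up to 40 + 9 - 1 = 48, while
 * 39 and 48 already fit and are returned unchanged.
 */
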
static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;
	INIT_LIST_HEAD(&domain->devices);

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	domain->iommu_count = 1;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}

static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	domain_remove_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	for_each_active_iommu(iommu, drhd)
		if (test_bit(iommu->seq_id, &domain->iommu_bmp))
			iommu_detach_domain(domain, iommu);

	free_domain_mem(domain);
}

Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001443static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1444 u8 bus, u8 devfn, int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001445{
1446 struct context_entry *context;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001447 unsigned long flags;
Weidong Han5331fe62008-12-08 23:00:00 +08001448 struct intel_iommu *iommu;
Weidong Hanea6606b2008-12-08 23:08:15 +08001449 struct dma_pte *pgd;
1450 unsigned long num;
1451 unsigned long ndomains;
1452 int id;
1453 int agaw;
Yu Zhao93a23a72009-05-18 13:51:37 +08001454 struct device_domain_info *info = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001455
1456 pr_debug("Set context mapping for %02x:%02x.%d\n",
1457 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001458
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001459 BUG_ON(!domain->pgd);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001460 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1461 translation != CONTEXT_TT_MULTI_LEVEL);
Weidong Han5331fe62008-12-08 23:00:00 +08001462
David Woodhouse276dbf992009-04-04 01:45:37 +01001463 iommu = device_to_iommu(segment, bus, devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001464 if (!iommu)
1465 return -ENODEV;
1466
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001467 context = device_to_context_entry(iommu, bus, devfn);
1468 if (!context)
1469 return -ENOMEM;
1470 spin_lock_irqsave(&iommu->lock, flags);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001471 if (context_present(context)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001472 spin_unlock_irqrestore(&iommu->lock, flags);
1473 return 0;
1474 }
1475
Weidong Hanea6606b2008-12-08 23:08:15 +08001476 id = domain->id;
1477 pgd = domain->pgd;
1478
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001479 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1480 domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001481 int found = 0;
1482
1483 /* find an available domain id for this device in iommu */
1484 ndomains = cap_ndoms(iommu->cap);
1485 num = find_first_bit(iommu->domain_ids, ndomains);
1486 for (; num < ndomains; ) {
1487 if (iommu->domains[num] == domain) {
1488 id = num;
1489 found = 1;
1490 break;
1491 }
1492 num = find_next_bit(iommu->domain_ids,
1493 cap_ndoms(iommu->cap), num+1);
1494 }
1495
1496 if (found == 0) {
1497 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1498 if (num >= ndomains) {
1499 spin_unlock_irqrestore(&iommu->lock, flags);
1500 printk(KERN_ERR "IOMMU: no free domain ids\n");
1501 return -EFAULT;
1502 }
1503
1504 set_bit(num, iommu->domain_ids);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001505 set_bit(iommu->seq_id, &domain->iommu_bmp);
Weidong Hanea6606b2008-12-08 23:08:15 +08001506 iommu->domains[num] = domain;
1507 id = num;
1508 }
1509
 1510 /* Skip top levels of the page tables for
 1511 * an iommu which has a smaller agaw than the default.
1512 */
1513 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1514 pgd = phys_to_virt(dma_pte_addr(pgd));
1515 if (!dma_pte_present(pgd)) {
1516 spin_unlock_irqrestore(&iommu->lock, flags);
1517 return -ENOMEM;
1518 }
1519 }
1520 }
1521
1522 context_set_domain_id(context, id);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001523
Yu Zhao93a23a72009-05-18 13:51:37 +08001524 if (translation != CONTEXT_TT_PASS_THROUGH) {
1525 info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
1526 translation = info ? CONTEXT_TT_DEV_IOTLB :
1527 CONTEXT_TT_MULTI_LEVEL;
1528 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001529 /*
1530 * In pass through mode, AW must be programmed to indicate the largest
 1531 * AGAW value supported by hardware, and ASR is ignored by hardware.
1532 */
Yu Zhao93a23a72009-05-18 13:51:37 +08001533 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001534 context_set_address_width(context, iommu->msagaw);
Yu Zhao93a23a72009-05-18 13:51:37 +08001535 else {
1536 context_set_address_root(context, virt_to_phys(pgd));
1537 context_set_address_width(context, iommu->agaw);
1538 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001539
1540 context_set_translation_type(context, translation);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001541 context_set_fault_enable(context);
1542 context_set_present(context);
Weidong Han5331fe62008-12-08 23:00:00 +08001543 domain_flush_cache(domain, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001544
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001545 /*
1546 * It's a non-present to present mapping. If hardware doesn't cache
 1547 * non-present entries, we only need to flush the write-buffer. If it
 1548 * _does_ cache non-present entries, then it does so in the special
1549 * domain #0, which we have to flush:
1550 */
1551 if (cap_caching_mode(iommu->cap)) {
1552 iommu->flush.flush_context(iommu, 0,
1553 (((u16)bus) << 8) | devfn,
1554 DMA_CCMD_MASK_NOBIT,
1555 DMA_CCMD_DEVICE_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001556 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001557 } else {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001558 iommu_flush_write_buffer(iommu);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001559 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001560 iommu_enable_dev_iotlb(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001561 spin_unlock_irqrestore(&iommu->lock, flags);
Weidong Hanc7151a82008-12-08 22:51:37 +08001562
1563 spin_lock_irqsave(&domain->iommu_lock, flags);
1564 if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
1565 domain->iommu_count++;
Sheng Yang58c610b2009-03-18 15:33:05 +08001566 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08001567 }
1568 spin_unlock_irqrestore(&domain->iommu_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001569 return 0;
1570}
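
/*
 * A programmed context entry carries: the domain id, the page-table
 * root and address width (or, in pass-through mode, just msagaw with
 * ASR ignored), the translation type, fault reporting and the present
 * bit.  An illustrative direct call for device 00:02.0 on segment 0:
 *
 *	ret = domain_context_mapping_one(domain, 0, 0x00, PCI_DEVFN(2, 0),
 *					 CONTEXT_TT_MULTI_LEVEL);
 */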
1571
1572static int
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001573domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1574 int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001575{
1576 int ret;
1577 struct pci_dev *tmp, *parent;
1578
David Woodhouse276dbf992009-04-04 01:45:37 +01001579 ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001580 pdev->bus->number, pdev->devfn,
1581 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001582 if (ret)
1583 return ret;
1584
1585 /* dependent device mapping */
1586 tmp = pci_find_upstream_pcie_bridge(pdev);
1587 if (!tmp)
1588 return 0;
1589 /* Secondary interface's bus number and devfn 0 */
1590 parent = pdev->bus->self;
1591 while (parent != tmp) {
David Woodhouse276dbf992009-04-04 01:45:37 +01001592 ret = domain_context_mapping_one(domain,
1593 pci_domain_nr(parent->bus),
1594 parent->bus->number,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001595 parent->devfn, translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001596 if (ret)
1597 return ret;
1598 parent = parent->bus->self;
1599 }
1600 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
1601 return domain_context_mapping_one(domain,
David Woodhouse276dbf992009-04-04 01:45:37 +01001602 pci_domain_nr(tmp->subordinate),
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001603 tmp->subordinate->number, 0,
1604 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001605 else /* this is a legacy PCI bridge */
1606 return domain_context_mapping_one(domain,
David Woodhouse276dbf992009-04-04 01:45:37 +01001607 pci_domain_nr(tmp->bus),
1608 tmp->bus->number,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001609 tmp->devfn,
1610 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001611}
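
/*
 * Hypothetical topology to illustrate the walk above: a device at
 * 05:00.0 behind a PCIe-to-PCI bridge reached through 00:1e.0 gets a
 * context entry for itself, one for every bridge between it and the
 * PCIe bridge, and one for (secondary bus, devfn 0) of the PCIe
 * bridge, so every source-id its DMA may carry hits the same domain.
 */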
1612
Weidong Han5331fe62008-12-08 23:00:00 +08001613static int domain_context_mapped(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001614{
1615 int ret;
1616 struct pci_dev *tmp, *parent;
Weidong Han5331fe62008-12-08 23:00:00 +08001617 struct intel_iommu *iommu;
1618
David Woodhouse276dbf992009-04-04 01:45:37 +01001619 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
1620 pdev->devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001621 if (!iommu)
1622 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001623
David Woodhouse276dbf992009-04-04 01:45:37 +01001624 ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001625 if (!ret)
1626 return ret;
1627 /* dependent device mapping */
1628 tmp = pci_find_upstream_pcie_bridge(pdev);
1629 if (!tmp)
1630 return ret;
1631 /* Secondary interface's bus number and devfn 0 */
1632 parent = pdev->bus->self;
1633 while (parent != tmp) {
Weidong Han8c11e792008-12-08 15:29:22 +08001634 ret = device_context_mapped(iommu, parent->bus->number,
David Woodhouse276dbf992009-04-04 01:45:37 +01001635 parent->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001636 if (!ret)
1637 return ret;
1638 parent = parent->bus->self;
1639 }
1640 if (tmp->is_pcie)
David Woodhouse276dbf992009-04-04 01:45:37 +01001641 return device_context_mapped(iommu, tmp->subordinate->number,
1642 0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001643 else
David Woodhouse276dbf992009-04-04 01:45:37 +01001644 return device_context_mapped(iommu, tmp->bus->number,
1645 tmp->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001646}
1647
David Woodhouse9051aa02009-06-29 12:30:54 +01001648static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1649 struct scatterlist *sg, unsigned long phys_pfn,
1650 unsigned long nr_pages, int prot)
David Woodhousee1605492009-06-29 11:17:38 +01001651{
1652 struct dma_pte *first_pte = NULL, *pte = NULL;
David Woodhouse9051aa02009-06-29 12:30:54 +01001653 phys_addr_t uninitialized_var(pteval);
David Woodhousee1605492009-06-29 11:17:38 +01001654 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
David Woodhouse9051aa02009-06-29 12:30:54 +01001655 unsigned long sg_res;
David Woodhousee1605492009-06-29 11:17:38 +01001656
1657 BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
1658
1659 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1660 return -EINVAL;
1661
1662 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1663
David Woodhouse9051aa02009-06-29 12:30:54 +01001664 if (sg)
1665 sg_res = 0;
1666 else {
1667 sg_res = nr_pages + 1;
1668 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1669 }
1670
David Woodhousee1605492009-06-29 11:17:38 +01001671 while (nr_pages--) {
David Woodhousec85994e2009-07-01 19:21:24 +01001672 uint64_t tmp;
1673
David Woodhousee1605492009-06-29 11:17:38 +01001674 if (!sg_res) {
1675 sg_res = (sg->offset + sg->length + VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT;
1676 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1677 sg->dma_length = sg->length;
1678 pteval = page_to_phys(sg_page(sg)) | prot;
1679 }
1680 if (!pte) {
1681 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn);
1682 if (!pte)
1683 return -ENOMEM;
1684 }
 1685 /* We don't need a lock here; nobody else
 1686 * touches this iova range
1687 */
David Woodhouse7766a3f2009-07-01 20:27:03 +01001688 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
David Woodhousec85994e2009-07-01 19:21:24 +01001689 if (tmp) {
David Woodhouse1bf20f02009-06-29 22:06:43 +01001690 static int dumps = 5;
David Woodhousec85994e2009-07-01 19:21:24 +01001691 printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
1692 iov_pfn, tmp, (unsigned long long)pteval);
David Woodhouse1bf20f02009-06-29 22:06:43 +01001693 if (dumps) {
1694 dumps--;
1695 debug_dma_dump_mappings(NULL);
1696 }
1697 WARN_ON(1);
1698 }
David Woodhousee1605492009-06-29 11:17:38 +01001699 pte++;
David Woodhouse75e6bf92009-07-02 11:21:16 +01001700 if (!nr_pages || first_pte_in_page(pte)) {
David Woodhousee1605492009-06-29 11:17:38 +01001701 domain_flush_cache(domain, first_pte,
1702 (void *)pte - (void *)first_pte);
1703 pte = NULL;
1704 }
1705 iov_pfn++;
1706 pteval += VTD_PAGE_SIZE;
1707 sg_res--;
1708 if (!sg_res)
1709 sg = sg_next(sg);
1710 }
1711 return 0;
1712}
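
/*
 * __domain_mapping() serves both wrappers below: with a scatterlist it
 * walks the sg entries, otherwise it maps nr_pages contiguous frames
 * from phys_pfn.  The cmpxchg64_local() above installs each PTE only
 * if it was previously clear; a non-zero old value means the caller
 * tried to map an IOVA that is already mapped, which is reported as a
 * bug rather than silently overwritten.
 */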
1713
David Woodhouse9051aa02009-06-29 12:30:54 +01001714static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1715 struct scatterlist *sg, unsigned long nr_pages,
1716 int prot)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001717{
David Woodhouse9051aa02009-06-29 12:30:54 +01001718 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
1719}
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001720
David Woodhouse9051aa02009-06-29 12:30:54 +01001721static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1722 unsigned long phys_pfn, unsigned long nr_pages,
1723 int prot)
1724{
1725 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001726}
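
/*
 * Minimal usage sketch (the pfn values are made up): map four
 * contiguous VT-d pages starting at physical pfn 0x2000 read/write at
 * IOVA pfn 0x1000:
 *
 *	ret = domain_pfn_mapping(domain, 0x1000, 0x2000, 4,
 *				 DMA_PTE_READ | DMA_PTE_WRITE);
 */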
1727
Weidong Hanc7151a82008-12-08 22:51:37 +08001728static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001729{
Weidong Hanc7151a82008-12-08 22:51:37 +08001730 if (!iommu)
1731 return;
Weidong Han8c11e792008-12-08 15:29:22 +08001732
1733 clear_context_table(iommu, bus, devfn);
1734 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001735 DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001736 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001737}
1738
1739static void domain_remove_dev_info(struct dmar_domain *domain)
1740{
1741 struct device_domain_info *info;
1742 unsigned long flags;
Weidong Hanc7151a82008-12-08 22:51:37 +08001743 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001744
1745 spin_lock_irqsave(&device_domain_lock, flags);
1746 while (!list_empty(&domain->devices)) {
1747 info = list_entry(domain->devices.next,
1748 struct device_domain_info, link);
1749 list_del(&info->link);
1750 list_del(&info->global);
1751 if (info->dev)
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001752 info->dev->dev.archdata.iommu = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001753 spin_unlock_irqrestore(&device_domain_lock, flags);
1754
Yu Zhao93a23a72009-05-18 13:51:37 +08001755 iommu_disable_dev_iotlb(info);
David Woodhouse276dbf992009-04-04 01:45:37 +01001756 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08001757 iommu_detach_dev(iommu, info->bus, info->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001758 free_devinfo_mem(info);
1759
1760 spin_lock_irqsave(&device_domain_lock, flags);
1761 }
1762 spin_unlock_irqrestore(&device_domain_lock, flags);
1763}
1764
1765/*
1766 * find_domain
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001767 * Note: struct pci_dev->dev.archdata.iommu stores the device's domain info
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001768 */
Kay, Allen M38717942008-09-09 18:37:29 +03001769static struct dmar_domain *
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001770find_domain(struct pci_dev *pdev)
1771{
1772 struct device_domain_info *info;
1773
1774 /* No lock here, assumes no domain exit in normal case */
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001775 info = pdev->dev.archdata.iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001776 if (info)
1777 return info->domain;
1778 return NULL;
1779}
1780
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001781/* domain is initialized */
1782static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1783{
1784 struct dmar_domain *domain, *found = NULL;
1785 struct intel_iommu *iommu;
1786 struct dmar_drhd_unit *drhd;
1787 struct device_domain_info *info, *tmp;
1788 struct pci_dev *dev_tmp;
1789 unsigned long flags;
1790 int bus = 0, devfn = 0;
David Woodhouse276dbf992009-04-04 01:45:37 +01001791 int segment;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001792 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001793
1794 domain = find_domain(pdev);
1795 if (domain)
1796 return domain;
1797
David Woodhouse276dbf992009-04-04 01:45:37 +01001798 segment = pci_domain_nr(pdev->bus);
1799
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001800 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1801 if (dev_tmp) {
1802 if (dev_tmp->is_pcie) {
1803 bus = dev_tmp->subordinate->number;
1804 devfn = 0;
1805 } else {
1806 bus = dev_tmp->bus->number;
1807 devfn = dev_tmp->devfn;
1808 }
1809 spin_lock_irqsave(&device_domain_lock, flags);
1810 list_for_each_entry(info, &device_domain_list, global) {
David Woodhouse276dbf992009-04-04 01:45:37 +01001811 if (info->segment == segment &&
1812 info->bus == bus && info->devfn == devfn) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001813 found = info->domain;
1814 break;
1815 }
1816 }
1817 spin_unlock_irqrestore(&device_domain_lock, flags);
 1818 /* the pcie-pci bridge already has a domain, so use it */
1819 if (found) {
1820 domain = found;
1821 goto found_domain;
1822 }
1823 }
1824
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001825 domain = alloc_domain();
1826 if (!domain)
1827 goto error;
1828
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001829 /* Allocate new domain for the device */
1830 drhd = dmar_find_matched_drhd_unit(pdev);
1831 if (!drhd) {
1832 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
1833 pci_name(pdev));
1834 return NULL;
1835 }
1836 iommu = drhd->iommu;
1837
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001838 ret = iommu_attach_domain(domain, iommu);
1839 if (ret) {
1840 domain_exit(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001841 goto error;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001842 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001843
1844 if (domain_init(domain, gaw)) {
1845 domain_exit(domain);
1846 goto error;
1847 }
1848
1849 /* register pcie-to-pci device */
1850 if (dev_tmp) {
1851 info = alloc_devinfo_mem();
1852 if (!info) {
1853 domain_exit(domain);
1854 goto error;
1855 }
David Woodhouse276dbf992009-04-04 01:45:37 +01001856 info->segment = segment;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001857 info->bus = bus;
1858 info->devfn = devfn;
1859 info->dev = NULL;
1860 info->domain = domain;
1861 /* This domain is shared by devices under p2p bridge */
Weidong Han3b5410e2008-12-08 09:17:15 +08001862 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001863
 1864 /* the pcie-to-pci bridge already has a domain, so use it */
1865 found = NULL;
1866 spin_lock_irqsave(&device_domain_lock, flags);
1867 list_for_each_entry(tmp, &device_domain_list, global) {
David Woodhouse276dbf992009-04-04 01:45:37 +01001868 if (tmp->segment == segment &&
1869 tmp->bus == bus && tmp->devfn == devfn) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001870 found = tmp->domain;
1871 break;
1872 }
1873 }
1874 if (found) {
1875 free_devinfo_mem(info);
1876 domain_exit(domain);
1877 domain = found;
1878 } else {
1879 list_add(&info->link, &domain->devices);
1880 list_add(&info->global, &device_domain_list);
1881 }
1882 spin_unlock_irqrestore(&device_domain_lock, flags);
1883 }
1884
1885found_domain:
1886 info = alloc_devinfo_mem();
1887 if (!info)
1888 goto error;
David Woodhouse276dbf992009-04-04 01:45:37 +01001889 info->segment = segment;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001890 info->bus = pdev->bus->number;
1891 info->devfn = pdev->devfn;
1892 info->dev = pdev;
1893 info->domain = domain;
1894 spin_lock_irqsave(&device_domain_lock, flags);
1895 /* somebody is fast */
1896 found = find_domain(pdev);
1897 if (found != NULL) {
1898 spin_unlock_irqrestore(&device_domain_lock, flags);
1899 if (found != domain) {
1900 domain_exit(domain);
1901 domain = found;
1902 }
1903 free_devinfo_mem(info);
1904 return domain;
1905 }
1906 list_add(&info->link, &domain->devices);
1907 list_add(&info->global, &device_domain_list);
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001908 pdev->dev.archdata.iommu = info;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001909 spin_unlock_irqrestore(&device_domain_lock, flags);
1910 return domain;
1911error:
1912 /* recheck it here, maybe others set it */
1913 return find_domain(pdev);
1914}
1915
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001916static int iommu_identity_mapping;
1917
David Woodhouseb2132032009-06-26 18:50:28 +01001918static int iommu_domain_identity_map(struct dmar_domain *domain,
1919 unsigned long long start,
1920 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001921{
David Woodhousec5395d52009-06-28 16:35:56 +01001922 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
1923 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001924
David Woodhousec5395d52009-06-28 16:35:56 +01001925 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
1926 dma_to_mm_pfn(last_vpfn))) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001927 printk(KERN_ERR "IOMMU: reserve iova failed\n");
David Woodhouseb2132032009-06-26 18:50:28 +01001928 return -ENOMEM;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001929 }
1930
David Woodhousec5395d52009-06-28 16:35:56 +01001931 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
1932 start, end, domain->id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001933 /*
 1934 * The RMRR range might overlap with the physical memory range;
1935 * clear it first
1936 */
David Woodhousec5395d52009-06-28 16:35:56 +01001937 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001938
David Woodhousec5395d52009-06-28 16:35:56 +01001939 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
1940 last_vpfn - first_vpfn + 1,
David Woodhouse61df7442009-06-28 11:55:58 +01001941 DMA_PTE_READ|DMA_PTE_WRITE);
David Woodhouseb2132032009-06-26 18:50:28 +01001942}
1943
1944static int iommu_prepare_identity_map(struct pci_dev *pdev,
1945 unsigned long long start,
1946 unsigned long long end)
1947{
1948 struct dmar_domain *domain;
1949 int ret;
1950
1951 printk(KERN_INFO
1952 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
1953 pci_name(pdev), start, end);
1954
David Woodhousec7ab48d2009-06-26 19:10:36 +01001955 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
David Woodhouseb2132032009-06-26 18:50:28 +01001956 if (!domain)
1957 return -ENOMEM;
1958
1959 ret = iommu_domain_identity_map(domain, start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001960 if (ret)
1961 goto error;
1962
1963 /* context entry init */
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001964 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
David Woodhouseb2132032009-06-26 18:50:28 +01001965 if (ret)
1966 goto error;
1967
1968 return 0;
1969
1970 error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001971 domain_exit(domain);
1972 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001973}
1974
1975static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
1976 struct pci_dev *pdev)
1977{
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001978 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001979 return 0;
1980 return iommu_prepare_identity_map(pdev, rmrr->base_address,
1981 rmrr->end_address + 1);
1982}
1983
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07001984#ifdef CONFIG_DMAR_FLOPPY_WA
1985static inline void iommu_prepare_isa(void)
1986{
1987 struct pci_dev *pdev;
1988 int ret;
1989
1990 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
1991 if (!pdev)
1992 return;
1993
David Woodhousec7ab48d2009-06-26 19:10:36 +01001994 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07001995 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
1996
1997 if (ret)
David Woodhousec7ab48d2009-06-26 19:10:36 +01001998 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
1999 "floppy might not work\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002000
2001}
2002#else
2003static inline void iommu_prepare_isa(void)
2004{
2005 return;
2006}
2007#endif /* !CONFIG_DMAR_FLPY_WA */
2008
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002009/* Initialize each context entry for pass-through translation. */
2010static int __init init_context_pass_through(void)
2011{
2012 struct pci_dev *pdev = NULL;
2013 struct dmar_domain *domain;
2014 int ret;
2015
2016 for_each_pci_dev(pdev) {
2017 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2018 ret = domain_context_mapping(domain, pdev,
2019 CONTEXT_TT_PASS_THROUGH);
2020 if (ret)
2021 return ret;
2022 }
2023 return 0;
2024}
2025
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002026static int md_domain_init(struct dmar_domain *domain, int guest_width);
David Woodhousec7ab48d2009-06-26 19:10:36 +01002027
2028static int __init si_domain_work_fn(unsigned long start_pfn,
2029 unsigned long end_pfn, void *datax)
2030{
2031 int *ret = datax;
2032
2033 *ret = iommu_domain_identity_map(si_domain,
2034 (uint64_t)start_pfn << PAGE_SHIFT,
2035 (uint64_t)end_pfn << PAGE_SHIFT);
2036 return *ret;
2037
2038}
2039
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002040static int si_domain_init(void)
2041{
2042 struct dmar_drhd_unit *drhd;
2043 struct intel_iommu *iommu;
David Woodhousec7ab48d2009-06-26 19:10:36 +01002044 int nid, ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002045
2046 si_domain = alloc_domain();
2047 if (!si_domain)
2048 return -EFAULT;
2049
David Woodhousec7ab48d2009-06-26 19:10:36 +01002050 pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002051
2052 for_each_active_iommu(iommu, drhd) {
2053 ret = iommu_attach_domain(si_domain, iommu);
2054 if (ret) {
2055 domain_exit(si_domain);
2056 return -EFAULT;
2057 }
2058 }
2059
2060 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2061 domain_exit(si_domain);
2062 return -EFAULT;
2063 }
2064
2065 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2066
David Woodhousec7ab48d2009-06-26 19:10:36 +01002067 for_each_online_node(nid) {
2068 work_with_active_regions(nid, si_domain_work_fn, &ret);
2069 if (ret)
2070 return ret;
2071 }
2072
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002073 return 0;
2074}
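
/*
 * After si_domain_init() succeeds, the static identity domain 1:1-maps
 * every active memory region of every online node, so a device later
 * attached to it can DMA to any physical address the kernel considers
 * usable, subject to the domain's address width.
 */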
2075
2076static void domain_remove_one_dev_info(struct dmar_domain *domain,
2077 struct pci_dev *pdev);
2078static int identity_mapping(struct pci_dev *pdev)
2079{
2080 struct device_domain_info *info;
2081
2082 if (likely(!iommu_identity_mapping))
2083 return 0;
2084
2085
2086 list_for_each_entry(info, &si_domain->devices, link)
2087 if (info->dev == pdev)
2088 return 1;
2089 return 0;
2090}
2091
2092static int domain_add_dev_info(struct dmar_domain *domain,
2093 struct pci_dev *pdev)
2094{
2095 struct device_domain_info *info;
2096 unsigned long flags;
2097
2098 info = alloc_devinfo_mem();
2099 if (!info)
2100 return -ENOMEM;
2101
2102 info->segment = pci_domain_nr(pdev->bus);
2103 info->bus = pdev->bus->number;
2104 info->devfn = pdev->devfn;
2105 info->dev = pdev;
2106 info->domain = domain;
2107
2108 spin_lock_irqsave(&device_domain_lock, flags);
2109 list_add(&info->link, &domain->devices);
2110 list_add(&info->global, &device_domain_list);
2111 pdev->dev.archdata.iommu = info;
2112 spin_unlock_irqrestore(&device_domain_lock, flags);
2113
2114 return 0;
2115}
2116
David Woodhouse6941af22009-07-04 18:24:27 +01002117static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
2118{
2119 if (iommu_identity_mapping == 2)
2120 return IS_GFX_DEVICE(pdev);
2121
David Woodhouse3dfc8132009-07-04 19:11:08 +01002122 /*
2123 * We want to start off with all devices in the 1:1 domain, and
2124 * take them out later if we find they can't access all of memory.
2125 *
2126 * However, we can't do this for PCI devices behind bridges,
2127 * because all PCI devices behind the same bridge will end up
2128 * with the same source-id on their transactions.
2129 *
2130 * Practically speaking, we can't change things around for these
2131 * devices at run-time, because we can't be sure there'll be no
2132 * DMA transactions in flight for any of their siblings.
2133 *
2134 * So PCI devices (unless they're on the root bus) as well as
2135 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2136 * the 1:1 domain, just in _case_ one of their siblings turns out
2137 * not to be able to map all of memory.
2138 */
2139 if (!pdev->is_pcie) {
2140 if (!pci_is_root_bus(pdev->bus))
2141 return 0;
2142 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2143 return 0;
2144 } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
2145 return 0;
2146
2147 /*
2148 * At boot time, we don't yet know if devices will be 64-bit capable.
2149 * Assume that they will -- if they turn out not to be, then we can
2150 * take them out of the 1:1 domain later.
2151 */
David Woodhouse6941af22009-07-04 18:24:27 +01002152 if (!startup)
2153 return pdev->dma_mask > DMA_BIT_MASK(32);
2154
2155 return 1;
2156}
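
/*
 * In short: iommu_identity_mapping == 2 restricts the 1:1 domain to
 * graphics devices.  Otherwise a device qualifies if it is PCIe (but
 * not a PCIe-to-PCI bridge) or sits directly on the root bus, and,
 * once drivers have had a chance to set a DMA mask, only if that mask
 * covers more than 32 bits.
 */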
2157
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002158static int iommu_prepare_static_identity_mapping(void)
2159{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002160 struct pci_dev *pdev = NULL;
2161 int ret;
2162
2163 ret = si_domain_init();
2164 if (ret)
2165 return -EFAULT;
2166
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002167 for_each_pci_dev(pdev) {
David Woodhouse6941af22009-07-04 18:24:27 +01002168 if (iommu_should_identity_map(pdev, 1)) {
David Woodhouse62edf5d2009-07-04 10:59:46 +01002169 printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
2170 pci_name(pdev));
David Woodhousec7ab48d2009-06-26 19:10:36 +01002171
David Woodhouse62edf5d2009-07-04 10:59:46 +01002172 ret = domain_context_mapping(si_domain, pdev,
2173 CONTEXT_TT_MULTI_LEVEL);
2174 if (ret)
2175 return ret;
2176 ret = domain_add_dev_info(si_domain, pdev);
2177 if (ret)
2178 return ret;
2179 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002180 }
2181
2182 return 0;
2183}
2184
2185int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002186{
2187 struct dmar_drhd_unit *drhd;
2188 struct dmar_rmrr_unit *rmrr;
2189 struct pci_dev *pdev;
2190 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07002191 int i, ret;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002192 int pass_through = 1;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002193
2194 /*
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002195 * If pass through cannot be enabled, the iommu falls back to identity
2196 * mapping.
2197 */
2198 if (iommu_pass_through)
2199 iommu_identity_mapping = 1;
2200
2201 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002202 * for each drhd
2203 * allocate root
2204 * initialize and program root entry to not present
2205 * endfor
2206 */
2207 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08002208 g_num_of_iommus++;
2209 /*
 2210 * lock not needed as this is only incremented in the
 2211 * single-threaded kernel __init code path; all other
 2212 * accesses are read-only
2213 */
2214 }
2215
Weidong Hand9630fe2008-12-08 11:06:32 +08002216 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2217 GFP_KERNEL);
2218 if (!g_iommus) {
2219 printk(KERN_ERR "Allocating global iommu array failed\n");
2220 ret = -ENOMEM;
2221 goto error;
2222 }
2223
mark gross80b20dd2008-04-18 13:53:58 -07002224 deferred_flush = kzalloc(g_num_of_iommus *
2225 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2226 if (!deferred_flush) {
Weidong Hand9630fe2008-12-08 11:06:32 +08002227 kfree(g_iommus);
mark gross5e0d2a62008-03-04 15:22:08 -08002228 ret = -ENOMEM;
2229 goto error;
2230 }
2231
mark gross5e0d2a62008-03-04 15:22:08 -08002232 for_each_drhd_unit(drhd) {
2233 if (drhd->ignored)
2234 continue;
Suresh Siddha1886e8a2008-07-10 11:16:37 -07002235
2236 iommu = drhd->iommu;
Weidong Hand9630fe2008-12-08 11:06:32 +08002237 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002238
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002239 ret = iommu_init_domains(iommu);
2240 if (ret)
2241 goto error;
2242
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002243 /*
2244 * TBD:
2245 * we could share the same root & context tables
 2246 * among all IOMMUs. Need to split it later.
2247 */
2248 ret = iommu_alloc_root_entry(iommu);
2249 if (ret) {
2250 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2251 goto error;
2252 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002253 if (!ecap_pass_through(iommu->ecap))
2254 pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002255 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002256 if (iommu_pass_through)
2257 if (!pass_through) {
2258 printk(KERN_INFO
2259 "Pass Through is not supported by hardware.\n");
2260 iommu_pass_through = 0;
2261 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002262
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002263 /*
2264 * Start from the sane iommu hardware state.
2265 */
Youquan Songa77b67d2008-10-16 16:31:56 -07002266 for_each_drhd_unit(drhd) {
2267 if (drhd->ignored)
2268 continue;
2269
2270 iommu = drhd->iommu;
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002271
2272 /*
2273 * If the queued invalidation is already initialized by us
2274 * (for example, while enabling interrupt-remapping) then
2275 * we got the things already rolling from a sane state.
2276 */
2277 if (iommu->qi)
2278 continue;
2279
2280 /*
2281 * Clear any previous faults.
2282 */
2283 dmar_fault(-1, iommu);
2284 /*
2285 * Disable queued invalidation if supported and already enabled
2286 * before OS handover.
2287 */
2288 dmar_disable_qi(iommu);
2289 }
2290
2291 for_each_drhd_unit(drhd) {
2292 if (drhd->ignored)
2293 continue;
2294
2295 iommu = drhd->iommu;
2296
Youquan Songa77b67d2008-10-16 16:31:56 -07002297 if (dmar_enable_qi(iommu)) {
2298 /*
2299 * Queued Invalidate not enabled, use Register Based
2300 * Invalidate
2301 */
2302 iommu->flush.flush_context = __iommu_flush_context;
2303 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2304 printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002305 "invalidation\n",
2306 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002307 } else {
2308 iommu->flush.flush_context = qi_flush_context;
2309 iommu->flush.flush_iotlb = qi_flush_iotlb;
2310 printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002311 "invalidation\n",
2312 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002313 }
2314 }
2315
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002316 /*
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002317 * If pass through is set and enabled, the context entries of all pci
 2318 * devices are initialized with the pass through translation type.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002319 */
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002320 if (iommu_pass_through) {
2321 ret = init_context_pass_through();
2322 if (ret) {
2323 printk(KERN_ERR "IOMMU: Pass through init failed.\n");
2324 iommu_pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002325 }
2326 }
2327
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002328 /*
 2329 * If pass through is not set or not enabled, set up context entries for
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002330 * identity mappings for rmrr, gfx and isa, and possibly fall back to static
2331 * identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002332 */
2333 if (!iommu_pass_through) {
David Woodhouse62edf5d2009-07-04 10:59:46 +01002334#ifdef CONFIG_DMAR_BROKEN_GFX_WA
2335 if (!iommu_identity_mapping)
2336 iommu_identity_mapping = 2;
2337#endif
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002338 if (iommu_identity_mapping)
2339 iommu_prepare_static_identity_mapping();
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002340 /*
2341 * For each rmrr
2342 * for each dev attached to rmrr
2343 * do
2344 * locate drhd for dev, alloc domain for dev
2345 * allocate free domain
2346 * allocate page table entries for rmrr
2347 * if context not allocated for bus
2348 * allocate and init context
2349 * set present in root table for this bus
2350 * init context with domain, translation etc
2351 * endfor
2352 * endfor
2353 */
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002354 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002355 for_each_rmrr_units(rmrr) {
2356 for (i = 0; i < rmrr->devices_cnt; i++) {
2357 pdev = rmrr->devices[i];
2358 /*
 2359 * some BIOSes list non-existent devices in the
 2360 * DMAR table.
2361 */
2362 if (!pdev)
2363 continue;
2364 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2365 if (ret)
2366 printk(KERN_ERR
2367 "IOMMU: mapping reserved region failed\n");
2368 }
2369 }
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07002370
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002371 iommu_prepare_isa();
2372 }
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002373
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002374 /*
2375 * for each drhd
2376 * enable fault log
2377 * global invalidate context cache
2378 * global invalidate iotlb
2379 * enable translation
2380 */
2381 for_each_drhd_unit(drhd) {
2382 if (drhd->ignored)
2383 continue;
2384 iommu = drhd->iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002385
2386 iommu_flush_write_buffer(iommu);
2387
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002388 ret = dmar_set_interrupt(iommu);
2389 if (ret)
2390 goto error;
2391
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002392 iommu_set_root_entry(iommu);
2393
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002394 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002395 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
mark grossf8bab732008-02-08 04:18:38 -08002396 iommu_disable_protect_mem_regions(iommu);
2397
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002398 ret = iommu_enable_translation(iommu);
2399 if (ret)
2400 goto error;
2401 }
2402
2403 return 0;
2404error:
2405 for_each_drhd_unit(drhd) {
2406 if (drhd->ignored)
2407 continue;
2408 iommu = drhd->iommu;
2409 free_iommu(iommu);
2410 }
Weidong Hand9630fe2008-12-08 11:06:32 +08002411 kfree(g_iommus);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002412 return ret;
2413}
2414
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002415/* Returns a number of VTD pages, rounded up to span whole MM pages */
David Woodhouse88cb6a72009-06-28 15:03:06 +01002416static inline unsigned long aligned_nrpages(unsigned long host_addr,
2417 size_t size)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002418{
David Woodhouse88cb6a72009-06-28 15:03:06 +01002419 host_addr &= ~PAGE_MASK;
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002420 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002421}
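
/*
 * Worked example, assuming 4KiB MM and VT-d pages:
 * aligned_nrpages(0x1234, 0x2000) keeps the in-page offset 0x234,
 * rounds 0x234 + 0x2000 up to 0x3000, and returns 3 pages.
 */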
2422
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002423/* This takes a number of _MM_ pages, not VTD pages */
David Woodhouse875764d2009-06-28 21:20:51 +01002424static struct iova *intel_alloc_iova(struct device *dev,
2425 struct dmar_domain *domain,
2426 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002427{
2428 struct pci_dev *pdev = to_pci_dev(dev);
2429 struct iova *iova = NULL;
2430
David Woodhouse875764d2009-06-28 21:20:51 +01002431 /* Restrict dma_mask to the width that the iommu can handle */
2432 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2433
2434 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002435 /*
2436 * First try to allocate an io virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07002437 * DMA_BIT_MASK(32) and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08002438 * from higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002439 */
David Woodhouse875764d2009-06-28 21:20:51 +01002440 iova = alloc_iova(&domain->iovad, nrpages,
2441 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2442 if (iova)
2443 return iova;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002444 }
David Woodhouse875764d2009-06-28 21:20:51 +01002445 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2446 if (unlikely(!iova)) {
 2447 printk(KERN_ERR "Allocating %ld-page iova for %s failed\n",
2448 nrpages, pci_name(pdev));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002449 return NULL;
2450 }
2451
2452 return iova;
2453}
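
/*
 * Usage sketch (hypothetical values): a device with a 64-bit DMA mask
 * asking for eight MM pages first tries the space below 4GiB and only
 * then the full range allowed by min(domain limit, mask):
 *
 *	iova = intel_alloc_iova(dev, domain, 8, DMA_BIT_MASK(64));
 */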
2454
David Woodhouse147202a2009-07-07 19:43:20 +01002455static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002456{
2457 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002458 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002459
2460 domain = get_domain_for_dev(pdev,
2461 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2462 if (!domain) {
2463 printk(KERN_ERR
2464 "Allocating domain for %s failed", pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002465 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002466 }
2467
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002468 /* make sure context mapping is ok */
Weidong Han5331fe62008-12-08 23:00:00 +08002469 if (unlikely(!domain_context_mapped(pdev))) {
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002470 ret = domain_context_mapping(domain, pdev,
2471 CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002472 if (ret) {
2473 printk(KERN_ERR
2474 "Domain context map for %s failed",
2475 pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002476 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002477 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002478 }
2479
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002480 return domain;
2481}
2482
David Woodhouse147202a2009-07-07 19:43:20 +01002483static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
2484{
2485 struct device_domain_info *info;
2486
2487 /* No lock here, assumes no domain exit in normal case */
2488 info = dev->dev.archdata.iommu;
2489 if (likely(info))
2490 return info->domain;
2491
2492 return __get_valid_domain_for_dev(dev);
2493}
2494
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002495static int iommu_dummy(struct pci_dev *pdev)
2496{
2497 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2498}
2499
2500/* Check if the pdev needs to go through non-identity map and unmap process.*/
David Woodhouse73676832009-07-04 14:08:36 +01002501static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002502{
David Woodhouse73676832009-07-04 14:08:36 +01002503 struct pci_dev *pdev;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002504 int found;
2505
David Woodhouse73676832009-07-04 14:08:36 +01002506 if (unlikely(dev->bus != &pci_bus_type))
2507 return 1;
2508
2509 pdev = to_pci_dev(dev);
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002510 if (iommu_dummy(pdev))
2511 return 1;
2512
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002513 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002514 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002515
2516 found = identity_mapping(pdev);
2517 if (found) {
David Woodhouse6941af22009-07-04 18:24:27 +01002518 if (iommu_should_identity_map(pdev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002519 return 1;
2520 else {
2521 /*
 2522 * the 32-bit DMA device is removed from si_domain and falls back
2523 * to non-identity mapping.
2524 */
2525 domain_remove_one_dev_info(si_domain, pdev);
2526 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2527 pci_name(pdev));
2528 return 0;
2529 }
2530 } else {
2531 /*
 2532 * When a 64-bit DMA device is detached from a vm, the device
2533 * is put into si_domain for identity mapping.
2534 */
David Woodhouse6941af22009-07-04 18:24:27 +01002535 if (iommu_should_identity_map(pdev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002536 int ret;
2537 ret = domain_add_dev_info(si_domain, pdev);
David Woodhouse1b7bc0a2009-07-04 10:49:46 +01002538 if (ret)
2539 return 0;
2540 ret = domain_context_mapping(si_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002541 if (!ret) {
2542 printk(KERN_INFO "64bit %s uses identity mapping\n",
2543 pci_name(pdev));
2544 return 1;
2545 }
2546 }
2547 }
2548
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002549 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002550}
2551
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002552static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2553 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002554{
2555 struct pci_dev *pdev = to_pci_dev(hwdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002556 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002557 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002558 struct iova *iova;
2559 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002560 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08002561 struct intel_iommu *iommu;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002562
2563 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002564
David Woodhouse73676832009-07-04 14:08:36 +01002565 if (iommu_no_mapping(hwdev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002566 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002567
2568 domain = get_valid_domain_for_dev(pdev);
2569 if (!domain)
2570 return 0;
2571
Weidong Han8c11e792008-12-08 15:29:22 +08002572 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01002573 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002574
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002575 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
2576 pdev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002577 if (!iova)
2578 goto error;
2579
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002580 /*
2581 * Check if DMAR supports zero-length reads on write only
2582 * mappings..
2583 */
2584 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08002585 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002586 prot |= DMA_PTE_READ;
2587 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2588 prot |= DMA_PTE_WRITE;
2589 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002590 * paddr - (paddr + size) might be partial page, we should map the whole
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002591 * page. Note: if two part of one page are separately mapped, we
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002592 * might have two guest_addr mapping to the same host paddr, but this
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002593 * is not a big problem
2594 */
David Woodhouse0ab36de2009-06-28 14:01:43 +01002595 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
2596 paddr >> VTD_PAGE_SHIFT, size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002597 if (ret)
2598 goto error;
2599
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002600 /* it's a non-present to present mapping. Only flush if caching mode */
2601 if (cap_caching_mode(iommu->cap))
David Woodhouse03d6a242009-06-28 15:33:46 +01002602 iommu_flush_iotlb_psi(iommu, 0, mm_to_dma_pfn(iova->pfn_lo), size);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002603 else
Weidong Han8c11e792008-12-08 15:29:22 +08002604 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002605
David Woodhouse03d6a242009-06-28 15:33:46 +01002606 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2607 start_paddr += paddr & ~PAGE_MASK;
2608 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002609
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002610error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002611 if (iova)
2612 __free_iova(&domain->iovad, iova);
David Woodhouse4cf2e752009-02-11 17:23:43 +00002613 printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002614 pci_name(pdev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002615 return 0;
2616}
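
/*
 * Rough flow of __intel_map_single(): identity-mapped or dummy devices
 * get paddr back untouched; everyone else gets an IOVA from
 * intel_alloc_iova(), PTEs from domain_pfn_mapping(), an IOTLB flush
 * only when caching mode requires one, and finally the IOVA plus the
 * original in-page offset as the DMA address.
 */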
2617
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002618static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2619 unsigned long offset, size_t size,
2620 enum dma_data_direction dir,
2621 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002622{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002623 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2624 dir, to_pci_dev(dev)->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002625}
2626
mark gross5e0d2a62008-03-04 15:22:08 -08002627static void flush_unmaps(void)
2628{
mark gross80b20dd2008-04-18 13:53:58 -07002629 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08002630
mark gross5e0d2a62008-03-04 15:22:08 -08002631 timer_on = 0;
2632
2633 /* just flush them all */
2634 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08002635 struct intel_iommu *iommu = g_iommus[i];
2636 if (!iommu)
2637 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002638
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002639 if (!deferred_flush[i].next)
2640 continue;
2641
2642 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08002643 DMA_TLB_GLOBAL_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002644 for (j = 0; j < deferred_flush[i].next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08002645 unsigned long mask;
2646 struct iova *iova = deferred_flush[i].iova[j];
2647
2648 mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
2649 mask = ilog2(mask >> VTD_PAGE_SHIFT);
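			/*
			 * e.g. an iova spanning 8 pages gives
			 * mask = ilog2(8) = 3, the log2-pages form the
			 * device-IOTLB invalidation expects (assuming
			 * PAGE_SHIFT == VTD_PAGE_SHIFT here).
			 */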
2650 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2651 iova->pfn_lo << PAGE_SHIFT, mask);
2652 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
mark gross80b20dd2008-04-18 13:53:58 -07002653 }
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002654 deferred_flush[i].next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002655 }
2656
mark gross5e0d2a62008-03-04 15:22:08 -08002657 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002658}
2659
2660static void flush_unmaps_timeout(unsigned long data)
2661{
mark gross80b20dd2008-04-18 13:53:58 -07002662 unsigned long flags;
2663
2664 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002665 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07002666 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002667}
2668
2669static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2670{
2671 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07002672 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08002673 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08002674
2675 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07002676 if (list_size == HIGH_WATER_MARK)
2677 flush_unmaps();
2678
Weidong Han8c11e792008-12-08 15:29:22 +08002679 iommu = domain_get_iommu(dom);
2680 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002681
mark gross80b20dd2008-04-18 13:53:58 -07002682 next = deferred_flush[iommu_id].next;
2683 deferred_flush[iommu_id].domain[next] = dom;
2684 deferred_flush[iommu_id].iova[next] = iova;
2685 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08002686
2687 if (!timer_on) {
2688 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2689 timer_on = 1;
2690 }
2691 list_size++;
2692 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2693}
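
/*
 * The deferred scheme above is what makes non-strict unmap cheap:
 * freed IOVAs are parked per-iommu and released in batches from
 * flush_unmaps(), either when the list reaches HIGH_WATER_MARK or when
 * the 10ms timer fires, so a single global IOTLB flush covers many
 * unmaps at the cost of a short window where stale mappings survive.
 */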
2694
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002695static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2696 size_t size, enum dma_data_direction dir,
2697 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002698{
2699 struct pci_dev *pdev = to_pci_dev(dev);
2700 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01002701 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002702 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08002703 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002704
David Woodhouse73676832009-07-04 14:08:36 +01002705 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002706 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002707
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002708 domain = find_domain(pdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002709 BUG_ON(!domain);
2710
Weidong Han8c11e792008-12-08 15:29:22 +08002711 iommu = domain_get_iommu(domain);
2712
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002713 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
David Woodhouse85b98272009-07-01 19:27:53 +01002714 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
2715 (unsigned long long)dev_addr))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002716 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002717
David Woodhoused794dc92009-06-28 00:27:49 +01002718 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2719 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002720
David Woodhoused794dc92009-06-28 00:27:49 +01002721 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
2722 pci_name(pdev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002723
2724 /* clear the whole page */
David Woodhoused794dc92009-06-28 00:27:49 +01002725 dma_pte_clear_range(domain, start_pfn, last_pfn);
2726
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002727 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01002728 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2729
mark gross5e0d2a62008-03-04 15:22:08 -08002730 if (intel_iommu_strict) {
David Woodhouse03d6a242009-06-28 15:33:46 +01002731 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
David Woodhoused794dc92009-06-28 00:27:49 +01002732 last_pfn - start_pfn + 1);
mark gross5e0d2a62008-03-04 15:22:08 -08002733 /* free iova */
2734 __free_iova(&domain->iovad, iova);
2735 } else {
2736 add_unmap(domain, iova);
2737 /*
 2738 * queue up the release of the unmap to save the roughly 1/6th of
 2739 * the cpu time used up by the iotlb flush operation...
2740 */
mark gross5e0d2a62008-03-04 15:22:08 -08002741 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002742}
2743
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002744static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
2745 int dir)
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002746{
2747 intel_unmap_page(dev, dev_addr, size, dir, NULL);
2748}
2749
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002750static void *intel_alloc_coherent(struct device *hwdev, size_t size,
2751 dma_addr_t *dma_handle, gfp_t flags)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002752{
2753 void *vaddr;
2754 int order;
2755
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002756 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002757 order = get_order(size);
2758 flags &= ~(GFP_DMA | GFP_DMA32);
2759
2760 vaddr = (void *)__get_free_pages(flags, order);
2761 if (!vaddr)
2762 return NULL;
2763 memset(vaddr, 0, size);
2764
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002765 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2766 DMA_BIDIRECTIONAL,
2767 hwdev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002768 if (*dma_handle)
2769 return vaddr;
2770 free_pages((unsigned long)vaddr, order);
2771 return NULL;
2772}
2773
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002774static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2775 dma_addr_t dma_handle)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002776{
2777 int order;
2778
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002779 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002780 order = get_order(size);
2781
2782 intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
2783 free_pages((unsigned long)vaddr, order);
2784}
2785
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002786static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2787 int nelems, enum dma_data_direction dir,
2788 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002789{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002790 struct pci_dev *pdev = to_pci_dev(hwdev);
2791 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01002792 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002793 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08002794 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002795
David Woodhouse73676832009-07-04 14:08:36 +01002796 if (iommu_no_mapping(hwdev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002797 return;
2798
2799 domain = find_domain(pdev);
Weidong Han8c11e792008-12-08 15:29:22 +08002800 BUG_ON(!domain);
2801
2802 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002803
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002804 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
David Woodhouse85b98272009-07-01 19:27:53 +01002805 if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
2806 (unsigned long long)sglist[0].dma_address))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002807 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002808
David Woodhoused794dc92009-06-28 00:27:49 +01002809 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2810 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002811
2812 /* clear the whole page */
David Woodhoused794dc92009-06-28 00:27:49 +01002813 dma_pte_clear_range(domain, start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002814
David Woodhoused794dc92009-06-28 00:27:49 +01002815 /* free page tables */
2816 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2817
David Woodhouseacea0012009-07-14 01:55:11 +01002818 if (intel_iommu_strict) {
2819 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
2820 last_pfn - start_pfn + 1);
2821 /* free iova */
2822 __free_iova(&domain->iovad, iova);
2823 } else {
2824 add_unmap(domain, iova);
2825 /*
	2826	 * queue up the release of the unmap; this saves the roughly
	2827	 * 1/6th of the CPU time that the iotlb flush operation uses...
2828 */
2829 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002830}
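
/*
 * intel_unmap_sg() above follows the same policy as intel_unmap_page():
 * with intel_iommu=strict on the command line, the IOTLB is invalidated
 * synchronously and the IOVA freed at once; otherwise the IOVA is queued
 * with add_unmap() and reaped in batches by the deferred-flush machinery.
 */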
2831
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002832static int intel_nontranslate_map_sg(struct device *hwdev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002833 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002834{
2835 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002836 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002837
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002838 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02002839 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00002840 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002841 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002842 }
2843 return nelems;
2844}
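
/*
 * intel_nontranslate_map_sg() above is the fallback for devices that
 * bypass translation (iommu_no_mapping()): each scatterlist entry's bus
 * address is simply its CPU physical address, so no IOVA allocation or
 * page-table work is needed.
 */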
2845
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002846static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
2847 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002848{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002849 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002850 struct pci_dev *pdev = to_pci_dev(hwdev);
2851 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002852 size_t size = 0;
2853 int prot = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002855 struct iova *iova = NULL;
2856 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002857 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01002858 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08002859 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002860
2861 BUG_ON(dir == DMA_NONE);
David Woodhouse73676832009-07-04 14:08:36 +01002862 if (iommu_no_mapping(hwdev))
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002863 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002864
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002865 domain = get_valid_domain_for_dev(pdev);
2866 if (!domain)
2867 return 0;
2868
Weidong Han8c11e792008-12-08 15:29:22 +08002869 iommu = domain_get_iommu(domain);
2870
David Woodhouseb536d242009-06-28 14:49:31 +01002871 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01002872 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002873
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002874 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
2875 pdev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002876 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002877 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002878 return 0;
2879 }
2880
2881 /*
2882 * Check if DMAR supports zero-length reads on write only
2883 * mappings..
2884 */
	2885	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
Weidong Han8c11e792008-12-08 15:29:22 +08002886 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002887 prot |= DMA_PTE_READ;
2888 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2889 prot |= DMA_PTE_WRITE;
2890
David Woodhouseb536d242009-06-28 14:49:31 +01002891 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
David Woodhousee1605492009-06-29 11:17:38 +01002892
2893 ret = domain_sg_mapping(domain, start_vpfn, sglist, mm_to_dma_pfn(size), prot);
2894 if (unlikely(ret)) {
2895 /* clear the page */
2896 dma_pte_clear_range(domain, start_vpfn,
2897 start_vpfn + size - 1);
2898 /* free page tables */
2899 dma_pte_free_pagetable(domain, start_vpfn,
2900 start_vpfn + size - 1);
2901 /* free iova */
2902 __free_iova(&domain->iovad, iova);
2903 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002904 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002905
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002906 /* it's a non-present to present mapping. Only flush if caching mode */
2907 if (cap_caching_mode(iommu->cap))
David Woodhouse03d6a242009-06-28 15:33:46 +01002908		iommu_flush_iotlb_psi(iommu, 0, start_vpfn, mm_to_dma_pfn(size));
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002909 else
Weidong Han8c11e792008-12-08 15:29:22 +08002910 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002911
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002912 return nelems;
2913}
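
/*
 * Mapping an sglist thus costs one IOVA allocation covering the whole
 * list, one page-table pass in domain_sg_mapping(), and one invalidation:
 * a page-selective IOTLB flush when the IOMMU caches not-present entries
 * (caching mode), or merely a write-buffer flush otherwise.
 */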
2914
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09002915static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
2916{
2917 return !dma_addr;
2918}
2919
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09002920struct dma_map_ops intel_dma_ops = {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002921 .alloc_coherent = intel_alloc_coherent,
2922 .free_coherent = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002923 .map_sg = intel_map_sg,
2924 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002925 .map_page = intel_map_page,
2926 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09002927 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002928};
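
/*
 * intel_dma_ops becomes the global 'dma_ops' in intel_iommu_init() below
 * (unless pass-through translation is in use), so dma_map_single(),
 * dma_map_sg() and friends on this platform all resolve to the routines
 * above and go through the VT-d page tables.
 */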
2929
2930static inline int iommu_domain_cache_init(void)
2931{
2932 int ret = 0;
2933
2934 iommu_domain_cache = kmem_cache_create("iommu_domain",
2935 sizeof(struct dmar_domain),
2936 0,
2937 SLAB_HWCACHE_ALIGN,
2939 NULL);
2940 if (!iommu_domain_cache) {
2941 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
2942 ret = -ENOMEM;
2943 }
2944
2945 return ret;
2946}
2947
2948static inline int iommu_devinfo_cache_init(void)
2949{
2950 int ret = 0;
2951
2952 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
2953 sizeof(struct device_domain_info),
2954 0,
2955 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002956 NULL);
2957 if (!iommu_devinfo_cache) {
2958 printk(KERN_ERR "Couldn't create devinfo cache\n");
2959 ret = -ENOMEM;
2960 }
2961
2962 return ret;
2963}
2964
2965static inline int iommu_iova_cache_init(void)
2966{
2967 int ret = 0;
2968
2969 iommu_iova_cache = kmem_cache_create("iommu_iova",
2970 sizeof(struct iova),
2971 0,
2972 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002973 NULL);
2974 if (!iommu_iova_cache) {
2975 printk(KERN_ERR "Couldn't create iova cache\n");
2976 ret = -ENOMEM;
2977 }
2978
2979 return ret;
2980}
2981
2982static int __init iommu_init_mempool(void)
2983{
2984 int ret;
2985 ret = iommu_iova_cache_init();
2986 if (ret)
2987 return ret;
2988
2989 ret = iommu_domain_cache_init();
2990 if (ret)
2991 goto domain_error;
2992
2993 ret = iommu_devinfo_cache_init();
2994 if (!ret)
2995 return ret;
2996
2997 kmem_cache_destroy(iommu_domain_cache);
2998domain_error:
2999 kmem_cache_destroy(iommu_iova_cache);
3000
3001 return -ENOMEM;
3002}
3003
3004static void __init iommu_exit_mempool(void)
3005{
3006 kmem_cache_destroy(iommu_devinfo_cache);
3007 kmem_cache_destroy(iommu_domain_cache);
3008 kmem_cache_destroy(iommu_iova_cache);
3009
3010}
3011
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003012static void __init init_no_remapping_devices(void)
3013{
3014 struct dmar_drhd_unit *drhd;
3015
3016 for_each_drhd_unit(drhd) {
3017 if (!drhd->include_all) {
3018 int i;
3019 for (i = 0; i < drhd->devices_cnt; i++)
3020 if (drhd->devices[i] != NULL)
3021 break;
3022 /* ignore DMAR unit if no pci devices exist */
3023 if (i == drhd->devices_cnt)
3024 drhd->ignored = 1;
3025 }
3026 }
3027
3028 if (dmar_map_gfx)
3029 return;
3030
3031 for_each_drhd_unit(drhd) {
3032 int i;
3033 if (drhd->ignored || drhd->include_all)
3034 continue;
3035
3036 for (i = 0; i < drhd->devices_cnt; i++)
3037 if (drhd->devices[i] &&
3038 !IS_GFX_DEVICE(drhd->devices[i]))
3039 break;
3040
3041 if (i < drhd->devices_cnt)
3042 continue;
3043
3044 /* bypass IOMMU if it is just for gfx devices */
3045 drhd->ignored = 1;
3046 for (i = 0; i < drhd->devices_cnt; i++) {
3047 if (!drhd->devices[i])
3048 continue;
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07003049 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003050 }
3051 }
3052}
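
/*
 * A DRHD unit whose device scope covers only graphics devices is marked
 * ignored here when dmar_map_gfx is clear; tagging each such device with
 * DUMMY_DEVICE_DOMAIN_INFO is what makes iommu_no_mapping() steer it down
 * the non-translated DMA paths above.
 */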
3053
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003054#ifdef CONFIG_SUSPEND
3055static int init_iommu_hw(void)
3056{
3057 struct dmar_drhd_unit *drhd;
3058 struct intel_iommu *iommu = NULL;
3059
3060 for_each_active_iommu(iommu, drhd)
3061 if (iommu->qi)
3062 dmar_reenable_qi(iommu);
3063
3064 for_each_active_iommu(iommu, drhd) {
3065 iommu_flush_write_buffer(iommu);
3066
3067 iommu_set_root_entry(iommu);
3068
3069 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003070 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003071 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003072 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003073 iommu_disable_protect_mem_regions(iommu);
3074 iommu_enable_translation(iommu);
3075 }
3076
3077 return 0;
3078}
3079
3080static void iommu_flush_all(void)
3081{
3082 struct dmar_drhd_unit *drhd;
3083 struct intel_iommu *iommu;
3084
3085 for_each_active_iommu(iommu, drhd) {
3086 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003087 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003088 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003089 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003090 }
3091}
3092
3093static int iommu_suspend(struct sys_device *dev, pm_message_t state)
3094{
3095 struct dmar_drhd_unit *drhd;
3096 struct intel_iommu *iommu = NULL;
3097 unsigned long flag;
3098
3099 for_each_active_iommu(iommu, drhd) {
3100 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3101 GFP_ATOMIC);
3102 if (!iommu->iommu_state)
3103 goto nomem;
3104 }
3105
3106 iommu_flush_all();
3107
3108 for_each_active_iommu(iommu, drhd) {
3109 iommu_disable_translation(iommu);
3110
3111 spin_lock_irqsave(&iommu->register_lock, flag);
3112
3113 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3114 readl(iommu->reg + DMAR_FECTL_REG);
3115 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3116 readl(iommu->reg + DMAR_FEDATA_REG);
3117 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3118 readl(iommu->reg + DMAR_FEADDR_REG);
3119 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3120 readl(iommu->reg + DMAR_FEUADDR_REG);
3121
3122 spin_unlock_irqrestore(&iommu->register_lock, flag);
3123 }
3124 return 0;
3125
3126nomem:
3127 for_each_active_iommu(iommu, drhd)
3128 kfree(iommu->iommu_state);
3129
3130 return -ENOMEM;
3131}
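
/*
 * Only the fault-event registers (FECTL, FEDATA, FEADDR, FEUADDR) need a
 * software save across suspend; the root table, context entries and page
 * tables live in ordinary memory and are re-armed by init_iommu_hw() on
 * the resume path.
 */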
3132
3133static int iommu_resume(struct sys_device *dev)
3134{
3135 struct dmar_drhd_unit *drhd;
3136 struct intel_iommu *iommu = NULL;
3137 unsigned long flag;
3138
3139 if (init_iommu_hw()) {
	3140		WARN(1, "IOMMU setup failed, DMAR cannot resume!\n");
3141 return -EIO;
3142 }
3143
3144 for_each_active_iommu(iommu, drhd) {
3145
3146 spin_lock_irqsave(&iommu->register_lock, flag);
3147
3148 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3149 iommu->reg + DMAR_FECTL_REG);
3150 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3151 iommu->reg + DMAR_FEDATA_REG);
3152 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3153 iommu->reg + DMAR_FEADDR_REG);
3154 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3155 iommu->reg + DMAR_FEUADDR_REG);
3156
3157 spin_unlock_irqrestore(&iommu->register_lock, flag);
3158 }
3159
3160 for_each_active_iommu(iommu, drhd)
3161 kfree(iommu->iommu_state);
3162
3163 return 0;
3164}
3165
3166static struct sysdev_class iommu_sysclass = {
3167 .name = "iommu",
3168 .resume = iommu_resume,
3169 .suspend = iommu_suspend,
3170};
3171
3172static struct sys_device device_iommu = {
3173 .cls = &iommu_sysclass,
3174};
3175
3176static int __init init_iommu_sysfs(void)
3177{
3178 int error;
3179
3180 error = sysdev_class_register(&iommu_sysclass);
3181 if (error)
3182 return error;
3183
3184 error = sysdev_register(&device_iommu);
3185 if (error)
3186 sysdev_class_unregister(&iommu_sysclass);
3187
3188 return error;
3189}
3190
3191#else
3192static int __init init_iommu_sysfs(void)
3193{
3194 return 0;
3195}
	3196#endif /* CONFIG_SUSPEND */
3197
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003198int __init intel_iommu_init(void)
3199{
3200 int ret = 0;
3201
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003202 if (dmar_table_init())
3203 return -ENODEV;
3204
Suresh Siddha1886e8a2008-07-10 11:16:37 -07003205 if (dmar_dev_scope_init())
3206 return -ENODEV;
3207
Suresh Siddha2ae21012008-07-10 11:16:43 -07003208 /*
3209 * Check the need for DMA-remapping initialization now.
3210 * Above initialization will also be used by Interrupt-remapping.
3211 */
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003212 if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
Suresh Siddha2ae21012008-07-10 11:16:43 -07003213 return -ENODEV;
3214
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003215 iommu_init_mempool();
3216 dmar_init_reserved_ranges();
3217
3218 init_no_remapping_devices();
3219
3220 ret = init_dmars();
3221 if (ret) {
3222 printk(KERN_ERR "IOMMU: dmar init failed\n");
3223 put_iova_domain(&reserved_iova_list);
3224 iommu_exit_mempool();
3225 return ret;
3226 }
3227 printk(KERN_INFO
3228 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3229
mark gross5e0d2a62008-03-04 15:22:08 -08003230 init_timer(&unmap_timer);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003231 force_iommu = 1;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003232
3233 if (!iommu_pass_through) {
3234 printk(KERN_INFO
3235 "Multi-level page-table translation for DMAR.\n");
3236 dma_ops = &intel_dma_ops;
3237 } else
3238 printk(KERN_INFO
3239 "DMAR: Pass through translation for DMAR.\n");
3240
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003241 init_iommu_sysfs();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003242
3243 register_iommu(&intel_iommu_ops);
3244
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003245 return 0;
3246}
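
/*
 * Note the ordering above: the DMAR and device-scope tables are parsed
 * first, then the slab caches and reserved IOVA ranges are set up and
 * gfx-only units may be marked ignored; only once init_dmars() succeeds
 * do we commit to intel_dma_ops (or pass-through) and register with the
 * generic IOMMU layer.
 */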
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07003247
Han, Weidong3199aa62009-02-26 17:31:12 +08003248static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3249 struct pci_dev *pdev)
3250{
3251 struct pci_dev *tmp, *parent;
3252
3253 if (!iommu || !pdev)
3254 return;
3255
3256 /* dependent device detach */
3257 tmp = pci_find_upstream_pcie_bridge(pdev);
3258 /* Secondary interface's bus number and devfn 0 */
3259 if (tmp) {
3260 parent = pdev->bus->self;
3261 while (parent != tmp) {
3262 iommu_detach_dev(iommu, parent->bus->number,
David Woodhouse276dbf992009-04-04 01:45:37 +01003263 parent->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003264 parent = parent->bus->self;
3265 }
3266 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
3267 iommu_detach_dev(iommu,
3268 tmp->subordinate->number, 0);
3269 else /* this is a legacy PCI bridge */
David Woodhouse276dbf992009-04-04 01:45:37 +01003270 iommu_detach_dev(iommu, tmp->bus->number,
3271 tmp->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003272 }
3273}
3274
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003275static void domain_remove_one_dev_info(struct dmar_domain *domain,
Weidong Hanc7151a82008-12-08 22:51:37 +08003276 struct pci_dev *pdev)
3277{
3278 struct device_domain_info *info;
3279 struct intel_iommu *iommu;
3280 unsigned long flags;
3281 int found = 0;
3282 struct list_head *entry, *tmp;
3283
David Woodhouse276dbf992009-04-04 01:45:37 +01003284 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3285 pdev->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08003286 if (!iommu)
3287 return;
3288
3289 spin_lock_irqsave(&device_domain_lock, flags);
3290 list_for_each_safe(entry, tmp, &domain->devices) {
3291 info = list_entry(entry, struct device_domain_info, link);
David Woodhouse276dbf992009-04-04 01:45:37 +01003292 /* No need to compare PCI domain; it has to be the same */
Weidong Hanc7151a82008-12-08 22:51:37 +08003293 if (info->bus == pdev->bus->number &&
3294 info->devfn == pdev->devfn) {
3295 list_del(&info->link);
3296 list_del(&info->global);
3297 if (info->dev)
3298 info->dev->dev.archdata.iommu = NULL;
3299 spin_unlock_irqrestore(&device_domain_lock, flags);
3300
Yu Zhao93a23a72009-05-18 13:51:37 +08003301 iommu_disable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08003302 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003303 iommu_detach_dependent_devices(iommu, pdev);
Weidong Hanc7151a82008-12-08 22:51:37 +08003304 free_devinfo_mem(info);
3305
3306 spin_lock_irqsave(&device_domain_lock, flags);
3307
3308 if (found)
3309 break;
3310 else
3311 continue;
3312 }
3313
	3314		/* if there are no other devices under the same iommu
	3315		 * owned by this domain, clear this iommu from iommu_bmp and
	3316		 * update the iommu count and coherency
3317 */
David Woodhouse276dbf992009-04-04 01:45:37 +01003318 if (iommu == device_to_iommu(info->segment, info->bus,
3319 info->devfn))
Weidong Hanc7151a82008-12-08 22:51:37 +08003320 found = 1;
3321 }
3322
3323 if (found == 0) {
3324 unsigned long tmp_flags;
3325 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
3326 clear_bit(iommu->seq_id, &domain->iommu_bmp);
3327 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08003328 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08003329 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
3330 }
3331
3332 spin_unlock_irqrestore(&device_domain_lock, flags);
3333}
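
/*
 * If the device removed above was the last one behind its IOMMU in this
 * domain, that IOMMU's bit is cleared from iommu_bmp and the domain's
 * aggregate capabilities (coherency, snooping) are recomputed via
 * domain_update_iommu_cap().
 */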
3334
3335static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
3336{
3337 struct device_domain_info *info;
3338 struct intel_iommu *iommu;
3339 unsigned long flags1, flags2;
3340
3341 spin_lock_irqsave(&device_domain_lock, flags1);
3342 while (!list_empty(&domain->devices)) {
3343 info = list_entry(domain->devices.next,
3344 struct device_domain_info, link);
3345 list_del(&info->link);
3346 list_del(&info->global);
3347 if (info->dev)
3348 info->dev->dev.archdata.iommu = NULL;
3349
3350 spin_unlock_irqrestore(&device_domain_lock, flags1);
3351
Yu Zhao93a23a72009-05-18 13:51:37 +08003352 iommu_disable_dev_iotlb(info);
David Woodhouse276dbf992009-04-04 01:45:37 +01003353 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08003354 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003355 iommu_detach_dependent_devices(iommu, info->dev);
Weidong Hanc7151a82008-12-08 22:51:37 +08003356
3357 /* clear this iommu in iommu_bmp, update iommu count
Sheng Yang58c610b2009-03-18 15:33:05 +08003358 * and capabilities
Weidong Hanc7151a82008-12-08 22:51:37 +08003359 */
3360 spin_lock_irqsave(&domain->iommu_lock, flags2);
3361 if (test_and_clear_bit(iommu->seq_id,
3362 &domain->iommu_bmp)) {
3363 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08003364 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08003365 }
3366 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
3367
3368 free_devinfo_mem(info);
3369 spin_lock_irqsave(&device_domain_lock, flags1);
3370 }
3371 spin_unlock_irqrestore(&device_domain_lock, flags1);
3372}
3373
Weidong Han5e98c4b2008-12-08 23:03:27 +08003374/* domain id for virtual machine, it won't be set in context */
3375static unsigned long vm_domid;
3376
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003377static int vm_domain_min_agaw(struct dmar_domain *domain)
3378{
3379 int i;
3380 int min_agaw = domain->agaw;
3381
3382 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
3383 for (; i < g_num_of_iommus; ) {
3384 if (min_agaw > g_iommus[i]->agaw)
3385 min_agaw = g_iommus[i]->agaw;
3386
3387 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
3388 }
3389
3390 return min_agaw;
3391}
3392
Weidong Han5e98c4b2008-12-08 23:03:27 +08003393static struct dmar_domain *iommu_alloc_vm_domain(void)
3394{
3395 struct dmar_domain *domain;
3396
3397 domain = alloc_domain_mem();
3398 if (!domain)
3399 return NULL;
3400
3401 domain->id = vm_domid++;
3402 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
3403 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
3404
3405 return domain;
3406}
3407
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003408static int md_domain_init(struct dmar_domain *domain, int guest_width)
Weidong Han5e98c4b2008-12-08 23:03:27 +08003409{
3410 int adjust_width;
3411
3412 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003413 spin_lock_init(&domain->iommu_lock);
3414
3415 domain_reserve_special_ranges(domain);
3416
3417 /* calculate AGAW */
3418 domain->gaw = guest_width;
3419 adjust_width = guestwidth_to_adjustwidth(guest_width);
3420 domain->agaw = width_to_agaw(adjust_width);
3421
3422 INIT_LIST_HEAD(&domain->devices);
3423
3424 domain->iommu_count = 0;
3425 domain->iommu_coherency = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003426 domain->max_addr = 0;
Weidong Han5e98c4b2008-12-08 23:03:27 +08003427
3428 /* always allocate the top pgd */
3429 domain->pgd = (struct dma_pte *)alloc_pgtable_page();
3430 if (!domain->pgd)
3431 return -ENOMEM;
3432 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3433 return 0;
3434}
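
/*
 * For these domains the address width comes from the caller; the generic
 * IOMMU path below passes DEFAULT_DOMAIN_ADDRESS_WIDTH (48 bits), which
 * guestwidth_to_adjustwidth() and width_to_agaw() narrow to an AGAW the
 * hardware can actually support.
 */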
3435
3436static void iommu_free_vm_domain(struct dmar_domain *domain)
3437{
3438 unsigned long flags;
3439 struct dmar_drhd_unit *drhd;
3440 struct intel_iommu *iommu;
3441 unsigned long i;
3442 unsigned long ndomains;
3443
3444 for_each_drhd_unit(drhd) {
3445 if (drhd->ignored)
3446 continue;
3447 iommu = drhd->iommu;
3448
3449 ndomains = cap_ndoms(iommu->cap);
3450 i = find_first_bit(iommu->domain_ids, ndomains);
3451 for (; i < ndomains; ) {
3452 if (iommu->domains[i] == domain) {
3453 spin_lock_irqsave(&iommu->lock, flags);
3454 clear_bit(i, iommu->domain_ids);
3455 iommu->domains[i] = NULL;
3456 spin_unlock_irqrestore(&iommu->lock, flags);
3457 break;
3458 }
3459 i = find_next_bit(iommu->domain_ids, ndomains, i+1);
3460 }
3461 }
3462}
3463
3464static void vm_domain_exit(struct dmar_domain *domain)
3465{
Weidong Han5e98c4b2008-12-08 23:03:27 +08003466	/* Domain 0 is reserved, so don't process it */
3467 if (!domain)
3468 return;
3469
3470 vm_domain_remove_all_dev_info(domain);
3471 /* destroy iovas */
3472 put_iova_domain(&domain->iovad);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003473
3474 /* clear ptes */
David Woodhouse595badf52009-06-27 22:09:11 +01003475 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003476
3477 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01003478 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003479
3480 iommu_free_vm_domain(domain);
3481 free_domain_mem(domain);
3482}
3483
Joerg Roedel5d450802008-12-03 14:52:32 +01003484static int intel_iommu_domain_init(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03003485{
Joerg Roedel5d450802008-12-03 14:52:32 +01003486 struct dmar_domain *dmar_domain;
Kay, Allen M38717942008-09-09 18:37:29 +03003487
Joerg Roedel5d450802008-12-03 14:52:32 +01003488 dmar_domain = iommu_alloc_vm_domain();
3489 if (!dmar_domain) {
Kay, Allen M38717942008-09-09 18:37:29 +03003490 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003491 "intel_iommu_domain_init: dmar_domain == NULL\n");
3492 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003493 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003494 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Kay, Allen M38717942008-09-09 18:37:29 +03003495 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003496 "intel_iommu_domain_init() failed\n");
3497 vm_domain_exit(dmar_domain);
3498 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003499 }
Joerg Roedel5d450802008-12-03 14:52:32 +01003500 domain->priv = dmar_domain;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003501
Joerg Roedel5d450802008-12-03 14:52:32 +01003502 return 0;
Kay, Allen M38717942008-09-09 18:37:29 +03003503}
Kay, Allen M38717942008-09-09 18:37:29 +03003504
Joerg Roedel5d450802008-12-03 14:52:32 +01003505static void intel_iommu_domain_destroy(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03003506{
Joerg Roedel5d450802008-12-03 14:52:32 +01003507 struct dmar_domain *dmar_domain = domain->priv;
3508
3509 domain->priv = NULL;
3510 vm_domain_exit(dmar_domain);
Kay, Allen M38717942008-09-09 18:37:29 +03003511}
Kay, Allen M38717942008-09-09 18:37:29 +03003512
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003513static int intel_iommu_attach_device(struct iommu_domain *domain,
3514 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03003515{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003516 struct dmar_domain *dmar_domain = domain->priv;
3517 struct pci_dev *pdev = to_pci_dev(dev);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003518 struct intel_iommu *iommu;
3519 int addr_width;
3520 u64 end;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003521 int ret;
Kay, Allen M38717942008-09-09 18:37:29 +03003522
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003523 /* normally pdev is not mapped */
3524 if (unlikely(domain_context_mapped(pdev))) {
3525 struct dmar_domain *old_domain;
3526
3527 old_domain = find_domain(pdev);
3528 if (old_domain) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003529 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
3530 dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
3531 domain_remove_one_dev_info(old_domain, pdev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003532 else
3533 domain_remove_dev_info(old_domain);
3534 }
3535 }
3536
David Woodhouse276dbf992009-04-04 01:45:37 +01003537 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3538 pdev->devfn);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003539 if (!iommu)
3540 return -ENODEV;
3541
3542 /* check if this iommu agaw is sufficient for max mapped address */
3543 addr_width = agaw_to_width(iommu->agaw);
3544 end = DOMAIN_MAX_ADDR(addr_width);
3545 end = end & VTD_PAGE_MASK;
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003546 if (end < dmar_domain->max_addr) {
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003547 printk(KERN_ERR "%s: iommu agaw (%d) is not "
3548 "sufficient for the mapped address (%llx)\n",
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003549 __func__, iommu->agaw, dmar_domain->max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003550 return -EFAULT;
3551 }
3552
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003553 ret = domain_add_dev_info(dmar_domain, pdev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003554 if (ret)
3555 return ret;
3556
Yu Zhao93a23a72009-05-18 13:51:37 +08003557 ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003558 return ret;
3559}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003560
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003561static void intel_iommu_detach_device(struct iommu_domain *domain,
3562 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03003563{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003564 struct dmar_domain *dmar_domain = domain->priv;
3565 struct pci_dev *pdev = to_pci_dev(dev);
3566
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003567 domain_remove_one_dev_info(dmar_domain, pdev);
Kay, Allen M38717942008-09-09 18:37:29 +03003568}
Kay, Allen M38717942008-09-09 18:37:29 +03003569
Joerg Roedeldde57a22008-12-03 15:04:09 +01003570static int intel_iommu_map_range(struct iommu_domain *domain,
3571 unsigned long iova, phys_addr_t hpa,
3572 size_t size, int iommu_prot)
Kay, Allen M38717942008-09-09 18:37:29 +03003573{
Joerg Roedeldde57a22008-12-03 15:04:09 +01003574 struct dmar_domain *dmar_domain = domain->priv;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003575 u64 max_addr;
3576 int addr_width;
Joerg Roedeldde57a22008-12-03 15:04:09 +01003577 int prot = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003578 int ret;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003579
Joerg Roedeldde57a22008-12-03 15:04:09 +01003580 if (iommu_prot & IOMMU_READ)
3581 prot |= DMA_PTE_READ;
3582 if (iommu_prot & IOMMU_WRITE)
3583 prot |= DMA_PTE_WRITE;
Sheng Yang9cf06692009-03-18 15:33:07 +08003584 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
3585 prot |= DMA_PTE_SNP;
Joerg Roedeldde57a22008-12-03 15:04:09 +01003586
David Woodhouse163cc522009-06-28 00:51:17 +01003587 max_addr = iova + size;
Joerg Roedeldde57a22008-12-03 15:04:09 +01003588 if (dmar_domain->max_addr < max_addr) {
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003589 int min_agaw;
3590 u64 end;
3591
3592 /* check if minimum agaw is sufficient for mapped address */
Joerg Roedeldde57a22008-12-03 15:04:09 +01003593 min_agaw = vm_domain_min_agaw(dmar_domain);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003594 addr_width = agaw_to_width(min_agaw);
3595 end = DOMAIN_MAX_ADDR(addr_width);
3596 end = end & VTD_PAGE_MASK;
3597 if (end < max_addr) {
3598 printk(KERN_ERR "%s: iommu agaw (%d) is not "
3599 "sufficient for the mapped address (%llx)\n",
3600 __func__, min_agaw, max_addr);
3601 return -EFAULT;
3602 }
Joerg Roedeldde57a22008-12-03 15:04:09 +01003603 dmar_domain->max_addr = max_addr;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003604 }
David Woodhousead051222009-06-28 14:22:28 +01003605 /* Round up size to next multiple of PAGE_SIZE, if it and
3606 the low bits of hpa would take us onto the next page */
David Woodhouse88cb6a72009-06-28 15:03:06 +01003607 size = aligned_nrpages(hpa, size);
David Woodhousead051222009-06-28 14:22:28 +01003608 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
3609 hpa >> VTD_PAGE_SHIFT, size, prot);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003610 return ret;
Kay, Allen M38717942008-09-09 18:37:29 +03003611}
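
/*
 * Granularity note: iova and hpa are truncated to VTD_PAGE_SIZE
 * boundaries and the length is rounded up by aligned_nrpages(), so the
 * mapping always covers whole VT-d pages even when the requested range
 * is not page-aligned.
 */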
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003612
Joerg Roedeldde57a22008-12-03 15:04:09 +01003613static void intel_iommu_unmap_range(struct iommu_domain *domain,
3614 unsigned long iova, size_t size)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003615{
Joerg Roedeldde57a22008-12-03 15:04:09 +01003616 struct dmar_domain *dmar_domain = domain->priv;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003617
David Woodhouse163cc522009-06-28 00:51:17 +01003618 dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
3619 (iova + size - 1) >> VTD_PAGE_SHIFT);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003620
David Woodhouse163cc522009-06-28 00:51:17 +01003621 if (dmar_domain->max_addr == iova + size)
3622 dmar_domain->max_addr = iova;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003623}
Kay, Allen M38717942008-09-09 18:37:29 +03003624
Joerg Roedeld14d6572008-12-03 15:06:57 +01003625static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
3626 unsigned long iova)
Kay, Allen M38717942008-09-09 18:37:29 +03003627{
Joerg Roedeld14d6572008-12-03 15:06:57 +01003628 struct dmar_domain *dmar_domain = domain->priv;
Kay, Allen M38717942008-09-09 18:37:29 +03003629 struct dma_pte *pte;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003630 u64 phys = 0;
Kay, Allen M38717942008-09-09 18:37:29 +03003631
David Woodhouseb026fd22009-06-28 10:37:25 +01003632 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT);
Kay, Allen M38717942008-09-09 18:37:29 +03003633 if (pte)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003634 phys = dma_pte_addr(pte);
Kay, Allen M38717942008-09-09 18:37:29 +03003635
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003636 return phys;
Kay, Allen M38717942008-09-09 18:37:29 +03003637}
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003638
Sheng Yangdbb9fd82009-03-18 15:33:06 +08003639static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
3640 unsigned long cap)
3641{
3642 struct dmar_domain *dmar_domain = domain->priv;
3643
3644 if (cap == IOMMU_CAP_CACHE_COHERENCY)
3645 return dmar_domain->iommu_snooping;
3646
3647 return 0;
3648}
3649
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003650static struct iommu_ops intel_iommu_ops = {
3651 .domain_init = intel_iommu_domain_init,
3652 .domain_destroy = intel_iommu_domain_destroy,
3653 .attach_dev = intel_iommu_attach_device,
3654 .detach_dev = intel_iommu_detach_device,
3655 .map = intel_iommu_map_range,
3656 .unmap = intel_iommu_unmap_range,
3657 .iova_to_phys = intel_iommu_iova_to_phys,
Sheng Yangdbb9fd82009-03-18 15:33:06 +08003658 .domain_has_cap = intel_iommu_domain_has_cap,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003659};
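
/*
 * These ops are handed to the generic IOMMU layer by register_iommu() in
 * intel_iommu_init(), which is how KVM device assignment and other
 * iommu_domain users create, populate and tear down VT-d protected
 * domains.
 */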
David Woodhouse9af88142009-02-13 23:18:03 +00003660
3661static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
3662{
3663 /*
3664 * Mobile 4 Series Chipset neglects to set RWBF capability,
3665 * but needs it:
3666 */
3667 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
3668 rwbf_quirk = 1;
3669}
3670
3671DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
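
/*
 * 0x2a40 is the Mobile 4 Series (GM45) memory controller hub; the header
 * fixup runs early during PCI enumeration, forcing
 * iommu_flush_write_buffer() to flush even though the capability
 * register does not advertise RWBF.
 */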