/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
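/*
 * For example: with the default 48-bit guest address width,
 * __DOMAIN_MAX_PFN(48) is 2^36 - 1. On 64-bit kernels DOMAIN_MAX_PFN(48)
 * keeps that value; on 32-bit the min_t() above clamps it to ULONG_MAX so
 * PFNs still fit in an unsigned long.
 */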

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is an order of a 4KiB page and that the
 * mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)
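/*
 * For example: ~0xFFFUL has every bit from bit 12 upwards set, so we
 * advertise 4KiB, 8KiB, 16KiB and so on (every power-of-two multiple of
 * 4KiB), even though the page tables themselves use 4KiB entries plus
 * the superpage sizes (2MiB, 1GiB, ...) handled elsewhere in this file.
 */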

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}
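/*
 * For example: a 48-bit address width gives width_to_agaw(48) = 2 and
 * agaw_to_level(2) = 4, i.e. a 4-level page table; a 39-bit width gives
 * agaw 1 and a 3-level table.
 */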

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}
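/*
 * For example: lvl_to_nr_pages(1) == 1 (a 4KiB page), lvl_to_nr_pages(2) ==
 * 512 (a 2MiB superpage) and lvl_to_nr_pages(3) == 262144 (a 1GiB superpage).
 */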

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
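/*
 * For example: with 4KiB MM pages, PAGE_SHIFT == VTD_PAGE_SHIFT == 12 and
 * the two conversions above are identity; with larger MM pages each MM page
 * frame number covers several VT-d (DMA) page frame numbers.
 */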

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic kernel if can't successfully enable VT-d
 * (used when kernel is launched w/ TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val &= ~VTD_PAGE_MASK;
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root)?phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo &= ~VTD_PAGE_MASK;
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & DMA_PTE_LARGE_PAGE);
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}

/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* domain represents a virtual machine; more than one device
 * across iommus may be owned by one domain, e.g. a kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 0)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 1)

struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
					/* bitmap of iommus this domain uses*/

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature*/
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct device *dev;	/* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

struct dmar_rmrr_unit {
	struct list_head list;		/* list of rmrr units	*/
	struct acpi_dmar_header *hdr;	/* ACPI header		*/
	u64	base_address;		/* reserved base address*/
	u64	end_address;		/* reserved end address */
	struct dmar_dev_scope *devices;	/* target devices */
	int	devices_cnt;		/* target device count */
};

struct dmar_atsr_unit {
	struct list_head list;		/* list of ATSR units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	struct dmar_dev_scope *devices;	/* target devices */
	int devices_cnt;		/* target device count */
	u8 include_all:1;		/* include all ports */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);

#define for_each_rmrr_units(rmrr) \
	list_for_each_entry(rmrr, &dmar_rmrr_units, list)

static void flush_unmaps_timeout(unsigned long data);

static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
	struct page *freelist[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct device *dev);
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static const struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable supported super page\n");
			intel_iommu_superpage = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void * alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}

static inline int domain_type_is_vm(struct dmar_domain *domain)
{
	return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
}

static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
{
	return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
				DOMAIN_FLAG_STATIC_IDENTITY);
}

static inline int domain_pfn_supported(struct dmar_domain *domain,
					unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}
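/*
 * For example: a domain with agaw 2 has a 48-bit guest address width, so
 * addr_width above is 36 and any pfn at or above 1 << 36 is not supported.
 */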

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw, and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain_type_is_vm_or_si(domain));
	iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int i, found = 0;

	domain->iommu_coherency = 1;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		found = 1;
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	if (found)
		return;

	/* No hardware attached; use lowest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!ecap_coherent(iommu->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	rcu_read_unlock();
}

static int domain_update_iommu_snooping(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int ret = 1;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			if (!ecap_sc_support(iommu->ecap)) {
				ret = 0;
				break;
			}
		}
	}
	rcu_read_unlock();

	return ret;
}

static int domain_update_iommu_superpage(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		return 0;
	}

	/* set iommu_superpage to the smallest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			mask &= cap_super_page_val(iommu->cap);
			if (!mask)
				break;
		}
	}
	rcu_read_unlock();

	return fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain->iommu_snooping = domain_update_iommu_snooping(NULL);
	domain->iommu_superpage = domain_update_iommu_superpage(NULL);
}

static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	struct intel_iommu *iommu;
	struct device *tmp;
	struct pci_dev *ptmp, *pdev = NULL;
	u16 segment = 0;
	int i;

	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		segment = pci_domain_nr(pdev->bus);
	} else if (ACPI_COMPANION(dev))
		dev = &ACPI_COMPANION(dev)->dev;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (pdev && segment != drhd->segment)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, tmp) {
			if (tmp == dev) {
				*bus = drhd->devices[i].bus;
				*devfn = drhd->devices[i].devfn;
				goto out;
			}

			if (!pdev || !dev_is_pci(tmp))
				continue;

			ptmp = to_pci_dev(tmp);
			if (ptmp->subordinate &&
			    ptmp->subordinate->number <= pdev->bus->number &&
			    ptmp->subordinate->busn_res.end >= pdev->bus->number)
				goto got_pdev;
		}

		if (pdev && drhd->include_all) {
		got_pdev:
			*bus = pdev->bus->number;
			*devfn = pdev->devfn;
			goto out;
		}
	}
	iommu = NULL;
 out:
	rcu_read_unlock();

	return iommu;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)
				alloc_pgtable_page(iommu->node);
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
			sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int *target_level)
{
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);

	if (!domain_pfn_supported(domain, pfn))
		/* Address beyond IOMMU's addressing capabilities. */
		return NULL;

	parent = domain->pgd;

	while (1) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == *target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval))
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			else
				domain_flush_cache(domain, pte, sizeof(*pte));
		}
		if (level == 1)
			break;

		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	if (!*target_level)
		*target_level = level;

	return pte;
}


/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (dma_pte_superpage(pte)) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte, a tlb flush should be followed */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);
}

static void dma_pte_free_level(struct dmar_domain *domain, int level,
			       struct dma_pte *pte, unsigned long pfn,
			       unsigned long start_pfn, unsigned long last_pfn)
{
	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;
		struct dma_pte *level_pte;

		if (!dma_pte_present(pte) || dma_pte_superpage(pte))
			goto next;

		level_pfn = pfn & level_mask(level - 1);
		level_pte = phys_to_virt(dma_pte_addr(pte));

		if (level > 2)
			dma_pte_free_level(domain, level - 1, level_pte,
					   level_pfn, start_pfn, last_pfn);

		/* If range covers entire pagetable, free it */
		if (!(start_pfn > level_pfn ||
		      last_pfn < level_pfn + level_size(level) - 1)) {
			dma_clear_pte(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
			free_pgtable_page(level_pte);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	dma_pte_free_level(domain, agaw_to_level(domain->agaw),
			   domain->pgd, 0, start_pfn, last_pfn);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* When a page at a given level is being unlinked from its parent, we don't
   need to *modify* it at all. All we need to do is make a list of all the
   pages which can be freed just as soon as we've flushed the IOTLB and we
   know the hardware page-walk will no longer touch them.
   The 'pte' argument is the *parent* PTE, pointing to the page that is to
   be freed. */
static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
					    int level, struct dma_pte *pte,
					    struct page *freelist)
{
	struct page *pg;

	pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
	pg->freelist = freelist;
	freelist = pg;

	if (level == 1)
		return freelist;

	pte = page_address(pg);
	do {
		if (dma_pte_present(pte) && !dma_pte_superpage(pte))
			freelist = dma_pte_list_pagetables(domain, level - 1,
							   pte, freelist);
		pte++;
	} while (!first_pte_in_page(pte));

	return freelist;
}

static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
					struct dma_pte *pte, unsigned long pfn,
					unsigned long start_pfn,
					unsigned long last_pfn,
					struct page *freelist)
{
	struct dma_pte *first_pte = NULL, *last_pte = NULL;

	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;

		if (!dma_pte_present(pte))
			goto next;

		level_pfn = pfn & level_mask(level);

		/* If range covers entire pagetable, free it */
		if (start_pfn <= level_pfn &&
		    last_pfn >= level_pfn + level_size(level) - 1) {
			/* These subordinate page tables are going away entirely. Don't
			   bother to clear them; we're just going to *free* them. */
			if (level > 1 && !dma_pte_superpage(pte))
				freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);

			dma_clear_pte(pte);
			if (!first_pte)
				first_pte = pte;
			last_pte = pte;
		} else if (level > 1) {
			/* Recurse down into a level that isn't *entirely* obsolete */
			freelist = dma_pte_clear_level(domain, level - 1,
						       phys_to_virt(dma_pte_addr(pte)),
						       level_pfn, start_pfn, last_pfn,
						       freelist);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);

	if (first_pte)
		domain_flush_cache(domain, first_pte,
				   (void *)++last_pte - (void *)first_pte);

	return freelist;
}

/* We can't just free the pages because the IOMMU may still be walking
   the page tables, and may have cached the intermediate levels. The
   pages can only be freed after the IOTLB flush has been done. */
struct page *domain_unmap(struct dmar_domain *domain,
			  unsigned long start_pfn,
			  unsigned long last_pfn)
{
	struct page *freelist = NULL;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
				       domain->pgd, 0, start_pfn, last_pfn, NULL);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		struct page *pgd_page = virt_to_page(domain->pgd);
		pgd_page->freelist = freelist;
		freelist = pgd_page;

		domain->pgd = NULL;
	}

	return freelist;
}

void dma_free_pagelist(struct page *freelist)
{
	struct page *pg;

	while ((pg = freelist)) {
		freelist = pg->freelist;
		free_pgtable_page(page_address(pg));
	}
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root) {
		pr_err("IOMMU: allocating root entry for %s failed\n",
			iommu->name);
		return -ENOMEM;
	}

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

David Woodhouse4c25a2c2009-05-10 17:16:06 +01001184static void __iommu_flush_context(struct intel_iommu *iommu,
1185 u16 did, u16 source_id, u8 function_mask,
1186 u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001187{
1188 u64 val = 0;
1189 unsigned long flag;
1190
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001191 switch (type) {
1192 case DMA_CCMD_GLOBAL_INVL:
1193 val = DMA_CCMD_GLOBAL_INVL;
1194 break;
1195 case DMA_CCMD_DOMAIN_INVL:
1196 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1197 break;
1198 case DMA_CCMD_DEVICE_INVL:
1199 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1200 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1201 break;
1202 default:
1203 BUG();
1204 }
1205 val |= DMA_CCMD_ICC;
1206
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001207 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001208 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1209
1210 /* Make sure hardware complete it */
1211 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1212 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1213
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001214 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001215}
1216
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001217/* return value determine if we need a write buffer flush */
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001218static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1219 u64 addr, unsigned int size_order, u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001220{
1221 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1222 u64 val = 0, val_iva = 0;
1223 unsigned long flag;
1224
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001225 switch (type) {
1226 case DMA_TLB_GLOBAL_FLUSH:
1227 /* global flush doesn't need set IVA_REG */
1228 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1229 break;
1230 case DMA_TLB_DSI_FLUSH:
1231 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1232 break;
1233 case DMA_TLB_PSI_FLUSH:
1234 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
David Woodhouseea8ea462014-03-05 17:09:32 +00001235 /* IH bit is passed in as part of address */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001236 val_iva = size_order | addr;
1237 break;
1238 default:
1239 BUG();
1240 }
1241 /* Note: set drain read/write */
1242#if 0
1243 /*
1244 * This is probably to be super secure.. Looks like we can
1245 * ignore it without any impact.
1246 */
1247 if (cap_read_drain(iommu->cap))
1248 val |= DMA_TLB_READ_DRAIN;
1249#endif
1250 if (cap_write_drain(iommu->cap))
1251 val |= DMA_TLB_WRITE_DRAIN;
1252
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001253 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001254 /* Note: Only uses first TLB reg currently */
1255 if (val_iva)
1256 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1257 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1258
1259 /* Make sure hardware complete it */
1260 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1261 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1262
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001263 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001264
1265 /* check IOTLB invalidation granularity */
1266 if (DMA_TLB_IAIG(val) == 0)
1267 printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
1268 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1269 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001270 (unsigned long long)DMA_TLB_IIRG(type),
1271 (unsigned long long)DMA_TLB_IAIG(val));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001272}
1273
David Woodhouse64ae8922014-03-09 12:52:30 -07001274static struct device_domain_info *
1275iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
1276 u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001277{
Yu Zhao93a23a72009-05-18 13:51:37 +08001278 int found = 0;
1279 unsigned long flags;
1280 struct device_domain_info *info;
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001281 struct pci_dev *pdev;
Yu Zhao93a23a72009-05-18 13:51:37 +08001282
1283 if (!ecap_dev_iotlb_support(iommu->ecap))
1284 return NULL;
1285
1286 if (!iommu->qi)
1287 return NULL;
1288
1289 spin_lock_irqsave(&device_domain_lock, flags);
1290 list_for_each_entry(info, &domain->devices, link)
Jiang Liuc3b497c2014-07-11 14:19:25 +08001291 if (info->iommu == iommu && info->bus == bus &&
1292 info->devfn == devfn) {
Yu Zhao93a23a72009-05-18 13:51:37 +08001293 found = 1;
1294 break;
1295 }
1296 spin_unlock_irqrestore(&device_domain_lock, flags);
1297
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001298 if (!found || !info->dev || !dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001299 return NULL;
1300
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001301 pdev = to_pci_dev(info->dev);
1302
1303 if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
Yu Zhao93a23a72009-05-18 13:51:37 +08001304 return NULL;
1305
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001306 if (!dmar_find_matched_atsr_unit(pdev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001307 return NULL;
1308
Yu Zhao93a23a72009-05-18 13:51:37 +08001309 return info;
1310}
1311
1312static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1313{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001314 if (!info || !dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001315 return;
1316
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001317 pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
Yu Zhao93a23a72009-05-18 13:51:37 +08001318}
1319
1320static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1321{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001322 if (!info->dev || !dev_is_pci(info->dev) ||
1323 !pci_ats_enabled(to_pci_dev(info->dev)))
Yu Zhao93a23a72009-05-18 13:51:37 +08001324 return;
1325
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001326 pci_disable_ats(to_pci_dev(info->dev));
Yu Zhao93a23a72009-05-18 13:51:37 +08001327}
1328
1329static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1330 u64 addr, unsigned mask)
1331{
1332 u16 sid, qdep;
1333 unsigned long flags;
1334 struct device_domain_info *info;
1335
1336 spin_lock_irqsave(&device_domain_lock, flags);
1337 list_for_each_entry(info, &domain->devices, link) {
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001338 struct pci_dev *pdev;
1339 if (!info->dev || !dev_is_pci(info->dev))
1340 continue;
1341
1342 pdev = to_pci_dev(info->dev);
1343 if (!pci_ats_enabled(pdev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001344 continue;
1345
1346 sid = info->bus << 8 | info->devfn;
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001347 qdep = pci_ats_queue_depth(pdev);
Yu Zhao93a23a72009-05-18 13:51:37 +08001348 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1349 }
1350 spin_unlock_irqrestore(&device_domain_lock, flags);
1351}
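
/*
 * Illustrative sketch (kept out of the build): the ATS source-id used for
 * a device-IOTLB invalidation is just bus/devfn packed into 16 bits, and
 * the queue depth comes from the device's ATS capability. The helper name
 * is made up; the loop above does the same thing per attached device.
 */
#if 0
static void example_flush_one_dev_iotlb(struct device_domain_info *info,
					u64 addr, unsigned int mask)
{
	u16 sid = info->bus << 8 | info->devfn;	/* e.g. bus 0x3a, devfn 0x00 -> 0x3a00 */
	u16 qdep = pci_ats_queue_depth(to_pci_dev(info->dev));

	qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
}
#endif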
1352
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001353static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
David Woodhouseea8ea462014-03-05 17:09:32 +00001354 unsigned long pfn, unsigned int pages, int ih, int map)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001355{
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001356 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
David Woodhouse03d6a242009-06-28 15:33:46 +01001357 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001358
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001359 BUG_ON(pages == 0);
1360
David Woodhouseea8ea462014-03-05 17:09:32 +00001361 if (ih)
1362 ih = 1 << 6;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001363 /*
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001364 * Fall back to a domain-selective flush if there is no PSI support or
 1365 * the size is too big.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001366 * PSI requires the page size to be 2^x and the base address to be
 1367 * naturally aligned to that size.
1368 */
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001369 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1370 iommu->flush.flush_iotlb(iommu, did, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001371 DMA_TLB_DSI_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001372 else
David Woodhouseea8ea462014-03-05 17:09:32 +00001373 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001374 DMA_TLB_PSI_FLUSH);
Yu Zhaobf92df32009-06-29 11:31:45 +08001375
1376 /*
Nadav Amit82653632010-04-01 13:24:40 +03001377 * In caching mode, changing a page from non-present to present requires
 1378 * a flush. However, the device IOTLB doesn't need to be flushed in this case.
Yu Zhaobf92df32009-06-29 11:31:45 +08001379 */
Nadav Amit82653632010-04-01 13:24:40 +03001380 if (!cap_caching_mode(iommu->cap) || !map)
Yu Zhao93a23a72009-05-18 13:51:37 +08001381 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001382}
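
/*
 * Worked example (kept out of the build): the PSI mask is the size order,
 * so a request that is not a power of two is widened first. Three pages
 * round up to four, giving mask 2; if that mask exceeded
 * cap_max_amask_val() the function above would fall back to a
 * domain-selective flush instead.
 */
#if 0
static unsigned int example_psi_mask(void)
{
	unsigned int pages = 3;

	return ilog2(__roundup_pow_of_two(pages));	/* = 2, i.e. 4 pages */
}
#endif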
1383
mark grossf8bab732008-02-08 04:18:38 -08001384static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1385{
1386 u32 pmen;
1387 unsigned long flags;
1388
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001389 raw_spin_lock_irqsave(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001390 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1391 pmen &= ~DMA_PMEN_EPM;
1392 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1393
1394 /* wait for the protected region status bit to clear */
1395 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1396 readl, !(pmen & DMA_PMEN_PRS), pmen);
1397
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001398 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001399}
1400
Jiang Liu2a41cce2014-07-11 14:19:33 +08001401static void iommu_enable_translation(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001402{
1403 u32 sts;
1404 unsigned long flags;
1405
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001406 raw_spin_lock_irqsave(&iommu->register_lock, flags);
David Woodhousec416daa2009-05-10 20:30:58 +01001407 iommu->gcmd |= DMA_GCMD_TE;
1408 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001409
 1410 /* Make sure hardware completes it */
1411 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001412 readl, (sts & DMA_GSTS_TES), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001413
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001414 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001415}
1416
Jiang Liu2a41cce2014-07-11 14:19:33 +08001417static void iommu_disable_translation(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001418{
1419 u32 sts;
1420 unsigned long flag;
1421
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001422 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001423 iommu->gcmd &= ~DMA_GCMD_TE;
1424 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1425
 1426 /* Make sure hardware completes it */
1427 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001428 readl, (!(sts & DMA_GSTS_TES)), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001429
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001430 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001431}
1432
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07001433
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001434static int iommu_init_domains(struct intel_iommu *iommu)
1435{
1436 unsigned long ndomains;
1437 unsigned long nlongs;
1438
1439 ndomains = cap_ndoms(iommu->cap);
Jiang Liu852bdb02014-01-06 14:18:11 +08001440 pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
1441 iommu->seq_id, ndomains);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001442 nlongs = BITS_TO_LONGS(ndomains);
1443
Donald Dutile94a91b502009-08-20 16:51:34 -04001444 spin_lock_init(&iommu->lock);
1445
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001446 /* TBD: there might be 64K domains;
 1447 * consider another allocation scheme for future chips
1448 */
1449 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1450 if (!iommu->domain_ids) {
Jiang Liu852bdb02014-01-06 14:18:11 +08001451 pr_err("IOMMU%d: allocating domain id array failed\n",
1452 iommu->seq_id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001453 return -ENOMEM;
1454 }
1455 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1456 GFP_KERNEL);
1457 if (!iommu->domains) {
Jiang Liu852bdb02014-01-06 14:18:11 +08001458 pr_err("IOMMU%d: allocating domain array failed\n",
1459 iommu->seq_id);
1460 kfree(iommu->domain_ids);
1461 iommu->domain_ids = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001462 return -ENOMEM;
1463 }
1464
1465 /*
 1466 * If caching mode is set, invalid translations are tagged with
 1467 * domain id 0, so we need to pre-allocate it.
1468 */
1469 if (cap_caching_mode(iommu->cap))
1470 set_bit(0, iommu->domain_ids);
1471 return 0;
1472}
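
/*
 * Illustrative sketch (kept out of the build): because bit 0 is pre-set
 * above when caching mode is active, the first free bit handed out for a
 * real domain is 1, keeping domain id 0 reserved for invalid translations.
 */
#if 0
static int example_first_free_domain_id(struct intel_iommu *iommu)
{
	return find_first_zero_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
}
#endif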
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001473
Jiang Liuffebeb42014-11-09 22:48:02 +08001474static void disable_dmar_iommu(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001475{
1476 struct dmar_domain *domain;
Jiang Liu2a46ddf2014-07-11 14:19:30 +08001477 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001478
Donald Dutile94a91b502009-08-20 16:51:34 -04001479 if ((iommu->domains) && (iommu->domain_ids)) {
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001480 for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
Jiang Liua4eaa862014-02-19 14:07:30 +08001481 /*
1482 * Domain id 0 is reserved for invalid translation
1483 * if hardware supports caching mode.
1484 */
1485 if (cap_caching_mode(iommu->cap) && i == 0)
1486 continue;
1487
Donald Dutile94a91b502009-08-20 16:51:34 -04001488 domain = iommu->domains[i];
1489 clear_bit(i, iommu->domain_ids);
Jiang Liu129ad282014-07-11 14:19:31 +08001490 if (domain_detach_iommu(domain, iommu) == 0 &&
1491 !domain_type_is_vm(domain))
Jiang Liu92d03cc2014-02-19 14:07:28 +08001492 domain_exit(domain);
Weidong Han5e98c4b2008-12-08 23:03:27 +08001493 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001494 }
1495
1496 if (iommu->gcmd & DMA_GCMD_TE)
1497 iommu_disable_translation(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08001498}
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001499
Jiang Liuffebeb42014-11-09 22:48:02 +08001500static void free_dmar_iommu(struct intel_iommu *iommu)
1501{
1502 if ((iommu->domains) && (iommu->domain_ids)) {
1503 kfree(iommu->domains);
1504 kfree(iommu->domain_ids);
1505 iommu->domains = NULL;
1506 iommu->domain_ids = NULL;
1507 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001508
Weidong Hand9630fe2008-12-08 11:06:32 +08001509 g_iommus[iommu->seq_id] = NULL;
1510
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001511 /* free context mapping */
1512 free_context_table(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001513}
1514
Jiang Liuab8dfe22014-07-11 14:19:27 +08001515static struct dmar_domain *alloc_domain(int flags)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001516{
Jiang Liu92d03cc2014-02-19 14:07:28 +08001517 /* domain id for virtual machines; it is never written into a context entry */
1518 static atomic_t vm_domid = ATOMIC_INIT(0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001519 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001520
1521 domain = alloc_domain_mem();
1522 if (!domain)
1523 return NULL;
1524
Jiang Liuab8dfe22014-07-11 14:19:27 +08001525 memset(domain, 0, sizeof(*domain));
Suresh Siddha4c923d42009-10-02 11:01:24 -07001526 domain->nid = -1;
Jiang Liuab8dfe22014-07-11 14:19:27 +08001527 domain->flags = flags;
Jiang Liu92d03cc2014-02-19 14:07:28 +08001528 spin_lock_init(&domain->iommu_lock);
1529 INIT_LIST_HEAD(&domain->devices);
Jiang Liuab8dfe22014-07-11 14:19:27 +08001530 if (flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
Jiang Liu92d03cc2014-02-19 14:07:28 +08001531 domain->id = atomic_inc_return(&vm_domid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001532
1533 return domain;
1534}
1535
Jiang Liufb170fb2014-07-11 14:19:28 +08001536static int __iommu_attach_domain(struct dmar_domain *domain,
1537 struct intel_iommu *iommu)
1538{
1539 int num;
1540 unsigned long ndomains;
1541
1542 ndomains = cap_ndoms(iommu->cap);
1543 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1544 if (num < ndomains) {
1545 set_bit(num, iommu->domain_ids);
1546 iommu->domains[num] = domain;
1547 } else {
1548 num = -ENOSPC;
1549 }
1550
1551 return num;
1552}
1553
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001554static int iommu_attach_domain(struct dmar_domain *domain,
1555 struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001556{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001557 int num;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001558 unsigned long flags;
1559
Weidong Han8c11e792008-12-08 15:29:22 +08001560 spin_lock_irqsave(&iommu->lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08001561 num = __iommu_attach_domain(domain, iommu);
Jiang Liu44bde612014-07-11 14:19:29 +08001562 spin_unlock_irqrestore(&iommu->lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08001563 if (num < 0)
1564 pr_err("IOMMU: no free domain ids\n");
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001565
Jiang Liufb170fb2014-07-11 14:19:28 +08001566 return num;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001567}
1568
Jiang Liu44bde612014-07-11 14:19:29 +08001569static int iommu_attach_vm_domain(struct dmar_domain *domain,
1570 struct intel_iommu *iommu)
1571{
1572 int num;
1573 unsigned long ndomains;
1574
1575 ndomains = cap_ndoms(iommu->cap);
1576 for_each_set_bit(num, iommu->domain_ids, ndomains)
1577 if (iommu->domains[num] == domain)
1578 return num;
1579
1580 return __iommu_attach_domain(domain, iommu);
1581}
1582
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001583static void iommu_detach_domain(struct dmar_domain *domain,
1584 struct intel_iommu *iommu)
1585{
1586 unsigned long flags;
1587 int num, ndomains;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001588
1589 spin_lock_irqsave(&iommu->lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08001590 if (domain_type_is_vm_or_si(domain)) {
1591 ndomains = cap_ndoms(iommu->cap);
1592 for_each_set_bit(num, iommu->domain_ids, ndomains) {
1593 if (iommu->domains[num] == domain) {
1594 clear_bit(num, iommu->domain_ids);
1595 iommu->domains[num] = NULL;
1596 break;
1597 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001598 }
Jiang Liufb170fb2014-07-11 14:19:28 +08001599 } else {
1600 clear_bit(domain->id, iommu->domain_ids);
1601 iommu->domains[domain->id] = NULL;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001602 }
Weidong Han8c11e792008-12-08 15:29:22 +08001603 spin_unlock_irqrestore(&iommu->lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001604}
1605
Jiang Liufb170fb2014-07-11 14:19:28 +08001606static void domain_attach_iommu(struct dmar_domain *domain,
1607 struct intel_iommu *iommu)
1608{
1609 unsigned long flags;
1610
1611 spin_lock_irqsave(&domain->iommu_lock, flags);
1612 if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
1613 domain->iommu_count++;
1614 if (domain->iommu_count == 1)
1615 domain->nid = iommu->node;
1616 domain_update_iommu_cap(domain);
1617 }
1618 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1619}
1620
1621static int domain_detach_iommu(struct dmar_domain *domain,
1622 struct intel_iommu *iommu)
1623{
1624 unsigned long flags;
1625 int count = INT_MAX;
1626
1627 spin_lock_irqsave(&domain->iommu_lock, flags);
1628 if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) {
1629 count = --domain->iommu_count;
1630 domain_update_iommu_cap(domain);
1631 }
1632 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1633
1634 return count;
1635}
1636
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001637static struct iova_domain reserved_iova_list;
Mark Gross8a443df2008-03-04 14:59:31 -08001638static struct lock_class_key reserved_rbtree_key;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001639
Joseph Cihula51a63e62011-03-21 11:04:24 -07001640static int dmar_init_reserved_ranges(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001641{
1642 struct pci_dev *pdev = NULL;
1643 struct iova *iova;
1644 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001645
David Millerf6611972008-02-06 01:36:23 -08001646 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001647
Mark Gross8a443df2008-03-04 14:59:31 -08001648 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1649 &reserved_rbtree_key);
1650
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001651 /* IOAPIC ranges shouldn't be accessed by DMA */
1652 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1653 IOVA_PFN(IOAPIC_RANGE_END));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001654 if (!iova) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001655 printk(KERN_ERR "Reserve IOAPIC range failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001656 return -ENODEV;
1657 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001658
1659 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1660 for_each_pci_dev(pdev) {
1661 struct resource *r;
1662
1663 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1664 r = &pdev->resource[i];
1665 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1666 continue;
David Woodhouse1a4a4552009-06-28 16:00:42 +01001667 iova = reserve_iova(&reserved_iova_list,
1668 IOVA_PFN(r->start),
1669 IOVA_PFN(r->end));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001670 if (!iova) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001671 printk(KERN_ERR "Reserve iova failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001672 return -ENODEV;
1673 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001674 }
1675 }
Joseph Cihula51a63e62011-03-21 11:04:24 -07001676 return 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001677}
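
/*
 * Illustrative sketch (kept out of the build): any window can be kept out
 * of DMA use the same way the IOAPIC range and PCI BARs are reserved
 * above. The 64KiB window at 0xfebf0000 is a made-up example address.
 */
#if 0
static void example_reserve_window(void)
{
	reserve_iova(&reserved_iova_list,
		     IOVA_PFN(0xfebf0000ULL),
		     IOVA_PFN(0xfebf0000ULL + 0x10000 - 1));
}
#endif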
1678
1679static void domain_reserve_special_ranges(struct dmar_domain *domain)
1680{
1681 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1682}
1683
1684static inline int guestwidth_to_adjustwidth(int gaw)
1685{
1686 int agaw;
1687 int r = (gaw - 12) % 9;
1688
1689 if (r == 0)
1690 agaw = gaw;
1691 else
1692 agaw = gaw + 9 - r;
1693 if (agaw > 64)
1694 agaw = 64;
1695 return agaw;
1696}
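
/*
 * Worked examples (kept out of the build): the adjusted width is the guest
 * width rounded up to 12 + a multiple of 9 bits, i.e. to a whole number of
 * page-table levels, and clamped to 64.
 */
#if 0
static void example_adjust_width(void)
{
	BUG_ON(guestwidth_to_adjustwidth(48) != 48);	/* (48 - 12) % 9 == 0 */
	BUG_ON(guestwidth_to_adjustwidth(40) != 48);	/* r == 1, rounded up by 8 */
	BUG_ON(guestwidth_to_adjustwidth(66) != 64);	/* clamped to 64 bits */
}
#endif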
1697
1698static int domain_init(struct dmar_domain *domain, int guest_width)
1699{
1700 struct intel_iommu *iommu;
1701 int adjust_width, agaw;
1702 unsigned long sagaw;
1703
David Millerf6611972008-02-06 01:36:23 -08001704 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001705 domain_reserve_special_ranges(domain);
1706
1707 /* calculate AGAW */
Weidong Han8c11e792008-12-08 15:29:22 +08001708 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001709 if (guest_width > cap_mgaw(iommu->cap))
1710 guest_width = cap_mgaw(iommu->cap);
1711 domain->gaw = guest_width;
1712 adjust_width = guestwidth_to_adjustwidth(guest_width);
1713 agaw = width_to_agaw(adjust_width);
1714 sagaw = cap_sagaw(iommu->cap);
1715 if (!test_bit(agaw, &sagaw)) {
1716 /* hardware doesn't support it, choose a bigger one */
1717 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1718 agaw = find_next_bit(&sagaw, 5, agaw);
1719 if (agaw >= 5)
1720 return -ENODEV;
1721 }
1722 domain->agaw = agaw;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001723
Weidong Han8e6040972008-12-08 15:49:06 +08001724 if (ecap_coherent(iommu->ecap))
1725 domain->iommu_coherency = 1;
1726 else
1727 domain->iommu_coherency = 0;
1728
Sheng Yang58c610b2009-03-18 15:33:05 +08001729 if (ecap_sc_support(iommu->ecap))
1730 domain->iommu_snooping = 1;
1731 else
1732 domain->iommu_snooping = 0;
1733
David Woodhouse214e39a2014-03-19 10:38:49 +00001734 if (intel_iommu_superpage)
1735 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1736 else
1737 domain->iommu_superpage = 0;
1738
Suresh Siddha4c923d42009-10-02 11:01:24 -07001739 domain->nid = iommu->node;
Weidong Hanc7151a82008-12-08 22:51:37 +08001740
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001741 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07001742 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001743 if (!domain->pgd)
1744 return -ENOMEM;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001745 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001746 return 0;
1747}
1748
1749static void domain_exit(struct dmar_domain *domain)
1750{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001751 struct dmar_drhd_unit *drhd;
1752 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00001753 struct page *freelist = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001754
 1755 /* Domain 0 is reserved, so don't process it */
1756 if (!domain)
1757 return;
1758
Alex Williamson7b668352011-05-24 12:02:41 +01001759 /* Flush any lazy unmaps that may reference this domain */
1760 if (!intel_iommu_strict)
1761 flush_unmaps_timeout(0);
1762
Jiang Liu92d03cc2014-02-19 14:07:28 +08001763 /* remove associated devices */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001764 domain_remove_dev_info(domain);
Jiang Liu92d03cc2014-02-19 14:07:28 +08001765
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001766 /* destroy iovas */
1767 put_iova_domain(&domain->iovad);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001768
David Woodhouseea8ea462014-03-05 17:09:32 +00001769 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001770
Jiang Liu92d03cc2014-02-19 14:07:28 +08001771 /* clear attached or cached domains */
Jiang Liu0e2426122014-02-19 14:07:34 +08001772 rcu_read_lock();
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001773 for_each_active_iommu(iommu, drhd)
Jiang Liufb170fb2014-07-11 14:19:28 +08001774 iommu_detach_domain(domain, iommu);
Jiang Liu0e2426122014-02-19 14:07:34 +08001775 rcu_read_unlock();
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001776
David Woodhouseea8ea462014-03-05 17:09:32 +00001777 dma_free_pagelist(freelist);
1778
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001779 free_domain_mem(domain);
1780}
1781
David Woodhouse64ae8922014-03-09 12:52:30 -07001782static int domain_context_mapping_one(struct dmar_domain *domain,
1783 struct intel_iommu *iommu,
1784 u8 bus, u8 devfn, int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001785{
1786 struct context_entry *context;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001787 unsigned long flags;
Weidong Hanea6606b2008-12-08 23:08:15 +08001788 struct dma_pte *pgd;
Weidong Hanea6606b2008-12-08 23:08:15 +08001789 int id;
1790 int agaw;
Yu Zhao93a23a72009-05-18 13:51:37 +08001791 struct device_domain_info *info = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001792
1793 pr_debug("Set context mapping for %02x:%02x.%d\n",
1794 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001795
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001796 BUG_ON(!domain->pgd);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001797 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1798 translation != CONTEXT_TT_MULTI_LEVEL);
Weidong Han5331fe62008-12-08 23:00:00 +08001799
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001800 context = device_to_context_entry(iommu, bus, devfn);
1801 if (!context)
1802 return -ENOMEM;
1803 spin_lock_irqsave(&iommu->lock, flags);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001804 if (context_present(context)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001805 spin_unlock_irqrestore(&iommu->lock, flags);
1806 return 0;
1807 }
1808
Weidong Hanea6606b2008-12-08 23:08:15 +08001809 id = domain->id;
1810 pgd = domain->pgd;
1811
Jiang Liuab8dfe22014-07-11 14:19:27 +08001812 if (domain_type_is_vm_or_si(domain)) {
Jiang Liu44bde612014-07-11 14:19:29 +08001813 if (domain_type_is_vm(domain)) {
1814 id = iommu_attach_vm_domain(domain, iommu);
Jiang Liufb170fb2014-07-11 14:19:28 +08001815 if (id < 0) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001816 spin_unlock_irqrestore(&iommu->lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08001817 pr_err("IOMMU: no free domain ids\n");
Weidong Hanea6606b2008-12-08 23:08:15 +08001818 return -EFAULT;
1819 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001820 }
1821
 1822 /* Skip the top levels of the page tables for an
 1823 * iommu whose agaw is smaller than the default.
Chris Wright1672af12009-12-02 12:06:34 -08001824 * Unnecessary for PT mode.
Weidong Hanea6606b2008-12-08 23:08:15 +08001825 */
Chris Wright1672af12009-12-02 12:06:34 -08001826 if (translation != CONTEXT_TT_PASS_THROUGH) {
1827 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1828 pgd = phys_to_virt(dma_pte_addr(pgd));
1829 if (!dma_pte_present(pgd)) {
1830 spin_unlock_irqrestore(&iommu->lock, flags);
1831 return -ENOMEM;
1832 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001833 }
1834 }
1835 }
1836
1837 context_set_domain_id(context, id);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001838
Yu Zhao93a23a72009-05-18 13:51:37 +08001839 if (translation != CONTEXT_TT_PASS_THROUGH) {
David Woodhouse64ae8922014-03-09 12:52:30 -07001840 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
Yu Zhao93a23a72009-05-18 13:51:37 +08001841 translation = info ? CONTEXT_TT_DEV_IOTLB :
1842 CONTEXT_TT_MULTI_LEVEL;
1843 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001844 /*
1845 * In pass through mode, AW must be programmed to indicate the largest
 1846 * AGAW value supported by hardware; ASR is ignored by hardware.
1847 */
Yu Zhao93a23a72009-05-18 13:51:37 +08001848 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001849 context_set_address_width(context, iommu->msagaw);
Yu Zhao93a23a72009-05-18 13:51:37 +08001850 else {
1851 context_set_address_root(context, virt_to_phys(pgd));
1852 context_set_address_width(context, iommu->agaw);
1853 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001854
1855 context_set_translation_type(context, translation);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001856 context_set_fault_enable(context);
1857 context_set_present(context);
Weidong Han5331fe62008-12-08 23:00:00 +08001858 domain_flush_cache(domain, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001859
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001860 /*
1861 * It's a non-present to present mapping. If hardware doesn't cache
 1862 * non-present entries, we only need to flush the write-buffer. If it
 1863 * _does_ cache non-present entries, then it does so in the special
1864 * domain #0, which we have to flush:
1865 */
1866 if (cap_caching_mode(iommu->cap)) {
1867 iommu->flush.flush_context(iommu, 0,
1868 (((u16)bus) << 8) | devfn,
1869 DMA_CCMD_MASK_NOBIT,
1870 DMA_CCMD_DEVICE_INVL);
Jiang Liu18fd7792014-07-11 14:19:26 +08001871 iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001872 } else {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001873 iommu_flush_write_buffer(iommu);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001874 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001875 iommu_enable_dev_iotlb(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001876 spin_unlock_irqrestore(&iommu->lock, flags);
Weidong Hanc7151a82008-12-08 22:51:37 +08001877
Jiang Liufb170fb2014-07-11 14:19:28 +08001878 domain_attach_iommu(domain, iommu);
1879
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001880 return 0;
1881}
1882
Alex Williamson579305f2014-07-03 09:51:43 -06001883struct domain_context_mapping_data {
1884 struct dmar_domain *domain;
1885 struct intel_iommu *iommu;
1886 int translation;
1887};
1888
1889static int domain_context_mapping_cb(struct pci_dev *pdev,
1890 u16 alias, void *opaque)
1891{
1892 struct domain_context_mapping_data *data = opaque;
1893
1894 return domain_context_mapping_one(data->domain, data->iommu,
1895 PCI_BUS_NUM(alias), alias & 0xff,
1896 data->translation);
1897}
1898
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001899static int
David Woodhousee1f167f2014-03-09 15:24:46 -07001900domain_context_mapping(struct dmar_domain *domain, struct device *dev,
1901 int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001902{
David Woodhouse64ae8922014-03-09 12:52:30 -07001903 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07001904 u8 bus, devfn;
Alex Williamson579305f2014-07-03 09:51:43 -06001905 struct domain_context_mapping_data data;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001906
David Woodhousee1f167f2014-03-09 15:24:46 -07001907 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse64ae8922014-03-09 12:52:30 -07001908 if (!iommu)
1909 return -ENODEV;
1910
Alex Williamson579305f2014-07-03 09:51:43 -06001911 if (!dev_is_pci(dev))
1912 return domain_context_mapping_one(domain, iommu, bus, devfn,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001913 translation);
Alex Williamson579305f2014-07-03 09:51:43 -06001914
1915 data.domain = domain;
1916 data.iommu = iommu;
1917 data.translation = translation;
1918
1919 return pci_for_each_dma_alias(to_pci_dev(dev),
1920 &domain_context_mapping_cb, &data);
1921}
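
/*
 * Illustrative sketch (kept out of the build): pci_for_each_dma_alias()
 * calls the callback once per requester id the device can generate (the
 * device itself plus any bridge aliases), which is why every alias gets
 * its own context entry above. This made-up callback only logs them.
 */
#if 0
static int example_dump_dma_alias(struct pci_dev *pdev, u16 alias, void *opaque)
{
	dev_info(&pdev->dev, "DMA alias %02x:%02x.%d\n",
		 PCI_BUS_NUM(alias), PCI_SLOT(alias & 0xff),
		 PCI_FUNC(alias & 0xff));
	return 0;	/* non-zero would stop the walk */
}
#endif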
1922
1923static int domain_context_mapped_cb(struct pci_dev *pdev,
1924 u16 alias, void *opaque)
1925{
1926 struct intel_iommu *iommu = opaque;
1927
1928 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001929}
1930
David Woodhousee1f167f2014-03-09 15:24:46 -07001931static int domain_context_mapped(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001932{
Weidong Han5331fe62008-12-08 23:00:00 +08001933 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07001934 u8 bus, devfn;
Weidong Han5331fe62008-12-08 23:00:00 +08001935
David Woodhousee1f167f2014-03-09 15:24:46 -07001936 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001937 if (!iommu)
1938 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001939
Alex Williamson579305f2014-07-03 09:51:43 -06001940 if (!dev_is_pci(dev))
1941 return device_context_mapped(iommu, bus, devfn);
David Woodhousee1f167f2014-03-09 15:24:46 -07001942
Alex Williamson579305f2014-07-03 09:51:43 -06001943 return !pci_for_each_dma_alias(to_pci_dev(dev),
1944 domain_context_mapped_cb, iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001945}
1946
Fenghua Yuf5329592009-08-04 15:09:37 -07001947/* Return the number of VTD pages covering the range, aligned to the MM page size */
1948static inline unsigned long aligned_nrpages(unsigned long host_addr,
1949 size_t size)
1950{
1951 host_addr &= ~PAGE_MASK;
1952 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
1953}
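
/*
 * Worked example (kept out of the build), assuming 4KiB pages: a
 * 0x2000-byte buffer that starts 0xfff bytes into its first page spans
 * three pages, so three VT-d PTEs are needed even though the length alone
 * suggests two.
 */
#if 0
static void example_aligned_nrpages(void)
{
	/* (0xfff + 0x2000) aligns up to 0x3000; 0x3000 >> 12 == 3 */
	BUG_ON(aligned_nrpages(0x1fff, 0x2000) != 3);
}
#endif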
1954
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001955/* Return largest possible superpage level for a given mapping */
1956static inline int hardware_largepage_caps(struct dmar_domain *domain,
1957 unsigned long iov_pfn,
1958 unsigned long phy_pfn,
1959 unsigned long pages)
1960{
1961 int support, level = 1;
1962 unsigned long pfnmerge;
1963
1964 support = domain->iommu_superpage;
1965
1966 /* To use a large page, the virtual *and* physical addresses
1967 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
1968 of them will mean we have to use smaller pages. So just
1969 merge them and check both at once. */
1970 pfnmerge = iov_pfn | phy_pfn;
1971
1972 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
1973 pages >>= VTD_STRIDE_SHIFT;
1974 if (!pages)
1975 break;
1976 pfnmerge >>= VTD_STRIDE_SHIFT;
1977 level++;
1978 support--;
1979 }
1980 return level;
1981}
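
/*
 * Worked example (kept out of the build): with 2MiB superpages available
 * (iommu_superpage >= 1), an IOVA pfn of 0x200 and a physical pfn of 0x400
 * are both 512-page aligned, so a 512-page mapping returns level 2 and a
 * single large PTE is used; offsetting either pfn by one page would drop
 * the result back to level 1.
 */
#if 0
static int example_superpage_level(struct dmar_domain *domain)
{
	return hardware_largepage_caps(domain, 0x200, 0x400, 512);	/* expect 2 */
}
#endif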
1982
David Woodhouse9051aa02009-06-29 12:30:54 +01001983static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1984 struct scatterlist *sg, unsigned long phys_pfn,
1985 unsigned long nr_pages, int prot)
David Woodhousee1605492009-06-29 11:17:38 +01001986{
1987 struct dma_pte *first_pte = NULL, *pte = NULL;
David Woodhouse9051aa02009-06-29 12:30:54 +01001988 phys_addr_t uninitialized_var(pteval);
Jiang Liucc4f14a2014-11-26 09:42:10 +08001989 unsigned long sg_res = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001990 unsigned int largepage_lvl = 0;
1991 unsigned long lvl_pages = 0;
David Woodhousee1605492009-06-29 11:17:38 +01001992
Jiang Liu162d1b12014-07-11 14:19:35 +08001993 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
David Woodhousee1605492009-06-29 11:17:38 +01001994
1995 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1996 return -EINVAL;
1997
1998 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1999
Jiang Liucc4f14a2014-11-26 09:42:10 +08002000 if (!sg) {
2001 sg_res = nr_pages;
David Woodhouse9051aa02009-06-29 12:30:54 +01002002 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
2003 }
2004
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002005 while (nr_pages > 0) {
David Woodhousec85994e2009-07-01 19:21:24 +01002006 uint64_t tmp;
2007
David Woodhousee1605492009-06-29 11:17:38 +01002008 if (!sg_res) {
Fenghua Yuf5329592009-08-04 15:09:37 -07002009 sg_res = aligned_nrpages(sg->offset, sg->length);
David Woodhousee1605492009-06-29 11:17:38 +01002010 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
2011 sg->dma_length = sg->length;
2012 pteval = page_to_phys(sg_page(sg)) | prot;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002013 phys_pfn = pteval >> VTD_PAGE_SHIFT;
David Woodhousee1605492009-06-29 11:17:38 +01002014 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002015
David Woodhousee1605492009-06-29 11:17:38 +01002016 if (!pte) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002017 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2018
David Woodhouse5cf0a762014-03-19 16:07:49 +00002019 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
David Woodhousee1605492009-06-29 11:17:38 +01002020 if (!pte)
2021 return -ENOMEM;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002022 /* It is a large page */
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002023 if (largepage_lvl > 1) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002024 pteval |= DMA_PTE_LARGE_PAGE;
Jiang Liud41a4ad2014-07-11 14:19:34 +08002025 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2026 /*
2027 * Ensure that old small page tables are
 2028 * removed to make room for the superpage,
2029 * if they exist.
2030 */
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002031 dma_pte_free_pagetable(domain, iov_pfn,
Jiang Liud41a4ad2014-07-11 14:19:34 +08002032 iov_pfn + lvl_pages - 1);
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002033 } else {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002034 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002035 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002036
David Woodhousee1605492009-06-29 11:17:38 +01002037 }
 2038 /* We don't need a lock here; nobody else
 2039 * touches this iova range
2040 */
David Woodhouse7766a3f2009-07-01 20:27:03 +01002041 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
David Woodhousec85994e2009-07-01 19:21:24 +01002042 if (tmp) {
David Woodhouse1bf20f02009-06-29 22:06:43 +01002043 static int dumps = 5;
David Woodhousec85994e2009-07-01 19:21:24 +01002044 printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2045 iov_pfn, tmp, (unsigned long long)pteval);
David Woodhouse1bf20f02009-06-29 22:06:43 +01002046 if (dumps) {
2047 dumps--;
2048 debug_dma_dump_mappings(NULL);
2049 }
2050 WARN_ON(1);
2051 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002052
2053 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2054
2055 BUG_ON(nr_pages < lvl_pages);
2056 BUG_ON(sg_res < lvl_pages);
2057
2058 nr_pages -= lvl_pages;
2059 iov_pfn += lvl_pages;
2060 phys_pfn += lvl_pages;
2061 pteval += lvl_pages * VTD_PAGE_SIZE;
2062 sg_res -= lvl_pages;
2063
2064 /* If the next PTE would be the first in a new page, then we
2065 need to flush the cache on the entries we've just written.
2066 And then we'll need to recalculate 'pte', so clear it and
2067 let it get set again in the if (!pte) block above.
2068
2069 If we're done (!nr_pages) we need to flush the cache too.
2070
2071 Also if we've been setting superpages, we may need to
2072 recalculate 'pte' and switch back to smaller pages for the
2073 end of the mapping, if the trailing size is not enough to
2074 use another superpage (i.e. sg_res < lvl_pages). */
David Woodhousee1605492009-06-29 11:17:38 +01002075 pte++;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002076 if (!nr_pages || first_pte_in_page(pte) ||
2077 (largepage_lvl > 1 && sg_res < lvl_pages)) {
David Woodhousee1605492009-06-29 11:17:38 +01002078 domain_flush_cache(domain, first_pte,
2079 (void *)pte - (void *)first_pte);
2080 pte = NULL;
2081 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002082
2083 if (!sg_res && nr_pages)
David Woodhousee1605492009-06-29 11:17:38 +01002084 sg = sg_next(sg);
2085 }
2086 return 0;
2087}
2088
David Woodhouse9051aa02009-06-29 12:30:54 +01002089static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2090 struct scatterlist *sg, unsigned long nr_pages,
2091 int prot)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002092{
David Woodhouse9051aa02009-06-29 12:30:54 +01002093 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2094}
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002095
David Woodhouse9051aa02009-06-29 12:30:54 +01002096static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2097 unsigned long phys_pfn, unsigned long nr_pages,
2098 int prot)
2099{
2100 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002101}
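
/*
 * Illustrative sketch (kept out of the build): mapping 16 physically
 * contiguous pages read/write at IOVA 1MiB through the pfn-based helper.
 * The pfn values are made up; real callers get the IOVA from the
 * allocator and the physical pages from the buffer being mapped.
 */
#if 0
static int example_map_contiguous(struct dmar_domain *domain)
{
	unsigned long iov_pfn = mm_to_dma_pfn(IOVA_PFN(0x100000));
	unsigned long phys_pfn = 0x12340;

	return domain_pfn_mapping(domain, iov_pfn, phys_pfn, 16,
				  DMA_PTE_READ | DMA_PTE_WRITE);
}
#endif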
2102
Weidong Hanc7151a82008-12-08 22:51:37 +08002103static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002104{
Weidong Hanc7151a82008-12-08 22:51:37 +08002105 if (!iommu)
2106 return;
Weidong Han8c11e792008-12-08 15:29:22 +08002107
2108 clear_context_table(iommu, bus, devfn);
2109 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002110 DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002111 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002112}
2113
David Woodhouse109b9b02012-05-25 17:43:02 +01002114static inline void unlink_domain_info(struct device_domain_info *info)
2115{
2116 assert_spin_locked(&device_domain_lock);
2117 list_del(&info->link);
2118 list_del(&info->global);
2119 if (info->dev)
David Woodhouse0bcb3e22014-03-06 17:12:03 +00002120 info->dev->archdata.iommu = NULL;
David Woodhouse109b9b02012-05-25 17:43:02 +01002121}
2122
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002123static void domain_remove_dev_info(struct dmar_domain *domain)
2124{
Yijing Wang3a74ca02014-05-20 20:37:47 +08002125 struct device_domain_info *info, *tmp;
Jiang Liufb170fb2014-07-11 14:19:28 +08002126 unsigned long flags;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002127
2128 spin_lock_irqsave(&device_domain_lock, flags);
Yijing Wang3a74ca02014-05-20 20:37:47 +08002129 list_for_each_entry_safe(info, tmp, &domain->devices, link) {
David Woodhouse109b9b02012-05-25 17:43:02 +01002130 unlink_domain_info(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002131 spin_unlock_irqrestore(&device_domain_lock, flags);
2132
Yu Zhao93a23a72009-05-18 13:51:37 +08002133 iommu_disable_dev_iotlb(info);
David Woodhouse7c7faa12014-03-09 13:33:06 -07002134 iommu_detach_dev(info->iommu, info->bus, info->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002135
Jiang Liuab8dfe22014-07-11 14:19:27 +08002136 if (domain_type_is_vm(domain)) {
David Woodhouse7c7faa12014-03-09 13:33:06 -07002137 iommu_detach_dependent_devices(info->iommu, info->dev);
Jiang Liufb170fb2014-07-11 14:19:28 +08002138 domain_detach_iommu(domain, info->iommu);
Jiang Liu92d03cc2014-02-19 14:07:28 +08002139 }
2140
2141 free_devinfo_mem(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002142 spin_lock_irqsave(&device_domain_lock, flags);
2143 }
2144 spin_unlock_irqrestore(&device_domain_lock, flags);
2145}
2146
2147/*
2148 * find_domain
David Woodhouse1525a292014-03-06 16:19:30 +00002149 * Note: struct device->archdata.iommu stores the info
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002150 */
David Woodhouse1525a292014-03-06 16:19:30 +00002151static struct dmar_domain *find_domain(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002152{
2153 struct device_domain_info *info;
2154
2155 /* No lock here, assumes no domain exit in normal case */
David Woodhouse1525a292014-03-06 16:19:30 +00002156 info = dev->archdata.iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002157 if (info)
2158 return info->domain;
2159 return NULL;
2160}
2161
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002162static inline struct device_domain_info *
Jiang Liu745f2582014-02-19 14:07:26 +08002163dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2164{
2165 struct device_domain_info *info;
2166
2167 list_for_each_entry(info, &device_domain_list, global)
David Woodhouse41e80dca2014-03-09 13:55:54 -07002168 if (info->iommu->segment == segment && info->bus == bus &&
Jiang Liu745f2582014-02-19 14:07:26 +08002169 info->devfn == devfn)
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002170 return info;
Jiang Liu745f2582014-02-19 14:07:26 +08002171
2172 return NULL;
2173}
2174
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002175static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
David Woodhouse41e80dca2014-03-09 13:55:54 -07002176 int bus, int devfn,
David Woodhouseb718cd32014-03-09 13:11:33 -07002177 struct device *dev,
2178 struct dmar_domain *domain)
Jiang Liu745f2582014-02-19 14:07:26 +08002179{
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002180 struct dmar_domain *found = NULL;
Jiang Liu745f2582014-02-19 14:07:26 +08002181 struct device_domain_info *info;
2182 unsigned long flags;
2183
2184 info = alloc_devinfo_mem();
2185 if (!info)
David Woodhouseb718cd32014-03-09 13:11:33 -07002186 return NULL;
Jiang Liu745f2582014-02-19 14:07:26 +08002187
Jiang Liu745f2582014-02-19 14:07:26 +08002188 info->bus = bus;
2189 info->devfn = devfn;
2190 info->dev = dev;
2191 info->domain = domain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002192 info->iommu = iommu;
Jiang Liu745f2582014-02-19 14:07:26 +08002193
2194 spin_lock_irqsave(&device_domain_lock, flags);
2195 if (dev)
David Woodhouse0bcb3e22014-03-06 17:12:03 +00002196 found = find_domain(dev);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002197 else {
2198 struct device_domain_info *info2;
David Woodhouse41e80dca2014-03-09 13:55:54 -07002199 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002200 if (info2)
2201 found = info2->domain;
2202 }
Jiang Liu745f2582014-02-19 14:07:26 +08002203 if (found) {
2204 spin_unlock_irqrestore(&device_domain_lock, flags);
2205 free_devinfo_mem(info);
David Woodhouseb718cd32014-03-09 13:11:33 -07002206 /* Caller must free the original domain */
2207 return found;
Jiang Liu745f2582014-02-19 14:07:26 +08002208 }
2209
David Woodhouseb718cd32014-03-09 13:11:33 -07002210 list_add(&info->link, &domain->devices);
2211 list_add(&info->global, &device_domain_list);
2212 if (dev)
2213 dev->archdata.iommu = info;
2214 spin_unlock_irqrestore(&device_domain_lock, flags);
2215
2216 return domain;
Jiang Liu745f2582014-02-19 14:07:26 +08002217}
2218
Alex Williamson579305f2014-07-03 09:51:43 -06002219static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2220{
2221 *(u16 *)opaque = alias;
2222 return 0;
2223}
2224
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002225/* domain is initialized */
David Woodhouse146922e2014-03-09 15:44:17 -07002226static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002227{
Alex Williamson579305f2014-07-03 09:51:43 -06002228 struct dmar_domain *domain, *tmp;
2229 struct intel_iommu *iommu;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002230 struct device_domain_info *info;
Alex Williamson579305f2014-07-03 09:51:43 -06002231 u16 dma_alias;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002232 unsigned long flags;
Yijing Wangaa4d0662014-05-26 20:14:06 +08002233 u8 bus, devfn;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002234
David Woodhouse146922e2014-03-09 15:44:17 -07002235 domain = find_domain(dev);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002236 if (domain)
2237 return domain;
2238
David Woodhouse146922e2014-03-09 15:44:17 -07002239 iommu = device_to_iommu(dev, &bus, &devfn);
2240 if (!iommu)
Alex Williamson579305f2014-07-03 09:51:43 -06002241 return NULL;
2242
2243 if (dev_is_pci(dev)) {
2244 struct pci_dev *pdev = to_pci_dev(dev);
2245
2246 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2247
2248 spin_lock_irqsave(&device_domain_lock, flags);
2249 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2250 PCI_BUS_NUM(dma_alias),
2251 dma_alias & 0xff);
2252 if (info) {
2253 iommu = info->iommu;
2254 domain = info->domain;
2255 }
2256 spin_unlock_irqrestore(&device_domain_lock, flags);
2257
 2258 /* The DMA alias already has a domain; use it */
2259 if (info)
2260 goto found_domain;
2261 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002262
David Woodhouse146922e2014-03-09 15:44:17 -07002263 /* Allocate and initialize new domain for the device */
Jiang Liuab8dfe22014-07-11 14:19:27 +08002264 domain = alloc_domain(0);
Jiang Liu745f2582014-02-19 14:07:26 +08002265 if (!domain)
Alex Williamson579305f2014-07-03 09:51:43 -06002266 return NULL;
Jiang Liu44bde612014-07-11 14:19:29 +08002267 domain->id = iommu_attach_domain(domain, iommu);
2268 if (domain->id < 0) {
Alex Williamson2fe9723d2011-03-04 14:52:30 -07002269 free_domain_mem(domain);
Alex Williamson579305f2014-07-03 09:51:43 -06002270 return NULL;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002271 }
Jiang Liufb170fb2014-07-11 14:19:28 +08002272 domain_attach_iommu(domain, iommu);
Alex Williamson579305f2014-07-03 09:51:43 -06002273 if (domain_init(domain, gaw)) {
2274 domain_exit(domain);
2275 return NULL;
2276 }
2277
2278 /* register PCI DMA alias device */
2279 if (dev_is_pci(dev)) {
2280 tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2281 dma_alias & 0xff, NULL, domain);
2282
2283 if (!tmp || tmp != domain) {
2284 domain_exit(domain);
2285 domain = tmp;
2286 }
2287
David Woodhouseb718cd32014-03-09 13:11:33 -07002288 if (!domain)
Alex Williamson579305f2014-07-03 09:51:43 -06002289 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002290 }
2291
2292found_domain:
Alex Williamson579305f2014-07-03 09:51:43 -06002293 tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
2294
2295 if (!tmp || tmp != domain) {
2296 domain_exit(domain);
2297 domain = tmp;
2298 }
David Woodhouseb718cd32014-03-09 13:11:33 -07002299
2300 return domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002301}
2302
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002303static int iommu_identity_mapping;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002304#define IDENTMAP_ALL 1
2305#define IDENTMAP_GFX 2
2306#define IDENTMAP_AZALIA 4
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002307
David Woodhouseb2132032009-06-26 18:50:28 +01002308static int iommu_domain_identity_map(struct dmar_domain *domain,
2309 unsigned long long start,
2310 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002311{
David Woodhousec5395d52009-06-28 16:35:56 +01002312 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2313 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002314
David Woodhousec5395d52009-06-28 16:35:56 +01002315 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2316 dma_to_mm_pfn(last_vpfn))) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002317 printk(KERN_ERR "IOMMU: reserve iova failed\n");
David Woodhouseb2132032009-06-26 18:50:28 +01002318 return -ENOMEM;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002319 }
2320
David Woodhousec5395d52009-06-28 16:35:56 +01002321 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2322 start, end, domain->id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002323 /*
2324 * RMRR range might have overlap with physical memory range,
2325 * clear it first
2326 */
David Woodhousec5395d52009-06-28 16:35:56 +01002327 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002328
David Woodhousec5395d52009-06-28 16:35:56 +01002329 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2330 last_vpfn - first_vpfn + 1,
David Woodhouse61df7442009-06-28 11:55:58 +01002331 DMA_PTE_READ|DMA_PTE_WRITE);
David Woodhouseb2132032009-06-26 18:50:28 +01002332}
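
/*
 * Illustrative sketch (kept out of the build): an identity mapping just
 * reserves the IOVA range and maps every pfn onto itself, which is how
 * the RMRR and si_domain ranges are handled. The 2MiB range at 768MiB
 * below is a made-up example.
 */
#if 0
static int example_identity_map(struct dmar_domain *domain)
{
	return iommu_domain_identity_map(domain, 0x30000000ULL,
					 0x30000000ULL + 0x200000 - 1);
}
#endif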
2333
David Woodhouse0b9d9752014-03-09 15:48:15 -07002334static int iommu_prepare_identity_map(struct device *dev,
David Woodhouseb2132032009-06-26 18:50:28 +01002335 unsigned long long start,
2336 unsigned long long end)
2337{
2338 struct dmar_domain *domain;
2339 int ret;
2340
David Woodhouse0b9d9752014-03-09 15:48:15 -07002341 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
David Woodhouseb2132032009-06-26 18:50:28 +01002342 if (!domain)
2343 return -ENOMEM;
2344
David Woodhouse19943b02009-08-04 16:19:20 +01002345 /* For _hardware_ passthrough, don't bother. But for software
2346 passthrough, we do it anyway -- it may indicate a memory
 2347 range which is reserved in E820 and so didn't get set
2348 up to start with in si_domain */
2349 if (domain == si_domain && hw_pass_through) {
2350 printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
David Woodhouse0b9d9752014-03-09 15:48:15 -07002351 dev_name(dev), start, end);
David Woodhouse19943b02009-08-04 16:19:20 +01002352 return 0;
2353 }
2354
2355 printk(KERN_INFO
2356 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
David Woodhouse0b9d9752014-03-09 15:48:15 -07002357 dev_name(dev), start, end);
David Woodhouse2ff729f2009-08-26 14:25:41 +01002358
David Woodhouse5595b522009-12-02 09:21:55 +00002359 if (end < start) {
2360 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2361 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2362 dmi_get_system_info(DMI_BIOS_VENDOR),
2363 dmi_get_system_info(DMI_BIOS_VERSION),
2364 dmi_get_system_info(DMI_PRODUCT_VERSION));
2365 ret = -EIO;
2366 goto error;
2367 }
2368
David Woodhouse2ff729f2009-08-26 14:25:41 +01002369 if (end >> agaw_to_width(domain->agaw)) {
2370 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2371 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2372 agaw_to_width(domain->agaw),
2373 dmi_get_system_info(DMI_BIOS_VENDOR),
2374 dmi_get_system_info(DMI_BIOS_VERSION),
2375 dmi_get_system_info(DMI_PRODUCT_VERSION));
2376 ret = -EIO;
2377 goto error;
2378 }
David Woodhouse19943b02009-08-04 16:19:20 +01002379
David Woodhouseb2132032009-06-26 18:50:28 +01002380 ret = iommu_domain_identity_map(domain, start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002381 if (ret)
2382 goto error;
2383
2384 /* context entry init */
David Woodhouse0b9d9752014-03-09 15:48:15 -07002385 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
David Woodhouseb2132032009-06-26 18:50:28 +01002386 if (ret)
2387 goto error;
2388
2389 return 0;
2390
2391 error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002392 domain_exit(domain);
2393 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002394}
2395
2396static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
David Woodhouse0b9d9752014-03-09 15:48:15 -07002397 struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002398{
David Woodhouse0b9d9752014-03-09 15:48:15 -07002399 if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002400 return 0;
David Woodhouse0b9d9752014-03-09 15:48:15 -07002401 return iommu_prepare_identity_map(dev, rmrr->base_address,
2402 rmrr->end_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002403}
2404
Suresh Siddhad3f13812011-08-23 17:05:25 -07002405#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002406static inline void iommu_prepare_isa(void)
2407{
2408 struct pci_dev *pdev;
2409 int ret;
2410
2411 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2412 if (!pdev)
2413 return;
2414
David Woodhousec7ab48d2009-06-26 19:10:36 +01002415 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
David Woodhouse0b9d9752014-03-09 15:48:15 -07002416 ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002417
2418 if (ret)
David Woodhousec7ab48d2009-06-26 19:10:36 +01002419 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2420 "floppy might not work\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002421
Yijing Wang9b27e822014-05-20 20:37:52 +08002422 pci_dev_put(pdev);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002423}
2424#else
2425static inline void iommu_prepare_isa(void)
2426{
2427 return;
2428}
Suresh Siddhad3f13812011-08-23 17:05:25 -07002429#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002430
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002431static int md_domain_init(struct dmar_domain *domain, int guest_width);
David Woodhousec7ab48d2009-06-26 19:10:36 +01002432
Matt Kraai071e1372009-08-23 22:30:22 -07002433static int __init si_domain_init(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002434{
2435 struct dmar_drhd_unit *drhd;
2436 struct intel_iommu *iommu;
David Woodhousec7ab48d2009-06-26 19:10:36 +01002437 int nid, ret = 0;
Jiang Liu44bde612014-07-11 14:19:29 +08002438 bool first = true;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002439
Jiang Liuab8dfe22014-07-11 14:19:27 +08002440 si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002441 if (!si_domain)
2442 return -EFAULT;
2443
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002444 for_each_active_iommu(iommu, drhd) {
2445 ret = iommu_attach_domain(si_domain, iommu);
Jiang Liufb170fb2014-07-11 14:19:28 +08002446 if (ret < 0) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002447 domain_exit(si_domain);
2448 return -EFAULT;
Jiang Liu44bde612014-07-11 14:19:29 +08002449 } else if (first) {
2450 si_domain->id = ret;
2451 first = false;
2452 } else if (si_domain->id != ret) {
2453 domain_exit(si_domain);
2454 return -EFAULT;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002455 }
Jiang Liufb170fb2014-07-11 14:19:28 +08002456 domain_attach_iommu(si_domain, iommu);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002457 }
2458
2459 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2460 domain_exit(si_domain);
2461 return -EFAULT;
2462 }
2463
Jiang Liu9544c002014-01-06 14:18:13 +08002464 pr_debug("IOMMU: identity mapping domain is domain %d\n",
2465 si_domain->id);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002466
David Woodhouse19943b02009-08-04 16:19:20 +01002467 if (hw)
2468 return 0;
2469
David Woodhousec7ab48d2009-06-26 19:10:36 +01002470 for_each_online_node(nid) {
Tejun Heod4bbf7e2011-11-28 09:46:22 -08002471 unsigned long start_pfn, end_pfn;
2472 int i;
2473
2474 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2475 ret = iommu_domain_identity_map(si_domain,
2476 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2477 if (ret)
2478 return ret;
2479 }
David Woodhousec7ab48d2009-06-26 19:10:36 +01002480 }
2481
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002482 return 0;
2483}
2484
David Woodhouse9b226622014-03-09 14:03:28 -07002485static int identity_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002486{
2487 struct device_domain_info *info;
2488
2489 if (likely(!iommu_identity_mapping))
2490 return 0;
2491
David Woodhouse9b226622014-03-09 14:03:28 -07002492 info = dev->archdata.iommu;
Mike Traviscb452a42011-05-28 13:15:03 -05002493 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2494 return (info->domain == si_domain);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002495
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002496 return 0;
2497}
2498
2499static int domain_add_dev_info(struct dmar_domain *domain,
David Woodhouse5913c9b2014-03-09 16:27:31 -07002500 struct device *dev, int translation)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002501{
David Woodhouse0ac72662014-03-09 13:19:22 -07002502 struct dmar_domain *ndomain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002503 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07002504 u8 bus, devfn;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002505 int ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002506
David Woodhouse5913c9b2014-03-09 16:27:31 -07002507 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002508 if (!iommu)
2509 return -ENODEV;
2510
David Woodhouse5913c9b2014-03-09 16:27:31 -07002511 ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
David Woodhouse0ac72662014-03-09 13:19:22 -07002512 if (ndomain != domain)
2513 return -EBUSY;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002514
David Woodhouse5913c9b2014-03-09 16:27:31 -07002515 ret = domain_context_mapping(domain, dev, translation);
David Woodhousee2ad23d2012-05-25 17:42:54 +01002516 if (ret) {
David Woodhouse5913c9b2014-03-09 16:27:31 -07002517 domain_remove_one_dev_info(domain, dev);
David Woodhousee2ad23d2012-05-25 17:42:54 +01002518 return ret;
2519 }
2520
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002521 return 0;
2522}
2523
David Woodhouse0b9d9752014-03-09 15:48:15 -07002524static bool device_has_rmrr(struct device *dev)
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002525{
2526 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002527 struct device *tmp;
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002528 int i;
2529
Jiang Liu0e2426122014-02-19 14:07:34 +08002530 rcu_read_lock();
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002531 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002532 /*
2533 * Return TRUE if this RMRR contains the device that
2534 * is passed in.
2535 */
2536 for_each_active_dev_scope(rmrr->devices,
2537 rmrr->devices_cnt, i, tmp)
David Woodhouse0b9d9752014-03-09 15:48:15 -07002538 if (tmp == dev) {
Jiang Liu0e2426122014-02-19 14:07:34 +08002539 rcu_read_unlock();
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002540 return true;
Jiang Liub683b232014-02-19 14:07:32 +08002541 }
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002542 }
Jiang Liu0e2426122014-02-19 14:07:34 +08002543 rcu_read_unlock();
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002544 return false;
2545}
2546
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002547/*
2548 * There are a couple cases where we need to restrict the functionality of
2549 * devices associated with RMRRs. The first is when evaluating a device for
2550 * identity mapping because problems exist when devices are moved in and out
2551 * of domains and their respective RMRR information is lost. This means that
2552 * a device with associated RMRRs will never be in a "passthrough" domain.
2553 * The second is use of the device through the IOMMU API. This interface
2554 * expects to have full control of the IOVA space for the device. We cannot
2555 * satisfy both the requirement that RMRR access is maintained and have an
2556 * unencumbered IOVA space. We also have no ability to quiesce the device's
2557 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2558 * We therefore prevent devices associated with an RMRR from participating in
2559 * the IOMMU API, which eliminates them from device assignment.
2560 *
2561 * In both cases we assume that PCI USB devices with RMRRs have them largely
2562 * for historical reasons and that the RMRR space is not actively used post
2563 * boot. This exclusion may change if vendors begin to abuse it.
2564 */
2565static bool device_is_rmrr_locked(struct device *dev)
2566{
2567 if (!device_has_rmrr(dev))
2568 return false;
2569
2570 if (dev_is_pci(dev)) {
2571 struct pci_dev *pdev = to_pci_dev(dev);
2572
2573 if ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
2574 return false;
2575 }
2576
2577 return true;
2578}
2579
David Woodhouse3bdb2592014-03-09 16:03:08 -07002580static int iommu_should_identity_map(struct device *dev, int startup)
David Woodhouse6941af22009-07-04 18:24:27 +01002581{
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002582
David Woodhouse3bdb2592014-03-09 16:03:08 -07002583 if (dev_is_pci(dev)) {
2584 struct pci_dev *pdev = to_pci_dev(dev);
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002585
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002586 if (device_is_rmrr_locked(dev))
David Woodhouse3bdb2592014-03-09 16:03:08 -07002587 return 0;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002588
David Woodhouse3bdb2592014-03-09 16:03:08 -07002589 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2590 return 1;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002591
David Woodhouse3bdb2592014-03-09 16:03:08 -07002592 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2593 return 1;
2594
2595 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2596 return 0;
2597
2598 /*
2599 * We want to start off with all devices in the 1:1 domain, and
2600 * take them out later if we find they can't access all of memory.
2601 *
2602 * However, we can't do this for PCI devices behind bridges,
2603 * because all PCI devices behind the same bridge will end up
2604 * with the same source-id on their transactions.
2605 *
2606 * Practically speaking, we can't change things around for these
2607 * devices at run-time, because we can't be sure there'll be no
2608 * DMA transactions in flight for any of their siblings.
2609 *
2610 * So PCI devices (unless they're on the root bus) as well as
2611 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2612 * the 1:1 domain, just in _case_ one of their siblings turns out
2613 * not to be able to map all of memory.
2614 */
2615 if (!pci_is_pcie(pdev)) {
2616 if (!pci_is_root_bus(pdev->bus))
2617 return 0;
2618 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2619 return 0;
2620 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2621 return 0;
2622 } else {
2623 if (device_has_rmrr(dev))
2624 return 0;
2625 }
David Woodhouse6941af22009-07-04 18:24:27 +01002626
David Woodhouse3dfc8132009-07-04 19:11:08 +01002627 /*
David Woodhouse3dfc8132009-07-04 19:11:08 +01002628 * At boot time, we don't yet know if devices will be 64-bit capable.
David Woodhouse3bdb2592014-03-09 16:03:08 -07002629 * Assume that they will -- if they turn out not to be, then we can
David Woodhouse3dfc8132009-07-04 19:11:08 +01002630 * take them out of the 1:1 domain later.
2631 */
Chris Wright8fcc5372011-05-28 13:15:02 -05002632 if (!startup) {
2633 /*
2634 * If the device's dma_mask is less than the system's memory
2635 * size then this is not a candidate for identity mapping.
2636 */
David Woodhouse3bdb2592014-03-09 16:03:08 -07002637 u64 dma_mask = *dev->dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002638
David Woodhouse3bdb2592014-03-09 16:03:08 -07002639 if (dev->coherent_dma_mask &&
2640 dev->coherent_dma_mask < dma_mask)
2641 dma_mask = dev->coherent_dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002642
David Woodhouse3bdb2592014-03-09 16:03:08 -07002643 return dma_mask >= dma_get_required_mask(dev);
Chris Wright8fcc5372011-05-28 13:15:02 -05002644 }
David Woodhouse6941af22009-07-04 18:24:27 +01002645
2646 return 1;
2647}
2648
David Woodhousecf04eee2014-03-21 16:49:04 +00002649static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2650{
2651 int ret;
2652
2653 if (!iommu_should_identity_map(dev, 1))
2654 return 0;
2655
2656 ret = domain_add_dev_info(si_domain, dev,
2657 hw ? CONTEXT_TT_PASS_THROUGH :
2658 CONTEXT_TT_MULTI_LEVEL);
2659 if (!ret)
2660 pr_info("IOMMU: %s identity mapping for device %s\n",
2661 hw ? "hardware" : "software", dev_name(dev));
2662 else if (ret == -ENODEV)
2663 /* device not associated with an iommu */
2664 ret = 0;
2665
2666 return ret;
2667}
2668
2669
Matt Kraai071e1372009-08-23 22:30:22 -07002670static int __init iommu_prepare_static_identity_mapping(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002671{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002672 struct pci_dev *pdev = NULL;
David Woodhousecf04eee2014-03-21 16:49:04 +00002673 struct dmar_drhd_unit *drhd;
2674 struct intel_iommu *iommu;
2675 struct device *dev;
2676 int i;
2677 int ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002678
David Woodhouse19943b02009-08-04 16:19:20 +01002679 ret = si_domain_init(hw);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002680 if (ret)
2681 return -EFAULT;
2682
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002683 for_each_pci_dev(pdev) {
David Woodhousecf04eee2014-03-21 16:49:04 +00002684 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2685 if (ret)
2686 return ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002687 }
2688
David Woodhousecf04eee2014-03-21 16:49:04 +00002689 for_each_active_iommu(iommu, drhd)
2690 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2691 struct acpi_device_physical_node *pn;
2692 struct acpi_device *adev;
2693
2694 if (dev->bus != &acpi_bus_type)
2695 continue;
2696
2697 adev = to_acpi_device(dev);
2698 mutex_lock(&adev->physical_node_lock);
2699 list_for_each_entry(pn, &adev->physical_node_list, node) {
2700 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2701 if (ret)
2702 break;
2703 }
2704 mutex_unlock(&adev->physical_node_lock);
2705 if (ret)
2706 return ret;
2707 }
2708
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002709 return 0;
2710}
2711
Jiang Liuffebeb42014-11-09 22:48:02 +08002712static void intel_iommu_init_qi(struct intel_iommu *iommu)
2713{
2714 /*
2715 * Start from a sane iommu hardware state.
2716 * If queued invalidation was already initialized by us
2717 * (for example, while enabling interrupt remapping) then
2718 * things are already rolling from a sane state.
2719 */
2720 if (!iommu->qi) {
2721 /*
2722 * Clear any previous faults.
2723 */
2724 dmar_fault(-1, iommu);
2725 /*
2726 * Disable queued invalidation if supported and already enabled
2727 * before OS handover.
2728 */
2729 dmar_disable_qi(iommu);
2730 }
2731
2732 if (dmar_enable_qi(iommu)) {
2733 /*
2734 * Queued Invalidate not enabled, use Register Based Invalidate
2735 */
2736 iommu->flush.flush_context = __iommu_flush_context;
2737 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2738 pr_info("IOMMU: %s using Register based invalidation\n",
2739 iommu->name);
2740 } else {
2741 iommu->flush.flush_context = qi_flush_context;
2742 iommu->flush.flush_iotlb = qi_flush_iotlb;
2743 pr_info("IOMMU: %s using Queued invalidation\n", iommu->name);
2744 }
2745}
2746
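/*
 * Boot-time initialization of all DMAR units: allocate the global iommu
 * and deferred-flush arrays, set up per-iommu domains and root entries,
 * build the static identity and RMRR/ISA mappings, then enable fault
 * reporting and translation on every active iommu.
 */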
Joseph Cihulab7792602011-05-03 00:08:37 -07002747static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002748{
2749 struct dmar_drhd_unit *drhd;
2750 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002751 struct device *dev;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002752 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07002753 int i, ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002754
2755 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002756 * for each drhd
2757 * allocate root
2758 * initialize and program root entry to not present
2759 * endfor
2760 */
2761 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08002762 /*
2763 * lock not needed as this is only incremented in the single
2764 * threaded kernel __init code path; all other accesses are read
2765 * only
2766 */
Jiang Liu78d8e702014-11-09 22:47:57 +08002767 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
Mike Travis1b198bb2012-03-05 15:05:16 -08002768 g_num_of_iommus++;
2769 continue;
2770 }
2771 printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
Jiang Liu78d8e702014-11-09 22:47:57 +08002772 DMAR_UNITS_SUPPORTED);
mark gross5e0d2a62008-03-04 15:22:08 -08002773 }
2774
Jiang Liuffebeb42014-11-09 22:48:02 +08002775 /* Preallocate enough resources for IOMMU hot-addition */
2776 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
2777 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
2778
Weidong Hand9630fe2008-12-08 11:06:32 +08002779 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2780 GFP_KERNEL);
2781 if (!g_iommus) {
2782 printk(KERN_ERR "Allocating global iommu array failed\n");
2783 ret = -ENOMEM;
2784 goto error;
2785 }
2786
mark gross80b20dd2008-04-18 13:53:58 -07002787 deferred_flush = kzalloc(g_num_of_iommus *
2788 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2789 if (!deferred_flush) {
mark gross5e0d2a62008-03-04 15:22:08 -08002790 ret = -ENOMEM;
Jiang Liu989d51f2014-02-19 14:07:21 +08002791 goto free_g_iommus;
mark gross5e0d2a62008-03-04 15:22:08 -08002792 }
2793
Jiang Liu7c919772014-01-06 14:18:18 +08002794 for_each_active_iommu(iommu, drhd) {
Weidong Hand9630fe2008-12-08 11:06:32 +08002795 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002796
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002797 ret = iommu_init_domains(iommu);
2798 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002799 goto free_iommu;
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002800
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002801 /*
2802 * TBD:
2803 * we could share the same root & context tables
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002804 * among all IOMMUs. Need to split it later.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002805 */
2806 ret = iommu_alloc_root_entry(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08002807 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002808 goto free_iommu;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002809 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01002810 hw_pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002811 }
2812
Jiang Liuffebeb42014-11-09 22:48:02 +08002813 for_each_active_iommu(iommu, drhd)
2814 intel_iommu_init_qi(iommu);
Youquan Songa77b67d2008-10-16 16:31:56 -07002815
David Woodhouse19943b02009-08-04 16:19:20 +01002816 if (iommu_pass_through)
David Woodhousee0fc7e02009-09-30 09:12:17 -07002817 iommu_identity_mapping |= IDENTMAP_ALL;
2818
Suresh Siddhad3f13812011-08-23 17:05:25 -07002819#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
David Woodhousee0fc7e02009-09-30 09:12:17 -07002820 iommu_identity_mapping |= IDENTMAP_GFX;
David Woodhouse19943b02009-08-04 16:19:20 +01002821#endif
David Woodhousee0fc7e02009-09-30 09:12:17 -07002822
2823 check_tylersburg_isoch();
2824
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002825 /*
2826 * If pass through is not set or not enabled, setup context entries for
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002827 * identity mappings for rmrr, gfx, and isa and may fall back to static
2828 * identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002829 */
David Woodhouse19943b02009-08-04 16:19:20 +01002830 if (iommu_identity_mapping) {
2831 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2832 if (ret) {
2833 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
Jiang Liu989d51f2014-02-19 14:07:21 +08002834 goto free_iommu;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002835 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002836 }
David Woodhouse19943b02009-08-04 16:19:20 +01002837 /*
2838 * For each rmrr
2839 * for each dev attached to rmrr
2840 * do
2841 * locate drhd for dev, alloc domain for dev
2842 * allocate free domain
2843 * allocate page table entries for rmrr
2844 * if context not allocated for bus
2845 * allocate and init context
2846 * set present in root table for this bus
2847 * init context with domain, translation etc
2848 * endfor
2849 * endfor
2850 */
2851 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2852 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002853 /* some BIOSes list non-existent devices in the DMAR table. */
2854 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
David Woodhouse832bd852014-03-07 15:08:36 +00002855 i, dev) {
David Woodhouse0b9d9752014-03-09 15:48:15 -07002856 ret = iommu_prepare_rmrr_dev(rmrr, dev);
David Woodhouse19943b02009-08-04 16:19:20 +01002857 if (ret)
2858 printk(KERN_ERR
2859 "IOMMU: mapping reserved region failed\n");
2860 }
2861 }
2862
2863 iommu_prepare_isa();
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002864
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002865 /*
2866 * for each drhd
2867 * enable fault log
2868 * global invalidate context cache
2869 * global invalidate iotlb
2870 * enable translation
2871 */
Jiang Liu7c919772014-01-06 14:18:18 +08002872 for_each_iommu(iommu, drhd) {
Joseph Cihula51a63e62011-03-21 11:04:24 -07002873 if (drhd->ignored) {
2874 /*
2875 * we always have to disable PMRs or DMA may fail on
2876 * this device
2877 */
2878 if (force_on)
Jiang Liu7c919772014-01-06 14:18:18 +08002879 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002880 continue;
Joseph Cihula51a63e62011-03-21 11:04:24 -07002881 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002882
2883 iommu_flush_write_buffer(iommu);
2884
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002885 ret = dmar_set_interrupt(iommu);
2886 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002887 goto free_iommu;
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002888
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002889 iommu_set_root_entry(iommu);
2890
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002891 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002892 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Jiang Liu2a41cce2014-07-11 14:19:33 +08002893 iommu_enable_translation(iommu);
David Woodhouseb94996c2009-09-19 15:28:12 -07002894 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002895 }
2896
2897 return 0;
Jiang Liu989d51f2014-02-19 14:07:21 +08002898
2899free_iommu:
Jiang Liuffebeb42014-11-09 22:48:02 +08002900 for_each_active_iommu(iommu, drhd) {
2901 disable_dmar_iommu(iommu);
Jiang Liua868e6b2014-01-06 14:18:20 +08002902 free_dmar_iommu(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08002903 }
Jiang Liu9bdc5312014-01-06 14:18:27 +08002904 kfree(deferred_flush);
Jiang Liu989d51f2014-02-19 14:07:21 +08002905free_g_iommus:
Weidong Hand9630fe2008-12-08 11:06:32 +08002906 kfree(g_iommus);
Jiang Liu989d51f2014-02-19 14:07:21 +08002907error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002908 return ret;
2909}
2910
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002911/* This takes a number of _MM_ pages, not VTD pages */
David Woodhouse875764d2009-06-28 21:20:51 +01002912static struct iova *intel_alloc_iova(struct device *dev,
2913 struct dmar_domain *domain,
2914 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002915{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002916 struct iova *iova = NULL;
2917
David Woodhouse875764d2009-06-28 21:20:51 +01002918 /* Restrict dma_mask to the width that the iommu can handle */
2919 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2920
2921 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002922 /*
2923 * First try to allocate an io virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07002924 * DMA_BIT_MASK(32) and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08002925 * from the higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002926 */
David Woodhouse875764d2009-06-28 21:20:51 +01002927 iova = alloc_iova(&domain->iovad, nrpages,
2928 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2929 if (iova)
2930 return iova;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002931 }
David Woodhouse875764d2009-06-28 21:20:51 +01002932 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2933 if (unlikely(!iova)) {
2934 printk(KERN_ERR "Allocating %ld-page iova for %s failed",
David Woodhouse207e3592014-03-09 16:12:32 -07002935 nrpages, dev_name(dev));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002936 return NULL;
2937 }
2938
2939 return iova;
2940}
2941
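/*
 * Find (or create) the DMA domain for @dev and make sure its context
 * entry is programmed before the domain is used for a mapping.
 */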
David Woodhoused4b709f2014-03-09 16:07:40 -07002942static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002943{
2944 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002945 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002946
David Woodhoused4b709f2014-03-09 16:07:40 -07002947 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002948 if (!domain) {
David Woodhoused4b709f2014-03-09 16:07:40 -07002949 printk(KERN_ERR "Allocating domain for %s failed",
2950 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002951 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002952 }
2953
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002954 /* make sure context mapping is ok */
David Woodhoused4b709f2014-03-09 16:07:40 -07002955 if (unlikely(!domain_context_mapped(dev))) {
2956 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002957 if (ret) {
David Woodhoused4b709f2014-03-09 16:07:40 -07002958 printk(KERN_ERR "Domain context map for %s failed",
2959 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002960 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002961 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002962 }
2963
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002964 return domain;
2965}
2966
David Woodhoused4b709f2014-03-09 16:07:40 -07002967static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
David Woodhouse147202a2009-07-07 19:43:20 +01002968{
2969 struct device_domain_info *info;
2970
2971 /* No lock here, assumes no domain exit in normal case */
David Woodhoused4b709f2014-03-09 16:07:40 -07002972 info = dev->archdata.iommu;
David Woodhouse147202a2009-07-07 19:43:20 +01002973 if (likely(info))
2974 return info->domain;
2975
2976 return __get_valid_domain_for_dev(dev);
2977}
2978
David Woodhouse3d891942014-03-06 15:59:26 +00002979static int iommu_dummy(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002980{
David Woodhouse3d891942014-03-06 15:59:26 +00002981 return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002982}
2983
David Woodhouseecb509e2014-03-09 16:29:55 -07002984/* Check if the dev needs to go through non-identity map and unmap process.*/
David Woodhouse73676832009-07-04 14:08:36 +01002985static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002986{
2987 int found;
2988
David Woodhouse3d891942014-03-06 15:59:26 +00002989 if (iommu_dummy(dev))
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002990 return 1;
2991
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002992 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002993 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002994
David Woodhouse9b226622014-03-09 14:03:28 -07002995 found = identity_mapping(dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002996 if (found) {
David Woodhouseecb509e2014-03-09 16:29:55 -07002997 if (iommu_should_identity_map(dev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002998 return 1;
2999 else {
3000 /*
3001 * The 32 bit DMA device is removed from si_domain and falls back
3002 * to non-identity mapping.
3003 */
David Woodhousebf9c9ed2014-03-09 16:19:13 -07003004 domain_remove_one_dev_info(si_domain, dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003005 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
David Woodhouseecb509e2014-03-09 16:29:55 -07003006 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003007 return 0;
3008 }
3009 } else {
3010 /*
3011 * When a 64 bit DMA device is detached from a VM, the device
3012 * is put into si_domain for identity mapping.
3013 */
David Woodhouseecb509e2014-03-09 16:29:55 -07003014 if (iommu_should_identity_map(dev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003015 int ret;
David Woodhouse5913c9b2014-03-09 16:27:31 -07003016 ret = domain_add_dev_info(si_domain, dev,
David Woodhouse5fe60f42009-08-09 10:53:41 +01003017 hw_pass_through ?
3018 CONTEXT_TT_PASS_THROUGH :
3019 CONTEXT_TT_MULTI_LEVEL);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003020 if (!ret) {
3021 printk(KERN_INFO "64bit %s uses identity mapping\n",
David Woodhouseecb509e2014-03-09 16:29:55 -07003022 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003023 return 1;
3024 }
3025 }
3026 }
3027
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003028 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003029}
3030
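/*
 * Map a physically contiguous buffer for DMA: allocate an IOVA range,
 * install the page table entries, then flush the IOTLB (in caching mode)
 * or the write buffer so the mapping is visible to the hardware.
 */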
David Woodhouse5040a912014-03-09 16:14:00 -07003031static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003032 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003033{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003034 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003035 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003036 struct iova *iova;
3037 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003038 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08003039 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07003040 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003041
3042 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003043
David Woodhouse5040a912014-03-09 16:14:00 -07003044 if (iommu_no_mapping(dev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003045 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003046
David Woodhouse5040a912014-03-09 16:14:00 -07003047 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003048 if (!domain)
3049 return 0;
3050
Weidong Han8c11e792008-12-08 15:29:22 +08003051 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01003052 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003053
David Woodhouse5040a912014-03-09 16:14:00 -07003054 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003055 if (!iova)
3056 goto error;
3057
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003058 /*
3059 * Check if DMAR supports zero-length reads on write only
3060 * mappings..
3061 */
3062 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003063 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003064 prot |= DMA_PTE_READ;
3065 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3066 prot |= DMA_PTE_WRITE;
3067 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003068 * paddr - (paddr + size) might be a partial page; we should map the whole
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003069 * page. Note: if two parts of one page are separately mapped, we
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003070 * might have two guest_addrs mapping to the same host paddr, but this
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003071 * is not a big problem
3072 */
David Woodhouse0ab36de2009-06-28 14:01:43 +01003073 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
Fenghua Yu33041ec2009-08-04 15:10:59 -07003074 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003075 if (ret)
3076 goto error;
3077
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003078 /* it's a non-present to present mapping. Only flush if caching mode */
3079 if (cap_caching_mode(iommu->cap))
David Woodhouseea8ea462014-03-05 17:09:32 +00003080 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003081 else
Weidong Han8c11e792008-12-08 15:29:22 +08003082 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003083
David Woodhouse03d6a242009-06-28 15:33:46 +01003084 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
3085 start_paddr += paddr & ~PAGE_MASK;
3086 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003087
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003088error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003089 if (iova)
3090 __free_iova(&domain->iovad, iova);
David Woodhouse4cf2e752009-02-11 17:23:43 +00003091 printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
David Woodhouse5040a912014-03-09 16:14:00 -07003092 dev_name(dev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003093 return 0;
3094}
3095
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003096static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3097 unsigned long offset, size_t size,
3098 enum dma_data_direction dir,
3099 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003100{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003101 return __intel_map_single(dev, page_to_phys(page) + offset, size,
David Woodhouse46333e32014-03-10 20:01:21 -07003102 dir, *dev->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003103}
3104
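/*
 * Lazy-unmap machinery: freed mappings are queued per iommu in
 * deferred_flush[] and released in batches, either from the 10ms
 * unmap_timer or once HIGH_WATER_MARK entries have accumulated, so
 * that one IOTLB flush can cover many unmaps.
 */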
mark gross5e0d2a62008-03-04 15:22:08 -08003105static void flush_unmaps(void)
3106{
mark gross80b20dd2008-04-18 13:53:58 -07003107 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08003108
mark gross5e0d2a62008-03-04 15:22:08 -08003109 timer_on = 0;
3110
3111 /* just flush them all */
3112 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08003113 struct intel_iommu *iommu = g_iommus[i];
3114 if (!iommu)
3115 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003116
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003117 if (!deferred_flush[i].next)
3118 continue;
3119
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003120 /* In caching mode, global flushes make emulation expensive */
3121 if (!cap_caching_mode(iommu->cap))
3122 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08003123 DMA_TLB_GLOBAL_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003124 for (j = 0; j < deferred_flush[i].next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08003125 unsigned long mask;
3126 struct iova *iova = deferred_flush[i].iova[j];
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003127 struct dmar_domain *domain = deferred_flush[i].domain[j];
Yu Zhao93a23a72009-05-18 13:51:37 +08003128
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003129 /* On real hardware multiple invalidations are expensive */
3130 if (cap_caching_mode(iommu->cap))
3131 iommu_flush_iotlb_psi(iommu, domain->id,
Jiang Liua156ef92014-07-11 14:19:36 +08003132 iova->pfn_lo, iova_size(iova),
David Woodhouseea8ea462014-03-05 17:09:32 +00003133 !deferred_flush[i].freelist[j], 0);
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003134 else {
Jiang Liua156ef92014-07-11 14:19:36 +08003135 mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003136 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
3137 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
3138 }
Yu Zhao93a23a72009-05-18 13:51:37 +08003139 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003140 if (deferred_flush[i].freelist[j])
3141 dma_free_pagelist(deferred_flush[i].freelist[j]);
mark gross80b20dd2008-04-18 13:53:58 -07003142 }
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003143 deferred_flush[i].next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003144 }
3145
mark gross5e0d2a62008-03-04 15:22:08 -08003146 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003147}
3148
3149static void flush_unmaps_timeout(unsigned long data)
3150{
mark gross80b20dd2008-04-18 13:53:58 -07003151 unsigned long flags;
3152
3153 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003154 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07003155 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003156}
3157
David Woodhouseea8ea462014-03-05 17:09:32 +00003158static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
mark gross5e0d2a62008-03-04 15:22:08 -08003159{
3160 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07003161 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08003162 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08003163
3164 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07003165 if (list_size == HIGH_WATER_MARK)
3166 flush_unmaps();
3167
Weidong Han8c11e792008-12-08 15:29:22 +08003168 iommu = domain_get_iommu(dom);
3169 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003170
mark gross80b20dd2008-04-18 13:53:58 -07003171 next = deferred_flush[iommu_id].next;
3172 deferred_flush[iommu_id].domain[next] = dom;
3173 deferred_flush[iommu_id].iova[next] = iova;
David Woodhouseea8ea462014-03-05 17:09:32 +00003174 deferred_flush[iommu_id].freelist[next] = freelist;
mark gross80b20dd2008-04-18 13:53:58 -07003175 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08003176
3177 if (!timer_on) {
3178 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
3179 timer_on = 1;
3180 }
3181 list_size++;
3182 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3183}
3184
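/*
 * Tear down the mapping at @dev_addr. In intel_iommu_strict mode the
 * IOTLB is flushed and the IOVA freed immediately; otherwise the work
 * is deferred to the batched flush above.
 */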
Jiang Liud41a4ad2014-07-11 14:19:34 +08003185static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003186{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003187 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01003188 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003189 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08003190 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00003191 struct page *freelist;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003192
David Woodhouse73676832009-07-04 14:08:36 +01003193 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003194 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003195
David Woodhouse1525a292014-03-06 16:19:30 +00003196 domain = find_domain(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003197 BUG_ON(!domain);
3198
Weidong Han8c11e792008-12-08 15:29:22 +08003199 iommu = domain_get_iommu(domain);
3200
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003201 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
David Woodhouse85b98272009-07-01 19:27:53 +01003202 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
3203 (unsigned long long)dev_addr))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003204 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003205
David Woodhoused794dc92009-06-28 00:27:49 +01003206 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3207 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003208
David Woodhoused794dc92009-06-28 00:27:49 +01003209 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
David Woodhouse207e3592014-03-09 16:12:32 -07003210 dev_name(dev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003211
David Woodhouseea8ea462014-03-05 17:09:32 +00003212 freelist = domain_unmap(domain, start_pfn, last_pfn);
David Woodhoused794dc92009-06-28 00:27:49 +01003213
mark gross5e0d2a62008-03-04 15:22:08 -08003214 if (intel_iommu_strict) {
David Woodhouse03d6a242009-06-28 15:33:46 +01003215 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
David Woodhouseea8ea462014-03-05 17:09:32 +00003216 last_pfn - start_pfn + 1, !freelist, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08003217 /* free iova */
3218 __free_iova(&domain->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003219 dma_free_pagelist(freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003220 } else {
David Woodhouseea8ea462014-03-05 17:09:32 +00003221 add_unmap(domain, iova, freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003222 /*
3223 * queue up the release of the unmap to save the 1/6th of the
3224 * cpu time used up by the iotlb flush operation...
3225 */
mark gross5e0d2a62008-03-04 15:22:08 -08003226 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003227}
3228
Jiang Liud41a4ad2014-07-11 14:19:34 +08003229static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3230 size_t size, enum dma_data_direction dir,
3231 struct dma_attrs *attrs)
3232{
3233 intel_unmap(dev, dev_addr);
3234}
3235
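/*
 * Allocate a coherent buffer. For blocking allocations CMA is tried
 * first (and the pages are given back if they do not fit under the
 * device's coherent DMA mask in the no-translation case), falling back
 * to alloc_pages() before mapping the buffer with __intel_map_single().
 */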
David Woodhouse5040a912014-03-09 16:14:00 -07003236static void *intel_alloc_coherent(struct device *dev, size_t size,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003237 dma_addr_t *dma_handle, gfp_t flags,
3238 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003239{
Akinobu Mita36746432014-06-04 16:06:51 -07003240 struct page *page = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003241 int order;
3242
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003243 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003244 order = get_order(size);
Alex Williamsone8bb9102009-11-04 15:59:34 -07003245
David Woodhouse5040a912014-03-09 16:14:00 -07003246 if (!iommu_no_mapping(dev))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003247 flags &= ~(GFP_DMA | GFP_DMA32);
David Woodhouse5040a912014-03-09 16:14:00 -07003248 else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3249 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003250 flags |= GFP_DMA;
3251 else
3252 flags |= GFP_DMA32;
3253 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003254
Akinobu Mita36746432014-06-04 16:06:51 -07003255 if (flags & __GFP_WAIT) {
3256 unsigned int count = size >> PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003257
Akinobu Mita36746432014-06-04 16:06:51 -07003258 page = dma_alloc_from_contiguous(dev, count, order);
3259 if (page && iommu_no_mapping(dev) &&
3260 page_to_phys(page) + size > dev->coherent_dma_mask) {
3261 dma_release_from_contiguous(dev, page, count);
3262 page = NULL;
3263 }
3264 }
3265
3266 if (!page)
3267 page = alloc_pages(flags, order);
3268 if (!page)
3269 return NULL;
3270 memset(page_address(page), 0, size);
3271
3272 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003273 DMA_BIDIRECTIONAL,
David Woodhouse5040a912014-03-09 16:14:00 -07003274 dev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003275 if (*dma_handle)
Akinobu Mita36746432014-06-04 16:06:51 -07003276 return page_address(page);
3277 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3278 __free_pages(page, order);
3279
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003280 return NULL;
3281}
3282
David Woodhouse5040a912014-03-09 16:14:00 -07003283static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003284 dma_addr_t dma_handle, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003285{
3286 int order;
Akinobu Mita36746432014-06-04 16:06:51 -07003287 struct page *page = virt_to_page(vaddr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003288
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003289 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003290 order = get_order(size);
3291
Jiang Liud41a4ad2014-07-11 14:19:34 +08003292 intel_unmap(dev, dma_handle);
Akinobu Mita36746432014-06-04 16:06:51 -07003293 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3294 __free_pages(page, order);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003295}
3296
David Woodhouse5040a912014-03-09 16:14:00 -07003297static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003298 int nelems, enum dma_data_direction dir,
3299 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003300{
Jiang Liud41a4ad2014-07-11 14:19:34 +08003301 intel_unmap(dev, sglist[0].dma_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003302}
3303
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003304static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003305 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003306{
3307 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003308 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003309
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003310 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003311 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00003312 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003313 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003314 }
3315 return nelems;
3316}
3317
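/*
 * Map a scatterlist: a single IOVA range large enough for the whole
 * list is allocated and the elements are mapped back to back into it
 * by domain_sg_mapping().
 */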
David Woodhouse5040a912014-03-09 16:14:00 -07003318static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003319 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003320{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003321 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003322 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003323 size_t size = 0;
3324 int prot = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003325 struct iova *iova = NULL;
3326 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003327 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01003328 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003329 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003330
3331 BUG_ON(dir == DMA_NONE);
David Woodhouse5040a912014-03-09 16:14:00 -07003332 if (iommu_no_mapping(dev))
3333 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003334
David Woodhouse5040a912014-03-09 16:14:00 -07003335 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003336 if (!domain)
3337 return 0;
3338
Weidong Han8c11e792008-12-08 15:29:22 +08003339 iommu = domain_get_iommu(domain);
3340
David Woodhouseb536d242009-06-28 14:49:31 +01003341 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01003342 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003343
David Woodhouse5040a912014-03-09 16:14:00 -07003344 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3345 *dev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003346 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003347 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003348 return 0;
3349 }
3350
3351 /*
3352 * Check if DMAR supports zero-length reads on write only
3353 * mappings..
3354 */
3355 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003356 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003357 prot |= DMA_PTE_READ;
3358 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3359 prot |= DMA_PTE_WRITE;
3360
David Woodhouseb536d242009-06-28 14:49:31 +01003361 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
David Woodhousee1605492009-06-29 11:17:38 +01003362
Fenghua Yuf5329592009-08-04 15:09:37 -07003363 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01003364 if (unlikely(ret)) {
David Woodhousee1605492009-06-29 11:17:38 +01003365 dma_pte_free_pagetable(domain, start_vpfn,
3366 start_vpfn + size - 1);
David Woodhousee1605492009-06-29 11:17:38 +01003367 __free_iova(&domain->iovad, iova);
3368 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003369 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003370
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003371 /* it's a non-present to present mapping. Only flush if caching mode */
3372 if (cap_caching_mode(iommu->cap))
David Woodhouseea8ea462014-03-05 17:09:32 +00003373 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003374 else
Weidong Han8c11e792008-12-08 15:29:22 +08003375 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003376
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003377 return nelems;
3378}
3379
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003380static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3381{
3382 return !dma_addr;
3383}
3384
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09003385struct dma_map_ops intel_dma_ops = {
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003386 .alloc = intel_alloc_coherent,
3387 .free = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003388 .map_sg = intel_map_sg,
3389 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003390 .map_page = intel_map_page,
3391 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003392 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003393};
3394
3395static inline int iommu_domain_cache_init(void)
3396{
3397 int ret = 0;
3398
3399 iommu_domain_cache = kmem_cache_create("iommu_domain",
3400 sizeof(struct dmar_domain),
3401 0,
3402 SLAB_HWCACHE_ALIGN,
3403
3404 NULL);
3405 if (!iommu_domain_cache) {
3406 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3407 ret = -ENOMEM;
3408 }
3409
3410 return ret;
3411}
3412
3413static inline int iommu_devinfo_cache_init(void)
3414{
3415 int ret = 0;
3416
3417 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3418 sizeof(struct device_domain_info),
3419 0,
3420 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003421 NULL);
3422 if (!iommu_devinfo_cache) {
3423 printk(KERN_ERR "Couldn't create devinfo cache\n");
3424 ret = -ENOMEM;
3425 }
3426
3427 return ret;
3428}
3429
3430static inline int iommu_iova_cache_init(void)
3431{
3432 int ret = 0;
3433
3434 iommu_iova_cache = kmem_cache_create("iommu_iova",
3435 sizeof(struct iova),
3436 0,
3437 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003438 NULL);
3439 if (!iommu_iova_cache) {
3440 printk(KERN_ERR "Couldn't create iova cache\n");
3441 ret = -ENOMEM;
3442 }
3443
3444 return ret;
3445}
3446
3447static int __init iommu_init_mempool(void)
3448{
3449 int ret;
3450 ret = iommu_iova_cache_init();
3451 if (ret)
3452 return ret;
3453
3454 ret = iommu_domain_cache_init();
3455 if (ret)
3456 goto domain_error;
3457
3458 ret = iommu_devinfo_cache_init();
3459 if (!ret)
3460 return ret;
3461
3462 kmem_cache_destroy(iommu_domain_cache);
3463domain_error:
3464 kmem_cache_destroy(iommu_iova_cache);
3465
3466 return -ENOMEM;
3467}
3468
3469static void __init iommu_exit_mempool(void)
3470{
3471 kmem_cache_destroy(iommu_devinfo_cache);
3472 kmem_cache_destroy(iommu_domain_cache);
3473 kmem_cache_destroy(iommu_iova_cache);
3474
3475}
3476
Dan Williams556ab452010-07-23 15:47:56 -07003477static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3478{
3479 struct dmar_drhd_unit *drhd;
3480 u32 vtbar;
3481 int rc;
3482
3483 /* We know that this device on this chipset has its own IOMMU.
3484 * If we find it under a different IOMMU, then the BIOS is lying
3485 * to us. Hope that the IOMMU for this device is actually
3486 * disabled, and it needs no translation...
3487 */
3488 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3489 if (rc) {
3490 /* "can't" happen */
3491 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3492 return;
3493 }
3494 vtbar &= 0xffff0000;
3495
3496 /* we know that this iommu should be at offset 0xa000 from vtbar */
3497 drhd = dmar_find_matched_drhd_unit(pdev);
3498 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3499 TAINT_FIRMWARE_WORKAROUND,
3500 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3501 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3502}
3503DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3504
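/*
 * Mark DMAR units that cover no devices as ignored, and handle units
 * that cover only graphics devices: either keep them and note that gfx
 * is mapped, or, when dmar_map_gfx is disabled, ignore them and give
 * their devices the dummy "no translation" archdata.
 */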
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003505static void __init init_no_remapping_devices(void)
3506{
3507 struct dmar_drhd_unit *drhd;
David Woodhouse832bd852014-03-07 15:08:36 +00003508 struct device *dev;
Jiang Liub683b232014-02-19 14:07:32 +08003509 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003510
3511 for_each_drhd_unit(drhd) {
3512 if (!drhd->include_all) {
Jiang Liub683b232014-02-19 14:07:32 +08003513 for_each_active_dev_scope(drhd->devices,
3514 drhd->devices_cnt, i, dev)
3515 break;
David Woodhouse832bd852014-03-07 15:08:36 +00003516 /* ignore DMAR unit if no devices exist */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003517 if (i == drhd->devices_cnt)
3518 drhd->ignored = 1;
3519 }
3520 }
3521
Jiang Liu7c919772014-01-06 14:18:18 +08003522 for_each_active_drhd_unit(drhd) {
Jiang Liu7c919772014-01-06 14:18:18 +08003523 if (drhd->include_all)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003524 continue;
3525
Jiang Liub683b232014-02-19 14:07:32 +08003526 for_each_active_dev_scope(drhd->devices,
3527 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003528 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003529 break;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003530 if (i < drhd->devices_cnt)
3531 continue;
3532
David Woodhousec0771df2011-10-14 20:59:46 +01003533 /* This IOMMU has *only* gfx devices. Either bypass it or
3534 set the gfx_mapped flag, as appropriate */
3535 if (dmar_map_gfx) {
3536 intel_iommu_gfx_mapped = 1;
3537 } else {
3538 drhd->ignored = 1;
Jiang Liub683b232014-02-19 14:07:32 +08003539 for_each_active_dev_scope(drhd->devices,
3540 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003541 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003542 }
3543 }
3544}
3545
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003546#ifdef CONFIG_SUSPEND
3547static int init_iommu_hw(void)
3548{
3549 struct dmar_drhd_unit *drhd;
3550 struct intel_iommu *iommu = NULL;
3551
3552 for_each_active_iommu(iommu, drhd)
3553 if (iommu->qi)
3554 dmar_reenable_qi(iommu);
3555
Joseph Cihulab7792602011-05-03 00:08:37 -07003556 for_each_iommu(iommu, drhd) {
3557 if (drhd->ignored) {
3558 /*
3559 * we always have to disable PMRs or DMA may fail on
3560 * this device
3561 */
3562 if (force_on)
3563 iommu_disable_protect_mem_regions(iommu);
3564 continue;
3565 }
3566
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003567 iommu_flush_write_buffer(iommu);
3568
3569 iommu_set_root_entry(iommu);
3570
3571 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003572 DMA_CCMD_GLOBAL_INVL);
Jiang Liu2a41cce2014-07-11 14:19:33 +08003573 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3574 iommu_enable_translation(iommu);
David Woodhouseb94996c2009-09-19 15:28:12 -07003575 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003576 }
3577
3578 return 0;
3579}
3580
3581static void iommu_flush_all(void)
3582{
3583 struct dmar_drhd_unit *drhd;
3584 struct intel_iommu *iommu;
3585
3586 for_each_active_iommu(iommu, drhd) {
3587 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003588 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003589 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003590 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003591 }
3592}
3593
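/*
 * Save the fault-event control, data and address registers of every
 * active iommu across suspend; they are written back in iommu_resume(),
 * with translation disabled for the duration of the transition.
 */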
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003594static int iommu_suspend(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003595{
3596 struct dmar_drhd_unit *drhd;
3597 struct intel_iommu *iommu = NULL;
3598 unsigned long flag;
3599
3600 for_each_active_iommu(iommu, drhd) {
3601 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3602 GFP_ATOMIC);
3603 if (!iommu->iommu_state)
3604 goto nomem;
3605 }
3606
3607 iommu_flush_all();
3608
3609 for_each_active_iommu(iommu, drhd) {
3610 iommu_disable_translation(iommu);
3611
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003612 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003613
3614 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3615 readl(iommu->reg + DMAR_FECTL_REG);
3616 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3617 readl(iommu->reg + DMAR_FEDATA_REG);
3618 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3619 readl(iommu->reg + DMAR_FEADDR_REG);
3620 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3621 readl(iommu->reg + DMAR_FEUADDR_REG);
3622
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003623 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003624 }
3625 return 0;
3626
3627nomem:
3628 for_each_active_iommu(iommu, drhd)
3629 kfree(iommu->iommu_state);
3630
3631 return -ENOMEM;
3632}
3633
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003634static void iommu_resume(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003635{
3636 struct dmar_drhd_unit *drhd;
3637 struct intel_iommu *iommu = NULL;
3638 unsigned long flag;
3639
3640 if (init_iommu_hw()) {
Joseph Cihulab7792602011-05-03 00:08:37 -07003641 if (force_on)
3642 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3643 else
3644 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003645 return;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003646 }
3647
3648 for_each_active_iommu(iommu, drhd) {
3649
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003650 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003651
3652 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3653 iommu->reg + DMAR_FECTL_REG);
3654 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3655 iommu->reg + DMAR_FEDATA_REG);
3656 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3657 iommu->reg + DMAR_FEADDR_REG);
3658 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3659 iommu->reg + DMAR_FEUADDR_REG);
3660
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003661 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003662 }
3663
3664 for_each_active_iommu(iommu, drhd)
3665 kfree(iommu->iommu_state);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003666}
3667
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003668static struct syscore_ops iommu_syscore_ops = {
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003669 .resume = iommu_resume,
3670 .suspend = iommu_suspend,
3671};
3672
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003673static void __init init_iommu_pm_ops(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003674{
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003675 register_syscore_ops(&iommu_syscore_ops);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003676}
3677
3678#else
Rafael J. Wysocki99592ba2011-06-07 21:32:31 +02003679static inline void init_iommu_pm_ops(void) {}
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003680#endif /* CONFIG_PM */
3681
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003682
Jiang Liuc2a0b532014-11-09 22:47:56 +08003683int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003684{
3685 struct acpi_dmar_reserved_memory *rmrr;
3686 struct dmar_rmrr_unit *rmrru;
3687
3688 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3689 if (!rmrru)
3690 return -ENOMEM;
3691
3692 rmrru->hdr = header;
3693 rmrr = (struct acpi_dmar_reserved_memory *)header;
3694 rmrru->base_address = rmrr->base_address;
3695 rmrru->end_address = rmrr->end_address;
Jiang Liu2e455282014-02-19 14:07:36 +08003696 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
3697 ((void *)rmrr) + rmrr->header.length,
3698 &rmrru->devices_cnt);
3699 if (rmrru->devices_cnt && rmrru->devices == NULL) {
3700 kfree(rmrru);
3701 return -ENOMEM;
3702 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003703
Jiang Liu2e455282014-02-19 14:07:36 +08003704 list_add(&rmrru->list, &dmar_rmrr_units);
3705
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003706 return 0;
3707}
3708
static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
{
	struct dmar_atsr_unit *atsru;
	struct acpi_dmar_atsr *tmp;

	list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
		tmp = (struct acpi_dmar_atsr *)atsru->hdr;
		if (atsr->segment != tmp->segment)
			continue;
		if (atsr->header.length != tmp->header.length)
			continue;
		if (memcmp(atsr, tmp, atsr->header.length) == 0)
			return atsru;
	}

	return NULL;
}

int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
		return 0;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = dmar_find_atsr(atsr);
	if (atsru)
		return 0;

	atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	/*
	 * If memory is allocated from slab by ACPI _DSM method, we need to
	 * copy the memory content because the memory buffer will be freed
	 * on return.
	 */
	atsru->hdr = (void *)(atsru + 1);
	memcpy(atsru->hdr, hdr, hdr->length);
	atsru->include_all = atsr->flags & 0x1;
	if (!atsru->include_all) {
		atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
				(void *)atsr + atsr->header.length,
				&atsru->devices_cnt);
		if (atsru->devices_cnt && atsru->devices == NULL) {
			kfree(atsru);
			return -ENOMEM;
		}
	}

	list_add_rcu(&atsru->list, &dmar_atsr_units);

	return 0;
}

static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
{
	dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
	kfree(atsru);
}

int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = dmar_find_atsr(atsr);
	if (atsru) {
		list_del_rcu(&atsru->list);
		synchronize_rcu();
		intel_iommu_free_atsr(atsru);
	}

	return 0;
}

int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
	int i;
	struct device *dev;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = dmar_find_atsr(atsr);
	if (!atsru)
		return 0;

	if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
		for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
					  i, dev)
			return -EBUSY;

	return 0;
}

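/*
 * Bring a hot-added DMAR unit on line: verify that it offers the
 * capabilities the running configuration relies on (hardware pass-through,
 * snooping, super-pages), allocate its domain and root-entry state, and
 * enable translation plus the static identity domain where applicable.
 */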
static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
{
	int sp, ret = 0;
	struct intel_iommu *iommu = dmaru->iommu;

	if (g_iommus[iommu->seq_id])
		return 0;

	if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
		pr_warn("IOMMU: %s doesn't support hardware pass through.\n",
			iommu->name);
		return -ENXIO;
	}
	if (!ecap_sc_support(iommu->ecap) &&
	    domain_update_iommu_snooping(iommu)) {
		pr_warn("IOMMU: %s doesn't support snooping.\n",
			iommu->name);
		return -ENXIO;
	}
	sp = domain_update_iommu_superpage(iommu) - 1;
	if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
		pr_warn("IOMMU: %s doesn't support large page.\n",
			iommu->name);
		return -ENXIO;
	}

	/*
	 * Disable translation if already enabled prior to OS handover.
	 */
	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	g_iommus[iommu->seq_id] = iommu;
	ret = iommu_init_domains(iommu);
	if (ret == 0)
		ret = iommu_alloc_root_entry(iommu);
	if (ret)
		goto out;

	if (dmaru->ignored) {
		/*
		 * we always have to disable PMRs or DMA may fail on this device
		 */
		if (force_on)
			iommu_disable_protect_mem_regions(iommu);
		return 0;
	}

	intel_iommu_init_qi(iommu);
	iommu_flush_write_buffer(iommu);
	ret = dmar_set_interrupt(iommu);
	if (ret)
		goto disable_iommu;

	iommu_set_root_entry(iommu);
	iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
	iommu_enable_translation(iommu);

	if (si_domain) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret < 0 || si_domain->id != ret)
			goto disable_iommu;
		domain_attach_iommu(si_domain, iommu);
	}

	iommu_disable_protect_mem_regions(iommu);
	return 0;

disable_iommu:
	disable_dmar_iommu(iommu);
out:
	free_dmar_iommu(iommu);
	return ret;
}

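/*
 * Entry point for DMAR unit hotplug: insertion brings the new unit up via
 * intel_iommu_add(), removal tears its domains and per-IOMMU state down
 * again. Nothing to do until the IOMMU driver itself has been enabled.
 */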
int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
	int ret = 0;
	struct intel_iommu *iommu = dmaru->iommu;

	if (!intel_iommu_enabled)
		return 0;
	if (iommu == NULL)
		return -EINVAL;

	if (insert) {
		ret = intel_iommu_add(dmaru);
	} else {
		disable_dmar_iommu(iommu);
		free_dmar_iommu(iommu);
	}

	return ret;
}

static void intel_iommu_free_dmars(void)
{
	struct dmar_rmrr_unit *rmrru, *rmrr_n;
	struct dmar_atsr_unit *atsru, *atsr_n;

	list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
		list_del(&rmrru->list);
		dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
		kfree(rmrru);
	}

	list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
		list_del(&atsru->list);
		intel_iommu_free_atsr(atsru);
	}
}

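/*
 * Decide whether ATS (Address Translation Services) may be used for a
 * device by walking up to its PCIe root port and checking that port
 * against the device scopes of the registered ATSR units. Returns
 * non-zero when the root port is covered, or when an INCLUDE_ALL ATSR
 * exists for the device's segment.
 */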
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i, ret = 1;
	struct pci_bus *bus;
	struct pci_dev *bridge = NULL;
	struct device *tmp;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	dev = pci_physfn(dev);
	for (bus = dev->bus; bus; bus = bus->parent) {
		bridge = bus->self;
		if (!bridge || !pci_is_pcie(bridge) ||
		    pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;
		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
			break;
	}
	if (!bridge)
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment != pci_domain_nr(dev->bus))
			continue;

		for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
			if (tmp == &bridge->dev)
				goto out;

		if (atsru->include_all)
			goto out;
	}
	ret = 0;
out:
	rcu_read_unlock();

	return ret;
}

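/*
 * Keep the cached RMRR and ATSR device-scope lists in sync when PCI
 * devices are added to or removed from the system.
 */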
int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
	int ret = 0;
	struct dmar_rmrr_unit *rmrru;
	struct dmar_atsr_unit *atsru;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_reserved_memory *rmrr;

	if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
		return 0;

	list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
		rmrr = container_of(rmrru->hdr,
				    struct acpi_dmar_reserved_memory, header);
		if (info->event == BUS_NOTIFY_ADD_DEVICE) {
			ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
				((void *)rmrr) + rmrr->header.length,
				rmrr->segment, rmrru->devices,
				rmrru->devices_cnt);
			if (ret < 0)
				return ret;
		} else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
			dmar_remove_dev_scope(info, rmrr->segment,
				rmrru->devices, rmrru->devices_cnt);
		}
	}

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		if (atsru->include_all)
			continue;

		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (info->event == BUS_NOTIFY_ADD_DEVICE) {
			ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
					(void *)atsr + atsr->header.length,
					atsr->segment, atsru->devices,
					atsru->devices_cnt);
			if (ret > 0)
				break;
			else if (ret < 0)
				return ret;
		} else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
			if (dmar_remove_dev_scope(info, atsr->segment,
					atsru->devices, atsru->devices_cnt))
				break;
		}
	}

	return 0;
}

/*
 * Here we only respond to the removal of a device that is no longer
 * bound to a driver.
 *
 * An added device is not attached to its DMAR domain here yet. That will
 * happen when mapping the device to iova.
 */
static int device_notifier(struct notifier_block *nb,
			   unsigned long action, void *data)
{
	struct device *dev = data;
	struct dmar_domain *domain;

	if (iommu_dummy(dev))
		return 0;

	if (action != BUS_NOTIFY_REMOVED_DEVICE)
		return 0;

	/*
	 * If the device is still attached to a device driver we can't
	 * tear down the domain yet as DMA mappings may still be in use.
	 * Wait for the BUS_NOTIFY_UNBOUND_DRIVER event to do that.
	 */
	if (action == BUS_NOTIFY_DEL_DEVICE && dev->driver != NULL)
		return 0;

	domain = find_domain(dev);
	if (!domain)
		return 0;

	down_read(&dmar_global_lock);
	domain_remove_one_dev_info(domain, dev);
	if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
		domain_exit(domain);
	up_read(&dmar_global_lock);

	return 0;
}

static struct notifier_block device_nb = {
	.notifier_call = device_notifier,
};

4056
Jiang Liu75f05562014-02-19 14:07:37 +08004057static int intel_iommu_memory_notifier(struct notifier_block *nb,
4058 unsigned long val, void *v)
4059{
4060 struct memory_notify *mhp = v;
4061 unsigned long long start, end;
4062 unsigned long start_vpfn, last_vpfn;
4063
4064 switch (val) {
4065 case MEM_GOING_ONLINE:
4066 start = mhp->start_pfn << PAGE_SHIFT;
4067 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4068 if (iommu_domain_identity_map(si_domain, start, end)) {
4069 pr_warn("dmar: failed to build identity map for [%llx-%llx]\n",
4070 start, end);
4071 return NOTIFY_BAD;
4072 }
4073 break;
4074
4075 case MEM_OFFLINE:
4076 case MEM_CANCEL_ONLINE:
4077 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4078 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4079 while (start_vpfn <= last_vpfn) {
4080 struct iova *iova;
4081 struct dmar_drhd_unit *drhd;
4082 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00004083 struct page *freelist;
Jiang Liu75f05562014-02-19 14:07:37 +08004084
4085 iova = find_iova(&si_domain->iovad, start_vpfn);
4086 if (iova == NULL) {
4087 pr_debug("dmar: failed get IOVA for PFN %lx\n",
4088 start_vpfn);
4089 break;
4090 }
4091
4092 iova = split_and_remove_iova(&si_domain->iovad, iova,
4093 start_vpfn, last_vpfn);
4094 if (iova == NULL) {
4095 pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n",
4096 start_vpfn, last_vpfn);
4097 return NOTIFY_BAD;
4098 }
4099
David Woodhouseea8ea462014-03-05 17:09:32 +00004100 freelist = domain_unmap(si_domain, iova->pfn_lo,
4101 iova->pfn_hi);
4102
Jiang Liu75f05562014-02-19 14:07:37 +08004103 rcu_read_lock();
4104 for_each_active_iommu(iommu, drhd)
4105 iommu_flush_iotlb_psi(iommu, si_domain->id,
Jiang Liua156ef92014-07-11 14:19:36 +08004106 iova->pfn_lo, iova_size(iova),
David Woodhouseea8ea462014-03-05 17:09:32 +00004107 !freelist, 0);
Jiang Liu75f05562014-02-19 14:07:37 +08004108 rcu_read_unlock();
David Woodhouseea8ea462014-03-05 17:09:32 +00004109 dma_free_pagelist(freelist);
Jiang Liu75f05562014-02-19 14:07:37 +08004110
4111 start_vpfn = iova->pfn_hi + 1;
4112 free_iova_mem(iova);
4113 }
4114 break;
4115 }
4116
4117 return NOTIFY_OK;
4118}
4119
4120static struct notifier_block intel_iommu_memory_nb = {
4121 .notifier_call = intel_iommu_memory_notifier,
4122 .priority = 0
4123};
4124
Alex Williamsona5459cf2014-06-12 16:12:31 -06004125
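/*
 * sysfs attributes exposed for each hardware unit (typically visible as
 * /sys/class/iommu/dmar<N>/intel-iommu/): the architecture version, the
 * register base address, and the raw capability and extended capability
 * registers.
 */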
static ssize_t intel_iommu_show_version(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	u32 ver = readl(iommu->reg + DMAR_VER_REG);
	return sprintf(buf, "%d:%d\n",
		       DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
}
static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);

static ssize_t intel_iommu_show_address(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->reg_phys);
}
static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);

static ssize_t intel_iommu_show_cap(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->cap);
}
static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);

static ssize_t intel_iommu_show_ecap(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->ecap);
}
static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);

static struct attribute *intel_iommu_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_address.attr,
	&dev_attr_cap.attr,
	&dev_attr_ecap.attr,
	NULL,
};

static struct attribute_group intel_iommu_group = {
	.name = "intel-iommu",
	.attrs = intel_iommu_attrs,
};

const struct attribute_group *intel_iommu_groups[] = {
	&intel_iommu_group,
	NULL,
};

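/*
 * Main initialization entry point: parse the DMAR table and device scopes,
 * set up the per-IOMMU state and DMA remapping via init_dmars(), install
 * intel_dma_ops as the DMA API backend, and register the IOMMU ops, bus
 * notifier, memory hotplug notifier and per-unit sysfs devices.
 */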
int __init intel_iommu_init(void)
{
	int ret = -ENODEV;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* VT-d is required for a TXT/tboot launch, so enforce that */
	force_on = tboot_force_iommu();

	if (iommu_init_mempool()) {
		if (force_on)
			panic("tboot: Failed to initialize iommu memory\n");
		return -ENOMEM;
	}

	down_write(&dmar_global_lock);
	if (dmar_table_init()) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR table\n");
		goto out_free_dmar;
	}

	/*
	 * Disable translation if already enabled prior to OS handover.
	 */
	for_each_active_iommu(iommu, drhd)
		if (iommu->gcmd & DMA_GCMD_TE)
			iommu_disable_translation(iommu);

	if (dmar_dev_scope_init() < 0) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR device scope\n");
		goto out_free_dmar;
	}

	if (no_iommu || dmar_disabled)
		goto out_free_dmar;

	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO "DMAR: No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO "DMAR: No ATSR found\n");

	if (dmar_init_reserved_ranges()) {
		if (force_on)
			panic("tboot: Failed to reserve iommu ranges\n");
		goto out_free_reserved_range;
	}

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		if (force_on)
			panic("tboot: Failed to initialize DMARs\n");
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		goto out_free_reserved_range;
	}
	up_write(&dmar_global_lock);
	printk(KERN_INFO
	       "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
#ifdef CONFIG_SWIOTLB
	swiotlb = 0;
#endif
	dma_ops = &intel_dma_ops;

	init_iommu_pm_ops();

	for_each_active_iommu(iommu, drhd)
		iommu->iommu_dev = iommu_device_create(NULL, iommu,
						       intel_iommu_groups,
						       iommu->name);

	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
	bus_register_notifier(&pci_bus_type, &device_nb);
	if (si_domain && !hw_pass_through)
		register_memory_notifier(&intel_iommu_memory_nb);

	intel_iommu_enabled = 1;

	return 0;

out_free_reserved_range:
	put_iova_domain(&reserved_iova_list);
out_free_dmar:
	intel_iommu_free_dmars();
	up_write(&dmar_global_lock);
	iommu_exit_mempool();
	return ret;
}

static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct intel_iommu *iommu = opaque;

	iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
	return 0;
}

/*
 * NB - intel-iommu lacks any sort of reference counting for the users of
 * dependent devices.  If multiple endpoints have intersecting dependent
 * devices, unbinding the driver from any one of them will possibly leave
 * the others unable to operate.
 */
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct device *dev)
{
	if (!iommu || !dev || !dev_is_pci(dev))
		return;

	pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
}

static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct device *dev)
{
	struct device_domain_info *info, *tmp;
	struct intel_iommu *iommu;
	unsigned long flags;
	int found = 0;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry_safe(info, tmp, &domain->devices, link) {
		if (info->iommu == iommu && info->bus == bus &&
		    info->devfn == devfn) {
			unlink_domain_info(info);
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_disable_dev_iotlb(info);
			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, dev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/* if there are no other devices under the same iommu
		 * owned by this domain, clear this iommu in iommu_bmp and
		 * update the iommu count and coherency
		 */
		if (info->iommu == iommu)
			found = 1;
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (found == 0) {
		domain_detach_iommu(domain, iommu);
		if (!domain_type_is_vm_or_si(domain))
			iommu_detach_domain(domain, iommu);
	}
}

static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	domain->iommu_coherency = 0;
	domain->iommu_snooping = 0;
	domain->iommu_superpage = 0;
	domain->max_addr = 0;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}

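/*
 * iommu_ops callbacks used by the generic IOMMU API (and, on top of it,
 * by consumers such as VFIO device assignment). Each iommu_domain is
 * backed by a dmar_domain of the virtual-machine type, sized to
 * DEFAULT_DOMAIN_ADDRESS_WIDTH.
 */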
static int intel_iommu_domain_init(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain;

	dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
	if (!dmar_domain) {
		printk(KERN_ERR
		       "intel_iommu_domain_init: dmar_domain == NULL\n");
		return -ENOMEM;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
		       "intel_iommu_domain_init() failed\n");
		domain_exit(dmar_domain);
		return -ENOMEM;
	}
	domain_update_iommu_cap(dmar_domain);
	domain->priv = dmar_domain;

	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
	domain->geometry.force_aperture = true;

	return 0;
}

static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain->priv = NULL;
	domain_exit(dmar_domain);
}

static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct intel_iommu *iommu;
	int addr_width;
	u8 bus, devfn;

	if (device_is_rmrr_locked(dev)) {
		dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
		return -EPERM;
	}

	/* normally dev is not mapped */
	if (unlikely(domain_context_mapped(dev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(dev);
		if (old_domain) {
			if (domain_type_is_vm_or_si(dmar_domain))
				domain_remove_one_dev_info(old_domain, dev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	if (addr_width > cap_mgaw(iommu->cap))
		addr_width = cap_mgaw(iommu->cap);

	if (dmar_domain->max_addr > (1LL << addr_width)) {
		printk(KERN_ERR "%s: iommu width (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, addr_width, dmar_domain->max_addr);
		return -EFAULT;
	}
	dmar_domain->gaw = addr_width;

	/*
	 * Knock out extra levels of page tables if necessary
	 */
	while (iommu->agaw < dmar_domain->agaw) {
		struct dma_pte *pte;

		pte = dmar_domain->pgd;
		if (dma_pte_present(pte)) {
			dmar_domain->pgd = (struct dma_pte *)
				phys_to_virt(dma_pte_addr(pte));
			free_pgtable_page(pte);
		}
		dmar_domain->agaw--;
	}

	return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
}

static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain_remove_one_dev_info(dmar_domain, dev);
}

static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
			   size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu width (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, dmar_domain->gaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}

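/*
 * Unmap a range previously mapped through intel_iommu_map(). The page
 * tables are torn down first, and the freed table pages are only returned
 * to the allocator after the IOTLBs of every IOMMU using this domain have
 * been flushed, so the hardware never walks freed memory.
 */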
static size_t intel_iommu_unmap(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct page *freelist = NULL;
	struct intel_iommu *iommu;
	unsigned long start_pfn, last_pfn;
	unsigned int npages;
	int iommu_id, num, ndomains, level = 0;

	/* Cope with horrid API which requires us to unmap more than the
	   size argument if it happens to be a large-page mapping. */
	if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
		BUG();

	if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
		size = VTD_PAGE_SIZE << level_to_offset_bits(level);

	start_pfn = iova >> VTD_PAGE_SHIFT;
	last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;

	freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);

	npages = last_pfn - start_pfn + 1;

	for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
		iommu = g_iommus[iommu_id];

		/*
		 * find bit position of dmar_domain
		 */
		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(num, iommu->domain_ids, ndomains) {
			if (iommu->domains[num] == dmar_domain)
				iommu_flush_iotlb_psi(iommu, num, start_pfn,
						      npages, !freelist, 0);
		}

	}

	dma_free_pagelist(freelist);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;

	return size;
}

static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	int level = 0;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

static bool intel_iommu_capable(enum iommu_cap cap)
{
	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return domain_update_iommu_snooping(NULL) == 1;
	if (cap == IOMMU_CAP_INTR_REMAP)
		return irq_remapping_enabled == 1;

	return false;
}

static int intel_iommu_add_device(struct device *dev)
{
	struct intel_iommu *iommu;
	struct iommu_group *group;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	iommu_device_link(iommu->iommu_dev, dev);

	group = iommu_group_get_for_dev(dev);

	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void intel_iommu_remove_device(struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return;

	iommu_group_remove_device(dev);

	iommu_device_unlink(iommu->iommu_dev, dev);
}

static const struct iommu_ops intel_iommu_ops = {
	.capable	= intel_iommu_capable,
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy	= intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map,
	.unmap		= intel_iommu_unmap,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.add_device	= intel_iommu_add_device,
	.remove_device	= intel_iommu_remove_device,
	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
};

static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
{
	/* G4x/GM45 integrated gfx dmar support is totally busted. */
	printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
	dmar_map_gfx = 0;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);

static void quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it. Same seems to hold for the desktop versions.
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);

#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)

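/*
 * Calpella/Ironlake integrated graphics: if the BIOS left no stolen memory
 * for a shadow GTT (GGC_MEMORY_VT_ENABLED clear in the GGC config register
 * defined above), graphics DMA cannot be remapped safely, so the gfx
 * device is left outside the IOMMU; otherwise batched (deferred) IOTLB
 * flushing is disabled so the gfx device is idle before each flush.
 */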
static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);

/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that. We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
	       vtisochctrl);
}