/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>
 *          Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)     "DMAR: " fmt

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/crash_dump.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"

#define ROOT_SIZE               VTD_PAGE_SIZE
#define CONTEXT_SIZE            VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START      (0xfee00000)
#define IOAPIC_RANGE_END        (0xfeefffff)
#define IOVA_START_ADDR         (0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH      (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)     ((unsigned long) min_t(uint64_t, \
                                __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)    (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
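
/*
 * Worked example (illustration only, not part of the original code): with
 * gaw == 48, __DOMAIN_MAX_PFN(48) == (1ULL << 36) - 1 and DOMAIN_MAX_ADDR(48)
 * == (1ULL << 48) - 4096, i.e. the base address of the last 4KiB page in a
 * 48-bit IOVA space.  On a 32-bit build DOMAIN_MAX_PFN(48) is clamped to
 * ULONG_MAX so that PFN arithmetic stays within 'unsigned long'.
 */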

/* IO virtual address start page frame number */
#define IOVA_START_PFN          (1)

#define IOVA_PFN(addr)          ((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN           IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN           IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE            (9)
#define LEVEL_MASK              (((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is an order of a 4KiB page and that the
 * mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES     (~0xFFFUL)
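
/*
 * Illustration (not part of the original code): the IOMMU core treats bit k
 * of this bitmap as "a page of size 1UL << k is supported".  ~0xFFFUL clears
 * bits 0-11 and sets every bit from 12 upwards, i.e. it advertises 4KiB,
 * 8KiB, 16KiB, ... every power-of-two size that is a multiple of 4KiB.
 */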

static inline int agaw_to_level(int agaw)
{
        return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
        return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
        return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}

static inline unsigned int level_to_offset_bits(int level)
{
        return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
        return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
        return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
        return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
        return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
        return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}
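
/*
 * Worked example for the helpers above (illustration only): with the
 * default 48-bit address width, width_to_agaw(48) == 2 and
 * agaw_to_level(2) == 4, i.e. a 4-level page table.  At level 2,
 * level_to_offset_bits(2) == 9, so pfn_level_offset(pfn, 2) is
 * (pfn >> 9) & 0x1ff, level_size(2) is 512 VT-d pages (2MiB of IOVA with
 * 4KiB pages), and lvl_to_nr_pages(2) is likewise 512.
 */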

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
        return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
        return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
        return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
        return page_to_dma_pfn(virt_to_page(p));
}
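
/*
 * Note (illustration, assuming 4KiB kernel pages as on x86): PAGE_SHIFT and
 * VTD_PAGE_SHIFT are then both 12, so the mm<->dma pfn conversions above are
 * identity operations; the shifts only matter if the kernel page size ever
 * differs from the fixed 4KiB VT-d page size.
 */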

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic the kernel if VT-d cannot be enabled successfully
 * (used when the kernel is launched w/ TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
        u64     lo;
        u64     hi;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))

/*
 * Take a root_entry and return the Lower Context Table Pointer (LCTP)
 * if marked present.
 */
static phys_addr_t root_entry_lctp(struct root_entry *re)
{
        if (!(re->lo & 1))
                return 0;

        return re->lo & VTD_PAGE_MASK;
}

/*
 * Take a root_entry and return the Upper Context Table Pointer (UCTP)
 * if marked present.
 */
static phys_addr_t root_entry_uctp(struct root_entry *re)
{
        if (!(re->hi & 1))
                return 0;

        return re->hi & VTD_PAGE_MASK;
}
/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
        u64 lo;
        u64 hi;
};

static inline void context_clear_pasid_enable(struct context_entry *context)
{
        context->lo &= ~(1ULL << 11);
}

static inline bool context_pasid_enabled(struct context_entry *context)
{
        return !!(context->lo & (1ULL << 11));
}

static inline void context_set_copied(struct context_entry *context)
{
        context->hi |= (1ull << 3);
}

static inline bool context_copied(struct context_entry *context)
{
        return !!(context->hi & (1ULL << 3));
}

static inline bool __context_present(struct context_entry *context)
{
        return (context->lo & 1);
}

static inline bool context_present(struct context_entry *context)
{
        return context_pasid_enabled(context) ?
             __context_present(context) :
             __context_present(context) && !context_copied(context);
}

static inline void context_set_present(struct context_entry *context)
{
        context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
        context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
                                                unsigned long value)
{
        context->lo &= (((u64)-1) << 4) | 3;
        context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
                                            unsigned long value)
{
        context->lo &= ~VTD_PAGE_MASK;
        context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
                                             unsigned long value)
{
        context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
                                         unsigned long value)
{
        context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline int context_domain_id(struct context_entry *c)
{
        return((c->hi >> 8) & 0xffff);
}

static inline void context_clear_entry(struct context_entry *context)
{
        context->lo = 0;
        context->hi = 0;
}
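
/*
 * Illustrative sketch (not part of the original code) of how the accessors
 * above compose a context entry when a device is attached to a domain; the
 * real sequence lives in the context-mapping path later in this file:
 *
 *      context_clear_entry(context);
 *      context_set_domain_id(context, did);
 *      context_set_address_width(context, iommu->agaw);
 *      context_set_address_root(context, virt_to_phys(domain->pgd));
 *      context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
 *      context_set_fault_enable(context);
 *      context_set_present(context);
 */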

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
        u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
        pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
        return pte->val & VTD_PAGE_MASK;
#else
        /* Must have a full atomic 64-bit read */
        return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
        return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
        return (pte->val & DMA_PTE_LARGE_PAGE);
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
        return !((unsigned long)pte & ~VTD_PAGE_MASK);
}

/*
 * This domain is a static identity mapping domain.
 *      1. This domain creates a static 1:1 mapping to all usable memory.
 *      2. It maps to each iommu if successful.
 *      3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* domain represents a virtual machine; devices across multiple iommus
 * may be owned by one domain, e.g. a kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE     (1 << 0)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY     (1 << 1)

struct dmar_domain {
        int     id;                     /* domain id */
        int     nid;                    /* node id */
        DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
                                        /* bitmap of iommus this domain uses*/

        u16             iommu_did[DMAR_UNITS_SUPPORTED];
                                        /* Domain ids per IOMMU. Use u16 since
                                         * domain ids are 16 bit wide according
                                         * to VT-d spec, section 9.3 */

        struct list_head devices;       /* all devices' list */
        struct iova_domain iovad;       /* iova's that belong to this domain */

        struct dma_pte  *pgd;           /* virtual address */
        int             gaw;            /* max guest address width */

        /* adjusted guest address width, 0 is level 2 30-bit */
        int             agaw;

        int             flags;          /* flags to find out type of domain */

        int             iommu_coherency;/* indicate coherency of iommu access */
        int             iommu_snooping; /* indicate snooping control feature*/
        int             iommu_count;    /* reference count of iommu */
        int             iommu_superpage;/* Level of superpages supported:
                                           0 == 4KiB (no superpages), 1 == 2MiB,
                                           2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
        spinlock_t      iommu_lock;     /* protect iommu set in domain */
        u64             max_addr;       /* maximum mapped address */

        struct iommu_domain domain;     /* generic domain data structure for
                                           iommu core */
};

/* PCI domain-device relationship */
struct device_domain_info {
        struct list_head link;  /* link to domain siblings */
        struct list_head global; /* link to global list */
        u8 bus;                 /* PCI bus number */
        u8 devfn;               /* PCI devfn number */
        struct device *dev;     /* it's NULL for PCIe-to-PCI bridge */
        struct intel_iommu *iommu; /* IOMMU used by this device */
        struct dmar_domain *domain; /* pointer to domain */
};

struct dmar_rmrr_unit {
        struct list_head list;          /* list of rmrr units   */
        struct acpi_dmar_header *hdr;   /* ACPI header          */
        u64     base_address;           /* reserved base address*/
        u64     end_address;            /* reserved end address */
        struct dmar_dev_scope *devices; /* target devices */
        int     devices_cnt;            /* target device count */
};

struct dmar_atsr_unit {
        struct list_head list;          /* list of ATSR units */
        struct acpi_dmar_header *hdr;   /* ACPI header */
        struct dmar_dev_scope *devices; /* target devices */
        int devices_cnt;                /* target device count */
        u8 include_all:1;               /* include all ports */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);

#define for_each_rmrr_units(rmrr) \
        list_for_each_entry(rmrr, &dmar_rmrr_units, list)

static void flush_unmaps_timeout(unsigned long data);

static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
        int next;
        struct iova *iova[HIGH_WATER_MARK];
        struct dmar_domain *domain[HIGH_WATER_MARK];
        struct page *freelist[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void domain_remove_one_dev_info(struct dmar_domain *domain,
                                       struct device *dev);
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
                                           struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
                               struct intel_iommu *iommu);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int intel_iommu_ecs = 1;

/* We only actually use ECS when PASID support (on the new bit 40)
 * is also advertised. Some early implementations — the ones with
 * PASID support on bit 28 — have issues even when we *only* use
 * extended root/context tables. */
#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
                            ecap_pasid(iommu->ecap))

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static const struct iommu_ops intel_iommu_ops;

static bool translation_pre_enabled(struct intel_iommu *iommu)
{
        return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
}

static void clear_translation_pre_enabled(struct intel_iommu *iommu)
{
        iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
}

static void init_translation_status(struct intel_iommu *iommu)
{
        u32 gsts;

        gsts = readl(iommu->reg + DMAR_GSTS_REG);
        if (gsts & DMA_GSTS_TES)
                iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
}

/* Convert a generic struct iommu_domain to the private struct dmar_domain */
static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
{
        return container_of(dom, struct dmar_domain, domain);
}

static int __init intel_iommu_setup(char *str)
{
        if (!str)
                return -EINVAL;
        while (*str) {
                if (!strncmp(str, "on", 2)) {
                        dmar_disabled = 0;
                        pr_info("IOMMU enabled\n");
                } else if (!strncmp(str, "off", 3)) {
                        dmar_disabled = 1;
                        pr_info("IOMMU disabled\n");
                } else if (!strncmp(str, "igfx_off", 8)) {
                        dmar_map_gfx = 0;
                        pr_info("Disable GFX device mapping\n");
                } else if (!strncmp(str, "forcedac", 8)) {
                        pr_info("Forcing DAC for PCI devices\n");
                        dmar_forcedac = 1;
                } else if (!strncmp(str, "strict", 6)) {
                        pr_info("Disable batched IOTLB flush\n");
                        intel_iommu_strict = 1;
                } else if (!strncmp(str, "sp_off", 6)) {
                        pr_info("Disable supported super page\n");
                        intel_iommu_superpage = 0;
                } else if (!strncmp(str, "ecs_off", 7)) {
                        printk(KERN_INFO
                                "Intel-IOMMU: disable extended context table support\n");
                        intel_iommu_ecs = 0;
                }

                str += strcspn(str, ",");
                while (*str == ',')
                        str++;
        }
        return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
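
/*
 * Example command lines parsed by intel_iommu_setup() above (illustration
 * only):
 *
 *      intel_iommu=on                  enable DMA remapping
 *      intel_iommu=on,strict           enable it and flush the IOTLB
 *                                      synchronously on every unmap
 *      intel_iommu=igfx_off,sp_off     leave the integrated graphics device
 *                                      unmapped and disable superpage use
 *
 * Options are comma separated, matching the strcspn()/',' loop above.
 */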

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;

static inline void *alloc_pgtable_page(int node)
{
        struct page *page;
        void *vaddr = NULL;

        page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
        if (page)
                vaddr = page_address(page);
        return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
        free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
        return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
        kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void * alloc_devinfo_mem(void)
{
        return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
        kmem_cache_free(iommu_devinfo_cache, vaddr);
}

static inline int domain_type_is_vm(struct dmar_domain *domain)
{
        return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
}

static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
{
        return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
                                DOMAIN_FLAG_STATIC_IDENTITY);
}

static inline int domain_pfn_supported(struct dmar_domain *domain,
                                       unsigned long pfn)
{
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

        return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
        unsigned long sagaw;
        int agaw = -1;

        sagaw = cap_sagaw(iommu->cap);
        for (agaw = width_to_agaw(max_gaw);
             agaw >= 0; agaw--) {
                if (test_bit(agaw, &sagaw))
                        break;
        }

        return agaw;
}
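
/*
 * Worked example (illustration only, per the VT-d SAGAW encoding):
 * cap_sagaw() returns the 5-bit SAGAW field, where bit 1 means 39-bit/3-level
 * and bit 2 means 48-bit/4-level tables are supported.  For
 * DEFAULT_DOMAIN_ADDRESS_WIDTH (48) the loop above starts at
 * width_to_agaw(48) == 2 and walks downwards, so an IOMMU advertising only
 * 3-level tables ends up with agaw == 1.
 */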

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
        return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw, and
 * fall back to a smaller supported agaw for iommus that don't support it.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
        return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
        int iommu_id;

        /* si_domain and vm domain should not get here. */
        BUG_ON(domain_type_is_vm_or_si(domain));
        iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
        if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
                return NULL;

        return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
        bool found = false;
        int i;

        domain->iommu_coherency = 1;

        for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
                found = true;
                if (!ecap_coherent(g_iommus[i]->ecap)) {
                        domain->iommu_coherency = 0;
                        break;
                }
        }
        if (found)
                return;

        /* No hardware attached; use lowest common denominator */
        rcu_read_lock();
        for_each_active_iommu(iommu, drhd) {
                if (!ecap_coherent(iommu->ecap)) {
                        domain->iommu_coherency = 0;
                        break;
                }
        }
        rcu_read_unlock();
}

static int domain_update_iommu_snooping(struct intel_iommu *skip)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
        int ret = 1;

        rcu_read_lock();
        for_each_active_iommu(iommu, drhd) {
                if (iommu != skip) {
                        if (!ecap_sc_support(iommu->ecap)) {
                                ret = 0;
                                break;
                        }
                }
        }
        rcu_read_unlock();

        return ret;
}

static int domain_update_iommu_superpage(struct intel_iommu *skip)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
        int mask = 0xf;

        if (!intel_iommu_superpage) {
                return 0;
        }

        /* set iommu_superpage to the smallest common denominator */
        rcu_read_lock();
        for_each_active_iommu(iommu, drhd) {
                if (iommu != skip) {
                        mask &= cap_super_page_val(iommu->cap);
                        if (!mask)
                                break;
                }
        }
        rcu_read_unlock();

        return fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
        domain_update_iommu_coherency(domain);
        domain->iommu_snooping = domain_update_iommu_snooping(NULL);
        domain->iommu_superpage = domain_update_iommu_superpage(NULL);
}

static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
                                                       u8 bus, u8 devfn, int alloc)
{
        struct root_entry *root = &iommu->root_entry[bus];
        struct context_entry *context;
        u64 *entry;

        if (ecs_enabled(iommu)) {
                if (devfn >= 0x80) {
                        devfn -= 0x80;
                        entry = &root->hi;
                }
                devfn *= 2;
        }
        entry = &root->lo;
        if (*entry & 1)
                context = phys_to_virt(*entry & VTD_PAGE_MASK);
        else {
                unsigned long phy_addr;
                if (!alloc)
                        return NULL;

                context = alloc_pgtable_page(iommu->node);
                if (!context)
                        return NULL;

                __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
                phy_addr = virt_to_phys((void *)context);
                *entry = phy_addr | 1;
                __iommu_flush_cache(iommu, entry, sizeof(*entry));
        }
        return &context[devfn];
}
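
/*
 * Layout note (illustration, per the VT-d extended root/context table
 * format): without ECS a bus's context table has 256 128-bit entries indexed
 * directly by devfn.  With ECS each entry is 256 bits, so a 4KiB table only
 * covers 128 devfns; devfns 0x00-0x7f are intended to come from the table
 * referenced by root->lo and 0x80-0xff from root->hi, and the index is
 * doubled (devfn *= 2 above) because the extended entries are twice as wide.
 */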

static int iommu_dummy(struct device *dev)
{
        return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
        struct dmar_drhd_unit *drhd = NULL;
        struct intel_iommu *iommu;
        struct device *tmp;
        struct pci_dev *ptmp, *pdev = NULL;
        u16 segment = 0;
        int i;

        if (iommu_dummy(dev))
                return NULL;

        if (dev_is_pci(dev)) {
                pdev = to_pci_dev(dev);
                segment = pci_domain_nr(pdev->bus);
        } else if (has_acpi_companion(dev))
                dev = &ACPI_COMPANION(dev)->dev;

        rcu_read_lock();
        for_each_active_iommu(iommu, drhd) {
                if (pdev && segment != drhd->segment)
                        continue;

                for_each_active_dev_scope(drhd->devices,
                                          drhd->devices_cnt, i, tmp) {
                        if (tmp == dev) {
                                *bus = drhd->devices[i].bus;
                                *devfn = drhd->devices[i].devfn;
                                goto out;
                        }

                        if (!pdev || !dev_is_pci(tmp))
                                continue;

                        ptmp = to_pci_dev(tmp);
                        if (ptmp->subordinate &&
                            ptmp->subordinate->number <= pdev->bus->number &&
                            ptmp->subordinate->busn_res.end >= pdev->bus->number)
                                goto got_pdev;
                }

                if (pdev && drhd->include_all) {
                got_pdev:
                        *bus = pdev->bus->number;
                        *devfn = pdev->devfn;
                        goto out;
                }
        }
        iommu = NULL;
 out:
        rcu_read_unlock();

        return iommu;
}

static void domain_flush_cache(struct dmar_domain *domain,
                               void *addr, int size)
{
        if (!domain->iommu_coherency)
                clflush_cache_range(addr, size);
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
        struct context_entry *context;
        int ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);
        context = iommu_context_addr(iommu, bus, devfn, 0);
        if (context)
                ret = context_present(context);
        spin_unlock_irqrestore(&iommu->lock, flags);
        return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
        struct context_entry *context;
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);
        context = iommu_context_addr(iommu, bus, devfn, 0);
        if (context) {
                context_clear_entry(context);
                __iommu_flush_cache(iommu, context, sizeof(*context));
        }
        spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
        int i;
        unsigned long flags;
        struct context_entry *context;

        spin_lock_irqsave(&iommu->lock, flags);
        if (!iommu->root_entry) {
                goto out;
        }
        for (i = 0; i < ROOT_ENTRY_NR; i++) {
                context = iommu_context_addr(iommu, i, 0, 0);
                if (context)
                        free_pgtable_page(context);

                if (!ecs_enabled(iommu))
                        continue;

                context = iommu_context_addr(iommu, i, 0x80, 0);
                if (context)
                        free_pgtable_page(context);

        }
        free_pgtable_page(iommu->root_entry);
        iommu->root_entry = NULL;
out:
        spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
                                      unsigned long pfn, int *target_level)
{
        struct dma_pte *parent, *pte = NULL;
        int level = agaw_to_level(domain->agaw);
        int offset;

        BUG_ON(!domain->pgd);

        if (!domain_pfn_supported(domain, pfn))
                /* Address beyond IOMMU's addressing capabilities. */
                return NULL;

        parent = domain->pgd;

        while (1) {
                void *tmp_page;

                offset = pfn_level_offset(pfn, level);
                pte = &parent[offset];
                if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
                        break;
                if (level == *target_level)
                        break;

                if (!dma_pte_present(pte)) {
                        uint64_t pteval;

                        tmp_page = alloc_pgtable_page(domain->nid);

                        if (!tmp_page)
                                return NULL;

                        domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
                        pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
                        if (cmpxchg64(&pte->val, 0ULL, pteval))
                                /* Someone else set it while we were thinking; use theirs. */
                                free_pgtable_page(tmp_page);
                        else
                                domain_flush_cache(domain, pte, sizeof(*pte));
                }
                if (level == 1)
                        break;

                parent = phys_to_virt(dma_pte_addr(pte));
                level--;
        }

        if (!*target_level)
                *target_level = level;

        return pte;
}


/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
                                         unsigned long pfn,
                                         int level, int *large_page)
{
        struct dma_pte *parent, *pte = NULL;
        int total = agaw_to_level(domain->agaw);
        int offset;

        parent = domain->pgd;
        while (level <= total) {
                offset = pfn_level_offset(pfn, total);
                pte = &parent[offset];
                if (level == total)
                        return pte;

                if (!dma_pte_present(pte)) {
                        *large_page = total;
                        break;
                }

                if (dma_pte_superpage(pte)) {
                        *large_page = total;
                        return pte;
                }

                parent = phys_to_virt(dma_pte_addr(pte));
                total--;
        }
        return NULL;
}

/* clear last level pte; a tlb flush should follow */
static void dma_pte_clear_range(struct dmar_domain *domain,
                                unsigned long start_pfn,
                                unsigned long last_pfn)
{
        unsigned int large_page = 1;
        struct dma_pte *first_pte, *pte;

        BUG_ON(!domain_pfn_supported(domain, start_pfn));
        BUG_ON(!domain_pfn_supported(domain, last_pfn));
        BUG_ON(start_pfn > last_pfn);

        /* we don't need lock here; nobody else touches the iova range */
        do {
                large_page = 1;
                first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
                if (!pte) {
                        start_pfn = align_to_level(start_pfn + 1, large_page + 1);
                        continue;
                }
                do {
                        dma_clear_pte(pte);
                        start_pfn += lvl_to_nr_pages(large_page);
                        pte++;
                } while (start_pfn <= last_pfn && !first_pte_in_page(pte));

                domain_flush_cache(domain, first_pte,
                                   (void *)pte - (void *)first_pte);

        } while (start_pfn && start_pfn <= last_pfn);
}

static void dma_pte_free_level(struct dmar_domain *domain, int level,
                               struct dma_pte *pte, unsigned long pfn,
                               unsigned long start_pfn, unsigned long last_pfn)
{
        pfn = max(start_pfn, pfn);
        pte = &pte[pfn_level_offset(pfn, level)];

        do {
                unsigned long level_pfn;
                struct dma_pte *level_pte;

                if (!dma_pte_present(pte) || dma_pte_superpage(pte))
                        goto next;

                level_pfn = pfn & level_mask(level - 1);
                level_pte = phys_to_virt(dma_pte_addr(pte));

                if (level > 2)
                        dma_pte_free_level(domain, level - 1, level_pte,
                                           level_pfn, start_pfn, last_pfn);

                /* If range covers entire pagetable, free it */
                if (!(start_pfn > level_pfn ||
                      last_pfn < level_pfn + level_size(level) - 1)) {
                        dma_clear_pte(pte);
                        domain_flush_cache(domain, pte, sizeof(*pte));
                        free_pgtable_page(level_pte);
                }
next:
                pfn += level_size(level);
        } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
                                   unsigned long start_pfn,
                                   unsigned long last_pfn)
{
        BUG_ON(!domain_pfn_supported(domain, start_pfn));
        BUG_ON(!domain_pfn_supported(domain, last_pfn));
        BUG_ON(start_pfn > last_pfn);

        dma_pte_clear_range(domain, start_pfn, last_pfn);

        /* We don't need lock here; nobody else touches the iova range */
        dma_pte_free_level(domain, agaw_to_level(domain->agaw),
                           domain->pgd, 0, start_pfn, last_pfn);

        /* free pgd */
        if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
                free_pgtable_page(domain->pgd);
                domain->pgd = NULL;
        }
}

/* When a page at a given level is being unlinked from its parent, we don't
   need to *modify* it at all. All we need to do is make a list of all the
   pages which can be freed just as soon as we've flushed the IOTLB and we
   know the hardware page-walk will no longer touch them.
   The 'pte' argument is the *parent* PTE, pointing to the page that is to
   be freed. */
static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
                                            int level, struct dma_pte *pte,
                                            struct page *freelist)
{
        struct page *pg;

        pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
        pg->freelist = freelist;
        freelist = pg;

        if (level == 1)
                return freelist;

        pte = page_address(pg);
        do {
                if (dma_pte_present(pte) && !dma_pte_superpage(pte))
                        freelist = dma_pte_list_pagetables(domain, level - 1,
                                                           pte, freelist);
                pte++;
        } while (!first_pte_in_page(pte));

        return freelist;
}

static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
                                        struct dma_pte *pte, unsigned long pfn,
                                        unsigned long start_pfn,
                                        unsigned long last_pfn,
                                        struct page *freelist)
{
        struct dma_pte *first_pte = NULL, *last_pte = NULL;

        pfn = max(start_pfn, pfn);
        pte = &pte[pfn_level_offset(pfn, level)];

        do {
                unsigned long level_pfn;

                if (!dma_pte_present(pte))
                        goto next;

                level_pfn = pfn & level_mask(level);

                /* If range covers entire pagetable, free it */
                if (start_pfn <= level_pfn &&
                    last_pfn >= level_pfn + level_size(level) - 1) {
                        /* These subordinate page tables are going away entirely. Don't
                           bother to clear them; we're just going to *free* them. */
                        if (level > 1 && !dma_pte_superpage(pte))
                                freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);

                        dma_clear_pte(pte);
                        if (!first_pte)
                                first_pte = pte;
                        last_pte = pte;
                } else if (level > 1) {
                        /* Recurse down into a level that isn't *entirely* obsolete */
                        freelist = dma_pte_clear_level(domain, level - 1,
                                                       phys_to_virt(dma_pte_addr(pte)),
                                                       level_pfn, start_pfn, last_pfn,
                                                       freelist);
                }
next:
                pfn += level_size(level);
        } while (!first_pte_in_page(++pte) && pfn <= last_pfn);

        if (first_pte)
                domain_flush_cache(domain, first_pte,
                                   (void *)++last_pte - (void *)first_pte);

        return freelist;
}

/* We can't just free the pages because the IOMMU may still be walking
   the page tables, and may have cached the intermediate levels. The
   pages can only be freed after the IOTLB flush has been done. */
struct page *domain_unmap(struct dmar_domain *domain,
                          unsigned long start_pfn,
                          unsigned long last_pfn)
{
        struct page *freelist = NULL;

        BUG_ON(!domain_pfn_supported(domain, start_pfn));
        BUG_ON(!domain_pfn_supported(domain, last_pfn));
        BUG_ON(start_pfn > last_pfn);

        /* we don't need lock here; nobody else touches the iova range */
        freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
                                       domain->pgd, 0, start_pfn, last_pfn, NULL);

        /* free pgd */
        if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
                struct page *pgd_page = virt_to_page(domain->pgd);
                pgd_page->freelist = freelist;
                freelist = pgd_page;

                domain->pgd = NULL;
        }

        return freelist;
}

void dma_free_pagelist(struct page *freelist)
{
        struct page *pg;

        while ((pg = freelist)) {
                freelist = pg->freelist;
                free_pgtable_page(page_address(pg));
        }
}
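
/*
 * Hedged usage sketch for the two helpers above (the real callers are the
 * unmap paths elsewhere in this file): collect the no-longer-needed page
 * table pages, flush the IOTLB so the hardware cannot walk them any more,
 * and only then hand the pages back:
 *
 *      struct page *freelist = domain_unmap(domain, start_pfn, last_pfn);
 *      // ... IOTLB invalidation for [start_pfn, last_pfn] ...
 *      dma_free_pagelist(freelist);
 */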

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
        struct root_entry *root;
        unsigned long flags;

        root = (struct root_entry *)alloc_pgtable_page(iommu->node);
        if (!root) {
                pr_err("Allocating root entry for %s failed\n",
                        iommu->name);
                return -ENOMEM;
        }

        __iommu_flush_cache(iommu, root, ROOT_SIZE);

        spin_lock_irqsave(&iommu->lock, flags);
        iommu->root_entry = root;
        spin_unlock_irqrestore(&iommu->lock, flags);

        return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
        u64 addr;
        u32 sts;
        unsigned long flag;

        addr = virt_to_phys(iommu->root_entry);
        if (ecs_enabled(iommu))
                addr |= DMA_RTADDR_RTT;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);

        writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_RTPS), sts);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
        u32 val;
        unsigned long flag;

        if (!rwbf_quirk && !cap_rwbf(iommu->cap))
                return;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (!(val & DMA_GSTS_WBFS)), val);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
1267
1268/* return value determine if we need a write buffer flush */
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001269static void __iommu_flush_context(struct intel_iommu *iommu,
1270 u16 did, u16 source_id, u8 function_mask,
1271 u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001272{
1273 u64 val = 0;
1274 unsigned long flag;
1275
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001276 switch (type) {
1277 case DMA_CCMD_GLOBAL_INVL:
1278 val = DMA_CCMD_GLOBAL_INVL;
1279 break;
1280 case DMA_CCMD_DOMAIN_INVL:
1281 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1282 break;
1283 case DMA_CCMD_DEVICE_INVL:
1284 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1285 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1286 break;
1287 default:
1288 BUG();
1289 }
1290 val |= DMA_CCMD_ICC;
1291
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001292 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001293 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1294
 1295	/* Make sure hardware completes it */
1296 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1297 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1298
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001299 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001300}
1301
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001302/* return value determines if we need a write buffer flush */
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001303static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1304 u64 addr, unsigned int size_order, u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001305{
1306 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1307 u64 val = 0, val_iva = 0;
1308 unsigned long flag;
1309
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001310 switch (type) {
1311 case DMA_TLB_GLOBAL_FLUSH:
 1312		/* global flush doesn't need to set IVA_REG */
1313 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1314 break;
1315 case DMA_TLB_DSI_FLUSH:
1316 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1317 break;
1318 case DMA_TLB_PSI_FLUSH:
1319 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
David Woodhouseea8ea462014-03-05 17:09:32 +00001320 /* IH bit is passed in as part of address */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001321 val_iva = size_order | addr;
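		/*
		 * For PSI the address and size order go into the IVA register;
		 * the command itself is written to the IOTLB register below.
		 */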
1322 break;
1323 default:
1324 BUG();
1325 }
1326 /* Note: set drain read/write */
1327#if 0
1328 /*
 1329	 * This is probably only here to be extra safe; it looks like we can
 1330	 * ignore it without any impact.
1331 */
1332 if (cap_read_drain(iommu->cap))
1333 val |= DMA_TLB_READ_DRAIN;
1334#endif
1335 if (cap_write_drain(iommu->cap))
1336 val |= DMA_TLB_WRITE_DRAIN;
1337
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001338 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001339 /* Note: Only uses first TLB reg currently */
1340 if (val_iva)
1341 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1342 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1343
 1344	/* Make sure hardware completes it */
1345 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1346 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1347
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001348 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001349
1350 /* check IOTLB invalidation granularity */
1351 if (DMA_TLB_IAIG(val) == 0)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001352 pr_err("Flush IOTLB failed\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001353 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001354 pr_debug("TLB flush request %Lx, actual %Lx\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001355 (unsigned long long)DMA_TLB_IIRG(type),
1356 (unsigned long long)DMA_TLB_IAIG(val));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001357}
1358
David Woodhouse64ae8922014-03-09 12:52:30 -07001359static struct device_domain_info *
1360iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
1361 u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001362{
Quentin Lambert2f119c72015-02-06 10:59:53 +01001363 bool found = false;
Yu Zhao93a23a72009-05-18 13:51:37 +08001364 unsigned long flags;
1365 struct device_domain_info *info;
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001366 struct pci_dev *pdev;
Yu Zhao93a23a72009-05-18 13:51:37 +08001367
1368 if (!ecap_dev_iotlb_support(iommu->ecap))
1369 return NULL;
1370
1371 if (!iommu->qi)
1372 return NULL;
1373
1374 spin_lock_irqsave(&device_domain_lock, flags);
1375 list_for_each_entry(info, &domain->devices, link)
Jiang Liuc3b497c2014-07-11 14:19:25 +08001376 if (info->iommu == iommu && info->bus == bus &&
1377 info->devfn == devfn) {
Quentin Lambert2f119c72015-02-06 10:59:53 +01001378 found = true;
Yu Zhao93a23a72009-05-18 13:51:37 +08001379 break;
1380 }
1381 spin_unlock_irqrestore(&device_domain_lock, flags);
1382
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001383 if (!found || !info->dev || !dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001384 return NULL;
1385
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001386 pdev = to_pci_dev(info->dev);
1387
1388 if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
Yu Zhao93a23a72009-05-18 13:51:37 +08001389 return NULL;
1390
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001391 if (!dmar_find_matched_atsr_unit(pdev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001392 return NULL;
1393
Yu Zhao93a23a72009-05-18 13:51:37 +08001394 return info;
1395}
1396
1397static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1398{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001399 if (!info || !dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001400 return;
1401
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001402 pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
Yu Zhao93a23a72009-05-18 13:51:37 +08001403}
1404
1405static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1406{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001407 if (!info->dev || !dev_is_pci(info->dev) ||
1408 !pci_ats_enabled(to_pci_dev(info->dev)))
Yu Zhao93a23a72009-05-18 13:51:37 +08001409 return;
1410
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001411 pci_disable_ats(to_pci_dev(info->dev));
Yu Zhao93a23a72009-05-18 13:51:37 +08001412}
1413
1414static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1415 u64 addr, unsigned mask)
1416{
1417 u16 sid, qdep;
1418 unsigned long flags;
1419 struct device_domain_info *info;
1420
1421 spin_lock_irqsave(&device_domain_lock, flags);
1422 list_for_each_entry(info, &domain->devices, link) {
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001423 struct pci_dev *pdev;
1424 if (!info->dev || !dev_is_pci(info->dev))
1425 continue;
1426
1427 pdev = to_pci_dev(info->dev);
1428 if (!pci_ats_enabled(pdev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001429 continue;
1430
1431 sid = info->bus << 8 | info->devfn;
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001432 qdep = pci_ats_queue_depth(pdev);
Yu Zhao93a23a72009-05-18 13:51:37 +08001433 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1434 }
1435 spin_unlock_irqrestore(&device_domain_lock, flags);
1436}
1437
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001438static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
David Woodhouseea8ea462014-03-05 17:09:32 +00001439 unsigned long pfn, unsigned int pages, int ih, int map)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001440{
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001441 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
David Woodhouse03d6a242009-06-28 15:33:46 +01001442 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
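	/*
	 * PSI takes a power-of-two page count (expressed as an address-mask
	 * order) and a page-aligned base address, computed above.
	 */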
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001443
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001444 BUG_ON(pages == 0);
1445
David Woodhouseea8ea462014-03-05 17:09:32 +00001446 if (ih)
1447 ih = 1 << 6;
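	/* IH occupies bit 6 of the invalidation address passed to hardware. */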
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001448 /*
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001449	 * Fall back to domain-selective flush if there is no PSI support or the
 1450	 * size is too big.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001451	 * PSI requires the size to be a power of two pages, with the base
 1452	 * address naturally aligned to that size.
1453 */
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001454 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1455 iommu->flush.flush_iotlb(iommu, did, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001456 DMA_TLB_DSI_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001457 else
David Woodhouseea8ea462014-03-05 17:09:32 +00001458 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001459 DMA_TLB_PSI_FLUSH);
Yu Zhaobf92df32009-06-29 11:31:45 +08001460
1461 /*
Nadav Amit82653632010-04-01 13:24:40 +03001462	 * In caching mode, changes of pages from non-present to present require
 1463	 * a flush. However, the device IOTLB doesn't need to be flushed in this case.
Yu Zhaobf92df32009-06-29 11:31:45 +08001464 */
Nadav Amit82653632010-04-01 13:24:40 +03001465 if (!cap_caching_mode(iommu->cap) || !map)
Yu Zhao93a23a72009-05-18 13:51:37 +08001466 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001467}
1468
mark grossf8bab732008-02-08 04:18:38 -08001469static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1470{
1471 u32 pmen;
1472 unsigned long flags;
1473
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001474 raw_spin_lock_irqsave(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001475 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1476 pmen &= ~DMA_PMEN_EPM;
1477 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1478
1479 /* wait for the protected region status bit to clear */
1480 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1481 readl, !(pmen & DMA_PMEN_PRS), pmen);
1482
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001483 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001484}
1485
Jiang Liu2a41cce2014-07-11 14:19:33 +08001486static void iommu_enable_translation(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001487{
1488 u32 sts;
1489 unsigned long flags;
1490
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001491 raw_spin_lock_irqsave(&iommu->register_lock, flags);
David Woodhousec416daa2009-05-10 20:30:58 +01001492 iommu->gcmd |= DMA_GCMD_TE;
1493 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001494
 1495	/* Make sure hardware completes it */
1496 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001497 readl, (sts & DMA_GSTS_TES), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001498
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001499 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001500}
1501
Jiang Liu2a41cce2014-07-11 14:19:33 +08001502static void iommu_disable_translation(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001503{
1504 u32 sts;
1505 unsigned long flag;
1506
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001507 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001508 iommu->gcmd &= ~DMA_GCMD_TE;
1509 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1510
 1511	/* Make sure hardware completes it */
1512 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001513 readl, (!(sts & DMA_GSTS_TES)), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001514
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001515 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001516}
1517
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07001518
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001519static int iommu_init_domains(struct intel_iommu *iommu)
1520{
1521 unsigned long ndomains;
1522 unsigned long nlongs;
1523
1524 ndomains = cap_ndoms(iommu->cap);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001525 pr_debug("%s: Number of Domains supported <%ld>\n",
1526 iommu->name, ndomains);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001527 nlongs = BITS_TO_LONGS(ndomains);
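	/*
	 * domain_ids is a bitmap with one bit per supported domain id, so
	 * even the 64K-domain case noted below only needs an 8KiB bitmap.
	 */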
1528
Donald Dutile94a91b502009-08-20 16:51:34 -04001529 spin_lock_init(&iommu->lock);
1530
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001531	/* TBD: there might be up to 64K domains;
 1532	 * consider a different allocation scheme for future chips.
1533 */
1534 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1535 if (!iommu->domain_ids) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001536 pr_err("%s: Allocating domain id array failed\n",
1537 iommu->name);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001538 return -ENOMEM;
1539 }
1540 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1541 GFP_KERNEL);
1542 if (!iommu->domains) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001543 pr_err("%s: Allocating domain array failed\n",
1544 iommu->name);
Jiang Liu852bdb02014-01-06 14:18:11 +08001545 kfree(iommu->domain_ids);
1546 iommu->domain_ids = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001547 return -ENOMEM;
1548 }
1549
1550 /*
Joerg Roedelc0e8a6c2015-07-21 09:39:46 +02001551 * If Caching mode is set, then invalid translations are tagged
1552 * with domain-id 0, hence we need to pre-allocate it. We also
1553 * use domain-id 0 as a marker for non-allocated domain-id, so
1554 * make sure it is not used for a real domain.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001555 */
Joerg Roedelc0e8a6c2015-07-21 09:39:46 +02001556 set_bit(0, iommu->domain_ids);
1557
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001558 return 0;
1559}
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001560
Jiang Liuffebeb42014-11-09 22:48:02 +08001561static void disable_dmar_iommu(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001562{
1563 struct dmar_domain *domain;
Jiang Liu2a46ddf2014-07-11 14:19:30 +08001564 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001565
Donald Dutile94a91b502009-08-20 16:51:34 -04001566 if ((iommu->domains) && (iommu->domain_ids)) {
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001567 for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
Jiang Liua4eaa862014-02-19 14:07:30 +08001568 /*
 1569			 * Domain id 0 is reserved for invalid translation
Joerg Roedelc0e8a6c2015-07-21 09:39:46 +02001570			 * when hardware supports caching mode, and is also
 1571			 * used as the marker for non-allocated domain ids.
Jiang Liua4eaa862014-02-19 14:07:30 +08001572 */
Joerg Roedelc0e8a6c2015-07-21 09:39:46 +02001573 if (i == 0)
Jiang Liua4eaa862014-02-19 14:07:30 +08001574 continue;
1575
Donald Dutile94a91b502009-08-20 16:51:34 -04001576 domain = iommu->domains[i];
1577 clear_bit(i, iommu->domain_ids);
Jiang Liu129ad282014-07-11 14:19:31 +08001578 if (domain_detach_iommu(domain, iommu) == 0 &&
1579 !domain_type_is_vm(domain))
Jiang Liu92d03cc2014-02-19 14:07:28 +08001580 domain_exit(domain);
Weidong Han5e98c4b2008-12-08 23:03:27 +08001581 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001582 }
1583
1584 if (iommu->gcmd & DMA_GCMD_TE)
1585 iommu_disable_translation(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08001586}
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001587
Jiang Liuffebeb42014-11-09 22:48:02 +08001588static void free_dmar_iommu(struct intel_iommu *iommu)
1589{
1590 if ((iommu->domains) && (iommu->domain_ids)) {
1591 kfree(iommu->domains);
1592 kfree(iommu->domain_ids);
1593 iommu->domains = NULL;
1594 iommu->domain_ids = NULL;
1595 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001596
Weidong Hand9630fe2008-12-08 11:06:32 +08001597 g_iommus[iommu->seq_id] = NULL;
1598
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001599 /* free context mapping */
1600 free_context_table(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001601}
1602
Jiang Liuab8dfe22014-07-11 14:19:27 +08001603static struct dmar_domain *alloc_domain(int flags)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001604{
Jiang Liu92d03cc2014-02-19 14:07:28 +08001605	/* domain id for virtual machine domains; it won't be set in context entries */
1606 static atomic_t vm_domid = ATOMIC_INIT(0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001607 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001608
1609 domain = alloc_domain_mem();
1610 if (!domain)
1611 return NULL;
1612
Jiang Liuab8dfe22014-07-11 14:19:27 +08001613 memset(domain, 0, sizeof(*domain));
Suresh Siddha4c923d42009-10-02 11:01:24 -07001614 domain->nid = -1;
Jiang Liuab8dfe22014-07-11 14:19:27 +08001615 domain->flags = flags;
Jiang Liu92d03cc2014-02-19 14:07:28 +08001616 spin_lock_init(&domain->iommu_lock);
1617 INIT_LIST_HEAD(&domain->devices);
Jiang Liuab8dfe22014-07-11 14:19:27 +08001618 if (flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
Jiang Liu92d03cc2014-02-19 14:07:28 +08001619 domain->id = atomic_inc_return(&vm_domid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001620
1621 return domain;
1622}
1623
Jiang Liufb170fb2014-07-11 14:19:28 +08001624static int __iommu_attach_domain(struct dmar_domain *domain,
1625 struct intel_iommu *iommu)
1626{
1627 int num;
1628 unsigned long ndomains;
1629
1630 ndomains = cap_ndoms(iommu->cap);
1631 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1632 if (num < ndomains) {
1633 set_bit(num, iommu->domain_ids);
1634 iommu->domains[num] = domain;
Joerg Roedelc0e8a6c2015-07-21 09:39:46 +02001635 domain->iommu_did[iommu->seq_id] = num;
Jiang Liufb170fb2014-07-11 14:19:28 +08001636 } else {
1637 num = -ENOSPC;
1638 }
1639
1640 return num;
1641}
1642
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001643static int iommu_attach_domain(struct dmar_domain *domain,
1644 struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001645{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001646 int num;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001647 unsigned long flags;
1648
Weidong Han8c11e792008-12-08 15:29:22 +08001649 spin_lock_irqsave(&iommu->lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08001650 num = __iommu_attach_domain(domain, iommu);
Jiang Liu44bde612014-07-11 14:19:29 +08001651 spin_unlock_irqrestore(&iommu->lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08001652 if (num < 0)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001653 pr_err("%s: No free domain ids\n", iommu->name);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001654
Jiang Liufb170fb2014-07-11 14:19:28 +08001655 return num;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001656}
1657
Jiang Liu44bde612014-07-11 14:19:29 +08001658static int iommu_attach_vm_domain(struct dmar_domain *domain,
1659 struct intel_iommu *iommu)
1660{
1661 int num;
Jiang Liu44bde612014-07-11 14:19:29 +08001662
Joerg Roedelc0e8a6c2015-07-21 09:39:46 +02001663 num = domain->iommu_did[iommu->seq_id];
1664 if (num)
1665 return num;
Jiang Liu44bde612014-07-11 14:19:29 +08001666
1667 return __iommu_attach_domain(domain, iommu);
1668}
1669
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001670static void iommu_detach_domain(struct dmar_domain *domain,
1671 struct intel_iommu *iommu)
1672{
1673 unsigned long flags;
Joerg Roedelc0e8a6c2015-07-21 09:39:46 +02001674 int num;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001675
1676 spin_lock_irqsave(&iommu->lock, flags);
Joerg Roedelc0e8a6c2015-07-21 09:39:46 +02001677
1678 num = domain->iommu_did[iommu->seq_id];
1679
 1680	if (num) {
 1681		clear_bit(num, iommu->domain_ids);
 1682		iommu->domains[num] = NULL;
 1683	}
1685
Weidong Han8c11e792008-12-08 15:29:22 +08001686 spin_unlock_irqrestore(&iommu->lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001687}
1688
Jiang Liufb170fb2014-07-11 14:19:28 +08001689static void domain_attach_iommu(struct dmar_domain *domain,
1690 struct intel_iommu *iommu)
1691{
1692 unsigned long flags;
1693
1694 spin_lock_irqsave(&domain->iommu_lock, flags);
1695 if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
1696 domain->iommu_count++;
1697 if (domain->iommu_count == 1)
1698 domain->nid = iommu->node;
1699 domain_update_iommu_cap(domain);
1700 }
1701 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1702}
1703
1704static int domain_detach_iommu(struct dmar_domain *domain,
1705 struct intel_iommu *iommu)
1706{
1707 unsigned long flags;
1708 int count = INT_MAX;
1709
1710 spin_lock_irqsave(&domain->iommu_lock, flags);
1711 if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) {
1712 count = --domain->iommu_count;
1713 domain_update_iommu_cap(domain);
Joerg Roedelc0e8a6c2015-07-21 09:39:46 +02001714 domain->iommu_did[iommu->seq_id] = 0;
Jiang Liufb170fb2014-07-11 14:19:28 +08001715 }
1716 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1717
1718 return count;
1719}
1720
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001721static struct iova_domain reserved_iova_list;
Mark Gross8a443df2008-03-04 14:59:31 -08001722static struct lock_class_key reserved_rbtree_key;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001723
Joseph Cihula51a63e62011-03-21 11:04:24 -07001724static int dmar_init_reserved_ranges(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001725{
1726 struct pci_dev *pdev = NULL;
1727 struct iova *iova;
1728 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001729
Robin Murphy0fb5fe82015-01-12 17:51:16 +00001730 init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
1731 DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001732
Mark Gross8a443df2008-03-04 14:59:31 -08001733 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1734 &reserved_rbtree_key);
1735
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001736 /* IOAPIC ranges shouldn't be accessed by DMA */
1737 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1738 IOVA_PFN(IOAPIC_RANGE_END));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001739 if (!iova) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001740 pr_err("Reserve IOAPIC range failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001741 return -ENODEV;
1742 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001743
1744 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1745 for_each_pci_dev(pdev) {
1746 struct resource *r;
1747
1748 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1749 r = &pdev->resource[i];
1750 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1751 continue;
David Woodhouse1a4a4552009-06-28 16:00:42 +01001752 iova = reserve_iova(&reserved_iova_list,
1753 IOVA_PFN(r->start),
1754 IOVA_PFN(r->end));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001755 if (!iova) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001756 pr_err("Reserve iova failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001757 return -ENODEV;
1758 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001759 }
1760 }
Joseph Cihula51a63e62011-03-21 11:04:24 -07001761 return 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001762}
1763
1764static void domain_reserve_special_ranges(struct dmar_domain *domain)
1765{
1766 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1767}
1768
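/*
 * Round the guest address width up to the next width that maps onto whole
 * 9-bit page-table levels (12 + 9 * n bits), capped at 64.  For example, a
 * 40-bit guest width is adjusted to 48 bits.
 */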
1769static inline int guestwidth_to_adjustwidth(int gaw)
1770{
1771 int agaw;
1772 int r = (gaw - 12) % 9;
1773
1774 if (r == 0)
1775 agaw = gaw;
1776 else
1777 agaw = gaw + 9 - r;
1778 if (agaw > 64)
1779 agaw = 64;
1780 return agaw;
1781}
1782
1783static int domain_init(struct dmar_domain *domain, int guest_width)
1784{
1785 struct intel_iommu *iommu;
1786 int adjust_width, agaw;
1787 unsigned long sagaw;
1788
Robin Murphy0fb5fe82015-01-12 17:51:16 +00001789 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
1790 DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001791 domain_reserve_special_ranges(domain);
1792
1793 /* calculate AGAW */
Weidong Han8c11e792008-12-08 15:29:22 +08001794 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001795 if (guest_width > cap_mgaw(iommu->cap))
1796 guest_width = cap_mgaw(iommu->cap);
1797 domain->gaw = guest_width;
1798 adjust_width = guestwidth_to_adjustwidth(guest_width);
1799 agaw = width_to_agaw(adjust_width);
1800 sagaw = cap_sagaw(iommu->cap);
1801 if (!test_bit(agaw, &sagaw)) {
1802 /* hardware doesn't support it, choose a bigger one */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001803 pr_debug("Hardware doesn't support agaw %d\n", agaw);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001804 agaw = find_next_bit(&sagaw, 5, agaw);
1805 if (agaw >= 5)
1806 return -ENODEV;
1807 }
1808 domain->agaw = agaw;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001809
Weidong Han8e6040972008-12-08 15:49:06 +08001810 if (ecap_coherent(iommu->ecap))
1811 domain->iommu_coherency = 1;
1812 else
1813 domain->iommu_coherency = 0;
1814
Sheng Yang58c610b2009-03-18 15:33:05 +08001815 if (ecap_sc_support(iommu->ecap))
1816 domain->iommu_snooping = 1;
1817 else
1818 domain->iommu_snooping = 0;
1819
David Woodhouse214e39a2014-03-19 10:38:49 +00001820 if (intel_iommu_superpage)
1821 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1822 else
1823 domain->iommu_superpage = 0;
1824
Suresh Siddha4c923d42009-10-02 11:01:24 -07001825 domain->nid = iommu->node;
Weidong Hanc7151a82008-12-08 22:51:37 +08001826
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001827 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07001828 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001829 if (!domain->pgd)
1830 return -ENOMEM;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001831 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001832 return 0;
1833}
1834
1835static void domain_exit(struct dmar_domain *domain)
1836{
Alex Williamson46ebb7a2015-07-14 14:48:53 -06001837 struct dmar_drhd_unit *drhd;
1838 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00001839 struct page *freelist = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001840
 1841	/* Domain 0 is reserved, so don't process it */
1842 if (!domain)
1843 return;
1844
Alex Williamson7b668352011-05-24 12:02:41 +01001845 /* Flush any lazy unmaps that may reference this domain */
1846 if (!intel_iommu_strict)
1847 flush_unmaps_timeout(0);
1848
Jiang Liu92d03cc2014-02-19 14:07:28 +08001849 /* remove associated devices */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001850 domain_remove_dev_info(domain);
Jiang Liu92d03cc2014-02-19 14:07:28 +08001851
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001852 /* destroy iovas */
1853 put_iova_domain(&domain->iovad);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001854
David Woodhouseea8ea462014-03-05 17:09:32 +00001855 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001856
Jiang Liu92d03cc2014-02-19 14:07:28 +08001857 /* clear attached or cached domains */
Jiang Liu0e242612014-02-19 14:07:34 +08001858 rcu_read_lock();
Alex Williamson46ebb7a2015-07-14 14:48:53 -06001859 for_each_active_iommu(iommu, drhd)
1860 if (domain_type_is_vm(domain) ||
1861 test_bit(iommu->seq_id, domain->iommu_bmp))
1862 iommu_detach_domain(domain, iommu);
Jiang Liu0e242612014-02-19 14:07:34 +08001863 rcu_read_unlock();
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001864
David Woodhouseea8ea462014-03-05 17:09:32 +00001865 dma_free_pagelist(freelist);
1866
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001867 free_domain_mem(domain);
1868}
1869
David Woodhouse64ae8922014-03-09 12:52:30 -07001870static int domain_context_mapping_one(struct dmar_domain *domain,
1871 struct intel_iommu *iommu,
1872 u8 bus, u8 devfn, int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001873{
1874 struct context_entry *context;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001875 unsigned long flags;
Weidong Hanea6606b2008-12-08 23:08:15 +08001876 struct dma_pte *pgd;
Weidong Hanea6606b2008-12-08 23:08:15 +08001877 int id;
1878 int agaw;
Yu Zhao93a23a72009-05-18 13:51:37 +08001879 struct device_domain_info *info = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001880
1881 pr_debug("Set context mapping for %02x:%02x.%d\n",
1882 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001883
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001884 BUG_ON(!domain->pgd);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001885 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1886 translation != CONTEXT_TT_MULTI_LEVEL);
Weidong Han5331fe62008-12-08 23:00:00 +08001887
David Woodhouse03ecc322015-02-13 14:35:21 +00001888 spin_lock_irqsave(&iommu->lock, flags);
1889 context = iommu_context_addr(iommu, bus, devfn, 1);
1890 spin_unlock_irqrestore(&iommu->lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001891 if (!context)
1892 return -ENOMEM;
1893 spin_lock_irqsave(&iommu->lock, flags);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001894 if (context_present(context)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001895 spin_unlock_irqrestore(&iommu->lock, flags);
1896 return 0;
1897 }
1898
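	/* The entry is known to be non-present here; start from a clean slate. */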
Joerg Roedelcf484d02015-06-12 12:21:46 +02001899 context_clear_entry(context);
1900
Weidong Hanea6606b2008-12-08 23:08:15 +08001901 id = domain->id;
1902 pgd = domain->pgd;
1903
Jiang Liuab8dfe22014-07-11 14:19:27 +08001904 if (domain_type_is_vm_or_si(domain)) {
Jiang Liu44bde612014-07-11 14:19:29 +08001905 if (domain_type_is_vm(domain)) {
1906 id = iommu_attach_vm_domain(domain, iommu);
Jiang Liufb170fb2014-07-11 14:19:28 +08001907 if (id < 0) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001908 spin_unlock_irqrestore(&iommu->lock, flags);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001909 pr_err("%s: No free domain ids\n", iommu->name);
Weidong Hanea6606b2008-12-08 23:08:15 +08001910 return -EFAULT;
1911 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001912 }
1913
 1914		/* Skip the top levels of the page tables for an
 1915		 * iommu that has a smaller agaw than the default.
Chris Wright1672af12009-12-02 12:06:34 -08001916 * Unnecessary for PT mode.
Weidong Hanea6606b2008-12-08 23:08:15 +08001917 */
Chris Wright1672af12009-12-02 12:06:34 -08001918 if (translation != CONTEXT_TT_PASS_THROUGH) {
1919 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1920 pgd = phys_to_virt(dma_pte_addr(pgd));
1921 if (!dma_pte_present(pgd)) {
1922 spin_unlock_irqrestore(&iommu->lock, flags);
1923 return -ENOMEM;
1924 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001925 }
1926 }
1927 }
1928
1929 context_set_domain_id(context, id);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001930
Yu Zhao93a23a72009-05-18 13:51:37 +08001931 if (translation != CONTEXT_TT_PASS_THROUGH) {
David Woodhouse64ae8922014-03-09 12:52:30 -07001932 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
Yu Zhao93a23a72009-05-18 13:51:37 +08001933 translation = info ? CONTEXT_TT_DEV_IOTLB :
1934 CONTEXT_TT_MULTI_LEVEL;
1935 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001936 /*
 1937	 * In pass-through mode, AW must be programmed to indicate the largest
 1938	 * AGAW value supported by the hardware, and ASR is ignored by hardware.
1939 */
Yu Zhao93a23a72009-05-18 13:51:37 +08001940 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001941 context_set_address_width(context, iommu->msagaw);
Yu Zhao93a23a72009-05-18 13:51:37 +08001942 else {
1943 context_set_address_root(context, virt_to_phys(pgd));
1944 context_set_address_width(context, iommu->agaw);
1945 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001946
1947 context_set_translation_type(context, translation);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001948 context_set_fault_enable(context);
1949 context_set_present(context);
Weidong Han5331fe62008-12-08 23:00:00 +08001950 domain_flush_cache(domain, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001951
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001952 /*
1953 * It's a non-present to present mapping. If hardware doesn't cache
 1954	 * non-present entries we only need to flush the write-buffer. If it
 1955	 * _does_ cache non-present entries, then it does so in the special
1956 * domain #0, which we have to flush:
1957 */
1958 if (cap_caching_mode(iommu->cap)) {
1959 iommu->flush.flush_context(iommu, 0,
1960 (((u16)bus) << 8) | devfn,
1961 DMA_CCMD_MASK_NOBIT,
1962 DMA_CCMD_DEVICE_INVL);
Jiang Liu18fd7792014-07-11 14:19:26 +08001963 iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001964 } else {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001965 iommu_flush_write_buffer(iommu);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001966 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001967 iommu_enable_dev_iotlb(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001968 spin_unlock_irqrestore(&iommu->lock, flags);
Weidong Hanc7151a82008-12-08 22:51:37 +08001969
Jiang Liufb170fb2014-07-11 14:19:28 +08001970 domain_attach_iommu(domain, iommu);
1971
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001972 return 0;
1973}
1974
Alex Williamson579305f2014-07-03 09:51:43 -06001975struct domain_context_mapping_data {
1976 struct dmar_domain *domain;
1977 struct intel_iommu *iommu;
1978 int translation;
1979};
1980
1981static int domain_context_mapping_cb(struct pci_dev *pdev,
1982 u16 alias, void *opaque)
1983{
1984 struct domain_context_mapping_data *data = opaque;
1985
1986 return domain_context_mapping_one(data->domain, data->iommu,
1987 PCI_BUS_NUM(alias), alias & 0xff,
1988 data->translation);
1989}
1990
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001991static int
David Woodhousee1f167f2014-03-09 15:24:46 -07001992domain_context_mapping(struct dmar_domain *domain, struct device *dev,
1993 int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001994{
David Woodhouse64ae8922014-03-09 12:52:30 -07001995 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07001996 u8 bus, devfn;
Alex Williamson579305f2014-07-03 09:51:43 -06001997 struct domain_context_mapping_data data;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001998
David Woodhousee1f167f2014-03-09 15:24:46 -07001999 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse64ae8922014-03-09 12:52:30 -07002000 if (!iommu)
2001 return -ENODEV;
2002
Alex Williamson579305f2014-07-03 09:51:43 -06002003 if (!dev_is_pci(dev))
2004 return domain_context_mapping_one(domain, iommu, bus, devfn,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002005 translation);
Alex Williamson579305f2014-07-03 09:51:43 -06002006
2007 data.domain = domain;
2008 data.iommu = iommu;
2009 data.translation = translation;
2010
2011 return pci_for_each_dma_alias(to_pci_dev(dev),
2012 &domain_context_mapping_cb, &data);
2013}
2014
2015static int domain_context_mapped_cb(struct pci_dev *pdev,
2016 u16 alias, void *opaque)
2017{
2018 struct intel_iommu *iommu = opaque;
2019
2020 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002021}
2022
David Woodhousee1f167f2014-03-09 15:24:46 -07002023static int domain_context_mapped(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002024{
Weidong Han5331fe62008-12-08 23:00:00 +08002025 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07002026 u8 bus, devfn;
Weidong Han5331fe62008-12-08 23:00:00 +08002027
David Woodhousee1f167f2014-03-09 15:24:46 -07002028 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08002029 if (!iommu)
2030 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002031
Alex Williamson579305f2014-07-03 09:51:43 -06002032 if (!dev_is_pci(dev))
2033 return device_context_mapped(iommu, bus, devfn);
David Woodhousee1f167f2014-03-09 15:24:46 -07002034
Alex Williamson579305f2014-07-03 09:51:43 -06002035 return !pci_for_each_dma_alias(to_pci_dev(dev),
2036 domain_context_mapped_cb, iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002037}
2038
Fenghua Yuf5329592009-08-04 15:09:37 -07002039/* Returns the number of VT-d pages, but aligned to the MM page size */
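/*
 * For example, with 4KiB MM pages a host_addr offset of 0x200 and a size of
 * 0x1000 round out to two MM pages, so two VT-d pages are returned.
 */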
2040static inline unsigned long aligned_nrpages(unsigned long host_addr,
2041 size_t size)
2042{
2043 host_addr &= ~PAGE_MASK;
2044 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
2045}
2046
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002047/* Return largest possible superpage level for a given mapping */
2048static inline int hardware_largepage_caps(struct dmar_domain *domain,
2049 unsigned long iov_pfn,
2050 unsigned long phy_pfn,
2051 unsigned long pages)
2052{
2053 int support, level = 1;
2054 unsigned long pfnmerge;
2055
2056 support = domain->iommu_superpage;
2057
2058 /* To use a large page, the virtual *and* physical addresses
2059 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
2060 of them will mean we have to use smaller pages. So just
2061 merge them and check both at once. */
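	/*
	 * For example, if both pfns are 2MiB-aligned (low 9 bits clear) and at
	 * least 512 pages are being mapped, this returns level 2, provided
	 * domain->iommu_superpage is at least 1.
	 */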
2062 pfnmerge = iov_pfn | phy_pfn;
2063
2064 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
2065 pages >>= VTD_STRIDE_SHIFT;
2066 if (!pages)
2067 break;
2068 pfnmerge >>= VTD_STRIDE_SHIFT;
2069 level++;
2070 support--;
2071 }
2072 return level;
2073}
2074
David Woodhouse9051aa02009-06-29 12:30:54 +01002075static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2076 struct scatterlist *sg, unsigned long phys_pfn,
2077 unsigned long nr_pages, int prot)
David Woodhousee1605492009-06-29 11:17:38 +01002078{
2079 struct dma_pte *first_pte = NULL, *pte = NULL;
David Woodhouse9051aa02009-06-29 12:30:54 +01002080 phys_addr_t uninitialized_var(pteval);
Jiang Liucc4f14a2014-11-26 09:42:10 +08002081 unsigned long sg_res = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002082 unsigned int largepage_lvl = 0;
2083 unsigned long lvl_pages = 0;
David Woodhousee1605492009-06-29 11:17:38 +01002084
Jiang Liu162d1b12014-07-11 14:19:35 +08002085 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
David Woodhousee1605492009-06-29 11:17:38 +01002086
2087 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
2088 return -EINVAL;
2089
2090 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
2091
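	/*
	 * With no scatterlist the mapping is physically contiguous: sg_res
	 * covers the whole request and pteval is fixed up front.  For
	 * scatterlist mappings both are refilled per segment in the loop.
	 */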
Jiang Liucc4f14a2014-11-26 09:42:10 +08002092 if (!sg) {
2093 sg_res = nr_pages;
David Woodhouse9051aa02009-06-29 12:30:54 +01002094 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
2095 }
2096
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002097 while (nr_pages > 0) {
David Woodhousec85994e2009-07-01 19:21:24 +01002098 uint64_t tmp;
2099
David Woodhousee1605492009-06-29 11:17:38 +01002100 if (!sg_res) {
Fenghua Yuf5329592009-08-04 15:09:37 -07002101 sg_res = aligned_nrpages(sg->offset, sg->length);
David Woodhousee1605492009-06-29 11:17:38 +01002102 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
2103 sg->dma_length = sg->length;
2104 pteval = page_to_phys(sg_page(sg)) | prot;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002105 phys_pfn = pteval >> VTD_PAGE_SHIFT;
David Woodhousee1605492009-06-29 11:17:38 +01002106 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002107
David Woodhousee1605492009-06-29 11:17:38 +01002108 if (!pte) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002109 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2110
David Woodhouse5cf0a762014-03-19 16:07:49 +00002111 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
David Woodhousee1605492009-06-29 11:17:38 +01002112 if (!pte)
2113 return -ENOMEM;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002114 /* It is large page*/
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002115 if (largepage_lvl > 1) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002116 pteval |= DMA_PTE_LARGE_PAGE;
Jiang Liud41a4ad2014-07-11 14:19:34 +08002117 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2118 /*
2119 * Ensure that old small page tables are
2120 * removed to make room for superpage,
2121 * if they exist.
2122 */
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002123 dma_pte_free_pagetable(domain, iov_pfn,
Jiang Liud41a4ad2014-07-11 14:19:34 +08002124 iov_pfn + lvl_pages - 1);
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002125 } else {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002126 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002127 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002128
David Woodhousee1605492009-06-29 11:17:38 +01002129 }
 2130		/* We don't need a lock here; nobody else
 2131		 * touches this iova range
2132 */
David Woodhouse7766a3f2009-07-01 20:27:03 +01002133 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
David Woodhousec85994e2009-07-01 19:21:24 +01002134 if (tmp) {
David Woodhouse1bf20f02009-06-29 22:06:43 +01002135 static int dumps = 5;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002136 pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2137 iov_pfn, tmp, (unsigned long long)pteval);
David Woodhouse1bf20f02009-06-29 22:06:43 +01002138 if (dumps) {
2139 dumps--;
2140 debug_dma_dump_mappings(NULL);
2141 }
2142 WARN_ON(1);
2143 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002144
2145 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2146
2147 BUG_ON(nr_pages < lvl_pages);
2148 BUG_ON(sg_res < lvl_pages);
2149
2150 nr_pages -= lvl_pages;
2151 iov_pfn += lvl_pages;
2152 phys_pfn += lvl_pages;
2153 pteval += lvl_pages * VTD_PAGE_SIZE;
2154 sg_res -= lvl_pages;
2155
2156 /* If the next PTE would be the first in a new page, then we
2157 need to flush the cache on the entries we've just written.
2158 And then we'll need to recalculate 'pte', so clear it and
2159 let it get set again in the if (!pte) block above.
2160
2161 If we're done (!nr_pages) we need to flush the cache too.
2162
2163 Also if we've been setting superpages, we may need to
2164 recalculate 'pte' and switch back to smaller pages for the
2165 end of the mapping, if the trailing size is not enough to
2166 use another superpage (i.e. sg_res < lvl_pages). */
David Woodhousee1605492009-06-29 11:17:38 +01002167 pte++;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002168 if (!nr_pages || first_pte_in_page(pte) ||
2169 (largepage_lvl > 1 && sg_res < lvl_pages)) {
David Woodhousee1605492009-06-29 11:17:38 +01002170 domain_flush_cache(domain, first_pte,
2171 (void *)pte - (void *)first_pte);
2172 pte = NULL;
2173 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002174
2175 if (!sg_res && nr_pages)
David Woodhousee1605492009-06-29 11:17:38 +01002176 sg = sg_next(sg);
2177 }
2178 return 0;
2179}
2180
David Woodhouse9051aa02009-06-29 12:30:54 +01002181static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2182 struct scatterlist *sg, unsigned long nr_pages,
2183 int prot)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002184{
David Woodhouse9051aa02009-06-29 12:30:54 +01002185 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2186}
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002187
David Woodhouse9051aa02009-06-29 12:30:54 +01002188static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2189 unsigned long phys_pfn, unsigned long nr_pages,
2190 int prot)
2191{
2192 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002193}
2194
Weidong Hanc7151a82008-12-08 22:51:37 +08002195static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002196{
Weidong Hanc7151a82008-12-08 22:51:37 +08002197 if (!iommu)
2198 return;
Weidong Han8c11e792008-12-08 15:29:22 +08002199
2200 clear_context_table(iommu, bus, devfn);
2201 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002202 DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002203 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002204}
2205
David Woodhouse109b9b02012-05-25 17:43:02 +01002206static inline void unlink_domain_info(struct device_domain_info *info)
2207{
2208 assert_spin_locked(&device_domain_lock);
2209 list_del(&info->link);
2210 list_del(&info->global);
2211 if (info->dev)
David Woodhouse0bcb3e22014-03-06 17:12:03 +00002212 info->dev->archdata.iommu = NULL;
David Woodhouse109b9b02012-05-25 17:43:02 +01002213}
2214
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002215static void domain_remove_dev_info(struct dmar_domain *domain)
2216{
Yijing Wang3a74ca02014-05-20 20:37:47 +08002217 struct device_domain_info *info, *tmp;
Jiang Liufb170fb2014-07-11 14:19:28 +08002218 unsigned long flags;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002219
2220 spin_lock_irqsave(&device_domain_lock, flags);
Yijing Wang3a74ca02014-05-20 20:37:47 +08002221 list_for_each_entry_safe(info, tmp, &domain->devices, link) {
David Woodhouse109b9b02012-05-25 17:43:02 +01002222 unlink_domain_info(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002223 spin_unlock_irqrestore(&device_domain_lock, flags);
2224
Yu Zhao93a23a72009-05-18 13:51:37 +08002225 iommu_disable_dev_iotlb(info);
David Woodhouse7c7faa12014-03-09 13:33:06 -07002226 iommu_detach_dev(info->iommu, info->bus, info->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002227
Jiang Liuab8dfe22014-07-11 14:19:27 +08002228 if (domain_type_is_vm(domain)) {
David Woodhouse7c7faa12014-03-09 13:33:06 -07002229 iommu_detach_dependent_devices(info->iommu, info->dev);
Jiang Liufb170fb2014-07-11 14:19:28 +08002230 domain_detach_iommu(domain, info->iommu);
Jiang Liu92d03cc2014-02-19 14:07:28 +08002231 }
2232
2233 free_devinfo_mem(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002234 spin_lock_irqsave(&device_domain_lock, flags);
2235 }
2236 spin_unlock_irqrestore(&device_domain_lock, flags);
2237}
2238
2239/*
2240 * find_domain
David Woodhouse1525a292014-03-06 16:19:30 +00002241 * Note: struct device->archdata.iommu stores this info
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002242 */
David Woodhouse1525a292014-03-06 16:19:30 +00002243static struct dmar_domain *find_domain(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002244{
2245 struct device_domain_info *info;
2246
2247 /* No lock here, assumes no domain exit in normal case */
David Woodhouse1525a292014-03-06 16:19:30 +00002248 info = dev->archdata.iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002249 if (info)
2250 return info->domain;
2251 return NULL;
2252}
2253
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002254static inline struct device_domain_info *
Jiang Liu745f2582014-02-19 14:07:26 +08002255dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2256{
2257 struct device_domain_info *info;
2258
2259 list_for_each_entry(info, &device_domain_list, global)
David Woodhouse41e80dca2014-03-09 13:55:54 -07002260 if (info->iommu->segment == segment && info->bus == bus &&
Jiang Liu745f2582014-02-19 14:07:26 +08002261 info->devfn == devfn)
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002262 return info;
Jiang Liu745f2582014-02-19 14:07:26 +08002263
2264 return NULL;
2265}
2266
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002267static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
David Woodhouse41e80dca2014-03-09 13:55:54 -07002268 int bus, int devfn,
David Woodhouseb718cd32014-03-09 13:11:33 -07002269 struct device *dev,
2270 struct dmar_domain *domain)
Jiang Liu745f2582014-02-19 14:07:26 +08002271{
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002272 struct dmar_domain *found = NULL;
Jiang Liu745f2582014-02-19 14:07:26 +08002273 struct device_domain_info *info;
2274 unsigned long flags;
2275
2276 info = alloc_devinfo_mem();
2277 if (!info)
David Woodhouseb718cd32014-03-09 13:11:33 -07002278 return NULL;
Jiang Liu745f2582014-02-19 14:07:26 +08002279
Jiang Liu745f2582014-02-19 14:07:26 +08002280 info->bus = bus;
2281 info->devfn = devfn;
2282 info->dev = dev;
2283 info->domain = domain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002284 info->iommu = iommu;
Jiang Liu745f2582014-02-19 14:07:26 +08002285
2286 spin_lock_irqsave(&device_domain_lock, flags);
2287 if (dev)
David Woodhouse0bcb3e22014-03-06 17:12:03 +00002288 found = find_domain(dev);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002289 else {
2290 struct device_domain_info *info2;
David Woodhouse41e80dca2014-03-09 13:55:54 -07002291 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002292 if (info2)
2293 found = info2->domain;
2294 }
Jiang Liu745f2582014-02-19 14:07:26 +08002295 if (found) {
2296 spin_unlock_irqrestore(&device_domain_lock, flags);
2297 free_devinfo_mem(info);
David Woodhouseb718cd32014-03-09 13:11:33 -07002298 /* Caller must free the original domain */
2299 return found;
Jiang Liu745f2582014-02-19 14:07:26 +08002300 }
2301
David Woodhouseb718cd32014-03-09 13:11:33 -07002302 list_add(&info->link, &domain->devices);
2303 list_add(&info->global, &device_domain_list);
2304 if (dev)
2305 dev->archdata.iommu = info;
2306 spin_unlock_irqrestore(&device_domain_lock, flags);
2307
2308 return domain;
Jiang Liu745f2582014-02-19 14:07:26 +08002309}
2310
Alex Williamson579305f2014-07-03 09:51:43 -06002311static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2312{
2313 *(u16 *)opaque = alias;
2314 return 0;
2315}
2316
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002317/* domain is initialized */
David Woodhouse146922e2014-03-09 15:44:17 -07002318static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002319{
Alex Williamson579305f2014-07-03 09:51:43 -06002320 struct dmar_domain *domain, *tmp;
2321 struct intel_iommu *iommu;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002322 struct device_domain_info *info;
Alex Williamson579305f2014-07-03 09:51:43 -06002323 u16 dma_alias;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002324 unsigned long flags;
Yijing Wangaa4d0662014-05-26 20:14:06 +08002325 u8 bus, devfn;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002326
David Woodhouse146922e2014-03-09 15:44:17 -07002327 domain = find_domain(dev);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002328 if (domain)
2329 return domain;
2330
David Woodhouse146922e2014-03-09 15:44:17 -07002331 iommu = device_to_iommu(dev, &bus, &devfn);
2332 if (!iommu)
Alex Williamson579305f2014-07-03 09:51:43 -06002333 return NULL;
2334
2335 if (dev_is_pci(dev)) {
2336 struct pci_dev *pdev = to_pci_dev(dev);
2337
2338 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2339
2340 spin_lock_irqsave(&device_domain_lock, flags);
2341 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2342 PCI_BUS_NUM(dma_alias),
2343 dma_alias & 0xff);
2344 if (info) {
2345 iommu = info->iommu;
2346 domain = info->domain;
2347 }
2348 spin_unlock_irqrestore(&device_domain_lock, flags);
2349
 2350		/* DMA alias already has a domain, use it */
2351 if (info)
2352 goto found_domain;
2353 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002354
David Woodhouse146922e2014-03-09 15:44:17 -07002355 /* Allocate and initialize new domain for the device */
Jiang Liuab8dfe22014-07-11 14:19:27 +08002356 domain = alloc_domain(0);
Jiang Liu745f2582014-02-19 14:07:26 +08002357 if (!domain)
Alex Williamson579305f2014-07-03 09:51:43 -06002358 return NULL;
Jiang Liu44bde612014-07-11 14:19:29 +08002359 domain->id = iommu_attach_domain(domain, iommu);
2360 if (domain->id < 0) {
Alex Williamson2fe9723d2011-03-04 14:52:30 -07002361 free_domain_mem(domain);
Alex Williamson579305f2014-07-03 09:51:43 -06002362 return NULL;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002363 }
Jiang Liufb170fb2014-07-11 14:19:28 +08002364 domain_attach_iommu(domain, iommu);
Alex Williamson579305f2014-07-03 09:51:43 -06002365 if (domain_init(domain, gaw)) {
2366 domain_exit(domain);
2367 return NULL;
2368 }
2369
2370 /* register PCI DMA alias device */
2371 if (dev_is_pci(dev)) {
2372 tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2373 dma_alias & 0xff, NULL, domain);
2374
2375 if (!tmp || tmp != domain) {
2376 domain_exit(domain);
2377 domain = tmp;
2378 }
2379
David Woodhouseb718cd32014-03-09 13:11:33 -07002380 if (!domain)
Alex Williamson579305f2014-07-03 09:51:43 -06002381 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002382 }
2383
2384found_domain:
Alex Williamson579305f2014-07-03 09:51:43 -06002385 tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
2386
2387 if (!tmp || tmp != domain) {
2388 domain_exit(domain);
2389 domain = tmp;
2390 }
David Woodhouseb718cd32014-03-09 13:11:33 -07002391
2392 return domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002393}
2394
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002395static int iommu_identity_mapping;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002396#define IDENTMAP_ALL 1
2397#define IDENTMAP_GFX 2
2398#define IDENTMAP_AZALIA 4
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002399
David Woodhouseb2132032009-06-26 18:50:28 +01002400static int iommu_domain_identity_map(struct dmar_domain *domain,
2401 unsigned long long start,
2402 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002403{
David Woodhousec5395d52009-06-28 16:35:56 +01002404 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2405 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002406
David Woodhousec5395d52009-06-28 16:35:56 +01002407 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2408 dma_to_mm_pfn(last_vpfn))) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002409 pr_err("Reserving iova failed\n");
David Woodhouseb2132032009-06-26 18:50:28 +01002410 return -ENOMEM;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002411 }
2412
David Woodhousec5395d52009-06-28 16:35:56 +01002413 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2414 start, end, domain->id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002415 /*
 2416	 * The RMRR range might overlap with the physical memory range;
 2417	 * clear it first.
2418 */
David Woodhousec5395d52009-06-28 16:35:56 +01002419 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002420
David Woodhousec5395d52009-06-28 16:35:56 +01002421 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2422 last_vpfn - first_vpfn + 1,
David Woodhouse61df7442009-06-28 11:55:58 +01002423 DMA_PTE_READ|DMA_PTE_WRITE);
David Woodhouseb2132032009-06-26 18:50:28 +01002424}
2425
David Woodhouse0b9d9752014-03-09 15:48:15 -07002426static int iommu_prepare_identity_map(struct device *dev,
David Woodhouseb2132032009-06-26 18:50:28 +01002427 unsigned long long start,
2428 unsigned long long end)
2429{
2430 struct dmar_domain *domain;
2431 int ret;
2432
David Woodhouse0b9d9752014-03-09 15:48:15 -07002433 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
David Woodhouseb2132032009-06-26 18:50:28 +01002434 if (!domain)
2435 return -ENOMEM;
2436
David Woodhouse19943b02009-08-04 16:19:20 +01002437 /* For _hardware_ passthrough, don't bother. But for software
2438 passthrough, we do it anyway -- it may indicate a memory
 2439	   range which is reserved in E820 and so didn't get set
 2440	   up in si_domain to start with */
2441 if (domain == si_domain && hw_pass_through) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002442 pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2443 dev_name(dev), start, end);
David Woodhouse19943b02009-08-04 16:19:20 +01002444 return 0;
2445 }
2446
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002447 pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2448 dev_name(dev), start, end);
2449
David Woodhouse5595b522009-12-02 09:21:55 +00002450 if (end < start) {
2451 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2452 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2453 dmi_get_system_info(DMI_BIOS_VENDOR),
2454 dmi_get_system_info(DMI_BIOS_VERSION),
2455 dmi_get_system_info(DMI_PRODUCT_VERSION));
2456 ret = -EIO;
2457 goto error;
2458 }
2459
David Woodhouse2ff729f2009-08-26 14:25:41 +01002460 if (end >> agaw_to_width(domain->agaw)) {
2461 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2462 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2463 agaw_to_width(domain->agaw),
2464 dmi_get_system_info(DMI_BIOS_VENDOR),
2465 dmi_get_system_info(DMI_BIOS_VERSION),
2466 dmi_get_system_info(DMI_PRODUCT_VERSION));
2467 ret = -EIO;
2468 goto error;
2469 }
David Woodhouse19943b02009-08-04 16:19:20 +01002470
David Woodhouseb2132032009-06-26 18:50:28 +01002471 ret = iommu_domain_identity_map(domain, start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002472 if (ret)
2473 goto error;
2474
2475 /* context entry init */
David Woodhouse0b9d9752014-03-09 15:48:15 -07002476 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
David Woodhouseb2132032009-06-26 18:50:28 +01002477 if (ret)
2478 goto error;
2479
2480 return 0;
2481
2482 error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002483 domain_exit(domain);
2484 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002485}
2486
2487static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
David Woodhouse0b9d9752014-03-09 15:48:15 -07002488 struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002489{
David Woodhouse0b9d9752014-03-09 15:48:15 -07002490 if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002491 return 0;
David Woodhouse0b9d9752014-03-09 15:48:15 -07002492 return iommu_prepare_identity_map(dev, rmrr->base_address,
2493 rmrr->end_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002494}
2495
Suresh Siddhad3f13812011-08-23 17:05:25 -07002496#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002497static inline void iommu_prepare_isa(void)
2498{
2499 struct pci_dev *pdev;
2500 int ret;
2501
2502 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2503 if (!pdev)
2504 return;
2505
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002506 pr_info("Prepare 0-16MiB unity mapping for LPC\n");
David Woodhouse0b9d9752014-03-09 15:48:15 -07002507 ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002508
2509 if (ret)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002510 pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002511
Yijing Wang9b27e822014-05-20 20:37:52 +08002512 pci_dev_put(pdev);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002513}
2514#else
2515static inline void iommu_prepare_isa(void)
2516{
2517 return;
2518}
Suresh Siddhad3f13812011-08-23 17:05:25 -07002519#endif /* CONFIG_INTEL_IOMMU_FLOPPY_WA */
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002520
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002521static int md_domain_init(struct dmar_domain *domain, int guest_width);
David Woodhousec7ab48d2009-06-26 19:10:36 +01002522
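/*
 * Set up the static identity (si) domain: attach it to every active
 * IOMMU and, for software passthrough (hw == 0), populate it with 1:1
 * mappings covering the usable physical memory of every online node.
 */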
Matt Kraai071e1372009-08-23 22:30:22 -07002523static int __init si_domain_init(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002524{
2525 struct dmar_drhd_unit *drhd;
2526 struct intel_iommu *iommu;
David Woodhousec7ab48d2009-06-26 19:10:36 +01002527 int nid, ret = 0;
Jiang Liu44bde612014-07-11 14:19:29 +08002528 bool first = true;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002529
Jiang Liuab8dfe22014-07-11 14:19:27 +08002530 si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002531 if (!si_domain)
2532 return -EFAULT;
2533
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002534 for_each_active_iommu(iommu, drhd) {
2535 ret = iommu_attach_domain(si_domain, iommu);
Jiang Liufb170fb2014-07-11 14:19:28 +08002536 if (ret < 0) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002537 domain_exit(si_domain);
2538 return -EFAULT;
Jiang Liu44bde612014-07-11 14:19:29 +08002539 } else if (first) {
2540 si_domain->id = ret;
2541 first = false;
2542 } else if (si_domain->id != ret) {
2543 domain_exit(si_domain);
2544 return -EFAULT;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002545 }
Jiang Liufb170fb2014-07-11 14:19:28 +08002546 domain_attach_iommu(si_domain, iommu);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002547 }
2548
2549 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2550 domain_exit(si_domain);
2551 return -EFAULT;
2552 }
2553
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002554 pr_debug("Identity mapping domain is domain %d\n",
Jiang Liu9544c002014-01-06 14:18:13 +08002555 si_domain->id);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002556
David Woodhouse19943b02009-08-04 16:19:20 +01002557 if (hw)
2558 return 0;
2559
David Woodhousec7ab48d2009-06-26 19:10:36 +01002560 for_each_online_node(nid) {
Tejun Heod4bbf7e2011-11-28 09:46:22 -08002561 unsigned long start_pfn, end_pfn;
2562 int i;
2563
2564 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2565 ret = iommu_domain_identity_map(si_domain,
2566 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2567 if (ret)
2568 return ret;
2569 }
David Woodhousec7ab48d2009-06-26 19:10:36 +01002570 }
2571
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002572 return 0;
2573}
2574
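/* Return 1 if @dev is currently attached to the static identity domain. */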
David Woodhouse9b226622014-03-09 14:03:28 -07002575static int identity_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002576{
2577 struct device_domain_info *info;
2578
2579 if (likely(!iommu_identity_mapping))
2580 return 0;
2581
David Woodhouse9b226622014-03-09 14:03:28 -07002582 info = dev->archdata.iommu;
Mike Traviscb452a42011-05-28 13:15:03 -05002583 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2584 return (info->domain == si_domain);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002585
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002586 return 0;
2587}
2588
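/*
 * Bind @dev to @domain: record the device_domain_info and program the
 * device's context entry with the requested translation type; undo the
 * binding if context mapping fails.
 */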
2589static int domain_add_dev_info(struct dmar_domain *domain,
David Woodhouse5913c9b2014-03-09 16:27:31 -07002590 struct device *dev, int translation)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002591{
David Woodhouse0ac72662014-03-09 13:19:22 -07002592 struct dmar_domain *ndomain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002593 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07002594 u8 bus, devfn;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002595 int ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002596
David Woodhouse5913c9b2014-03-09 16:27:31 -07002597 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002598 if (!iommu)
2599 return -ENODEV;
2600
David Woodhouse5913c9b2014-03-09 16:27:31 -07002601 ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
David Woodhouse0ac72662014-03-09 13:19:22 -07002602 if (ndomain != domain)
2603 return -EBUSY;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002604
David Woodhouse5913c9b2014-03-09 16:27:31 -07002605 ret = domain_context_mapping(domain, dev, translation);
David Woodhousee2ad23d2012-05-25 17:42:54 +01002606 if (ret) {
David Woodhouse5913c9b2014-03-09 16:27:31 -07002607 domain_remove_one_dev_info(domain, dev);
David Woodhousee2ad23d2012-05-25 17:42:54 +01002608 return ret;
2609 }
2610
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002611 return 0;
2612}
2613
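/* Return true if @dev appears in the device scope of any RMRR unit. */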
David Woodhouse0b9d9752014-03-09 15:48:15 -07002614static bool device_has_rmrr(struct device *dev)
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002615{
2616 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002617 struct device *tmp;
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002618 int i;
2619
Jiang Liu0e242612014-02-19 14:07:34 +08002620 rcu_read_lock();
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002621 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002622 /*
2623 * Return TRUE if this RMRR contains the device that
2624 * is passed in.
2625 */
2626 for_each_active_dev_scope(rmrr->devices,
2627 rmrr->devices_cnt, i, tmp)
David Woodhouse0b9d9752014-03-09 15:48:15 -07002628 if (tmp == dev) {
Jiang Liu0e242612014-02-19 14:07:34 +08002629 rcu_read_unlock();
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002630 return true;
Jiang Liub683b232014-02-19 14:07:32 +08002631 }
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002632 }
Jiang Liu0e242612014-02-19 14:07:34 +08002633 rcu_read_unlock();
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002634 return false;
2635}
2636
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002637/*
2638 * There are a couple cases where we need to restrict the functionality of
2639 * devices associated with RMRRs. The first is when evaluating a device for
2640 * identity mapping because problems exist when devices are moved in and out
2641 * of domains and their respective RMRR information is lost. This means that
2642 * a device with associated RMRRs will never be in a "passthrough" domain.
2643 * The second is use of the device through the IOMMU API. This interface
2644 * expects to have full control of the IOVA space for the device. We cannot
2645 * satisfy both the requirement that RMRR access is maintained and have an
2646 * unencumbered IOVA space. We also have no ability to quiesce the device's
2647 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2648 * We therefore prevent devices associated with an RMRR from participating in
2649 * the IOMMU API, which eliminates them from device assignment.
2650 *
2651 * In both cases we assume that PCI USB devices with RMRRs have them largely
2652 * for historical reasons and that the RMRR space is not actively used post
2653 * boot. This exclusion may change if vendors begin to abuse it.
David Woodhouse18436af2015-03-25 15:05:47 +00002654 *
2655 * The same exception is made for graphics devices, with the requirement that
2656 * any use of the RMRR regions will be torn down before assigning the device
2657 * to a guest.
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002658 */
2659static bool device_is_rmrr_locked(struct device *dev)
2660{
2661 if (!device_has_rmrr(dev))
2662 return false;
2663
2664 if (dev_is_pci(dev)) {
2665 struct pci_dev *pdev = to_pci_dev(dev);
2666
David Woodhouse18436af2015-03-25 15:05:47 +00002667 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002668 return false;
2669 }
2670
2671 return true;
2672}
2673
David Woodhouse3bdb2592014-03-09 16:03:08 -07002674static int iommu_should_identity_map(struct device *dev, int startup)
David Woodhouse6941af22009-07-04 18:24:27 +01002675{
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002676
David Woodhouse3bdb2592014-03-09 16:03:08 -07002677 if (dev_is_pci(dev)) {
2678 struct pci_dev *pdev = to_pci_dev(dev);
Tom Mingarelliea2447f72012-11-20 19:43:17 +00002679
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002680 if (device_is_rmrr_locked(dev))
David Woodhouse3bdb2592014-03-09 16:03:08 -07002681 return 0;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002682
David Woodhouse3bdb2592014-03-09 16:03:08 -07002683 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2684 return 1;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002685
David Woodhouse3bdb2592014-03-09 16:03:08 -07002686 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2687 return 1;
2688
2689 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2690 return 0;
2691
2692 /*
2693 * We want to start off with all devices in the 1:1 domain, and
2694 * take them out later if we find they can't access all of memory.
2695 *
2696 * However, we can't do this for PCI devices behind bridges,
2697 * because all PCI devices behind the same bridge will end up
2698 * with the same source-id on their transactions.
2699 *
2700 * Practically speaking, we can't change things around for these
2701 * devices at run-time, because we can't be sure there'll be no
2702 * DMA transactions in flight for any of their siblings.
2703 *
2704 * So PCI devices (unless they're on the root bus) as well as
2705 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2706 * the 1:1 domain, just in _case_ one of their siblings turns out
2707 * not to be able to map all of memory.
2708 */
2709 if (!pci_is_pcie(pdev)) {
2710 if (!pci_is_root_bus(pdev->bus))
2711 return 0;
2712 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2713 return 0;
2714 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2715 return 0;
2716 } else {
2717 if (device_has_rmrr(dev))
2718 return 0;
2719 }
David Woodhouse6941af22009-07-04 18:24:27 +01002720
David Woodhouse3dfc8132009-07-04 19:11:08 +01002721 /*
David Woodhouse3dfc8132009-07-04 19:11:08 +01002722 * At boot time, we don't yet know if devices will be 64-bit capable.
David Woodhouse3bdb2592014-03-09 16:03:08 -07002723 * Assume that they will — if they turn out not to be, then we can
David Woodhouse3dfc8132009-07-04 19:11:08 +01002724 * take them out of the 1:1 domain later.
2725 */
Chris Wright8fcc5372011-05-28 13:15:02 -05002726 if (!startup) {
2727 /*
2728 * If the device's dma_mask is less than the system's memory
2729 * size then this is not a candidate for identity mapping.
2730 */
David Woodhouse3bdb2592014-03-09 16:03:08 -07002731 u64 dma_mask = *dev->dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002732
David Woodhouse3bdb2592014-03-09 16:03:08 -07002733 if (dev->coherent_dma_mask &&
2734 dev->coherent_dma_mask < dma_mask)
2735 dma_mask = dev->coherent_dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002736
David Woodhouse3bdb2592014-03-09 16:03:08 -07002737 return dma_mask >= dma_get_required_mask(dev);
Chris Wright8fcc5372011-05-28 13:15:02 -05002738 }
David Woodhouse6941af22009-07-04 18:24:27 +01002739
2740 return 1;
2741}
2742
David Woodhousecf04eee2014-03-21 16:49:04 +00002743static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2744{
2745 int ret;
2746
2747 if (!iommu_should_identity_map(dev, 1))
2748 return 0;
2749
2750 ret = domain_add_dev_info(si_domain, dev,
2751 hw ? CONTEXT_TT_PASS_THROUGH :
2752 CONTEXT_TT_MULTI_LEVEL);
2753 if (!ret)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002754 pr_info("%s identity mapping for device %s\n",
2755 hw ? "Hardware" : "Software", dev_name(dev));
David Woodhousecf04eee2014-03-21 16:49:04 +00002756 else if (ret == -ENODEV)
2757 /* device not associated with an iommu */
2758 ret = 0;
2759
2760 return ret;
2761}
2762
2763
Matt Kraai071e1372009-08-23 22:30:22 -07002764static int __init iommu_prepare_static_identity_mapping(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002765{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002766 struct pci_dev *pdev = NULL;
David Woodhousecf04eee2014-03-21 16:49:04 +00002767 struct dmar_drhd_unit *drhd;
2768 struct intel_iommu *iommu;
2769 struct device *dev;
2770 int i;
2771 int ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002772
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002773 for_each_pci_dev(pdev) {
David Woodhousecf04eee2014-03-21 16:49:04 +00002774 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2775 if (ret)
2776 return ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002777 }
2778
David Woodhousecf04eee2014-03-21 16:49:04 +00002779 for_each_active_iommu(iommu, drhd)
2780 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2781 struct acpi_device_physical_node *pn;
2782 struct acpi_device *adev;
2783
2784 if (dev->bus != &acpi_bus_type)
2785 continue;
Joerg Roedel86080cc2015-06-12 12:27:16 +02002786
David Woodhousecf04eee2014-03-21 16:49:04 +00002787		adev = to_acpi_device(dev);
2788 mutex_lock(&adev->physical_node_lock);
2789 list_for_each_entry(pn, &adev->physical_node_list, node) {
2790 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2791 if (ret)
2792 break;
2793 }
2794 mutex_unlock(&adev->physical_node_lock);
2795 if (ret)
2796 return ret;
2797 }
2798
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002799 return 0;
2800}
2801
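/*
 * Bring the invalidation interface to a known state: prefer Queued
 * Invalidation, falling back to register-based invalidation if QI
 * cannot be enabled.
 */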
Jiang Liuffebeb42014-11-09 22:48:02 +08002802static void intel_iommu_init_qi(struct intel_iommu *iommu)
2803{
2804 /*
	2805	 * Start from a sane iommu hardware state.
	2806	 * If queued invalidation was already initialized by us
	2807	 * (for example, while enabling interrupt remapping) then
	2808	 * things are already rolling from a sane state.
2809 */
2810 if (!iommu->qi) {
2811 /*
2812 * Clear any previous faults.
2813 */
2814 dmar_fault(-1, iommu);
2815 /*
2816 * Disable queued invalidation if supported and already enabled
2817 * before OS handover.
2818 */
2819 dmar_disable_qi(iommu);
2820 }
2821
2822 if (dmar_enable_qi(iommu)) {
2823 /*
2824 * Queued Invalidate not enabled, use Register Based Invalidate
2825 */
2826 iommu->flush.flush_context = __iommu_flush_context;
2827 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002828 pr_info("%s: Using Register based invalidation\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08002829 iommu->name);
2830 } else {
2831 iommu->flush.flush_context = qi_flush_context;
2832 iommu->flush.flush_iotlb = qi_flush_iotlb;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002833 pr_info("%s: Using Queued invalidation\n", iommu->name);
Jiang Liuffebeb42014-11-09 22:48:02 +08002834 }
2835}
2836
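/*
 * Copy the context table(s) referenced by one root entry inherited from
 * the previous kernel. Each copied entry has PASIDs disabled and is
 * marked as copied, and its domain ID is reserved in iommu->domain_ids
 * so new allocations cannot collide with it.
 */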
Joerg Roedel091d42e2015-06-12 11:56:10 +02002837static int copy_context_table(struct intel_iommu *iommu,
2838 struct root_entry *old_re,
2839 struct context_entry **tbl,
2840 int bus, bool ext)
2841{
2842 struct context_entry *old_ce = NULL, *new_ce = NULL, ce;
Joerg Roedeldbcd8612015-06-12 12:02:09 +02002843 int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
Joerg Roedel091d42e2015-06-12 11:56:10 +02002844 phys_addr_t old_ce_phys;
2845
2846 tbl_idx = ext ? bus * 2 : bus;
2847
2848 for (devfn = 0; devfn < 256; devfn++) {
2849 /* First calculate the correct index */
2850 idx = (ext ? devfn * 2 : devfn) % 256;
2851
2852 if (idx == 0) {
2853 /* First save what we may have and clean up */
2854 if (new_ce) {
2855 tbl[tbl_idx] = new_ce;
2856 __iommu_flush_cache(iommu, new_ce,
2857 VTD_PAGE_SIZE);
2858 pos = 1;
2859 }
2860
2861 if (old_ce)
2862 iounmap(old_ce);
2863
2864 ret = 0;
2865 if (devfn < 0x80)
2866 old_ce_phys = root_entry_lctp(old_re);
2867 else
2868 old_ce_phys = root_entry_uctp(old_re);
2869
2870 if (!old_ce_phys) {
2871 if (ext && devfn == 0) {
2872 /* No LCTP, try UCTP */
2873 devfn = 0x7f;
2874 continue;
2875 } else {
2876 goto out;
2877 }
2878 }
2879
2880 ret = -ENOMEM;
2881 old_ce = ioremap_cache(old_ce_phys, PAGE_SIZE);
2882 if (!old_ce)
2883 goto out;
2884
2885 new_ce = alloc_pgtable_page(iommu->node);
2886 if (!new_ce)
2887 goto out_unmap;
2888
2889 ret = 0;
2890 }
2891
2892 /* Now copy the context entry */
2893 ce = old_ce[idx];
2894
Joerg Roedelcf484d02015-06-12 12:21:46 +02002895 if (!__context_present(&ce))
Joerg Roedel091d42e2015-06-12 11:56:10 +02002896 continue;
2897
Joerg Roedeldbcd8612015-06-12 12:02:09 +02002898 did = context_domain_id(&ce);
2899 if (did >= 0 && did < cap_ndoms(iommu->cap))
2900 set_bit(did, iommu->domain_ids);
2901
Joerg Roedelcf484d02015-06-12 12:21:46 +02002902 /*
2903 * We need a marker for copied context entries. This
2904 * marker needs to work for the old format as well as
2905 * for extended context entries.
2906 *
2907 * Bit 67 of the context entry is used. In the old
2908 * format this bit is available to software, in the
2909 * extended format it is the PGE bit, but PGE is ignored
2910 * by HW if PASIDs are disabled (and thus still
2911 * available).
2912 *
2913 * So disable PASIDs first and then mark the entry
2914 * copied. This means that we don't copy PASID
2915 * translations from the old kernel, but this is fine as
2916 * faults there are not fatal.
2917 */
2918 context_clear_pasid_enable(&ce);
2919 context_set_copied(&ce);
2920
Joerg Roedel091d42e2015-06-12 11:56:10 +02002921 new_ce[idx] = ce;
2922 }
2923
2924 tbl[tbl_idx + pos] = new_ce;
2925
2926 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
2927
2928out_unmap:
2929 iounmap(old_ce);
2930
2931out:
2932 return ret;
2933}
2934
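/*
 * Take over the previous kernel's translation structures: map its root
 * table, copy every per-bus context table, then point our own root
 * entries at the copies so that mappings set up by the previous kernel
 * remain valid in the kdump kernel.
 */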
2935static int copy_translation_tables(struct intel_iommu *iommu)
2936{
2937 struct context_entry **ctxt_tbls;
2938 struct root_entry *old_rt;
2939 phys_addr_t old_rt_phys;
2940 int ctxt_table_entries;
2941 unsigned long flags;
2942 u64 rtaddr_reg;
2943 int bus, ret;
Joerg Roedelc3361f22015-06-12 12:39:25 +02002944 bool new_ext, ext;
Joerg Roedel091d42e2015-06-12 11:56:10 +02002945
2946 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
2947 ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
Joerg Roedelc3361f22015-06-12 12:39:25 +02002948 new_ext = !!ecap_ecs(iommu->ecap);
2949
2950 /*
2951 * The RTT bit can only be changed when translation is disabled,
2952 * but disabling translation means to open a window for data
2953 * corruption. So bail out and don't copy anything if we would
2954 * have to change the bit.
2955 */
2956 if (new_ext != ext)
2957 return -EINVAL;
Joerg Roedel091d42e2015-06-12 11:56:10 +02002958
2959 old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
2960 if (!old_rt_phys)
2961 return -EINVAL;
2962
2963 old_rt = ioremap_cache(old_rt_phys, PAGE_SIZE);
2964 if (!old_rt)
2965 return -ENOMEM;
2966
2967 /* This is too big for the stack - allocate it from slab */
2968 ctxt_table_entries = ext ? 512 : 256;
2969 ret = -ENOMEM;
2970 ctxt_tbls = kzalloc(ctxt_table_entries * sizeof(void *), GFP_KERNEL);
2971 if (!ctxt_tbls)
2972 goto out_unmap;
2973
2974 for (bus = 0; bus < 256; bus++) {
2975 ret = copy_context_table(iommu, &old_rt[bus],
2976 ctxt_tbls, bus, ext);
2977 if (ret) {
2978 pr_err("%s: Failed to copy context table for bus %d\n",
2979 iommu->name, bus);
2980 continue;
2981 }
2982 }
2983
2984 spin_lock_irqsave(&iommu->lock, flags);
2985
2986 /* Context tables are copied, now write them to the root_entry table */
2987 for (bus = 0; bus < 256; bus++) {
2988 int idx = ext ? bus * 2 : bus;
2989 u64 val;
2990
2991 if (ctxt_tbls[idx]) {
2992 val = virt_to_phys(ctxt_tbls[idx]) | 1;
2993 iommu->root_entry[bus].lo = val;
2994 }
2995
2996 if (!ext || !ctxt_tbls[idx + 1])
2997 continue;
2998
2999 val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
3000 iommu->root_entry[bus].hi = val;
3001 }
3002
3003 spin_unlock_irqrestore(&iommu->lock, flags);
3004
3005 kfree(ctxt_tbls);
3006
3007 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
3008
3009 ret = 0;
3010
3011out_unmap:
3012 iounmap(old_rt);
3013
3014 return ret;
3015}
3016
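/*
 * One-time initialization of all DMAR units: allocate the global iommu
 * and deferred-flush arrays, set up root and context tables (copying
 * them from the previous kernel in the kdump case), create the
 * identity, RMRR and ISA mappings, and finally enable translation.
 */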
Joseph Cihulab7792602011-05-03 00:08:37 -07003017static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003018{
3019 struct dmar_drhd_unit *drhd;
3020 struct dmar_rmrr_unit *rmrr;
Joerg Roedela87f4912015-06-12 12:32:54 +02003021 bool copied_tables = false;
David Woodhouse832bd852014-03-07 15:08:36 +00003022 struct device *dev;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003023 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07003024 int i, ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003025
3026 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003027 * for each drhd
3028 * allocate root
3029 * initialize and program root entry to not present
3030 * endfor
3031 */
3032 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08003033 /*
3034 * lock not needed as this is only incremented in the single
	3035		 * threaded kernel __init code path; all other accesses are
	3036		 * read only
3037 */
Jiang Liu78d8e702014-11-09 22:47:57 +08003038 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
Mike Travis1b198bb2012-03-05 15:05:16 -08003039 g_num_of_iommus++;
3040 continue;
3041 }
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003042 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
mark gross5e0d2a62008-03-04 15:22:08 -08003043 }
3044
Jiang Liuffebeb42014-11-09 22:48:02 +08003045 /* Preallocate enough resources for IOMMU hot-addition */
3046 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
3047 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
3048
Weidong Hand9630fe2008-12-08 11:06:32 +08003049 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
3050 GFP_KERNEL);
3051 if (!g_iommus) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003052 pr_err("Allocating global iommu array failed\n");
Weidong Hand9630fe2008-12-08 11:06:32 +08003053 ret = -ENOMEM;
3054 goto error;
3055 }
3056
mark gross80b20dd2008-04-18 13:53:58 -07003057 deferred_flush = kzalloc(g_num_of_iommus *
3058 sizeof(struct deferred_flush_tables), GFP_KERNEL);
3059 if (!deferred_flush) {
mark gross5e0d2a62008-03-04 15:22:08 -08003060 ret = -ENOMEM;
Jiang Liu989d51f2014-02-19 14:07:21 +08003061 goto free_g_iommus;
mark gross5e0d2a62008-03-04 15:22:08 -08003062 }
3063
Jiang Liu7c919772014-01-06 14:18:18 +08003064 for_each_active_iommu(iommu, drhd) {
Weidong Hand9630fe2008-12-08 11:06:32 +08003065 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003066
Joerg Roedelb63d80d2015-06-12 09:14:34 +02003067 intel_iommu_init_qi(iommu);
3068
Suresh Siddhae61d98d2008-07-10 11:16:35 -07003069 ret = iommu_init_domains(iommu);
3070 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08003071 goto free_iommu;
Suresh Siddhae61d98d2008-07-10 11:16:35 -07003072
Joerg Roedel4158c2e2015-06-12 10:14:02 +02003073 init_translation_status(iommu);
3074
Joerg Roedel091d42e2015-06-12 11:56:10 +02003075 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
3076 iommu_disable_translation(iommu);
3077 clear_translation_pre_enabled(iommu);
3078 pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
3079 iommu->name);
3080 }
Joerg Roedel4158c2e2015-06-12 10:14:02 +02003081
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003082 /*
3083 * TBD:
3084 * we could share the same root & context tables
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003085		 * among all IOMMUs. Need to split it later.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003086 */
3087 ret = iommu_alloc_root_entry(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08003088 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08003089 goto free_iommu;
Joerg Roedel5f0a7f72015-06-12 09:18:53 +02003090
Joerg Roedel091d42e2015-06-12 11:56:10 +02003091 if (translation_pre_enabled(iommu)) {
3092 pr_info("Translation already enabled - trying to copy translation structures\n");
3093
3094 ret = copy_translation_tables(iommu);
3095 if (ret) {
3096 /*
3097 * We found the IOMMU with translation
3098 * enabled - but failed to copy over the
3099 * old root-entry table. Try to proceed
3100 * by disabling translation now and
3101 * allocating a clean root-entry table.
3102 * This might cause DMAR faults, but
3103 * probably the dump will still succeed.
3104 */
3105 pr_err("Failed to copy translation tables from previous kernel for %s\n",
3106 iommu->name);
3107 iommu_disable_translation(iommu);
3108 clear_translation_pre_enabled(iommu);
3109 } else {
3110 pr_info("Copied translation tables from previous kernel for %s\n",
3111 iommu->name);
Joerg Roedela87f4912015-06-12 12:32:54 +02003112 copied_tables = true;
Joerg Roedel091d42e2015-06-12 11:56:10 +02003113 }
3114 }
3115
Joerg Roedel5f0a7f72015-06-12 09:18:53 +02003116 iommu_flush_write_buffer(iommu);
3117 iommu_set_root_entry(iommu);
3118 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3119 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3120
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003121 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01003122 hw_pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003123 }
3124
David Woodhouse19943b02009-08-04 16:19:20 +01003125 if (iommu_pass_through)
David Woodhousee0fc7e02009-09-30 09:12:17 -07003126 iommu_identity_mapping |= IDENTMAP_ALL;
3127
Suresh Siddhad3f13812011-08-23 17:05:25 -07003128#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
David Woodhousee0fc7e02009-09-30 09:12:17 -07003129 iommu_identity_mapping |= IDENTMAP_GFX;
David Woodhouse19943b02009-08-04 16:19:20 +01003130#endif
David Woodhousee0fc7e02009-09-30 09:12:17 -07003131
Joerg Roedel86080cc2015-06-12 12:27:16 +02003132 if (iommu_identity_mapping) {
3133 ret = si_domain_init(hw_pass_through);
3134 if (ret)
3135 goto free_iommu;
3136 }
3137
David Woodhousee0fc7e02009-09-30 09:12:17 -07003138 check_tylersburg_isoch();
3139
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003140 /*
Joerg Roedela87f4912015-06-12 12:32:54 +02003141 * If we copied translations from a previous kernel in the kdump
3142 * case, we can not assign the devices to domains now, as that
3143 * would eliminate the old mappings. So skip this part and defer
3144 * the assignment to device driver initialization time.
3145 */
3146 if (copied_tables)
3147 goto domains_done;
3148
3149 /*
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003150	 * If pass through is not set or not enabled, set up context entries for
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003151 * identity mappings for rmrr, gfx, and isa and may fall back to static
3152 * identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003153 */
David Woodhouse19943b02009-08-04 16:19:20 +01003154 if (iommu_identity_mapping) {
3155 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
3156 if (ret) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003157 pr_crit("Failed to setup IOMMU pass-through\n");
Jiang Liu989d51f2014-02-19 14:07:21 +08003158 goto free_iommu;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003159 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003160 }
David Woodhouse19943b02009-08-04 16:19:20 +01003161 /*
3162 * For each rmrr
3163 * for each dev attached to rmrr
3164 * do
3165 * locate drhd for dev, alloc domain for dev
3166 * allocate free domain
3167 * allocate page table entries for rmrr
3168 * if context not allocated for bus
3169 * allocate and init context
3170 * set present in root table for this bus
3171 * init context with domain, translation etc
3172 * endfor
3173 * endfor
3174 */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003175 pr_info("Setting RMRR:\n");
David Woodhouse19943b02009-08-04 16:19:20 +01003176 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08003177		/* some BIOSes list non-existent devices in the DMAR table. */
3178 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
David Woodhouse832bd852014-03-07 15:08:36 +00003179 i, dev) {
David Woodhouse0b9d9752014-03-09 15:48:15 -07003180 ret = iommu_prepare_rmrr_dev(rmrr, dev);
David Woodhouse19943b02009-08-04 16:19:20 +01003181 if (ret)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003182 pr_err("Mapping reserved region failed\n");
David Woodhouse19943b02009-08-04 16:19:20 +01003183 }
3184 }
3185
3186 iommu_prepare_isa();
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07003187
Joerg Roedela87f4912015-06-12 12:32:54 +02003188domains_done:
3189
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003190 /*
3191 * for each drhd
3192 * enable fault log
3193 * global invalidate context cache
3194 * global invalidate iotlb
3195 * enable translation
3196 */
Jiang Liu7c919772014-01-06 14:18:18 +08003197 for_each_iommu(iommu, drhd) {
Joseph Cihula51a63e62011-03-21 11:04:24 -07003198 if (drhd->ignored) {
3199 /*
3200 * we always have to disable PMRs or DMA may fail on
3201 * this device
3202 */
3203 if (force_on)
Jiang Liu7c919772014-01-06 14:18:18 +08003204 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003205 continue;
Joseph Cihula51a63e62011-03-21 11:04:24 -07003206 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003207
3208 iommu_flush_write_buffer(iommu);
3209
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07003210 ret = dmar_set_interrupt(iommu);
3211 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08003212 goto free_iommu;
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07003213
Joerg Roedel8939ddf2015-06-12 14:40:01 +02003214 if (!translation_pre_enabled(iommu))
3215 iommu_enable_translation(iommu);
3216
David Woodhouseb94996c2009-09-19 15:28:12 -07003217 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003218 }
3219
3220 return 0;
Jiang Liu989d51f2014-02-19 14:07:21 +08003221
3222free_iommu:
Jiang Liuffebeb42014-11-09 22:48:02 +08003223 for_each_active_iommu(iommu, drhd) {
3224 disable_dmar_iommu(iommu);
Jiang Liua868e6b2014-01-06 14:18:20 +08003225 free_dmar_iommu(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08003226 }
Jiang Liu9bdc5312014-01-06 14:18:27 +08003227 kfree(deferred_flush);
Jiang Liu989d51f2014-02-19 14:07:21 +08003228free_g_iommus:
Weidong Hand9630fe2008-12-08 11:06:32 +08003229 kfree(g_iommus);
Jiang Liu989d51f2014-02-19 14:07:21 +08003230error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003231 return ret;
3232}
3233
David Woodhouse5a5e02a2009-07-04 09:35:44 +01003234/* This takes a number of _MM_ pages, not VTD pages */
David Woodhouse875764d2009-06-28 21:20:51 +01003235static struct iova *intel_alloc_iova(struct device *dev,
3236 struct dmar_domain *domain,
3237 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003238{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003239 struct iova *iova = NULL;
3240
David Woodhouse875764d2009-06-28 21:20:51 +01003241 /* Restrict dma_mask to the width that the iommu can handle */
3242 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
3243
3244 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003245 /*
3246 * First try to allocate an io virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07003247 * DMA_BIT_MASK(32) and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08003248 * from higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003249 */
David Woodhouse875764d2009-06-28 21:20:51 +01003250 iova = alloc_iova(&domain->iovad, nrpages,
3251 IOVA_PFN(DMA_BIT_MASK(32)), 1);
3252 if (iova)
3253 return iova;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003254 }
David Woodhouse875764d2009-06-28 21:20:51 +01003255 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
3256 if (unlikely(!iova)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003257		pr_err("Allocating %ld-page iova for %s failed\n",
David Woodhouse207e3592014-03-09 16:12:32 -07003258 nrpages, dev_name(dev));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003259 return NULL;
3260 }
3261
3262 return iova;
3263}
3264
David Woodhoused4b709f2014-03-09 16:07:40 -07003265static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003266{
3267 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003268 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003269
David Woodhoused4b709f2014-03-09 16:07:40 -07003270 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003271 if (!domain) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003272 pr_err("Allocating domain for %s failed\n",
David Woodhoused4b709f2014-03-09 16:07:40 -07003273 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00003274 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003275 }
3276
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003277 /* make sure context mapping is ok */
David Woodhoused4b709f2014-03-09 16:07:40 -07003278 if (unlikely(!domain_context_mapped(dev))) {
3279 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003280 if (ret) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003281 pr_err("Domain context map for %s failed\n",
David Woodhoused4b709f2014-03-09 16:07:40 -07003282 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00003283 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003284 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003285 }
3286
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003287 return domain;
3288}
3289
David Woodhoused4b709f2014-03-09 16:07:40 -07003290static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
David Woodhouse147202a2009-07-07 19:43:20 +01003291{
3292 struct device_domain_info *info;
3293
3294 /* No lock here, assumes no domain exit in normal case */
David Woodhoused4b709f2014-03-09 16:07:40 -07003295 info = dev->archdata.iommu;
David Woodhouse147202a2009-07-07 19:43:20 +01003296 if (likely(info))
3297 return info->domain;
3298
3299 return __get_valid_domain_for_dev(dev);
3300}
3301
David Woodhouseecb509e2014-03-09 16:29:55 -07003302/* Check if the dev needs to go through non-identity map and unmap process.*/
David Woodhouse73676832009-07-04 14:08:36 +01003303static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003304{
3305 int found;
3306
David Woodhouse3d891942014-03-06 15:59:26 +00003307 if (iommu_dummy(dev))
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003308 return 1;
3309
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003310 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003311 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003312
David Woodhouse9b226622014-03-09 14:03:28 -07003313 found = identity_mapping(dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003314 if (found) {
David Woodhouseecb509e2014-03-09 16:29:55 -07003315 if (iommu_should_identity_map(dev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003316 return 1;
3317 else {
3318 /*
3319 * 32 bit DMA is removed from si_domain and fall back
3320 * to non-identity mapping.
3321 */
David Woodhousebf9c9ed2014-03-09 16:19:13 -07003322 domain_remove_one_dev_info(si_domain, dev);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003323 pr_info("32bit %s uses non-identity mapping\n",
3324 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003325 return 0;
3326 }
3327 } else {
3328 /*
3329 * In case of a detached 64 bit DMA device from vm, the device
3330 * is put into si_domain for identity mapping.
3331 */
David Woodhouseecb509e2014-03-09 16:29:55 -07003332 if (iommu_should_identity_map(dev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003333 int ret;
David Woodhouse5913c9b2014-03-09 16:27:31 -07003334 ret = domain_add_dev_info(si_domain, dev,
David Woodhouse5fe60f42009-08-09 10:53:41 +01003335 hw_pass_through ?
3336 CONTEXT_TT_PASS_THROUGH :
3337 CONTEXT_TT_MULTI_LEVEL);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003338 if (!ret) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003339 pr_info("64bit %s uses identity mapping\n",
3340 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003341 return 1;
3342 }
3343 }
3344 }
3345
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003346 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003347}
3348
David Woodhouse5040a912014-03-09 16:14:00 -07003349static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003350 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003351{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003352 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003353 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003354 struct iova *iova;
3355 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003356 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08003357 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07003358 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003359
3360 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003361
David Woodhouse5040a912014-03-09 16:14:00 -07003362 if (iommu_no_mapping(dev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003363 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003364
David Woodhouse5040a912014-03-09 16:14:00 -07003365 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003366 if (!domain)
3367 return 0;
3368
Weidong Han8c11e792008-12-08 15:29:22 +08003369 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01003370 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003371
David Woodhouse5040a912014-03-09 16:14:00 -07003372 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003373 if (!iova)
3374 goto error;
3375
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003376 /*
3377 * Check if DMAR supports zero-length reads on write only
3378 * mappings..
3379 */
3380 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003381 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003382 prot |= DMA_PTE_READ;
3383 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3384 prot |= DMA_PTE_WRITE;
3385 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003386 * paddr - (paddr + size) might be partial page, we should map the whole
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003387	 * page. Note: if two parts of one page are separately mapped, we
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003388	 * might have two guest_addr mappings to the same host paddr, but this
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003389 * is not a big problem
3390 */
David Woodhouse0ab36de2009-06-28 14:01:43 +01003391 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
Fenghua Yu33041ec2009-08-04 15:10:59 -07003392 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003393 if (ret)
3394 goto error;
3395
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003396 /* it's a non-present to present mapping. Only flush if caching mode */
3397 if (cap_caching_mode(iommu->cap))
David Woodhouseea8ea462014-03-05 17:09:32 +00003398 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003399 else
Weidong Han8c11e792008-12-08 15:29:22 +08003400 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003401
David Woodhouse03d6a242009-06-28 15:33:46 +01003402 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
3403 start_paddr += paddr & ~PAGE_MASK;
3404 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003405
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003406error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003407 if (iova)
3408 __free_iova(&domain->iovad, iova);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003409 pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
David Woodhouse5040a912014-03-09 16:14:00 -07003410 dev_name(dev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003411 return 0;
3412}
3413
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003414static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3415 unsigned long offset, size_t size,
3416 enum dma_data_direction dir,
3417 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003418{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003419 return __intel_map_single(dev, page_to_phys(page) + offset, size,
David Woodhouse46333e32014-03-10 20:01:21 -07003420 dir, *dev->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003421}
3422
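/*
 * Drain the per-IOMMU deferred_flush queues: do one global IOTLB flush
 * per IOMMU (or per-IOVA flushes in caching mode), then release the
 * queued IOVAs and their page freelists.
 */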
mark gross5e0d2a62008-03-04 15:22:08 -08003423static void flush_unmaps(void)
3424{
mark gross80b20dd2008-04-18 13:53:58 -07003425 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08003426
mark gross5e0d2a62008-03-04 15:22:08 -08003427 timer_on = 0;
3428
3429 /* just flush them all */
3430 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08003431 struct intel_iommu *iommu = g_iommus[i];
3432 if (!iommu)
3433 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003434
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003435 if (!deferred_flush[i].next)
3436 continue;
3437
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003438 /* In caching mode, global flushes turn emulation expensive */
3439 if (!cap_caching_mode(iommu->cap))
3440 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08003441 DMA_TLB_GLOBAL_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003442 for (j = 0; j < deferred_flush[i].next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08003443 unsigned long mask;
3444 struct iova *iova = deferred_flush[i].iova[j];
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003445 struct dmar_domain *domain = deferred_flush[i].domain[j];
Yu Zhao93a23a72009-05-18 13:51:37 +08003446
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003447 /* On real hardware multiple invalidations are expensive */
3448 if (cap_caching_mode(iommu->cap))
3449 iommu_flush_iotlb_psi(iommu, domain->id,
Jiang Liua156ef92014-07-11 14:19:36 +08003450 iova->pfn_lo, iova_size(iova),
David Woodhouseea8ea462014-03-05 17:09:32 +00003451 !deferred_flush[i].freelist[j], 0);
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003452 else {
Jiang Liua156ef92014-07-11 14:19:36 +08003453 mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003454 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
3455 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
3456 }
Yu Zhao93a23a72009-05-18 13:51:37 +08003457 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003458 if (deferred_flush[i].freelist[j])
3459 dma_free_pagelist(deferred_flush[i].freelist[j]);
mark gross80b20dd2008-04-18 13:53:58 -07003460 }
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003461 deferred_flush[i].next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003462 }
3463
mark gross5e0d2a62008-03-04 15:22:08 -08003464 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003465}
3466
3467static void flush_unmaps_timeout(unsigned long data)
3468{
mark gross80b20dd2008-04-18 13:53:58 -07003469 unsigned long flags;
3470
3471 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003472 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07003473 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003474}
3475
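/*
 * Queue an IOVA (and the page-table pages freed with it) for a batched
 * IOTLB flush; flush immediately at the high-water mark and arm the
 * 10ms flush timer if it is not already running.
 */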
David Woodhouseea8ea462014-03-05 17:09:32 +00003476static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
mark gross5e0d2a62008-03-04 15:22:08 -08003477{
3478 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07003479 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08003480 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08003481
3482 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07003483 if (list_size == HIGH_WATER_MARK)
3484 flush_unmaps();
3485
Weidong Han8c11e792008-12-08 15:29:22 +08003486 iommu = domain_get_iommu(dom);
3487 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003488
mark gross80b20dd2008-04-18 13:53:58 -07003489 next = deferred_flush[iommu_id].next;
3490 deferred_flush[iommu_id].domain[next] = dom;
3491 deferred_flush[iommu_id].iova[next] = iova;
David Woodhouseea8ea462014-03-05 17:09:32 +00003492 deferred_flush[iommu_id].freelist[next] = freelist;
mark gross80b20dd2008-04-18 13:53:58 -07003493 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08003494
3495 if (!timer_on) {
3496 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
3497 timer_on = 1;
3498 }
3499 list_size++;
3500 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3501}
3502
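/*
 * Tear down the mapping behind @dev_addr: clear the page tables and
 * either flush the IOTLB synchronously (intel_iommu_strict) or defer
 * the flush and IOVA release via add_unmap().
 */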
Jiang Liud41a4ad2014-07-11 14:19:34 +08003503static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003504{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003505 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01003506 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003507 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08003508 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00003509 struct page *freelist;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003510
David Woodhouse73676832009-07-04 14:08:36 +01003511 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003512 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003513
David Woodhouse1525a292014-03-06 16:19:30 +00003514 domain = find_domain(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003515 BUG_ON(!domain);
3516
Weidong Han8c11e792008-12-08 15:29:22 +08003517 iommu = domain_get_iommu(domain);
3518
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003519 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
David Woodhouse85b98272009-07-01 19:27:53 +01003520 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
3521 (unsigned long long)dev_addr))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003522 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003523
David Woodhoused794dc92009-06-28 00:27:49 +01003524 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3525 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003526
David Woodhoused794dc92009-06-28 00:27:49 +01003527 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
David Woodhouse207e3592014-03-09 16:12:32 -07003528 dev_name(dev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003529
David Woodhouseea8ea462014-03-05 17:09:32 +00003530 freelist = domain_unmap(domain, start_pfn, last_pfn);
David Woodhoused794dc92009-06-28 00:27:49 +01003531
mark gross5e0d2a62008-03-04 15:22:08 -08003532 if (intel_iommu_strict) {
David Woodhouse03d6a242009-06-28 15:33:46 +01003533 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
David Woodhouseea8ea462014-03-05 17:09:32 +00003534 last_pfn - start_pfn + 1, !freelist, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08003535 /* free iova */
3536 __free_iova(&domain->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003537 dma_free_pagelist(freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003538 } else {
David Woodhouseea8ea462014-03-05 17:09:32 +00003539 add_unmap(domain, iova, freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003540 /*
	3541		 * queue up the release of the unmap to save the 1/6th of the
	3542		 * CPU time used up by the iotlb flush operation...
3543 */
mark gross5e0d2a62008-03-04 15:22:08 -08003544 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003545}
3546
Jiang Liud41a4ad2014-07-11 14:19:34 +08003547static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3548 size_t size, enum dma_data_direction dir,
3549 struct dma_attrs *attrs)
3550{
3551 intel_unmap(dev, dev_addr);
3552}
3553
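/*
 * Coherent allocations come from CMA when the caller may sleep, falling
 * back to alloc_pages(), and are then mapped bidirectionally through
 * __intel_map_single() against the device's coherent DMA mask.
 */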
David Woodhouse5040a912014-03-09 16:14:00 -07003554static void *intel_alloc_coherent(struct device *dev, size_t size,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003555 dma_addr_t *dma_handle, gfp_t flags,
3556 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003557{
Akinobu Mita36746432014-06-04 16:06:51 -07003558 struct page *page = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003559 int order;
3560
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003561 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003562 order = get_order(size);
Alex Williamsone8bb9102009-11-04 15:59:34 -07003563
David Woodhouse5040a912014-03-09 16:14:00 -07003564 if (!iommu_no_mapping(dev))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003565 flags &= ~(GFP_DMA | GFP_DMA32);
David Woodhouse5040a912014-03-09 16:14:00 -07003566 else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3567 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003568 flags |= GFP_DMA;
3569 else
3570 flags |= GFP_DMA32;
3571 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003572
Akinobu Mita36746432014-06-04 16:06:51 -07003573 if (flags & __GFP_WAIT) {
3574 unsigned int count = size >> PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003575
Akinobu Mita36746432014-06-04 16:06:51 -07003576 page = dma_alloc_from_contiguous(dev, count, order);
3577 if (page && iommu_no_mapping(dev) &&
3578 page_to_phys(page) + size > dev->coherent_dma_mask) {
3579 dma_release_from_contiguous(dev, page, count);
3580 page = NULL;
3581 }
3582 }
3583
3584 if (!page)
3585 page = alloc_pages(flags, order);
3586 if (!page)
3587 return NULL;
3588 memset(page_address(page), 0, size);
3589
3590 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003591 DMA_BIDIRECTIONAL,
David Woodhouse5040a912014-03-09 16:14:00 -07003592 dev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003593 if (*dma_handle)
Akinobu Mita36746432014-06-04 16:06:51 -07003594 return page_address(page);
3595 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3596 __free_pages(page, order);
3597
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003598 return NULL;
3599}
3600
David Woodhouse5040a912014-03-09 16:14:00 -07003601static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003602 dma_addr_t dma_handle, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003603{
3604 int order;
Akinobu Mita36746432014-06-04 16:06:51 -07003605 struct page *page = virt_to_page(vaddr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003606
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003607 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003608 order = get_order(size);
3609
Jiang Liud41a4ad2014-07-11 14:19:34 +08003610 intel_unmap(dev, dma_handle);
Akinobu Mita36746432014-06-04 16:06:51 -07003611 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3612 __free_pages(page, order);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003613}
3614
David Woodhouse5040a912014-03-09 16:14:00 -07003615static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003616 int nelems, enum dma_data_direction dir,
3617 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003618{
Jiang Liud41a4ad2014-07-11 14:19:34 +08003619 intel_unmap(dev, sglist[0].dma_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003620}
3621
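/*
 * Identity path for devices that bypass the IOMMU: each scatterlist
 * entry's dma_address is simply the physical address of its page.
 */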
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003622static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003623 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003624{
3625 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003626 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003627
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003628 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003629 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00003630 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003631 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003632 }
3633 return nelems;
3634}
3635
David Woodhouse5040a912014-03-09 16:14:00 -07003636static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003637 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003638{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003639 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003640 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003641 size_t size = 0;
3642 int prot = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003643 struct iova *iova = NULL;
3644 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003645 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01003646 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003647 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003648
3649 BUG_ON(dir == DMA_NONE);
David Woodhouse5040a912014-03-09 16:14:00 -07003650 if (iommu_no_mapping(dev))
3651 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003652
David Woodhouse5040a912014-03-09 16:14:00 -07003653 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003654 if (!domain)
3655 return 0;
3656
Weidong Han8c11e792008-12-08 15:29:22 +08003657 iommu = domain_get_iommu(domain);
3658
David Woodhouseb536d242009-06-28 14:49:31 +01003659 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01003660 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003661
David Woodhouse5040a912014-03-09 16:14:00 -07003662 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3663 *dev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003664 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003665 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003666 return 0;
3667 }
3668
3669 /*
3670 * Check if DMAR supports zero-length reads on write only
3671 * mappings..
3672 */
3673 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003674 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003675 prot |= DMA_PTE_READ;
3676 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3677 prot |= DMA_PTE_WRITE;
3678
David Woodhouseb536d242009-06-28 14:49:31 +01003679 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
David Woodhousee1605492009-06-29 11:17:38 +01003680
Fenghua Yuf5329592009-08-04 15:09:37 -07003681 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01003682 if (unlikely(ret)) {
David Woodhousee1605492009-06-29 11:17:38 +01003683 dma_pte_free_pagetable(domain, start_vpfn,
3684 start_vpfn + size - 1);
David Woodhousee1605492009-06-29 11:17:38 +01003685 __free_iova(&domain->iovad, iova);
3686 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003687 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003688
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003689 /* it's a non-present to present mapping. Only flush if caching mode */
3690 if (cap_caching_mode(iommu->cap))
David Woodhouseea8ea462014-03-05 17:09:32 +00003691 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003692 else
Weidong Han8c11e792008-12-08 15:29:22 +08003693 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003694
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003695 return nelems;
3696}
3697
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003698static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3699{
3700 return !dma_addr;
3701}
3702
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09003703struct dma_map_ops intel_dma_ops = {
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003704 .alloc = intel_alloc_coherent,
3705 .free = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003706 .map_sg = intel_map_sg,
3707 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003708 .map_page = intel_map_page,
3709 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003710 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003711};
3712
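/*
 * Slab caches backing dmar_domain and device_domain_info allocations.  The
 * two helpers below return 0 on success or -ENOMEM when
 * kmem_cache_create() fails.
 */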
3713static inline int iommu_domain_cache_init(void)
3714{
3715 int ret = 0;
3716
3717 iommu_domain_cache = kmem_cache_create("iommu_domain",
3718 sizeof(struct dmar_domain),
3719 0,
3720					 SLAB_HWCACHE_ALIGN,
3722					 NULL);
3723 if (!iommu_domain_cache) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003724 pr_err("Couldn't create iommu_domain cache\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003725 ret = -ENOMEM;
3726 }
3727
3728 return ret;
3729}
3730
3731static inline int iommu_devinfo_cache_init(void)
3732{
3733 int ret = 0;
3734
3735 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3736 sizeof(struct device_domain_info),
3737 0,
3738 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003739 NULL);
3740 if (!iommu_devinfo_cache) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003741 pr_err("Couldn't create devinfo cache\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003742 ret = -ENOMEM;
3743 }
3744
3745 return ret;
3746}
3747
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003748static int __init iommu_init_mempool(void)
3749{
3750 int ret;
3751 ret = iommu_iova_cache_init();
3752 if (ret)
3753 return ret;
3754
3755 ret = iommu_domain_cache_init();
3756 if (ret)
3757 goto domain_error;
3758
3759 ret = iommu_devinfo_cache_init();
3760 if (!ret)
3761 return ret;
3762
3763 kmem_cache_destroy(iommu_domain_cache);
3764domain_error:
Robin Murphy85b45452015-01-12 17:51:14 +00003765 iommu_iova_cache_destroy();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003766
3767 return -ENOMEM;
3768}
3769
3770static void __init iommu_exit_mempool(void)
3771{
3772 kmem_cache_destroy(iommu_devinfo_cache);
3773 kmem_cache_destroy(iommu_domain_cache);
Robin Murphy85b45452015-01-12 17:51:14 +00003774 iommu_iova_cache_destroy();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003775}
3776
Dan Williams556ab452010-07-23 15:47:56 -07003777static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3778{
3779 struct dmar_drhd_unit *drhd;
3780 u32 vtbar;
3781 int rc;
3782
3783 /* We know that this device on this chipset has its own IOMMU.
3784 * If we find it under a different IOMMU, then the BIOS is lying
3785 * to us. Hope that the IOMMU for this device is actually
3786 * disabled, and it needs no translation...
3787 */
3788 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3789 if (rc) {
3790 /* "can't" happen */
3791 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3792 return;
3793 }
3794 vtbar &= 0xffff0000;
3795
3796	/* we know that this iommu should be at offset 0xa000 from vtbar */
3797 drhd = dmar_find_matched_drhd_unit(pdev);
3798 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3799 TAINT_FIRMWARE_WORKAROUND,
3800 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3801 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3802}
3803DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3804
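/*
 * Mark DMAR units that need no remapping: units whose device scope holds no
 * active devices are ignored outright, and graphics-only units are either
 * flagged via intel_iommu_gfx_mapped or, when dmar_map_gfx is disabled,
 * ignored with their devices given the dummy domain so they bypass
 * translation.
 */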
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003805static void __init init_no_remapping_devices(void)
3806{
3807 struct dmar_drhd_unit *drhd;
David Woodhouse832bd852014-03-07 15:08:36 +00003808 struct device *dev;
Jiang Liub683b232014-02-19 14:07:32 +08003809 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003810
3811 for_each_drhd_unit(drhd) {
3812 if (!drhd->include_all) {
Jiang Liub683b232014-02-19 14:07:32 +08003813 for_each_active_dev_scope(drhd->devices,
3814 drhd->devices_cnt, i, dev)
3815 break;
David Woodhouse832bd852014-03-07 15:08:36 +00003816 /* ignore DMAR unit if no devices exist */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003817 if (i == drhd->devices_cnt)
3818 drhd->ignored = 1;
3819 }
3820 }
3821
Jiang Liu7c919772014-01-06 14:18:18 +08003822 for_each_active_drhd_unit(drhd) {
Jiang Liu7c919772014-01-06 14:18:18 +08003823 if (drhd->include_all)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003824 continue;
3825
Jiang Liub683b232014-02-19 14:07:32 +08003826 for_each_active_dev_scope(drhd->devices,
3827 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003828 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003829 break;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003830 if (i < drhd->devices_cnt)
3831 continue;
3832
David Woodhousec0771df2011-10-14 20:59:46 +01003833 /* This IOMMU has *only* gfx devices. Either bypass it or
3834 set the gfx_mapped flag, as appropriate */
3835 if (dmar_map_gfx) {
3836 intel_iommu_gfx_mapped = 1;
3837 } else {
3838 drhd->ignored = 1;
Jiang Liub683b232014-02-19 14:07:32 +08003839 for_each_active_dev_scope(drhd->devices,
3840 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003841 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003842 }
3843 }
3844}
3845
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003846#ifdef CONFIG_SUSPEND
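/*
 * Reprogram all IOMMUs after resume: re-enable queued invalidation, set the
 * root entry, issue global context/IOTLB invalidations and re-enable
 * translation.  Ignored units only get their protected memory regions
 * disabled (when force_on is set).
 */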
3847static int init_iommu_hw(void)
3848{
3849 struct dmar_drhd_unit *drhd;
3850 struct intel_iommu *iommu = NULL;
3851
3852 for_each_active_iommu(iommu, drhd)
3853 if (iommu->qi)
3854 dmar_reenable_qi(iommu);
3855
Joseph Cihulab7792602011-05-03 00:08:37 -07003856 for_each_iommu(iommu, drhd) {
3857 if (drhd->ignored) {
3858 /*
3859 * we always have to disable PMRs or DMA may fail on
3860 * this device
3861 */
3862 if (force_on)
3863 iommu_disable_protect_mem_regions(iommu);
3864 continue;
3865 }
3866
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003867 iommu_flush_write_buffer(iommu);
3868
3869 iommu_set_root_entry(iommu);
3870
3871 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003872 DMA_CCMD_GLOBAL_INVL);
Jiang Liu2a41cce2014-07-11 14:19:33 +08003873 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3874 iommu_enable_translation(iommu);
David Woodhouseb94996c2009-09-19 15:28:12 -07003875 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003876 }
3877
3878 return 0;
3879}
3880
3881static void iommu_flush_all(void)
3882{
3883 struct dmar_drhd_unit *drhd;
3884 struct intel_iommu *iommu;
3885
3886 for_each_active_iommu(iommu, drhd) {
3887 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003888 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003889 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003890 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003891 }
3892}
3893
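/*
 * Syscore suspend hook: flush all caches, disable translation and save the
 * fault-event control/data/address registers of every active IOMMU so that
 * iommu_resume() can restore them.
 */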
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003894static int iommu_suspend(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003895{
3896 struct dmar_drhd_unit *drhd;
3897 struct intel_iommu *iommu = NULL;
3898 unsigned long flag;
3899
3900 for_each_active_iommu(iommu, drhd) {
3901 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3902 GFP_ATOMIC);
3903 if (!iommu->iommu_state)
3904 goto nomem;
3905 }
3906
3907 iommu_flush_all();
3908
3909 for_each_active_iommu(iommu, drhd) {
3910 iommu_disable_translation(iommu);
3911
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003912 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003913
3914 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3915 readl(iommu->reg + DMAR_FECTL_REG);
3916 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3917 readl(iommu->reg + DMAR_FEDATA_REG);
3918 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3919 readl(iommu->reg + DMAR_FEADDR_REG);
3920 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3921 readl(iommu->reg + DMAR_FEUADDR_REG);
3922
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003923 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003924 }
3925 return 0;
3926
3927nomem:
3928 for_each_active_iommu(iommu, drhd)
3929 kfree(iommu->iommu_state);
3930
3931 return -ENOMEM;
3932}
3933
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003934static void iommu_resume(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003935{
3936 struct dmar_drhd_unit *drhd;
3937 struct intel_iommu *iommu = NULL;
3938 unsigned long flag;
3939
3940 if (init_iommu_hw()) {
Joseph Cihulab7792602011-05-03 00:08:37 -07003941 if (force_on)
3942 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3943 else
3944 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003945 return;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003946 }
3947
3948 for_each_active_iommu(iommu, drhd) {
3949
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003950 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003951
3952 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3953 iommu->reg + DMAR_FECTL_REG);
3954 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3955 iommu->reg + DMAR_FEDATA_REG);
3956 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3957 iommu->reg + DMAR_FEADDR_REG);
3958 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3959 iommu->reg + DMAR_FEUADDR_REG);
3960
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003961 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003962 }
3963
3964 for_each_active_iommu(iommu, drhd)
3965 kfree(iommu->iommu_state);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003966}
3967
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003968static struct syscore_ops iommu_syscore_ops = {
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003969 .resume = iommu_resume,
3970 .suspend = iommu_suspend,
3971};
3972
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003973static void __init init_iommu_pm_ops(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003974{
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003975 register_syscore_ops(&iommu_syscore_ops);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003976}
3977
3978#else
Rafael J. Wysocki99592ba2011-06-07 21:32:31 +02003979static inline void init_iommu_pm_ops(void) {}
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003980#endif /* CONFIG_PM */
3981
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003982
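/*
 * Parse one ACPI RMRR (Reserved Memory Region Reporting) structure and add
 * it, together with its device scope, to the dmar_rmrr_units list.
 */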
Jiang Liuc2a0b532014-11-09 22:47:56 +08003983int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003984{
3985 struct acpi_dmar_reserved_memory *rmrr;
3986 struct dmar_rmrr_unit *rmrru;
3987
3988 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3989 if (!rmrru)
3990 return -ENOMEM;
3991
3992 rmrru->hdr = header;
3993 rmrr = (struct acpi_dmar_reserved_memory *)header;
3994 rmrru->base_address = rmrr->base_address;
3995 rmrru->end_address = rmrr->end_address;
Jiang Liu2e455282014-02-19 14:07:36 +08003996 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
3997 ((void *)rmrr) + rmrr->header.length,
3998 &rmrru->devices_cnt);
3999 if (rmrru->devices_cnt && rmrru->devices == NULL) {
4000 kfree(rmrru);
4001 return -ENOMEM;
4002 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004003
Jiang Liu2e455282014-02-19 14:07:36 +08004004 list_add(&rmrru->list, &dmar_rmrr_units);
4005
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004006 return 0;
4007}
4008
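/*
 * ATS Reporting (ATSR) structure handling: dmar_find_atsr() looks up an
 * already-registered unit matching the ACPI entry byte for byte, while
 * dmar_parse_one_atsr() copies a new entry (the ACPI buffer may be freed on
 * return) and links it into dmar_atsr_units.
 */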
Jiang Liu6b197242014-11-09 22:47:58 +08004009static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
4010{
4011 struct dmar_atsr_unit *atsru;
4012 struct acpi_dmar_atsr *tmp;
4013
4014 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4015 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
4016 if (atsr->segment != tmp->segment)
4017 continue;
4018 if (atsr->header.length != tmp->header.length)
4019 continue;
4020 if (memcmp(atsr, tmp, atsr->header.length) == 0)
4021 return atsru;
4022 }
4023
4024 return NULL;
4025}
4026
4027int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004028{
4029 struct acpi_dmar_atsr *atsr;
4030 struct dmar_atsr_unit *atsru;
4031
Jiang Liu6b197242014-11-09 22:47:58 +08004032 if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
4033 return 0;
4034
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004035 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
Jiang Liu6b197242014-11-09 22:47:58 +08004036 atsru = dmar_find_atsr(atsr);
4037 if (atsru)
4038 return 0;
4039
4040 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004041 if (!atsru)
4042 return -ENOMEM;
4043
Jiang Liu6b197242014-11-09 22:47:58 +08004044 /*
4045 * If memory is allocated from slab by ACPI _DSM method, we need to
4046 * copy the memory content because the memory buffer will be freed
4047 * on return.
4048 */
4049 atsru->hdr = (void *)(atsru + 1);
4050 memcpy(atsru->hdr, hdr, hdr->length);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004051 atsru->include_all = atsr->flags & 0x1;
Jiang Liu2e455282014-02-19 14:07:36 +08004052 if (!atsru->include_all) {
4053 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
4054 (void *)atsr + atsr->header.length,
4055 &atsru->devices_cnt);
4056 if (atsru->devices_cnt && atsru->devices == NULL) {
4057 kfree(atsru);
4058 return -ENOMEM;
4059 }
4060 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004061
Jiang Liu0e242612014-02-19 14:07:34 +08004062 list_add_rcu(&atsru->list, &dmar_atsr_units);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004063
4064 return 0;
4065}
4066
Jiang Liu9bdc5312014-01-06 14:18:27 +08004067static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
4068{
4069 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
4070 kfree(atsru);
4071}
4072
Jiang Liu6b197242014-11-09 22:47:58 +08004073int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4074{
4075 struct acpi_dmar_atsr *atsr;
4076 struct dmar_atsr_unit *atsru;
4077
4078 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4079 atsru = dmar_find_atsr(atsr);
4080 if (atsru) {
4081 list_del_rcu(&atsru->list);
4082 synchronize_rcu();
4083 intel_iommu_free_atsr(atsru);
4084 }
4085
4086 return 0;
4087}
4088
4089int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4090{
4091 int i;
4092 struct device *dev;
4093 struct acpi_dmar_atsr *atsr;
4094 struct dmar_atsr_unit *atsru;
4095
4096 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4097 atsru = dmar_find_atsr(atsr);
4098 if (!atsru)
4099 return 0;
4100
4101 if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
4102 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
4103 i, dev)
4104 return -EBUSY;
4105
4106 return 0;
4107}
4108
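/*
 * Bring up a hot-added DMAR unit: check that it supports the features the
 * running configuration depends on (pass-through, snooping, superpages),
 * allocate its domain bookkeeping and root entry and, unless the unit is
 * ignored, enable queued invalidation, the fault interrupt and translation.
 * The static identity domain is attached when one is in use.
 */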
Jiang Liuffebeb42014-11-09 22:48:02 +08004109static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
4110{
4111 int sp, ret = 0;
4112 struct intel_iommu *iommu = dmaru->iommu;
4113
4114 if (g_iommus[iommu->seq_id])
4115 return 0;
4116
4117 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004118 pr_warn("%s: Doesn't support hardware pass through.\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08004119 iommu->name);
4120 return -ENXIO;
4121 }
4122 if (!ecap_sc_support(iommu->ecap) &&
4123 domain_update_iommu_snooping(iommu)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004124 pr_warn("%s: Doesn't support snooping.\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08004125 iommu->name);
4126 return -ENXIO;
4127 }
4128 sp = domain_update_iommu_superpage(iommu) - 1;
4129 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004130 pr_warn("%s: Doesn't support large page.\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08004131 iommu->name);
4132 return -ENXIO;
4133 }
4134
4135 /*
4136 * Disable translation if already enabled prior to OS handover.
4137 */
4138 if (iommu->gcmd & DMA_GCMD_TE)
4139 iommu_disable_translation(iommu);
4140
4141 g_iommus[iommu->seq_id] = iommu;
4142 ret = iommu_init_domains(iommu);
4143 if (ret == 0)
4144 ret = iommu_alloc_root_entry(iommu);
4145 if (ret)
4146 goto out;
4147
4148 if (dmaru->ignored) {
4149 /*
4150 * we always have to disable PMRs or DMA may fail on this device
4151 */
4152 if (force_on)
4153 iommu_disable_protect_mem_regions(iommu);
4154 return 0;
4155 }
4156
4157 intel_iommu_init_qi(iommu);
4158 iommu_flush_write_buffer(iommu);
4159 ret = dmar_set_interrupt(iommu);
4160 if (ret)
4161 goto disable_iommu;
4162
4163 iommu_set_root_entry(iommu);
4164 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
4165 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4166 iommu_enable_translation(iommu);
4167
4168 if (si_domain) {
4169 ret = iommu_attach_domain(si_domain, iommu);
4170 if (ret < 0 || si_domain->id != ret)
4171 goto disable_iommu;
4172 domain_attach_iommu(si_domain, iommu);
4173 }
4174
4175 iommu_disable_protect_mem_regions(iommu);
4176 return 0;
4177
4178disable_iommu:
4179 disable_dmar_iommu(iommu);
4180out:
4181 free_dmar_iommu(iommu);
4182 return ret;
4183}
4184
Jiang Liu6b197242014-11-09 22:47:58 +08004185int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
4186{
Jiang Liuffebeb42014-11-09 22:48:02 +08004187 int ret = 0;
4188 struct intel_iommu *iommu = dmaru->iommu;
4189
4190 if (!intel_iommu_enabled)
4191 return 0;
4192 if (iommu == NULL)
4193 return -EINVAL;
4194
4195 if (insert) {
4196 ret = intel_iommu_add(dmaru);
4197 } else {
4198 disable_dmar_iommu(iommu);
4199 free_dmar_iommu(iommu);
4200 }
4201
4202 return ret;
Jiang Liu6b197242014-11-09 22:47:58 +08004203}
4204
Jiang Liu9bdc5312014-01-06 14:18:27 +08004205static void intel_iommu_free_dmars(void)
4206{
4207 struct dmar_rmrr_unit *rmrru, *rmrr_n;
4208 struct dmar_atsr_unit *atsru, *atsr_n;
4209
4210 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
4211 list_del(&rmrru->list);
4212 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
4213 kfree(rmrru);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004214 }
4215
Jiang Liu9bdc5312014-01-06 14:18:27 +08004216 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
4217 list_del(&atsru->list);
4218 intel_iommu_free_atsr(atsru);
4219 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004220}
4221
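/*
 * Walk up from @dev to its PCIe root port and return non-zero when an ATSR
 * unit covers that root port (or an include_all unit exists for the
 * segment), i.e. when ATS may be used for the device.
 */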
4222int dmar_find_matched_atsr_unit(struct pci_dev *dev)
4223{
Jiang Liub683b232014-02-19 14:07:32 +08004224 int i, ret = 1;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004225 struct pci_bus *bus;
David Woodhouse832bd852014-03-07 15:08:36 +00004226 struct pci_dev *bridge = NULL;
4227 struct device *tmp;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004228 struct acpi_dmar_atsr *atsr;
4229 struct dmar_atsr_unit *atsru;
4230
4231 dev = pci_physfn(dev);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004232 for (bus = dev->bus; bus; bus = bus->parent) {
Jiang Liub5f82dd2014-02-19 14:07:31 +08004233 bridge = bus->self;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004234 if (!bridge || !pci_is_pcie(bridge) ||
Yijing Wang62f87c02012-07-24 17:20:03 +08004235 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004236 return 0;
Jiang Liub5f82dd2014-02-19 14:07:31 +08004237 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004238 break;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004239 }
Jiang Liub5f82dd2014-02-19 14:07:31 +08004240 if (!bridge)
4241 return 0;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004242
Jiang Liu0e242612014-02-19 14:07:34 +08004243 rcu_read_lock();
Jiang Liub5f82dd2014-02-19 14:07:31 +08004244 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4245 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4246 if (atsr->segment != pci_domain_nr(dev->bus))
4247 continue;
4248
Jiang Liub683b232014-02-19 14:07:32 +08004249 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
David Woodhouse832bd852014-03-07 15:08:36 +00004250 if (tmp == &bridge->dev)
Jiang Liub683b232014-02-19 14:07:32 +08004251 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08004252
4253 if (atsru->include_all)
Jiang Liub683b232014-02-19 14:07:32 +08004254 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08004255 }
Jiang Liub683b232014-02-19 14:07:32 +08004256 ret = 0;
4257out:
Jiang Liu0e242612014-02-19 14:07:34 +08004258 rcu_read_unlock();
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004259
Jiang Liub683b232014-02-19 14:07:32 +08004260 return ret;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004261}
4262
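/*
 * PCI bus notification helper: on device add/removal, update the cached
 * device scopes of every RMRR and ATSR unit so they keep tracking the
 * devices they cover.
 */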
Jiang Liu59ce0512014-02-19 14:07:35 +08004263int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4264{
4265 int ret = 0;
4266 struct dmar_rmrr_unit *rmrru;
4267 struct dmar_atsr_unit *atsru;
4268 struct acpi_dmar_atsr *atsr;
4269 struct acpi_dmar_reserved_memory *rmrr;
4270
4271 if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
4272 return 0;
4273
4274 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
4275 rmrr = container_of(rmrru->hdr,
4276 struct acpi_dmar_reserved_memory, header);
4277 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4278 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
4279 ((void *)rmrr) + rmrr->header.length,
4280 rmrr->segment, rmrru->devices,
4281 rmrru->devices_cnt);
Jiang Liu27e24952014-06-20 15:08:06 +08004282			if (ret < 0)
Jiang Liu59ce0512014-02-19 14:07:35 +08004283 return ret;
4284 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
Jiang Liu27e24952014-06-20 15:08:06 +08004285 dmar_remove_dev_scope(info, rmrr->segment,
4286 rmrru->devices, rmrru->devices_cnt);
Jiang Liu59ce0512014-02-19 14:07:35 +08004287 }
4288 }
4289
4290 list_for_each_entry(atsru, &dmar_atsr_units, list) {
4291 if (atsru->include_all)
4292 continue;
4293
4294 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4295 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4296 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4297 (void *)atsr + atsr->header.length,
4298 atsr->segment, atsru->devices,
4299 atsru->devices_cnt);
4300 if (ret > 0)
4301 break;
4302			else if (ret < 0)
4303 return ret;
4304 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
4305 if (dmar_remove_dev_scope(info, atsr->segment,
4306 atsru->devices, atsru->devices_cnt))
4307 break;
4308 }
4309 }
4310
4311 return 0;
4312}
4313
Fenghua Yu99dcade2009-11-11 07:23:06 -08004314/*
4315 * Here we only respond to a device being unbound from its driver.
4316 *
4317 * An added device is not attached to its DMAR domain here yet. That happens
4318 * when the device is first mapped to an iova.
4319 */
4320static int device_notifier(struct notifier_block *nb,
4321 unsigned long action, void *data)
4322{
4323 struct device *dev = data;
Fenghua Yu99dcade2009-11-11 07:23:06 -08004324 struct dmar_domain *domain;
4325
David Woodhouse3d891942014-03-06 15:59:26 +00004326 if (iommu_dummy(dev))
David Woodhouse44cd6132009-12-02 10:18:30 +00004327 return 0;
4328
Joerg Roedel1196c2f2014-09-30 13:02:03 +02004329 if (action != BUS_NOTIFY_REMOVED_DEVICE)
Jiang Liu7e7dfab2014-02-19 14:07:23 +08004330 return 0;
4331
David Woodhouse1525a292014-03-06 16:19:30 +00004332 domain = find_domain(dev);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004333 if (!domain)
4334 return 0;
4335
Jiang Liu3a5670e2014-02-19 14:07:33 +08004336 down_read(&dmar_global_lock);
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004337 domain_remove_one_dev_info(domain, dev);
Jiang Liuab8dfe22014-07-11 14:19:27 +08004338 if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
Jiang Liu7e7dfab2014-02-19 14:07:23 +08004339 domain_exit(domain);
Jiang Liu3a5670e2014-02-19 14:07:33 +08004340 up_read(&dmar_global_lock);
Alex Williamsona97590e2011-03-04 14:52:16 -07004341
Fenghua Yu99dcade2009-11-11 07:23:06 -08004342 return 0;
4343}
4344
4345static struct notifier_block device_nb = {
4346 .notifier_call = device_notifier,
4347};
4348
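/*
 * Memory hotplug notifier for the static identity map: extend si_domain's
 * identity mapping when memory goes online, and tear down the mappings,
 * IOVAs and IOTLB entries when it goes offline again.
 */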
Jiang Liu75f05562014-02-19 14:07:37 +08004349static int intel_iommu_memory_notifier(struct notifier_block *nb,
4350 unsigned long val, void *v)
4351{
4352 struct memory_notify *mhp = v;
4353 unsigned long long start, end;
4354 unsigned long start_vpfn, last_vpfn;
4355
4356 switch (val) {
4357 case MEM_GOING_ONLINE:
4358 start = mhp->start_pfn << PAGE_SHIFT;
4359 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4360 if (iommu_domain_identity_map(si_domain, start, end)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004361 pr_warn("Failed to build identity map for [%llx-%llx]\n",
Jiang Liu75f05562014-02-19 14:07:37 +08004362 start, end);
4363 return NOTIFY_BAD;
4364 }
4365 break;
4366
4367 case MEM_OFFLINE:
4368 case MEM_CANCEL_ONLINE:
4369 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4370 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4371 while (start_vpfn <= last_vpfn) {
4372 struct iova *iova;
4373 struct dmar_drhd_unit *drhd;
4374 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00004375 struct page *freelist;
Jiang Liu75f05562014-02-19 14:07:37 +08004376
4377 iova = find_iova(&si_domain->iovad, start_vpfn);
4378 if (iova == NULL) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004379 pr_debug("Failed get IOVA for PFN %lx\n",
Jiang Liu75f05562014-02-19 14:07:37 +08004380 start_vpfn);
4381 break;
4382 }
4383
4384 iova = split_and_remove_iova(&si_domain->iovad, iova,
4385 start_vpfn, last_vpfn);
4386 if (iova == NULL) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004387 pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
Jiang Liu75f05562014-02-19 14:07:37 +08004388 start_vpfn, last_vpfn);
4389 return NOTIFY_BAD;
4390 }
4391
David Woodhouseea8ea462014-03-05 17:09:32 +00004392 freelist = domain_unmap(si_domain, iova->pfn_lo,
4393 iova->pfn_hi);
4394
Jiang Liu75f05562014-02-19 14:07:37 +08004395 rcu_read_lock();
4396 for_each_active_iommu(iommu, drhd)
4397 iommu_flush_iotlb_psi(iommu, si_domain->id,
Jiang Liua156ef92014-07-11 14:19:36 +08004398 iova->pfn_lo, iova_size(iova),
David Woodhouseea8ea462014-03-05 17:09:32 +00004399 !freelist, 0);
Jiang Liu75f05562014-02-19 14:07:37 +08004400 rcu_read_unlock();
David Woodhouseea8ea462014-03-05 17:09:32 +00004401 dma_free_pagelist(freelist);
Jiang Liu75f05562014-02-19 14:07:37 +08004402
4403 start_vpfn = iova->pfn_hi + 1;
4404 free_iova_mem(iova);
4405 }
4406 break;
4407 }
4408
4409 return NOTIFY_OK;
4410}
4411
4412static struct notifier_block intel_iommu_memory_nb = {
4413 .notifier_call = intel_iommu_memory_notifier,
4414 .priority = 0
4415};
4416
Alex Williamsona5459cf2014-06-12 16:12:31 -06004417
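/*
 * Per-IOMMU sysfs attributes, exposed under the "intel-iommu" group:
 * hardware version, register base address, capability registers and domain
 * usage counters.
 */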
4418static ssize_t intel_iommu_show_version(struct device *dev,
4419 struct device_attribute *attr,
4420 char *buf)
4421{
4422 struct intel_iommu *iommu = dev_get_drvdata(dev);
4423 u32 ver = readl(iommu->reg + DMAR_VER_REG);
4424 return sprintf(buf, "%d:%d\n",
4425 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4426}
4427static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4428
4429static ssize_t intel_iommu_show_address(struct device *dev,
4430 struct device_attribute *attr,
4431 char *buf)
4432{
4433 struct intel_iommu *iommu = dev_get_drvdata(dev);
4434 return sprintf(buf, "%llx\n", iommu->reg_phys);
4435}
4436static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4437
4438static ssize_t intel_iommu_show_cap(struct device *dev,
4439 struct device_attribute *attr,
4440 char *buf)
4441{
4442 struct intel_iommu *iommu = dev_get_drvdata(dev);
4443 return sprintf(buf, "%llx\n", iommu->cap);
4444}
4445static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4446
4447static ssize_t intel_iommu_show_ecap(struct device *dev,
4448 struct device_attribute *attr,
4449 char *buf)
4450{
4451 struct intel_iommu *iommu = dev_get_drvdata(dev);
4452 return sprintf(buf, "%llx\n", iommu->ecap);
4453}
4454static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4455
Alex Williamson2238c082015-07-14 15:24:53 -06004456static ssize_t intel_iommu_show_ndoms(struct device *dev,
4457 struct device_attribute *attr,
4458 char *buf)
4459{
4460 struct intel_iommu *iommu = dev_get_drvdata(dev);
4461 return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
4462}
4463static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
4464
4465static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
4466 struct device_attribute *attr,
4467 char *buf)
4468{
4469 struct intel_iommu *iommu = dev_get_drvdata(dev);
4470 return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
4471 cap_ndoms(iommu->cap)));
4472}
4473static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
4474
Alex Williamsona5459cf2014-06-12 16:12:31 -06004475static struct attribute *intel_iommu_attrs[] = {
4476 &dev_attr_version.attr,
4477 &dev_attr_address.attr,
4478 &dev_attr_cap.attr,
4479 &dev_attr_ecap.attr,
Alex Williamson2238c082015-07-14 15:24:53 -06004480 &dev_attr_domains_supported.attr,
4481 &dev_attr_domains_used.attr,
Alex Williamsona5459cf2014-06-12 16:12:31 -06004482 NULL,
4483};
4484
4485static struct attribute_group intel_iommu_group = {
4486 .name = "intel-iommu",
4487 .attrs = intel_iommu_attrs,
4488};
4489
4490const struct attribute_group *intel_iommu_groups[] = {
4491 &intel_iommu_group,
4492 NULL,
4493};
4494
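/*
 * Main initialization entry point: parse the DMAR table and device scopes,
 * set up DMA remapping for every unit, install intel_dma_ops as the DMA API
 * backend and register the IOMMU ops plus bus and memory notifiers.
 */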
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004495int __init intel_iommu_init(void)
4496{
Jiang Liu9bdc5312014-01-06 14:18:27 +08004497 int ret = -ENODEV;
Takao Indoh3a93c842013-04-23 17:35:03 +09004498 struct dmar_drhd_unit *drhd;
Jiang Liu7c919772014-01-06 14:18:18 +08004499 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004500
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004501 /* VT-d is required for a TXT/tboot launch, so enforce that */
4502 force_on = tboot_force_iommu();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004503
Jiang Liu3a5670e2014-02-19 14:07:33 +08004504 if (iommu_init_mempool()) {
4505 if (force_on)
4506 panic("tboot: Failed to initialize iommu memory\n");
4507 return -ENOMEM;
4508 }
4509
4510 down_write(&dmar_global_lock);
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004511 if (dmar_table_init()) {
4512 if (force_on)
4513 panic("tboot: Failed to initialize DMAR table\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004514 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004515 }
4516
Suresh Siddhac2c72862011-08-23 17:05:19 -07004517 if (dmar_dev_scope_init() < 0) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004518 if (force_on)
4519 panic("tboot: Failed to initialize DMAR device scope\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004520 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004521 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -07004522
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09004523 if (no_iommu || dmar_disabled)
Jiang Liu9bdc5312014-01-06 14:18:27 +08004524 goto out_free_dmar;
Suresh Siddha2ae21012008-07-10 11:16:43 -07004525
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004526 if (list_empty(&dmar_rmrr_units))
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004527 pr_info("No RMRR found\n");
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004528
4529 if (list_empty(&dmar_atsr_units))
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004530 pr_info("No ATSR found\n");
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004531
Joseph Cihula51a63e62011-03-21 11:04:24 -07004532 if (dmar_init_reserved_ranges()) {
4533 if (force_on)
4534 panic("tboot: Failed to reserve iommu ranges\n");
Jiang Liu3a5670e2014-02-19 14:07:33 +08004535 goto out_free_reserved_range;
Joseph Cihula51a63e62011-03-21 11:04:24 -07004536 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004537
4538 init_no_remapping_devices();
4539
Joseph Cihulab7792602011-05-03 00:08:37 -07004540 ret = init_dmars();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004541 if (ret) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004542 if (force_on)
4543 panic("tboot: Failed to initialize DMARs\n");
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004544 pr_err("Initialization failed\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004545 goto out_free_reserved_range;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004546 }
Jiang Liu3a5670e2014-02-19 14:07:33 +08004547 up_write(&dmar_global_lock);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004548 pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004549
mark gross5e0d2a62008-03-04 15:22:08 -08004550 init_timer(&unmap_timer);
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09004551#ifdef CONFIG_SWIOTLB
4552 swiotlb = 0;
4553#endif
David Woodhouse19943b02009-08-04 16:19:20 +01004554 dma_ops = &intel_dma_ops;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07004555
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004556 init_iommu_pm_ops();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004557
Alex Williamsona5459cf2014-06-12 16:12:31 -06004558 for_each_active_iommu(iommu, drhd)
4559 iommu->iommu_dev = iommu_device_create(NULL, iommu,
4560 intel_iommu_groups,
Kees Cook2439d4a2015-07-24 16:27:57 -07004561 "%s", iommu->name);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004562
Joerg Roedel4236d97d2011-09-06 17:56:07 +02004563 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004564 bus_register_notifier(&pci_bus_type, &device_nb);
Jiang Liu75f05562014-02-19 14:07:37 +08004565 if (si_domain && !hw_pass_through)
4566 register_memory_notifier(&intel_iommu_memory_nb);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004567
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -02004568 intel_iommu_enabled = 1;
4569
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004570 return 0;
Jiang Liu9bdc5312014-01-06 14:18:27 +08004571
4572out_free_reserved_range:
4573 put_iova_domain(&reserved_iova_list);
Jiang Liu9bdc5312014-01-06 14:18:27 +08004574out_free_dmar:
4575 intel_iommu_free_dmars();
Jiang Liu3a5670e2014-02-19 14:07:33 +08004576 up_write(&dmar_global_lock);
4577 iommu_exit_mempool();
Jiang Liu9bdc5312014-01-06 14:18:27 +08004578 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004579}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07004580
Alex Williamson579305f2014-07-03 09:51:43 -06004581static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4582{
4583 struct intel_iommu *iommu = opaque;
4584
4585 iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4586 return 0;
4587}
4588
4589/*
4590 * NB - intel-iommu lacks any sort of reference counting for the users of
4591 * dependent devices. If multiple endpoints have intersecting dependent
4592 * devices, unbinding the driver from any one of them will possibly leave
4593 * the others unable to operate.
4594 */
Han, Weidong3199aa62009-02-26 17:31:12 +08004595static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004596 struct device *dev)
Han, Weidong3199aa62009-02-26 17:31:12 +08004597{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004598 if (!iommu || !dev || !dev_is_pci(dev))
Han, Weidong3199aa62009-02-26 17:31:12 +08004599 return;
4600
Alex Williamson579305f2014-07-03 09:51:43 -06004601 pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
Han, Weidong3199aa62009-02-26 17:31:12 +08004602}
4603
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004604static void domain_remove_one_dev_info(struct dmar_domain *domain,
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004605 struct device *dev)
Weidong Hanc7151a82008-12-08 22:51:37 +08004606{
Yijing Wangbca2b912013-10-31 17:26:04 +08004607 struct device_domain_info *info, *tmp;
Weidong Hanc7151a82008-12-08 22:51:37 +08004608 struct intel_iommu *iommu;
4609 unsigned long flags;
Quentin Lambert2f119c72015-02-06 10:59:53 +01004610 bool found = false;
David Woodhouse156baca2014-03-09 14:00:57 -07004611 u8 bus, devfn;
Weidong Hanc7151a82008-12-08 22:51:37 +08004612
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004613 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08004614 if (!iommu)
4615 return;
4616
4617 spin_lock_irqsave(&device_domain_lock, flags);
Yijing Wangbca2b912013-10-31 17:26:04 +08004618 list_for_each_entry_safe(info, tmp, &domain->devices, link) {
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004619 if (info->iommu == iommu && info->bus == bus &&
4620 info->devfn == devfn) {
David Woodhouse109b9b02012-05-25 17:43:02 +01004621 unlink_domain_info(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08004622 spin_unlock_irqrestore(&device_domain_lock, flags);
4623
Yu Zhao93a23a72009-05-18 13:51:37 +08004624 iommu_disable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08004625 iommu_detach_dev(iommu, info->bus, info->devfn);
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004626 iommu_detach_dependent_devices(iommu, dev);
Weidong Hanc7151a82008-12-08 22:51:37 +08004627 free_devinfo_mem(info);
4628
4629 spin_lock_irqsave(&device_domain_lock, flags);
4630
4631 if (found)
4632 break;
4633 else
4634 continue;
4635 }
4636
4637 /* if there is no other devices under the same iommu
4638 * owned by this domain, clear this iommu in iommu_bmp
4639 * update iommu count and coherency
4640 */
David Woodhouse8bbc4412014-03-09 13:52:37 -07004641 if (info->iommu == iommu)
Quentin Lambert2f119c72015-02-06 10:59:53 +01004642 found = true;
Weidong Hanc7151a82008-12-08 22:51:37 +08004643 }
4644
Roland Dreier3e7abe22011-07-20 06:22:21 -07004645 spin_unlock_irqrestore(&device_domain_lock, flags);
4646
Weidong Hanc7151a82008-12-08 22:51:37 +08004647	if (!found) {
Jiang Liufb170fb2014-07-11 14:19:28 +08004648 domain_detach_iommu(domain, iommu);
4649 if (!domain_type_is_vm_or_si(domain))
4650 iommu_detach_domain(domain, iommu);
Weidong Hanc7151a82008-12-08 22:51:37 +08004651 }
Weidong Hanc7151a82008-12-08 22:51:37 +08004652}
4653
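/*
 * Initialize a dmar_domain created through the IOMMU API (as opposed to one
 * built internally for the DMA API): set up its IOVA allocator, address
 * widths and top-level page table.
 */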
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004654static int md_domain_init(struct dmar_domain *domain, int guest_width)
Weidong Han5e98c4b2008-12-08 23:03:27 +08004655{
4656 int adjust_width;
4657
Robin Murphy0fb5fe82015-01-12 17:51:16 +00004658 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
4659 DMA_32BIT_PFN);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004660 domain_reserve_special_ranges(domain);
4661
4662 /* calculate AGAW */
4663 domain->gaw = guest_width;
4664 adjust_width = guestwidth_to_adjustwidth(guest_width);
4665 domain->agaw = width_to_agaw(adjust_width);
4666
Weidong Han5e98c4b2008-12-08 23:03:27 +08004667 domain->iommu_coherency = 0;
Sheng Yangc5b15252009-08-06 13:31:56 +08004668 domain->iommu_snooping = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01004669 domain->iommu_superpage = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004670 domain->max_addr = 0;
Weidong Han5e98c4b2008-12-08 23:03:27 +08004671
4672 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07004673 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004674 if (!domain->pgd)
4675 return -ENOMEM;
4676 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4677 return 0;
4678}
4679
Joerg Roedel00a77de2015-03-26 13:43:08 +01004680static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
Kay, Allen M38717942008-09-09 18:37:29 +03004681{
Joerg Roedel5d450802008-12-03 14:52:32 +01004682 struct dmar_domain *dmar_domain;
Joerg Roedel00a77de2015-03-26 13:43:08 +01004683 struct iommu_domain *domain;
4684
4685 if (type != IOMMU_DOMAIN_UNMANAGED)
4686 return NULL;
Kay, Allen M38717942008-09-09 18:37:29 +03004687
Jiang Liuab8dfe22014-07-11 14:19:27 +08004688 dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
Joerg Roedel5d450802008-12-03 14:52:32 +01004689 if (!dmar_domain) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004690 pr_err("Can't allocate dmar_domain\n");
Joerg Roedel00a77de2015-03-26 13:43:08 +01004691 return NULL;
Kay, Allen M38717942008-09-09 18:37:29 +03004692 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004693 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004694 pr_err("Domain initialization failed\n");
Jiang Liu92d03cc2014-02-19 14:07:28 +08004695 domain_exit(dmar_domain);
Joerg Roedel00a77de2015-03-26 13:43:08 +01004696 return NULL;
Kay, Allen M38717942008-09-09 18:37:29 +03004697 }
Allen Kay8140a952011-10-14 12:32:17 -07004698 domain_update_iommu_cap(dmar_domain);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004699
Joerg Roedel00a77de2015-03-26 13:43:08 +01004700 domain = &dmar_domain->domain;
Joerg Roedel8a0e7152012-01-26 19:40:54 +01004701 domain->geometry.aperture_start = 0;
4702 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4703 domain->geometry.force_aperture = true;
4704
Joerg Roedel00a77de2015-03-26 13:43:08 +01004705 return domain;
Kay, Allen M38717942008-09-09 18:37:29 +03004706}
Kay, Allen M38717942008-09-09 18:37:29 +03004707
Joerg Roedel00a77de2015-03-26 13:43:08 +01004708static void intel_iommu_domain_free(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03004709{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004710 domain_exit(to_dmar_domain(domain));
Kay, Allen M38717942008-09-09 18:37:29 +03004711}
Kay, Allen M38717942008-09-09 18:37:29 +03004712
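/*
 * IOMMU API attach callback: RMRR-locked devices are refused; otherwise
 * detach the device from any previous domain, check that the IOMMU's
 * address width covers the domain's mapped addresses (trimming extra
 * page-table levels if needed) and add the device with a multi-level
 * context entry.
 */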
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004713static int intel_iommu_attach_device(struct iommu_domain *domain,
4714 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03004715{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004716 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004717 struct intel_iommu *iommu;
4718 int addr_width;
David Woodhouse156baca2014-03-09 14:00:57 -07004719 u8 bus, devfn;
Kay, Allen M38717942008-09-09 18:37:29 +03004720
Alex Williamsonc875d2c2014-07-03 09:57:02 -06004721 if (device_is_rmrr_locked(dev)) {
4722 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
4723 return -EPERM;
4724 }
4725
David Woodhouse7207d8f2014-03-09 16:31:06 -07004726 /* normally dev is not mapped */
4727 if (unlikely(domain_context_mapped(dev))) {
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004728 struct dmar_domain *old_domain;
4729
David Woodhouse1525a292014-03-06 16:19:30 +00004730 old_domain = find_domain(dev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004731 if (old_domain) {
Jiang Liuab8dfe22014-07-11 14:19:27 +08004732 if (domain_type_is_vm_or_si(dmar_domain))
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004733 domain_remove_one_dev_info(old_domain, dev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004734 else
4735 domain_remove_dev_info(old_domain);
Joerg Roedel62c22162014-12-09 12:56:45 +01004736
4737 if (!domain_type_is_vm_or_si(old_domain) &&
4738 list_empty(&old_domain->devices))
4739 domain_exit(old_domain);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004740 }
4741 }
4742
David Woodhouse156baca2014-03-09 14:00:57 -07004743 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004744 if (!iommu)
4745 return -ENODEV;
4746
4747 /* check if this iommu agaw is sufficient for max mapped address */
4748 addr_width = agaw_to_width(iommu->agaw);
Tom Lyona99c47a2010-05-17 08:20:45 +01004749 if (addr_width > cap_mgaw(iommu->cap))
4750 addr_width = cap_mgaw(iommu->cap);
4751
4752 if (dmar_domain->max_addr > (1LL << addr_width)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004753		pr_err("%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
Tom Lyona99c47a2010-05-17 08:20:45 +01004755		       __func__, addr_width, dmar_domain->max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004756 return -EFAULT;
4757 }
Tom Lyona99c47a2010-05-17 08:20:45 +01004758 dmar_domain->gaw = addr_width;
4759
4760 /*
4761 * Knock out extra levels of page tables if necessary
4762 */
4763 while (iommu->agaw < dmar_domain->agaw) {
4764 struct dma_pte *pte;
4765
4766 pte = dmar_domain->pgd;
4767 if (dma_pte_present(pte)) {
Sheng Yang25cbff12010-06-12 19:21:42 +08004768 dmar_domain->pgd = (struct dma_pte *)
4769 phys_to_virt(dma_pte_addr(pte));
Jan Kiszka7a661012010-11-02 08:05:51 +01004770 free_pgtable_page(pte);
Tom Lyona99c47a2010-05-17 08:20:45 +01004771 }
4772 dmar_domain->agaw--;
4773 }
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004774
David Woodhouse5913c9b2014-03-09 16:27:31 -07004775 return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004776}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004777
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004778static void intel_iommu_detach_device(struct iommu_domain *domain,
4779 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03004780{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004781 domain_remove_one_dev_info(to_dmar_domain(domain), dev);
Kay, Allen M38717942008-09-09 18:37:29 +03004782}
Kay, Allen M38717942008-09-09 18:37:29 +03004783
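/*
 * IOMMU API map callback: translate IOMMU_READ/WRITE/CACHE into DMA PTE
 * bits, grow the domain's max_addr if necessary (failing when the guest
 * address width cannot cover it) and install the page-table entries.
 */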
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004784static int intel_iommu_map(struct iommu_domain *domain,
4785 unsigned long iova, phys_addr_t hpa,
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004786 size_t size, int iommu_prot)
Kay, Allen M38717942008-09-09 18:37:29 +03004787{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004788 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004789 u64 max_addr;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004790 int prot = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004791 int ret;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004792
Joerg Roedeldde57a22008-12-03 15:04:09 +01004793 if (iommu_prot & IOMMU_READ)
4794 prot |= DMA_PTE_READ;
4795 if (iommu_prot & IOMMU_WRITE)
4796 prot |= DMA_PTE_WRITE;
Sheng Yang9cf06692009-03-18 15:33:07 +08004797 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
4798 prot |= DMA_PTE_SNP;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004799
David Woodhouse163cc522009-06-28 00:51:17 +01004800 max_addr = iova + size;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004801 if (dmar_domain->max_addr < max_addr) {
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004802 u64 end;
4803
4804 /* check if minimum agaw is sufficient for mapped address */
Tom Lyon8954da12010-05-17 08:19:52 +01004805 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004806 if (end < max_addr) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004807			pr_err("%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
Tom Lyon8954da12010-05-17 08:19:52 +01004809			       __func__, dmar_domain->gaw, max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004810 return -EFAULT;
4811 }
Joerg Roedeldde57a22008-12-03 15:04:09 +01004812 dmar_domain->max_addr = max_addr;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004813 }
David Woodhousead051222009-06-28 14:22:28 +01004814 /* Round up size to next multiple of PAGE_SIZE, if it and
4815 the low bits of hpa would take us onto the next page */
David Woodhouse88cb6a72009-06-28 15:03:06 +01004816 size = aligned_nrpages(hpa, size);
David Woodhousead051222009-06-28 14:22:28 +01004817 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4818 hpa >> VTD_PAGE_SHIFT, size, prot);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004819 return ret;
Kay, Allen M38717942008-09-09 18:37:29 +03004820}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004821
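/*
 * IOMMU API unmap callback: round the request up to the size of the
 * underlying (possibly large-page) mapping, clear the page tables, flush
 * the IOTLB on every IOMMU using this domain and free the collected
 * page-table pages afterwards.
 */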
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004822static size_t intel_iommu_unmap(struct iommu_domain *domain,
David Woodhouseea8ea462014-03-05 17:09:32 +00004823 unsigned long iova, size_t size)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004824{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004825 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
David Woodhouseea8ea462014-03-05 17:09:32 +00004826 struct page *freelist = NULL;
4827 struct intel_iommu *iommu;
4828 unsigned long start_pfn, last_pfn;
4829 unsigned int npages;
4830 int iommu_id, num, ndomains, level = 0;
Sheng Yang4b99d352009-07-08 11:52:52 +01004831
David Woodhouse5cf0a762014-03-19 16:07:49 +00004832 /* Cope with horrid API which requires us to unmap more than the
4833 size argument if it happens to be a large-page mapping. */
4834 if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
4835 BUG();
4836
4837 if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
4838 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
4839
David Woodhouseea8ea462014-03-05 17:09:32 +00004840 start_pfn = iova >> VTD_PAGE_SHIFT;
4841 last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
4842
4843 freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
4844
4845 npages = last_pfn - start_pfn + 1;
4846
4847 for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
4848 iommu = g_iommus[iommu_id];
4849
4850 /*
4851 * find bit position of dmar_domain
4852 */
4853 ndomains = cap_ndoms(iommu->cap);
4854 for_each_set_bit(num, iommu->domain_ids, ndomains) {
4855 if (iommu->domains[num] == dmar_domain)
4856 iommu_flush_iotlb_psi(iommu, num, start_pfn,
4857 npages, !freelist, 0);
4858 }
4859
4860 }
4861
4862 dma_free_pagelist(freelist);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004863
David Woodhouse163cc522009-06-28 00:51:17 +01004864 if (dmar_domain->max_addr == iova + size)
4865 dmar_domain->max_addr = iova;
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004866
David Woodhouse5cf0a762014-03-19 16:07:49 +00004867 return size;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004868}
Kay, Allen M38717942008-09-09 18:37:29 +03004869
Joerg Roedeld14d6572008-12-03 15:06:57 +01004870static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
Varun Sethibb5547a2013-03-29 01:23:58 +05304871 dma_addr_t iova)
Kay, Allen M38717942008-09-09 18:37:29 +03004872{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004873 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
Kay, Allen M38717942008-09-09 18:37:29 +03004874 struct dma_pte *pte;
David Woodhouse5cf0a762014-03-19 16:07:49 +00004875 int level = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004876 u64 phys = 0;
Kay, Allen M38717942008-09-09 18:37:29 +03004877
David Woodhouse5cf0a762014-03-19 16:07:49 +00004878 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
Kay, Allen M38717942008-09-09 18:37:29 +03004879 if (pte)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004880 phys = dma_pte_addr(pte);
Kay, Allen M38717942008-09-09 18:37:29 +03004881
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004882 return phys;
Kay, Allen M38717942008-09-09 18:37:29 +03004883}
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004884
Joerg Roedel5d587b82014-09-05 10:50:45 +02004885static bool intel_iommu_capable(enum iommu_cap cap)
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004886{
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004887 if (cap == IOMMU_CAP_CACHE_COHERENCY)
Joerg Roedel5d587b82014-09-05 10:50:45 +02004888 return domain_update_iommu_snooping(NULL) == 1;
Tom Lyon323f99c2010-07-02 16:56:14 -04004889 if (cap == IOMMU_CAP_INTR_REMAP)
Joerg Roedel5d587b82014-09-05 10:50:45 +02004890 return irq_remapping_enabled == 1;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004891
Joerg Roedel5d587b82014-09-05 10:50:45 +02004892 return false;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004893}
4894
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004895static int intel_iommu_add_device(struct device *dev)
Alex Williamson70ae6f02011-10-21 15:56:11 -04004896{
Alex Williamsona5459cf2014-06-12 16:12:31 -06004897 struct intel_iommu *iommu;
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004898 struct iommu_group *group;
David Woodhouse156baca2014-03-09 14:00:57 -07004899 u8 bus, devfn;
Alex Williamson70ae6f02011-10-21 15:56:11 -04004900
Alex Williamsona5459cf2014-06-12 16:12:31 -06004901 iommu = device_to_iommu(dev, &bus, &devfn);
4902 if (!iommu)
Alex Williamson70ae6f02011-10-21 15:56:11 -04004903 return -ENODEV;
4904
Alex Williamsona5459cf2014-06-12 16:12:31 -06004905 iommu_device_link(iommu->iommu_dev, dev);
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004906
Alex Williamsone17f9ff2014-07-03 09:51:37 -06004907 group = iommu_group_get_for_dev(dev);
Alex Williamson783f1572012-05-30 14:19:43 -06004908
Alex Williamsone17f9ff2014-07-03 09:51:37 -06004909 if (IS_ERR(group))
4910 return PTR_ERR(group);
Alex Williamson70ae6f02011-10-21 15:56:11 -04004911
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004912 iommu_group_put(group);
Alex Williamsone17f9ff2014-07-03 09:51:37 -06004913 return 0;
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004914}
4915
4916static void intel_iommu_remove_device(struct device *dev)
4917{
Alex Williamsona5459cf2014-06-12 16:12:31 -06004918 struct intel_iommu *iommu;
4919 u8 bus, devfn;
4920
4921 iommu = device_to_iommu(dev, &bus, &devfn);
4922 if (!iommu)
4923 return;
4924
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004925 iommu_group_remove_device(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004926
4927 iommu_device_unlink(iommu->iommu_dev, dev);
Alex Williamson70ae6f02011-10-21 15:56:11 -04004928}
4929
Thierry Redingb22f6432014-06-27 09:03:12 +02004930static const struct iommu_ops intel_iommu_ops = {
Joerg Roedel5d587b82014-09-05 10:50:45 +02004931 .capable = intel_iommu_capable,
Joerg Roedel00a77de2015-03-26 13:43:08 +01004932 .domain_alloc = intel_iommu_domain_alloc,
4933 .domain_free = intel_iommu_domain_free,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004934 .attach_dev = intel_iommu_attach_device,
4935 .detach_dev = intel_iommu_detach_device,
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004936 .map = intel_iommu_map,
4937 .unmap = intel_iommu_unmap,
Olav Haugan315786e2014-10-25 09:55:16 -07004938 .map_sg = default_iommu_map_sg,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004939 .iova_to_phys = intel_iommu_iova_to_phys,
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004940 .add_device = intel_iommu_add_device,
4941 .remove_device = intel_iommu_remove_device,
Ohad Ben-Cohen6d1c56a2011-11-10 11:32:30 +02004942 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004943};
David Woodhouse9af88142009-02-13 23:18:03 +00004944
Daniel Vetter94526182013-01-20 23:50:13 +01004945static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
4946{
4947 /* G4x/GM45 integrated gfx dmar support is totally busted. */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004948 pr_info("Disabling IOMMU for graphics on this chipset\n");
Daniel Vetter94526182013-01-20 23:50:13 +01004949 dmar_map_gfx = 0;
4950}
4951
4952DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
4953DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
4954DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
4955DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
4956DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
4957DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
4958DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
4959
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08004960static void quirk_iommu_rwbf(struct pci_dev *dev)
David Woodhouse9af88142009-02-13 23:18:03 +00004961{
4962 /*
4963 * Mobile 4 Series Chipset neglects to set RWBF capability,
Daniel Vetter210561f2013-01-21 19:48:59 +01004964 * but needs it. Same seems to hold for the desktop versions.
David Woodhouse9af88142009-02-13 23:18:03 +00004965 */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004966 pr_info("Forcing write-buffer flush capability\n");
David Woodhouse9af88142009-02-13 23:18:03 +00004967 rwbf_quirk = 1;
4968}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);

#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)
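
/*
 * GGC is assumed to be the GMCH Graphics Control register in the host
 * bridge's config space; bits 11:8 report how much stolen memory the BIOS
 * reserved for the GTT and whether a VT-enabled (shadow GTT) size was
 * chosen.  The quirk below only looks at bit 11, GGC_MEMORY_VT_ENABLED.
 */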

static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		pr_info("Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);

/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that. We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
		vtisochctrl);
}